from app import db
from .models import Schedule, Skip
from datetime import datetime, date
import pandas as pd
import json
import plotly
import os
from dateutil.relativedelta import relativedelta
from natsort import index_natsorted
import numpy as np
import decimal
import plotly.graph_objs as go


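# Module flow: update_cash() calls calc_schedule() to project schedules, holds, and
# skips into dated transactions, then calc_transactions() to build the upcoming
# transactions table and the running balance; plot_cash() turns the running balance
# into Plotly JSON for the page. An illustrative (commented-out) usage sketch sits
# at the bottom of this file.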
def update_cash(balance, schedules, holds, skips):
    """
    Calculate cash flow with pre-filtered user data.

    Args:
        balance: Current balance amount (Decimal)
        schedules: List of Schedule objects (pre-filtered for user)
        holds: List of Hold objects (pre-filtered for user)
        skips: List of Skip objects (pre-filtered for user)

    Returns:
        trans: DataFrame of upcoming transactions
        run: DataFrame of running balance projections
    """
    # project all scheduled events (plus holds and skips) out to roughly a year
    total = calc_schedule(schedules, holds, skips)

    # build the upcoming-transactions table and the running balance projection
    trans, run = calc_transactions(balance, total)

    return trans, run


def calc_schedule(schedules, holds, skips):
    """
    Process schedules, holds, and skips into projected transactions.

    Args:
        schedules: List of Schedule objects (pre-filtered for user)
        holds: List of Hold objects (pre-filtered for user)
        skips: List of Skip objects (pre-filtered for user)

    Returns:
        DataFrame of all projected transactions
    """
    # number of occurrences to project for each frequency (roughly one year ahead)
    months = 13
    weeks = 53
    years = 1
    quarters = 4
    biweeks = 27

    # Create lookup dictionaries to avoid re-querying
    schedule_objects = {s.name: s for s in schedules}
    skip_objects = {s.name: s for s in skips}

    # Convert schedules to DataFrame
    if schedules:
        df = pd.DataFrame([{
            'name': s.name,
            'startdate': s.startdate.strftime('%Y-%m-%d') if s.startdate else None,
            'firstdate': s.firstdate.strftime('%Y-%m-%d') if s.firstdate else None,
            'frequency': s.frequency,
            'amount': s.amount,
            'type': s.type
        } for s in schedules])
    else:
        # Empty DataFrame if no schedules
        df = pd.DataFrame(columns=['name', 'startdate', 'firstdate', 'frequency', 'amount', 'type'])

    total_dict = {}

    # loop through the schedules and create projected transactions out to the configured horizon
    todaydate = datetime.today().date()
    for i in df.itertuples(index=False):
        format = '%Y-%m-%d'
        name = i.name
        startdate = i.startdate
        firstdate = i.firstdate
        frequency = i.frequency
        amount = i.amount
        type = i.type
        existing = schedule_objects.get(name)
        if not existing:
            continue  # Skip if schedule object not found
        if not firstdate:
            existing.firstdate = datetime.strptime(startdate, format).date()
            firstdate = existing.firstdate.strftime(format)
            db.session.commit()
        if frequency == 'Monthly':
            for k in range(months):
                futuredate = datetime.strptime(startdate, format).date() + relativedelta(months=k)
                futuredateday = futuredate.day
                firstdateday = datetime.strptime(firstdate, format).date().day
                # if the projected day slipped below the original first-occurrence day
                # (e.g. after a short month), nudge it back up by at most 3 days
                if firstdateday > futuredateday:
                    try:
                        for m in range(3):
                            futuredateday += 1
                            if firstdateday >= futuredateday:
                                futuredate = futuredate.replace(day=futuredateday)
                    except ValueError:
                        pass
                # once an occurrence has passed (only checked on weekdays), advance the
                # stored startdate by one month, applying the same day adjustment
                if futuredate <= todaydate and datetime.today().weekday() < 5:
                    existing.startdate = futuredate + relativedelta(months=1)
                    daycheckdate = futuredate + relativedelta(months=1)
                    daycheck = daycheckdate.day
                    if firstdateday > daycheck:
                        try:
                            for m in range(3):
                                daycheck += 1
                                if firstdateday >= daycheck:
                                    existing.startdate = daycheckdate.replace(day=daycheck)
                        except ValueError:
                            pass
                if type == 'Income':
                    # income lands on the previous business day when the date falls on a weekend
                    rollbackdate = datetime.combine(futuredate, datetime.min.time())
                    # Create a new row
                    new_row = {
                        'type': type,
                        'name': name,
                        'amount': amount,
                        'date': pd.tseries.offsets.BDay(1).rollback(rollbackdate).date()
                    }
                    # Append the row to the DataFrame
                    total_dict[len(total_dict)] = new_row
                else:
                    # other rows are snapped to a business day
                    # Create a new row
                    new_row = {
                        'type': type,
                        'name': name,
                        'amount': amount,
                        'date': (futuredate - pd.tseries.offsets.BDay(0)).date()
                    }
                    # Append the row to the DataFrame
                    total_dict[len(total_dict)] = new_row
        elif frequency == 'Weekly':
            for k in range(weeks):
                futuredate = datetime.strptime(startdate, format).date() + relativedelta(weeks=k)
                if futuredate <= todaydate and datetime.today().weekday() < 5:
                    existing.startdate = futuredate + relativedelta(weeks=1)
                # Create a new row
                new_row = {
                    'type': type,
                    'name': name,
                    'amount': amount,
                    'date': (futuredate - pd.tseries.offsets.BDay(0)).date()
                }
                # Append the row to the DataFrame
                total_dict[len(total_dict)] = new_row
        elif frequency == 'Yearly':
            for k in range(years):
                futuredate = datetime.strptime(startdate, format).date() + relativedelta(years=k)
                if futuredate <= todaydate and datetime.today().weekday() < 5:
                    existing.startdate = futuredate + relativedelta(years=1)
                # Create a new row
                new_row = {
                    'type': type,
                    'name': name,
                    'amount': amount,
                    'date': (futuredate - pd.tseries.offsets.BDay(0)).date()
                }
                # Append the row to the DataFrame
                total_dict[len(total_dict)] = new_row
        elif frequency == 'Quarterly':
            for k in range(quarters):
                futuredate = datetime.strptime(startdate, format).date() + relativedelta(months=3 * k)
                futuredateday = futuredate.day
                firstdateday = datetime.strptime(firstdate, format).date().day
                if firstdateday > futuredateday:
                    try:
                        for m in range(3):
                            futuredateday += 1
                            if firstdateday >= futuredateday:
                                futuredate = futuredate.replace(day=futuredateday)
                    except ValueError:
                        pass
                if futuredate <= todaydate and datetime.today().weekday() < 5:
                    existing.startdate = futuredate + relativedelta(months=3)
                    daycheckdate = futuredate + relativedelta(months=3)
                    daycheck = daycheckdate.day
                    if firstdateday > daycheck:
                        try:
                            for m in range(3):
                                daycheck += 1
                                if firstdateday >= daycheck:
                                    existing.startdate = daycheckdate.replace(day=daycheck)
                        except ValueError:
                            pass
                # Create a new row
                new_row = {
                    'type': type,
                    'name': name,
                    'amount': amount,
                    'date': (futuredate - pd.tseries.offsets.BDay(0)).date()
                }
                # Append the row to the DataFrame
                total_dict[len(total_dict)] = new_row
        elif frequency == 'BiWeekly':
            for k in range(biweeks):
                futuredate = datetime.strptime(startdate, format).date() + relativedelta(weeks=2 * k)
                if futuredate <= todaydate and datetime.today().weekday() < 5:
                    existing.startdate = futuredate + relativedelta(weeks=2)
                # Create a new row
                new_row = {
                    'type': type,
                    'name': name,
                    'amount': amount,
                    'date': (futuredate - pd.tseries.offsets.BDay(0)).date()
                }
                # Append the row to the DataFrame
                total_dict[len(total_dict)] = new_row
        elif frequency == 'Onetime':
            futuredate = datetime.strptime(startdate, format).date()
            if futuredate < todaydate:
                db.session.delete(existing)
            else:
                # Create a new row
                new_row = {
                    'type': type,
                    'name': name,
                    'amount': amount,
                    'date': futuredate
                }
                # Append the row to the DataFrame
                total_dict[len(total_dict)] = new_row

    db.session.commit()

    # add the hold items
    for hold in holds:
        # Create a new row
        new_row = {
            'type': hold.type,
            'name': hold.name,
            'amount': hold.amount,
            'date': todaydate + relativedelta(days=1)
        }
        # Append the row to the DataFrame
        total_dict[len(total_dict)] = new_row

    # add the skip items
    for skip in skips:
        format = '%Y-%m-%d'
        skip_date = skip.date if isinstance(skip.date, date) else datetime.strptime(skip.date, format).date()

        if skip_date < todaydate:
            # Delete past skip items
            db.session.delete(skip)
        else:
            # Create a new row
            new_row = {
                'type': skip.type,
                'name': skip.name,
                'amount': skip.amount,
                'date': skip_date
            }
            # Append the row to the DataFrame
            total_dict[len(total_dict)] = new_row

    # Create DataFrame from total_dict
    if total_dict:
        total = pd.DataFrame.from_dict(total_dict, orient="index")
    else:
        # Return empty DataFrame with expected columns
        total = pd.DataFrame(columns=['type', 'name', 'amount', 'date'])

    return total


def calc_transactions(balance, total):
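    """
    Build the upcoming-transactions table and the running balance projection.

    Args:
        balance: Current balance amount (Decimal or float)
        total: DataFrame of projected transactions from calc_schedule

    Returns:
        trans: DataFrame of transactions dated within the next two months
        run: DataFrame of the running balance by date, starting from today's balance
    """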
    # total holds the full table of projected future transactions
    # Check if total DataFrame is empty
    if total.empty:
        # Return empty DataFrames if no transactions
        trans = pd.DataFrame(columns=['name', 'type', 'amount', 'date'])
        # Convert balance to float for type consistency
        run_dict = {0: {'amount': float(balance), 'date': datetime.today().date()}}
        run = pd.DataFrame.from_dict(run_dict, orient="index")
        return trans, run

    df = total.sort_values(by="date", key=lambda x: np.argsort(index_natsorted(total["date"]))).reset_index(drop=True)
    trans_dict = {}
    # collect the next two months (~60 days) of transactions for the transactions table
    todaydate = datetime.today().date()
    todaydateplus = todaydate + relativedelta(months=2)
    for i in df.itertuples(index=False):
        if todaydateplus > i.date > todaydate and "(SKIP)" not in i.name:
            # Create a new row
            new_row = {
                'name': i.name,
                'type': i.type,
                'amount': i.amount,
                'date': i.date
            }
            # Append the row to the DataFrame
            trans_dict[len(trans_dict)] = new_row

    trans = pd.DataFrame.from_dict(trans_dict, orient="index")

    # for schedules marked as expenses, make the value negative for the sum
    # Create a copy to avoid modifying during iteration
    df = df.copy()
    # Convert all amounts to float to avoid Decimal/float mixing
    df['amount'] = df['amount'].astype(float)
    for idx in df.index:
        if df.loc[idx, 'type'] == 'Expense':
            df.loc[idx, 'amount'] = df.loc[idx, 'amount'] * -1

    # group total transactions by date and sum the amounts for each date
    df = df.groupby("date")['amount'].sum().reset_index()

    # loop through the daily sums and add each one to the running balance
    # Convert balance to float to avoid Decimal/float mixing
    runbalance = float(balance)
    run_dict = {}
    # Create a new row
    new_row = {
        'amount': runbalance,
        'date': datetime.today().date()
    }
    # Append the row to the DataFrame
    run_dict[len(run_dict)] = new_row
    for i in df.itertuples(index=False):
        rundate = i.date
        amount = i.amount
        if i.date > todaydate:
            runbalance += amount
            # Create a new row
            new_row = {
                'amount': runbalance,
                'date': rundate
            }
            # Append the row to the DataFrame
            run_dict[len(run_dict)] = new_row

    run = pd.DataFrame.from_dict(run_dict, orient="index")

    return trans, run


def plot_cash(run):
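    """
    Plot the running balance by date as a Plotly line chart.

    Args:
        run: DataFrame of running balance projections (columns: amount, date)

    Returns:
        minbalance: Lowest projected balance, quantized to cents (Decimal)
        graphJSON: Plotly figure serialized to JSON for rendering in the page
    """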
    # plot the running balances by date on a line plot
    df = run.sort_values(by='date', ascending=False)
    # Convert amounts to float to avoid Decimal/float mixing
    df['amount'] = df['amount'].astype(float)
    minbalance = df['amount'].min()
    minbalance = decimal.Decimal(str(minbalance)).quantize(decimal.Decimal('.01'))
    if float(minbalance) >= 0:
        minrange = 0.0
    else:
        minrange = float(minbalance) * 1.1
    maxbalance = 0.0
    todaydate = datetime.today().date()
    todaydateplus = todaydate + relativedelta(months=2)
    for i in df.itertuples(index=False):
        if todaydateplus > i.date > todaydate:
            if i.amount > maxbalance:
                maxbalance = i.amount
    maxrange = maxbalance * 1.1
    start_date = str(datetime.today().date())
    end_date = str(datetime.today().date() + relativedelta(months=2))
    layout = go.Layout(yaxis=dict(range=[minrange, maxrange]), xaxis=dict(range=[start_date, end_date]),
                       margin=dict(l=5, r=20, t=35, b=5), dragmode='pan')
    fig = go.Figure(data=go.Scatter(x=df['date'].values.tolist(), y=df['amount'].values.tolist(),
                                    mode='lines', line=dict(shape='spline', smoothing=0.8)))
    fig.update_layout(layout)
    fig.update_xaxes(title_text='Date')
    fig.update_yaxes(title_text='Amount')
    fig.update_layout(paper_bgcolor="PaleTurquoise")
    fig.update_layout(title="Cash Flow")
    fig.update_layout(xaxis_type='date')
    fig.update_layout(yaxis_tickformat='$,.2f')

    graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)

    return minbalance, graphJSON
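

# Illustrative wiring of these helpers into a view. This is a sketch, not code from
# this app: the route, the per-user attribute names, and the template name are
# assumptions, shown only to make the call order concrete.
#
#     @app.route('/cashflow')
#     @login_required
#     def cashflow():
#         schedules = current_user.schedules   # however the app scopes data per user
#         holds = current_user.holds
#         skips = current_user.skips
#         trans, run = update_cash(current_user.balance, schedules, holds, skips)
#         minbalance, graphJSON = plot_cash(run)
#         return render_template('cashflow.html', trans=trans,
#                                minbalance=minbalance, graphJSON=graphJSON)
#
# On the page, graphJSON is the string handed to plotly.js (e.g. Plotly.newPlot)
# to draw the "Cash Flow" figure.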