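"""Pull monster configuration data from a Google Sheet, build trigger lists and
milestone tables, and write the combined result to server_files/config.json."""
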
import pandas as pd
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import json
from modules import get_settings

SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
SAMPLE_RANGE_NAME = 'A1:AA68'
CREDENTIALS_FILE = 'pull_config/credentials/client_secret.com.json'

SAMPLE_SPREADSHEET_ID_input = get_settings.get_settings("EXCEL_ID")


def import_from_sheets():
    """
    Authorize against the Google Sheets API and read the configured range.

    :return: the values of the requested range, as a list of rows (the first row holds the column headers)
    :rtype: list
    """
    token = "token.json"
    creds = None
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first time.
    if os.path.exists(token):
        creds = Credentials.from_authorized_user_file(token, SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                CREDENTIALS_FILE, SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run.
        with open(token, 'w') as token_file:
            token_file.write(creds.to_json())

    service = build('sheets', 'v4', credentials=creds)

    # Call the Sheets API
    sheet = service.spreadsheets()
    result_input = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID_input,
                                      range=SAMPLE_RANGE_NAME).execute()
    values_input = result_input.get('values', [])

    if not values_input:
        print(f"[{import_from_sheets.__name__}]: No data found.")
    return values_input
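
# Note on the returned data (column names beyond those used in get_config are
# illustrative): import_from_sheets() yields the sheet rows as a list of lists
# with the header row first, e.g. [['name', 'role', 'type', 'id', ...],
# ['SomeMonster', '...', '3', ...], ...]; cell values typically arrive as
# strings, hence the pd.to_numeric call in get_config.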


def create_trigger_list(triggers) -> list:
    triggers_list = []
    for row in triggers.itertuples(index=False):
        help_ser = pd.Series(row)
        help_ser = help_ser[~help_ser.isna()]
        # Drop empty strings
        help_ser = pd.Series(filter(None, help_ser))
        # Add a space-free copy of each trigger while keeping the original spelling
        for trigger in help_ser:
            trigger_nospace = trigger.replace(' ', '')
            help_ser = pd.concat([help_ser, pd.Series([trigger_nospace])], ignore_index=True)
        help_ser = help_ser.drop_duplicates()
        triggers_list.append(help_ser)
    return triggers_list
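
# Illustrative example (hypothetical trigger values): for a row containing the
# triggers ['big bad', 'bbad'], create_trigger_list() keeps the originals and
# adds their space-free variants, producing ['big bad', 'bbad', 'bigbad'].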


def create_output(monsters_df) -> dict:
    types = {'id': [4, 3, 2, 1, 0], 'label': ["Common", "Event0", "Event1", "Legendary", "Rare"]}
    types_df = pd.DataFrame(data=types)
    total_milestones = {"Rare Spotter": [150], "tescior": [151], "Pepega Spotter": [1000], "Pog Spotter": [2000],
                        "Pogmare Spotter": [3000],
                        "Legendary Spotter": [4000], "Mythic Spotter": [5000]}
    total_milestones_df = pd.DataFrame(data=total_milestones)
    common_milestones = {"Common Killer": [100], "Common Slayer": [150]}
    common_milestones_df = pd.DataFrame(data=common_milestones)
    json_final = {'total_milestones': total_milestones_df, 'common_milestones': common_milestones_df,
                  'types': types_df, 'commands': monsters_df}
    # Convert the dataframes into lists of record dictionaries
    data_dict = {
        key: json_final[key].to_dict(orient='records')
        for key in json_final
    }
    return data_dict
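
# Rough shape of the dictionary returned by create_output() once serialized to
# JSON (illustrative, not taken from a real run):
# {
#     "total_milestones": [{"Rare Spotter": 150, "tescior": 151, ...}],
#     "common_milestones": [{"Common Killer": 100, "Common Slayer": 150}],
#     "types": [{"id": 4, "label": "Common"}, ..., {"id": 0, "label": "Rare"}],
#     "commands": [{"triggers": [...], "name": ..., "type": ...}, ...]
# }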


def get_config():
    """
    Load the monster sheet, build the trigger structures and write the final
    configuration to server_files/config.json.

    :return: None
    :rtype: None
    """
    pd.set_option('mode.chained_assignment', None)
    print(f"[{get_config.__name__}]: Loading data")
    values_input = import_from_sheets()
    df = pd.DataFrame(values_input[1:], columns=values_input[0])

    print(f"[{get_config.__name__}]: Transforming data")
    monsters_df = df[["name", "type"]].copy()
    monsters_df["type"] = pd.to_numeric(df["type"])

    triggers = df.drop(['name', 'role', 'type', 'id'], axis=1)
    triggers = triggers.applymap(lambda s: s.lower() if isinstance(s, str) else s)

    triggers_list = create_trigger_list(triggers)

    print(f"[{get_config.__name__}]: Creating trigger structure")
    triggers_def = []
    for i in triggers_list:
        triggers_def.append(list(i))
    triggers_def_series = pd.Series(triggers_def)
    monsters_df.insert(loc=0, column='triggers', value=triggers_def_series)

    print(f"[{get_config.__name__}]: Creating output")
    data_dict = create_output(monsters_df)

    # Write to disk
    with open('server_files/config.json', 'w', encoding='utf8') as f:
        json.dump(
            data_dict,
            f,
            indent=4,
            ensure_ascii=False,
            sort_keys=False
        )
    print(f"[{get_config.__name__}]: .json saved")


def main():
    get_config()


if __name__ == "__main__":
    main()