import sys
import os.path
import json

import pandas as pd
from tqdm import tqdm

from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials

from modules import get_settings

SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
SAMPLE_RANGE_NAME = 'A1:AA68'
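# OAuth client secret used for the installed-app authorization flow below.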
CREDENTIALS_FILE = 'pull_config/credentials/client_secret_824511649166-rd0kn8jg71odnik0backligb356p0vc8.apps' \
                   '.googleusercontent' \
                   '.com.json'

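# The spreadsheet to read is configured through the bot settings (EXCEL_ID).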
SAMPLE_SPREADSHEET_ID_input = get_settings.get_settings("EXCEL_ID")


def import_from_sheets():
    """Read the configuration spreadsheet through the Google Sheets API.

    :return: the sheet contents as a list of rows; the first row holds the column headers
    :rtype: list
    """
    creds = None
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first time.
    if os.path.exists('token.json'):
        creds = Credentials.from_authorized_user_file('token.json', SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                CREDENTIALS_FILE, SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run.
        with open('token.json', 'w') as token:
            token.write(creds.to_json())

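    # Build the Sheets API client with the cached or freshly obtained credentials.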
    service = build('sheets', 'v4', credentials=creds)

    # Call the Sheets API
    sheet = service.spreadsheets()
    result_input = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID_input,
                                      range=SAMPLE_RANGE_NAME).execute()
    values_input = result_input.get('values', [])

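    # An empty 'values' list means the requested range contained no data.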
    if not values_input:
        print('No data found.')
    return values_input


def get_config():
    """Build the monster configuration from the spreadsheet and write it to disk as JSON.

    :return: None
    :rtype: NoneType
    """
    pd.set_option('mode.chained_assignment', None)
    print("Loading data")
    values_input = import_from_sheets()
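    # The first sheet row holds the column names; the remaining rows are the data.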
    df = pd.DataFrame(values_input[1:], columns=values_input[0])

    print("Transforming data")
    monsters_df = df[["name", "type"]].copy()
    monsters_df["type"] = pd.to_numeric(df["type"])

    triggers = df.drop(['name', 'role', 'type', 'id'], axis=1)
    triggers = triggers.applymap(lambda s: s.lower() if isinstance(s, str) else s)
    # triggers = triggers.applymap(lambda s: unidecode.unidecode(s) if type(s) == str else s)

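    # Build one cleaned-up trigger series per monster row.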
    triggers_list = []
    with tqdm(total=len(triggers), file=sys.stdout) as pbar:
        for row in triggers.itertuples(index=False):
            helpt = pd.Series(row)
            helpt = helpt[~helpt.isna()]
            # Drop empty strings
            helpt = pd.Series(filter(None, helpt))
            # Also add a copy of each trigger with the spaces removed
            for trigger in helpt:
                trigger_nospace = trigger.replace(' ', '')
                if trigger_nospace != trigger:
                    helpt = pd.concat([helpt, pd.Series([trigger_nospace])])
            helpt = helpt.drop_duplicates()
            triggers_list.append(helpt)
            pbar.update(1)

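    # Convert every trigger series to a plain list so it can be serialised to JSON,
    # then attach the lists to the monster table as a 'triggers' column.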
    print("Creating trigger structure")
    triggers_def = []
    with tqdm(total=len(triggers_list), file=sys.stdout) as pbar:
        for i in triggers_list:
            triggers_def.append(list(i))
            pbar.update(1)
    triggers_def_series = pd.Series(triggers_def)
    monsters_df.insert(loc=0, column='triggers', value=triggers_def_series)

    print("Creating output")

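    # Static lookup tables: monster type labels and the spotter milestones.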
    types = {'id': [4, 3, 2, 1, 0], 'label': ["Common", "Event_Likan", "Event_Ulf", "Legendary", "Rare"]}
    types_df = pd.DataFrame(data=types)
    milestones = {'total': [150, 1000, 5000], 'name': ["Rare Spotter", "Legendary Spotter", "Mythic Spotter"]}
    milestones_df = pd.DataFrame(data=milestones)
    json_final = {'milestones': milestones_df, 'types': types_df, 'commands': monsters_df}

    # Convert the dataframes into lists of record dictionaries
    data_dict = {
        key: json_final[key].to_dict(orient='records')
        for key in json_final
    }

    # Write to disk
    with open('json_files/config.json', 'w', encoding='utf8') as f:
        json.dump(
            data_dict,
            f,
            indent=4,
            ensure_ascii=False,
            sort_keys=False
        )
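    # Write a second copy of the same configuration next to the pull_config module.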
    with open('modules/pull_config/output/config.txt', 'w', encoding='utf8') as f:
        json.dump(
            data_dict,
            f,
            indent=4,
            ensure_ascii=False
        )

    print(".json saved")


if __name__ == "__main__":
    get_config()