Total Complexity | 20 |
Total Lines | 138 |
Duplicated Lines | 79.71 % |
Changes | 0 |
Duplicate code is one of the most pungent code smells. A rule that is often used is to re-structure code once it is duplicated in three or more places.
Common duplication problems and their corresponding solutions are:
1 | import pandas as pd |
||
2 | import sys |
||
3 | from tqdm import tqdm |
||
4 | import os.path |
||
5 | from os import path |
||
6 | from googleapiclient.discovery import build |
||
7 | from google_auth_oauthlib.flow import InstalledAppFlow |
||
8 | from google.auth.transport.requests import Request |
||
9 | from google.oauth2.credentials import Credentials |
||
10 | import json |
||
11 | from modules import get_settings |
||
12 | |||
# OAuth scope: full read/write access to the user's spreadsheets.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']

# Cell range pulled from the sheet (columns A-AA, rows 1-68).
SAMPLE_RANGE_NAME = 'A1:AA68'

# Path to the OAuth client-secret file downloaded from Google Cloud Console.
# BUG FIX: the original literal ended with a stray trailing space
# ('.com.json ') which would make the file unfindable on disk.
CREDENTIALS_FILE = 'pull_config/credentials/client_secret_824511649166-rd0kn8jg71odnik0backligb356p0vc8.apps' \
                   '.googleusercontent' \
                   '.com.json'

# Spreadsheet ID comes from project settings rather than being hard-coded.
SAMPLE_SPREADSHEET_ID_input = get_settings.get_settings("EXCEL_ID")
21 | |||
def import_from_sheets():
    """Pull the raw cell values for SAMPLE_RANGE_NAME from the spreadsheet.

    Handles the OAuth2 installed-app flow: a token cached in ``token.json``
    is reused while valid, refreshed silently when expired, and created via
    an interactive browser login otherwise.

    :return: list of rows (each a list of cell values); empty list if the
        sheet returned no data.
    :rtype: list
    """
    token_path = 'token.json'
    credentials = None

    # Reuse the token cached by a previous run, if any.
    if os.path.exists(token_path):
        credentials = Credentials.from_authorized_user_file(token_path, SCOPES)

    if credentials is None or not credentials.valid:
        refreshable = credentials and credentials.expired and credentials.refresh_token
        if refreshable:
            credentials.refresh(Request())
        else:
            # No usable token: run the interactive consent flow.
            flow = InstalledAppFlow.from_client_secrets_file(
                CREDENTIALS_FILE, SCOPES)
            credentials = flow.run_local_server(port=0)
        # Cache the (possibly new) token for subsequent runs.
        with open(token_path, 'w') as token_file:
            token_file.write(credentials.to_json())

    # Query the Sheets API for the configured range.
    sheets_api = build('sheets', 'v4', credentials=credentials).spreadsheets()
    response = sheets_api.values().get(
        spreadsheetId=SAMPLE_SPREADSHEET_ID_input,
        range=SAMPLE_RANGE_NAME,
    ).execute()
    rows = response.get('values', [])

    if not rows:
        print('No data found.')
    return rows
56 | |||
57 | |||
def get_config():
    """Build the bot configuration from the spreadsheet and write it to disk.

    Loads the sheet via ``import_from_sheets()``, lower-cases and expands
    the per-monster trigger words (adding a space-less variant for triggers
    containing spaces), attaches the static type/milestone tables, and dumps
    the combined structure as JSON to ``json_files/config.json`` and
    ``modules/pull_config/output/config.txt``.

    :return: None
    :rtype: NoneType
    """
    pd.set_option('mode.chained_assignment', None)
    print("Loading data")
    values_input = import_from_sheets()
    # First sheet row is the header.
    df = pd.DataFrame(values_input[1:], columns=values_input[0])

    print("Transforming data")
    monsters_df = df[["name", "type"]]
    monsters_df["type"] = pd.to_numeric(df["type"])

    triggers = df.drop(['name', 'role', 'type', 'id'], axis=1)
    # BUG FIX: the original used `isinstance(s) == str`, which raises
    # TypeError (isinstance requires two arguments); the intent was clearly
    # a string check before lower-casing.
    triggers = triggers.applymap(lambda s: s.lower() if isinstance(s, str) else s)
    # triggers = triggers.applymap(lambda s: unidecode.unidecode(s) if type(s) == str else s)

    triggers_list = []
    with tqdm(total=len(triggers), file=sys.stdout) as pbar:
        for row in triggers.itertuples(index=False):
            helpt = pd.Series(row)
            helpt = helpt[~helpt.isna()]
            # Drop empty strings
            helpt = pd.Series(filter(None, helpt))
            # Copy strings with spaces without keeping them: triggers with
            # spaces also get a space-less alias.
            for trigger in helpt:
                trigger_nospace = trigger.replace(' ', '')
                if trigger_nospace != trigger:
                    # pd.concat replaces Series.append (removed in pandas 2.0).
                    helpt = pd.concat([helpt, pd.Series([trigger_nospace])],
                                      ignore_index=True)
            helpt = helpt.drop_duplicates()
            triggers_list.append(helpt)
            pbar.update(1)

    print("Creating trigger structure")
    triggers_def = []
    with tqdm(total=len(triggers_list), file=sys.stdout) as pbar:
        for i in triggers_list:
            triggers_def.append(list(i))
            pbar.update(1)
    triggers_def_series = pd.Series(triggers_def)
    monsters_df.insert(loc=0, column='triggers', value=triggers_def_series)

    print("Creating output")

    # Static lookup tables shipped alongside the sheet-derived commands.
    types = {'id': [4, 3, 2, 1, 0],
             'label': ["Common", "Event_Likan", "Event_Ulf", "Legendary", "Rare"]}
    types_df = pd.DataFrame(data=types)
    milestones = {'total': [150, 1000, 5000],
                  'name': ["Rare Spotter", "Legendary Spotter", "Mythic Spotter"]}
    milestones_df = pd.DataFrame(data=milestones)
    json_final = {'milestones': milestones_df, 'types': types_df, 'commands': monsters_df}

    # convert dataframes into dictionaries
    data_dict = {
        key: json_final[key].to_dict(orient='records')
        for key in json_final
    }

    # Write the same payload to both output locations via one helper
    # (the two copies previously duplicated the json.dump call).
    _write_json('json_files/config.json', data_dict)
    _write_json('modules/pull_config/output/config.txt', data_dict)

    print(".json saved")


def _write_json(path, data):
    """Dump *data* as pretty-printed UTF-8 JSON (keys in insertion order) to *path*."""
    with open(path, 'w', encoding='utf8') as f:
        json.dump(data, f, indent=4, ensure_ascii=False, sort_keys=False)
135 | |||
def _main():
    """Script entry point: regenerate the config files from the spreadsheet."""
    get_config()


if __name__ == "__main__":
    _main()
||
138 |