| Conditions | 34 |
| Total Lines | 148 |
| Code Lines | 82 |
| Lines | 0 |
| Ratio | 0 % |
| Tests | 54 |
| CRAP Score | 49.8609 |
| Changes | 0 |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables are present, Introduce Parameter Object or Replace Temp with Query can also help.
Complex functions like crowdtruth.load.processFile() often do a lot of different things. To break such a function down, we need to identify a cohesive component within it. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | 1 | import os |
|
| 116 | 1 | return results, config |
|
| 117 | |||
| 118 | |||
| 119 | 1 | def process_file(filename, config): |
|
| 120 | |||
| 121 | 1 | job = filename.split('.csv')[0] |
|
| 122 | |||
| 123 | 1 | judgments = pd.read_csv(filename)#, encoding=result['encoding']) |
|
| 124 | |||
| 125 | 1 | platform = get_platform(judgments) |
|
| 126 | |||
| 127 | 1 | if platform == False: |
|
| 128 | logging.info("Custom crowdsourcing platform!") |
||
| 129 | no_of_columns = len(config.customPlatformColumns) |
||
| 130 | if no_of_columns != 5: |
||
| 131 | logging.warning("The following column names are required: judgment id, " |
||
| 132 | "unit id, worker id, start time, submit time") |
||
| 133 | raise ValueError('No custom platform configuration was provided') |
||
| 134 | else: |
||
| 135 | |||
| 136 | platform = { |
||
| 137 | #'id' : 'custom', |
||
| 138 | config.customPlatformColumns[0] : 'judgment', |
||
| 139 | config.customPlatformColumns[1] : 'unit', |
||
| 140 | config.customPlatformColumns[2] : 'worker', |
||
| 141 | config.customPlatformColumns[3] : 'started', |
||
| 142 | config.customPlatformColumns[4] : 'submitted' |
||
| 143 | } |
||
| 144 | |||
| 145 | |||
| 146 | # we must establish which fields were part of the input data and which are output judgments |
||
| 147 | # if there is a config, check if there is a definition of which fields to use |
||
| 148 | #config = [] |
||
| 149 | # else use the default and select them automatically |
||
| 150 | 1 | config = get_column_types(judgments, config) |
|
| 151 | |||
| 152 | # remove rows where the worker did not give an answer (AMT issue) |
||
| 153 | 1 | empty_rows = set() |
|
| 154 | 1 | for col in config.outputColumns: |
|
| 155 | 1 | empty_rows = empty_rows.union(judgments[pd.isnull(judgments[col]) == True].index) |
|
| 156 | 1 | for col in config.outputColumns: |
|
| 157 | 1 | judgments = judgments[pd.isnull(judgments[col]) == False] |
|
| 158 | 1 | judgments = judgments.reset_index(drop=True) |
|
| 159 | 1 | count_empty_rows = len(empty_rows) |
|
| 160 | 1 | if count_empty_rows > 0: |
|
| 161 | if count_empty_rows == 1: |
||
| 162 | logging.warning(str(count_empty_rows) + " row with incomplete information in " |
||
| 163 | "output columns was removed.") |
||
| 164 | else: |
||
| 165 | logging.warning(str(count_empty_rows) + " rows with incomplete information in " |
||
| 166 | "output columns were removed.") |
||
| 167 | |||
| 168 | # allow customization of the judgments |
||
| 169 | 1 | judgments = config.processJudgments(judgments) |
|
| 170 | |||
| 171 | # update the config after the preprocessing of judgments |
||
| 172 | 1 | config = get_column_types(judgments, config) |
|
| 173 | |||
| 174 | 1 | all_columns = dict(list(config.input.items()) + list(config.output.items()) \ |
|
| 175 | + list(platform.items())) |
||
| 176 | # allColumns = dict(config.input.items() | config.output.items() | platform.items()) |
||
| 177 | 1 | judgments = judgments.rename(columns=all_columns) |
|
| 178 | |||
| 179 | # remove columns we don't care about |
||
| 180 | 1 | judgments = judgments[list(all_columns.values())] |
|
| 181 | |||
| 182 | 1 | judgments['job'] = job |
|
| 183 | |||
| 184 | # make output values safe keys |
||
| 185 | 1 | for col in config.output.values(): |
|
| 186 | 1 | if type(judgments[col].iloc[0]) is dict: |
|
| 187 | logging.info("Values stored as dictionary") |
||
| 188 | if config.open_ended_task: |
||
| 189 | judgments[col] = judgments[col].apply(lambda x: OrderedCounter(x)) |
||
| 190 | else: |
||
| 191 | judgments[col] = judgments[col].apply(lambda x: create_ordered_counter( \ |
||
| 192 | OrderedCounter(x), config.annotation_vector)) |
||
| 193 | else: |
||
| 194 | 1 | logging.info("Values not stored as dictionary") |
|
| 195 | 1 | if config.open_ended_task: |
|
| 196 | 1 | judgments[col] = judgments[col].apply(lambda x: OrderedCounter( \ |
|
| 197 | x.split(config.annotation_separator))) |
||
| 198 | else: |
||
| 199 | 1 | judgments[col] = judgments[col].apply(lambda x: create_ordered_counter( \ |
|
| 200 | OrderedCounter(x.split(config.annotation_separator)), \ |
||
| 201 | config.annotation_vector)) |
||
| 202 | |||
| 203 | 1 | judgments['started'] = judgments['started'].apply(lambda x: pd.to_datetime(str(x))) |
|
| 204 | 1 | judgments['submitted'] = judgments['submitted'].apply(lambda x: pd.to_datetime(str(x))) |
|
| 205 | 1 | judgments['duration'] = judgments.apply(lambda row: (row['submitted'] - row['started']).seconds, |
|
| 206 | axis=1) |
||
| 207 | |||
| 208 | # remove units with just 1 judgment |
||
| 209 | 1 | units_1work = judgments.groupby('unit').filter(lambda x: len(x) == 1)["unit"] |
|
| 210 | 1 | judgments = judgments[~judgments['unit'].isin(units_1work)] |
|
| 211 | 1 | judgments = judgments.reset_index(drop=True) |
|
| 212 | 1 | no_units_1work = len(units_1work) |
|
| 213 | 1 | if no_units_1work > 0: |
|
| 214 | if no_units_1work == 1: |
||
| 215 | logging.warning(str(no_units_1work) + " Media Unit that was annotated by only" |
||
| 216 | " 1 Worker was omitted, since agreement cannot be calculated.") |
||
| 217 | else: |
||
| 218 | logging.warning(str(no_units_1work) + " Media Units that were annotated by only" |
||
| 219 | " 1 Worker were omitted, since agreement cannot be calculated.") |
||
| 220 | |||
| 221 | # |
||
| 222 | # aggregate units |
||
| 223 | # |
||
| 224 | 1 | units = Unit.aggregate(judgments, config) |
|
| 225 | |||
| 226 | 1 | for col in config.output.values(): |
|
| 227 | 1 | judgments[col+'.count'] = judgments[col].apply(lambda x: sum(x.values())) |
|
| 228 | 1 | judgments[col+'.unique'] = judgments[col].apply(lambda x: len(x)) |
|
| 229 | |||
| 230 | |||
| 231 | # |
||
| 232 | # aggregate workers |
||
| 233 | # |
||
| 234 | 1 | workers = Worker.aggregate(judgments, config) |
|
| 235 | |||
| 236 | |||
| 237 | # |
||
| 238 | # aggregate annotations |
||
| 239 | # i.e. output columns |
||
| 240 | # |
||
| 241 | 1 | annotations = pd.DataFrame() |
|
| 242 | 1 | for col in config.output.values(): |
|
| 243 | 1 | res = pd.DataFrame(judgments[col].apply(lambda x: \ |
|
| 244 | pd.Series(list(x.keys())).value_counts()).sum(), columns=[col]) |
||
| 245 | 1 | annotations = pd.concat([annotations, res], axis=0) |
|
| 246 | |||
| 247 | # |
||
| 248 | # aggregate job |
||
| 249 | # |
||
| 250 | 1 | job = Job.aggregate(units, judgments, config) |
|
| 251 | |||
| 252 | # Clean up judgments |
||
| 253 | # remove input columns from judgments |
||
| 254 | 1 | output_cols = [col for col in judgments.columns.values \ |
|
| 255 | if col.startswith('output') or col.startswith('metric')] |
||
| 256 | 1 | judgments = judgments[output_cols + list(platform.values()) + ['duration', 'job']] |
|
| 257 | |||
| 258 | # set judgment id as index |
||
| 259 | 1 | judgments.set_index('judgment', inplace=True) |
|
| 260 | |||
| 261 | # add missing vector values if closed task |
||
| 262 | 1 | for col in config.output.values(): |
|
| 263 | 1 | try: |
|
| 264 | # openended = config.open_ended_task |
||
| 383 |