| Metric | Value | Metric | Value |
|---|---|---|---|
| Conditions | 34 | Total Lines | 239 |
| Lines | 0 | Ratio | 0 % |
| Changes | 1 | | |
| Bugs | 0 | Features | 0 |
Small methods make your code easier to understand, especially when combined with a good name. Conversely, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a method of its own, with the comment as a starting point for its name (a short sketch follows below).
Commonly applied refactorings include Extract Method. If many parameters or temporary variables stand in the way of the extraction, Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object can be applied first.
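As a minimal sketch of this comment-driven extraction (everything here is invented for illustration: the Item type, the TAX_RATE constant, and the render_invoice functions):

```python
from dataclasses import dataclass

TAX_RATE = 0.19  # illustrative flat tax rate


@dataclass
class Item:
    price: float
    quantity: int


# Before: a comment labels a cohesive block inside a longer method.
def render_invoice(items):
    # compute the grand total including tax
    total = sum(i.price * i.quantity for i in items)
    total = total * (1 + TAX_RATE)
    return "Total due: %.2f" % total


# After: the block becomes its own method, named after the comment.
def grand_total_with_tax(items):
    """Compute the grand total, including tax."""
    return sum(i.price * i.quantity for i in items) * (1 + TAX_RATE)


def render_invoice_refactored(items):
    return "Total due: %.2f" % grand_total_with_tax(items)


assert render_invoice([Item(10.0, 2)]) == render_invoice_refactored([Item(10.0, 2)])
```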
Complex code units like jobs() often do a lot of different things at once. To break such a unit down, we need to identify a cohesive component within it. A common approach is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster to apply.
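For illustration, here is a minimal sketch of the prefix heuristic feeding Extract Class; the Scheduler and Machine names and fields are hypothetical, not taken from the code below:

```python
# Before: the machine_* prefix marks a cohesive component hiding in the class.
class Scheduler:
    def __init__(self):
        self.machine_host = None
        self.machine_config = None
        self.machine_last_contact = None
        self.queue = []

    def machine_is_known(self):
        return self.machine_last_contact is not None


# After: the prefixed members move into a class of their own.
class Machine:
    def __init__(self, host=None, config=None, last_contact=None):
        self.host = host
        self.config = config
        self.last_contact = last_contact

    def is_known(self):
        return self.last_contact is not None


class SchedulerRefactored:
    def __init__(self):
        self.machine = Machine()
        self.queue = []
```

The flagged method is reproduced below. Its GET branch (job delivery) and its POST branch (result handling) are exactly the kind of separable components this advice points at.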
| 1 | ''' |
||
| 87 | @csrf_exempt |
||
| 88 | def jobs(request): |
||
| 89 | ''' This is the view used by the executor.py scripts for getting / putting the test results. |
||
| 90 | Fetching some file for testing is changing the database, so using GET here is not really RESTish. Whatever. |
||
| 91 | A visible shared secret in the request is no problem, since the executors come |
||
| 92 | from trusted networks. The secret only protects this view from outside foreigners. |
||
| 93 | |||
| 94 | TODO: Make it a real API, based on some framework. |
||
| 95 | TODO: Factor out state model from this method into some model. |
||
| 96 | |||
| 97 | POST requests with 'Action'='get_config' are expected to contain the following parameters: |
||
| 98 | 'MachineId', |
||
| 99 | 'Config', |
||
| 100 | 'Secret', |
||
| 101 | 'UUID' |
||
| 102 | |||
| 103 | All other POST requests are expected to contain the following parameters: |
||
| 104 | 'SubmissionFileId', |
||
| 105 | 'Message', |
||
| 106 | 'ErrorCode', |
||
| 107 | 'Action', |
||
| 108 | 'Secret', |
||
| 109 | 'UUID' |
||
| 110 | |||
| 111 | GET requests are expected to contain the following parameters: |
||
| 112 | 'Secret', |
||
| 113 | 'UUID' |
||
| 114 | |||
| 115 | GET reponses deliver the following elements in the header: |
||
| 116 | 'SubmissionFileId', |
||
| 117 | 'Timeout', |
||
| 118 | 'Action', |
||
| 119 | 'PostRunValidation' |
||
| 120 | ''' |
||
| 121 | try: |
||
| 122 | if request.method == 'GET': |
||
| 123 | secret = request.GET['Secret'] |
||
| 124 | uuid = request.GET['UUID'] |
||
| 125 | elif request.method == 'POST': |
||
| 126 | secret = request.POST['Secret'] |
||
| 127 | uuid = request.POST['UUID'] |
||
| 128 | except Exception as e: |
||
| 129 | logger.error( |
||
| 130 | "Error finding the neccessary data in the executor request: " + str(e)) |
||
| 131 | raise PermissionDenied |
||
| 132 | |||
| 133 | if secret != settings.JOB_EXECUTOR_SECRET: |
||
| 134 | raise PermissionDenied |
||
| 135 | |||
| 136 | # Update last_contact information for test machine |
||
| 137 | machine, created = TestMachine.objects.update_or_create( |
||
| 138 | host=uuid, defaults={'last_contact': datetime.now()}) |
||
| 139 | if created: |
||
| 140 | # ask for configuration of new execution hosts by returning the according action |
||
| 141 | logger.debug( |
||
| 142 | "Test machine is unknown, creating entry and asking executor for configuration.") |
||
| 143 | response = HttpResponse() |
||
| 144 | response['Action'] = 'get_config' |
||
| 145 | response['APIVersion'] = '1.0.0' # semantic versioning |
||
| 146 | response['MachineId'] = machine.pk |
||
| 147 | return response |
||
| 148 | |||
| 149 | if not machine.enabled: |
||
| 150 | # Act like no jobs are given for him |
||
| 151 | raise Http404 |
||
| 152 | |||
| 153 | if request.method == "GET": |
||
| 154 | # Clean up submissions where the answer from the executors took too long |
||
| 155 | pending_submissions = Submission.pending_tests.filter( |
||
| 156 | file_upload__fetched__isnull=False) |
||
| 157 | #logger.debug("%u pending submission(s)"%(len(pending_submissions))) |
||
| 158 | for sub in pending_submissions: |
||
| 159 | max_delay = timedelta( |
||
| 160 | seconds=sub.assignment.attachment_test_timeout) |
||
| 161 | # There is a small chance that meanwhile the result was delivered, so fetched became NULL |
||
| 162 | if sub.file_upload.fetched and sub.file_upload.fetched + max_delay < datetime.now(): |
||
| 163 | logger.debug( |
||
| 164 | "Resetting executor fetch status for submission %u, due to timeout" % sub.pk) |
||
| 165 | # TODO: Late delivery for such a submission by the executor may lead to result overwriting. Check this. |
||
| 166 | sub.clean_fetch_date() |
||
| 167 | if sub.state == Submission.TEST_VALIDITY_PENDING: |
||
| 168 | sub.save_validation_result( |
||
| 169 | machine, "Killed due to non-reaction. Please check your application for deadlocks or keyboard input.", "Killed due to non-reaction on timeout signals.") |
||
| 170 | sub.state = Submission.TEST_VALIDITY_FAILED |
||
| 171 | sub.inform_student(sub.state) |
||
| 172 | if sub.state == Submission.TEST_FULL_PENDING: |
||
| 173 | sub.save_fulltest_result( |
||
| 174 | machine, "Killed due to non-reaction on timeout signals. Student not informed, since this was the full test.") |
||
| 175 | sub.state = Submission.TEST_FULL_FAILED |
||
| 176 | sub.save() |
||
| 177 | |||
| 178 | # Now get an appropriate submission. |
||
| 179 | submissions = Submission.pending_tests |
||
| 180 | submissions = submissions.filter(assignment__in=machine.assignments.all()) \ |
||
| 181 | .filter(file_upload__isnull=False) \ |
||
| 182 | .filter(file_upload__fetched__isnull=True) |
||
| 183 | if len(submissions) == 0: |
||
| 184 | # Nothing found to be fetchable |
||
| 185 | #logger.debug("No pending work for executors") |
||
| 186 | raise Http404 |
||
| 187 | else: |
||
| 188 | sub = submissions[0] |
||
| 189 | sub.save_fetch_date() |
||
| 190 | sub.modified = datetime.now() |
||
| 191 | sub.save() |
||
| 192 | |||
| 193 | # create HTTP response with file download |
||
| 194 | f = sub.file_upload.attachment |
||
| 195 | # on dev server, we sometimes have stale database entries |
||
| 196 | if not os.access(f.path, os.F_OK): |
||
| 197 | mail_managers('Warning: Missing file', |
||
| 198 | 'Missing file on storage for submission file entry %u: %s' % ( |
||
| 199 | sub.file_upload.pk, str(sub.file_upload.attachment)), fail_silently=True) |
||
| 200 | raise Http404 |
||
| 201 | response = HttpResponse(f, content_type='application/binary') |
||
| 202 | response['APIVersion'] = '1.0.0' # semantic versioning |
||
| 203 | response['Content-Disposition'] = 'attachment; filename="%s"' % sub.file_upload.basename() |
||
| 204 | response['SubmissionFileId'] = str(sub.file_upload.pk) |
||
| 205 | response['SubmissionOriginalFilename'] = sub.file_upload.original_filename |
||
| 206 | response['SubmissionId'] = str(sub.pk) |
||
| 207 | response['SubmitterName'] = sub.submitter.get_full_name() |
||
| 208 | response['SubmitterStudentId'] = sub.submitter.profile.student_id |
||
| 209 | response['AuthorNames'] = sub.authors.all() |
||
| 210 | response['SubmitterStudyProgram'] = str(sub.submitter.profile.study_program) |
||
| 211 | response['Course'] = str(sub.assignment.course) |
||
| 212 | response['Assignment'] = str(sub.assignment) |
||
| 213 | response['Timeout'] = sub.assignment.attachment_test_timeout |
||
| 214 | if sub.state == Submission.TEST_VALIDITY_PENDING: |
||
| 215 | response['Action'] = 'test_validity' |
||
| 216 | response['PostRunValidation'] = sub.assignment.validity_test_url() |
||
| 217 | elif sub.state == Submission.TEST_FULL_PENDING or sub.state == Submission.CLOSED_TEST_FULL_PENDING: |
||
| 218 | response['Action'] = 'test_full' |
||
| 219 | response['PostRunValidation'] = sub.assignment.full_test_url() |
||
| 220 | else: |
||
| 221 | assert (False) |
||
| 222 | logger.debug("Delivering submission %u as new %s job" % |
||
| 223 | (sub.pk, response['Action'])) |
||
| 224 | return response |
||
| 225 | |||
| 226 | elif request.method == "POST": |
||
| 227 | # first check if this is just configuration data, and not a job result |
||
| 228 | if request.POST['Action'] == 'get_config': |
||
| 229 | machine = TestMachine.objects.get( |
||
| 230 | pk=int(request.POST['MachineId'])) |
||
| 231 | machine.config = request.POST['Config'] |
||
| 232 | machine.save() |
||
| 233 | return HttpResponse(status=201) |
||
| 234 | |||
| 235 | # executor.py is providing the results as POST parameters |
||
| 236 | sid = request.POST['SubmissionFileId'] |
||
| 237 | submission_file = get_object_or_404(SubmissionFile, pk=sid) |
||
| 238 | sub = submission_file.submissions.all()[0] |
||
| 239 | logger.debug("Storing executor results for submission %u" % (sub.pk)) |
||
| 240 | error_code = int(request.POST['ErrorCode']) |
||
| 241 | # Job state: Waiting for validity test |
||
| 242 | # Possible with + without full test |
||
| 243 | # Possible with + without grading |
||
| 244 | if request.POST['Action'] == 'test_validity' and sub.state == Submission.TEST_VALIDITY_PENDING: |
||
| 245 | sub.save_validation_result( |
||
| 246 | machine, request.POST['Message'], request.POST['MessageTutor']) |
||
| 247 | if error_code == 0: |
||
| 248 | # We have a full test |
||
| 249 | if sub.assignment.attachment_test_full: |
||
| 250 | logger.debug( |
||
| 251 | "Validity test working, setting state to pending full test") |
||
| 252 | sub.state = Submission.TEST_FULL_PENDING |
||
| 253 | # We have no full test |
||
| 254 | else: |
||
| 255 | logger.debug( |
||
| 256 | "Validity test working, setting state to tested") |
||
| 257 | sub.state = Submission.SUBMITTED_TESTED |
||
| 258 | if not sub.assignment.is_graded(): |
||
| 259 | # Assignment is not graded. We are done here. |
||
| 260 | sub.state = Submission.CLOSED |
||
| 261 | sub.inform_student(Submission.CLOSED) |
||
| 262 | else: |
||
| 263 | logger.debug( |
||
| 264 | "Validity test not working, setting state to failed") |
||
| 265 | sub.state = Submission.TEST_VALIDITY_FAILED |
||
| 266 | sub.inform_student(sub.state) |
||
| 267 | # Job state: Waiting for full test |
||
| 268 | # Possible with + without grading |
||
| 269 | elif request.POST['Action'] == 'test_full' and sub.state == Submission.TEST_FULL_PENDING: |
||
| 270 | sub.save_fulltest_result( |
||
| 271 | machine, request.POST['MessageTutor']) |
||
| 272 | if error_code == 0: |
||
| 273 | if sub.assignment.is_graded(): |
||
| 274 | logger.debug("Full test working, setting state to tested (since graded)") |
||
| 275 | sub.state = Submission.SUBMITTED_TESTED |
||
| 276 | else: |
||
| 277 | logger.debug("Full test working, setting state to closed (since not graded)") |
||
| 278 | sub.state = Submission.CLOSED |
||
| 279 | inform_student(sub, Submission.CLOSED) |
||
| 280 | else: |
||
| 281 | logger.debug("Full test not working, setting state to failed") |
||
| 282 | sub.state = Submission.TEST_FULL_FAILED |
||
| 283 | # full tests may be performed several times and are meant to be a silent activity |
||
| 284 | # therefore, we send no mail to the student here |
||
| 285 | # Job state: Waiting for full test of already closed jobs ("re-test") |
||
| 286 | # Grading is already done |
||
| 287 | elif request.POST['Action'] == 'test_full' and sub.state == Submission.CLOSED_TEST_FULL_PENDING: |
||
| 288 | logger.debug( |
||
| 289 | "Closed full test done, setting state to closed again") |
||
| 290 | sub.save_fulltest_result( |
||
| 291 | machine, request.POST['MessageTutor']) |
||
| 292 | sub.state = Submission.CLOSED |
||
| 293 | # full tests may be performed several times and are meant to be a silent activity |
||
| 294 | # therefore, we send no mail to the student here |
||
| 295 | elif request.POST['Action'] == 'test_validity' and sub.state == Submission.TEST_VALIDITY_FAILED: |
||
| 296 | # Can happen if the validation is set to failed due to timeout, but the executor delivers the late result. |
||
| 297 | # Happens in reality only with >= 2 executors, since the second one is pulling for new jobs and triggers |
||
| 298 | # the timeout check while the first one is still stucked with the big job. |
||
| 299 | # Can be ignored. |
||
| 300 | logger.debug( |
||
| 301 | "Ignoring executor result, since the submission is already marked as failed.") |
||
| 302 | else: |
||
| 303 | msg = ''' |
||
| 304 | Dear OpenSubmit administrator, |
||
| 305 | |||
| 306 | the executors returned some result, but this does not fit to the current submission state. |
||
| 307 | This is a strong indication for a bug in OpenSubmit - sorry for that. |
||
| 308 | The system will ignore the report from executor and mark the job as to be repeated. |
||
| 309 | Please report this on the project GitHub page for further investigation. |
||
| 310 | |||
| 311 | Submission ID: %u |
||
| 312 | Submission File ID reported by the executor: %u |
||
| 313 | Action reported by the executor: %s |
||
| 314 | Current state of the submission: %s (%s) |
||
| 315 | Message from the executor: %s |
||
| 316 | Error code from the executor: %u |
||
| 317 | ''' % (sub.pk, submission_file.pk, request.POST['Action'], |
||
| 318 | sub.state_for_tutors(), sub.state, |
||
| 319 | request.POST['Message'], error_code) |
||
| 320 | mail_managers('Warning: Inconsistent job state', |
||
| 321 | msg, fail_silently=True) |
||
| 322 | # Mark work as done |
||
| 323 | sub.save() |
||
| 324 | sub.clean_fetch_date() |
||
| 325 | return HttpResponse(status=201) |
||
| 326 | |||
| 327 |