@@ 79-102 (lines=24) @@
| 76 |                 logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
| 77 | 
| 78 | 
| 79 | @app.task(name='tasks.auto_delete_size_is_exceeded', ignore_result=True)
| 80 | def auto_delete_size_is_exceeded():
| 81 |     logger = logging.getLogger('limitation')
| 82 |     model_list = [
| 83 |         ('crash', 'Crash'),
| 84 |         ('feedback', 'Feedback')
| 85 |     ]
| 86 |     for model in model_list:
| 87 |         result = delete_size_is_exceeded(*model)
| 88 |         if result.get('count', 0):
| 89 |             log_id = str(uuid.uuid4())
| 90 |             params = dict(log_id=log_id)
| 91 |             splunk_url = get_splunk_url(params)
| 92 |             splunk_filter = 'log_id=%s' % log_id if splunk_url else None
| 93 |             ids_list = sorted([element['id'] for element in result['elements']])
| 94 |             raven_extra = {"id": log_id, "splunk_url": splunk_url, "splunk_filter": splunk_filter, "%s_list" % (model[1]): ids_list}
| 95 |             raven.captureMessage("[Limitation]Periodic task 'Size is exceeded' cleaned up %d %s, total size of cleaned space is %s [%d]" %
| 96 |                                  (result['count'], model[1], filters.filesizeformat(result['size']).replace(u'\xa0', u' '), time.time()),
| 97 |                                  data=dict(level=20, logger='limitation'), extra=raven_extra)
| 98 |             extra = dict(log_id=log_id, meta=True, count=result['count'], size=filters.filesizeformat(result['size']).replace(u'\xa0', u' '), model=model[1], reason='size_is_exceeded')
| 99 |             logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
| 100 |             for element in result['elements']:
| 101 |                 element.update({"log_id": log_id, "%s_id" % (model[1]): element.pop('id')})
| 102 |                 logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
| 103 | 
| 104 | 
| 105 | @app.task(name='tasks.auto_delete_duplicate_crashes', ignore_result=True)

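The duplicated fragments in this report differ only in the delete function they call, the label inside the Sentry message, and the `reason` tag. As a minimal sketch of how the shared body could be factored out, the helper below keeps the original calls (logging, uuid, time, raven, filters, get_splunk_url, add_extra_to_log_message) and assumes they remain importable in the module; the helper name `_report_cleanup` and its parameters are hypothetical, not part of the existing code.

    # Hypothetical helper: the shared reporting body of both tasks,
    # parameterised by the values that actually differ between the two hunks.
    def _report_cleanup(result, model, task_label, reason):
        logger = logging.getLogger('limitation')
        if not result.get('count', 0):
            return
        log_id = str(uuid.uuid4())
        splunk_url = get_splunk_url(dict(log_id=log_id))
        splunk_filter = 'log_id=%s' % log_id if splunk_url else None
        ids_list = sorted([element['id'] for element in result['elements']])
        size = filters.filesizeformat(result['size']).replace(u'\xa0', u' ')
        raven_extra = {"id": log_id, "splunk_url": splunk_url,
                       "splunk_filter": splunk_filter, "%s_list" % model[1]: ids_list}
        raven.captureMessage(
            "[Limitation]Periodic task '%s' cleaned up %d %s, total size of cleaned space is %s [%d]" %
            (task_label, result['count'], model[1], size, time.time()),
            data=dict(level=20, logger='limitation'), extra=raven_extra)
        extra = dict(log_id=log_id, meta=True, count=result['count'], size=size,
                     model=model[1], reason=reason)
        logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
        for element in result['elements']:
            element.update({"log_id": log_id, "%s_id" % model[1]: element.pop('id')})
            logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
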
@@ 53-76 (lines=24) @@
| 50 |     statistics.collect_statistics(parse_request(request), ip=ip)
| 51 | 
| 52 | 
| 53 | @app.task(name='tasks.auto_delete_older_then', ignore_result=True)
| 54 | def auto_delete_older_than():
| 55 |     logger = logging.getLogger('limitation')
| 56 |     model_list = [
| 57 |         ('crash', 'Crash'),
| 58 |         ('feedback', 'Feedback')
| 59 |     ]
| 60 |     for model in model_list:
| 61 |         result = delete_older_than(*model)
| 62 |         if result.get('count', 0):
| 63 |             log_id = str(uuid.uuid4())
| 64 |             params = dict(log_id=log_id)
| 65 |             splunk_url = get_splunk_url(params)
| 66 |             splunk_filter = 'log_id=%s' % log_id if splunk_url else None
| 67 |             ids_list = sorted([element['id'] for element in result['elements']])
| 68 |             raven_extra = {"id": log_id, "splunk_url": splunk_url, "splunk_filter": splunk_filter, "%s_list" % (model[1]): ids_list}
| 69 |             raven.captureMessage("[Limitation]Periodic task 'Older than' cleaned up %d %s, total size of cleaned space is %s [%d]" %
| 70 |                                  (result['count'], model[1], filters.filesizeformat(result['size']).replace(u'\xa0', u' '), time.time()),
| 71 |                                  data=dict(level=20, logger='limitation'), extra=raven_extra)
| 72 |             extra = dict(log_id=log_id, meta=True, count=result['count'], size=filters.filesizeformat(result['size']).replace(u'\xa0', u' '), model=model[1], reason='old')
| 73 |             logger.info(add_extra_to_log_message('Automatic cleanup', extra=extra))
| 74 |             for element in result['elements']:
| 75 |                 element.update({"log_id": log_id, "%s_id" % (model[1]): element.pop('id')})
| 76 |                 logger.info(add_extra_to_log_message('Automatic cleanup element', extra=element))
| 77 | 
| 78 | 
| 79 | @app.task(name='tasks.auto_delete_size_is_exceeded', ignore_result=True)
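
With a helper like the hypothetical `_report_cleanup` sketched above, both duplicated task bodies would collapse to a short loop. The sketch below is illustrative only; it keeps the registered task names exactly as they appear in the source (including the 'then' spelling) so existing schedules would not break, and `MODEL_LIST` is an assumed module-level constant.

    # Assumed module-level constant shared by both tasks.
    MODEL_LIST = [('crash', 'Crash'), ('feedback', 'Feedback')]

    @app.task(name='tasks.auto_delete_older_then', ignore_result=True)
    def auto_delete_older_than():
        for model in MODEL_LIST:
            _report_cleanup(delete_older_than(*model), model, 'Older than', 'old')

    @app.task(name='tasks.auto_delete_size_is_exceeded', ignore_result=True)
    def auto_delete_size_is_exceeded():
        for model in MODEL_LIST:
            _report_cleanup(delete_size_is_exceeded(*model), model,
                            'Size is exceeded', 'size_is_exceeded')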