# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2019 by its authors.
# Some rights reserved, see README and LICENSE.

import itertools
import re

import transaction
from bika.lims import api
from bika.lims import logger
from bika.lims.alphanumber import Alphanumber
from bika.lims.alphanumber import to_alpha
from bika.lims.browser.fields.uidreferencefield import \
    get_backreferences as get_backuidreferences
from bika.lims.interfaces import IAnalysisRequest
from bika.lims.interfaces import IAnalysisRequestPartition
from bika.lims.interfaces import IAnalysisRequestRetest
from bika.lims.interfaces import IAnalysisRequestSecondary
from bika.lims.interfaces import IIdServer
from bika.lims.numbergenerator import INumberGenerator
from DateTime import DateTime
from Products.ATContentTypes.utils import DT2dt
from zope.component import getAdapters
from zope.component import getUtility

AR_TYPES = [
    "AnalysisRequest",
    "AnalysisRequestRetest",
    "AnalysisRequestPartition",
    "AnalysisRequestSecondary",
]

def get_objects_in_sequence(brain_or_object, ctype, cref):
    """Return the list of items of the sequence the object belongs to, looked
    up either by back reference or by containment
    """
    obj = api.get_object(brain_or_object)
    if ctype == "backreference":
        return get_backreferences(obj, cref)
    if ctype == "contained":
        return get_contained_items(obj, cref)
    raise ValueError("Unsupported sequence counter type '{}'".format(ctype))

def get_backreferences(obj, relationship):
    """Returns the backreferences
    """
    refs = get_backuidreferences(obj, relationship)

    # TODO remove after all ReferenceFields are ported to UIDReferenceField
    # At this moment, there are still some content types that are using the
    # ReferenceField, so we need to fall back to the traditional
    # getBackReferences for these cases.
    if not refs:
        refs = obj.getBackReferences(relationship)

    return refs

def get_contained_items(obj, spec):
    """Returns a list of (id, subobject) tuples of the current context.

    If 'spec' is specified, returns only objects whose meta_type match 'spec'
    """
    return obj.objectItems(spec)


def get_type_id(context, **kw):
    """Returns the type id for the context passed in
    """
    portal_type = kw.get("portal_type", None)
    if portal_type:
        return portal_type

    # Override by provided marker interface
    if IAnalysisRequestPartition.providedBy(context):
        return "AnalysisRequestPartition"
    elif IAnalysisRequestRetest.providedBy(context):
        return "AnalysisRequestRetest"
    elif IAnalysisRequestSecondary.providedBy(context):
        return "AnalysisRequestSecondary"

    return api.get_portal_type(context)

def get_suffix(id, regex="-[A-Z]{1}[0-9]{1,2}$"):
    """Get the suffix of the ID, e.g. '-R01' or '-P05'

    The default regex matches a suffix made of a single uppercase letter
    followed by at most two digits at the end of the ID.
    """
    parts = re.findall(regex, id)
    if not parts:
        return ""
    return parts[0]


def strip_suffix(id):
    """Split off any suffix from ID

    This mimics the old behavior of the Sample ID.
    """
    suffix = get_suffix(id)
    if not suffix:
        return id
    return re.split(suffix, id)[0]
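
# Illustrative behavior of the two suffix helpers above (the IDs are made up):
#
#   >>> get_suffix("WATER-0001-P01")
#   '-P01'
#   >>> get_suffix("WATER-0001")
#   ''
#   >>> strip_suffix("WATER-0001-P01")
#   'WATER-0001'
#   >>> strip_suffix("WATER-0001")
#   'WATER-0001'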


def get_retest_count(context, default=0):
    """Returns the number of retests of this AR
    """
    if not is_ar(context):
        return default

    invalidated = context.getInvalidated()

    count = 0
    while invalidated:
        count += 1
        invalidated = invalidated.getInvalidated()

    return count


def get_partition_count(context, default=0):
    """Returns the number of partitions of this AR
    """
    if not is_ar(context):
        return default

    parent = context.getParentAnalysisRequest()

    if not parent:
        return default

    return len(parent.getDescendants())


def get_secondary_count(context, default=0):
    """Returns the number of secondary ARs of this AR
    """
    if not is_ar(context):
        return default

    primary = context.getPrimaryAnalysisRequest()

    if not primary:
        return default

    return len(primary.getSecondaryAnalysisRequests())


def is_ar(context):
    """Checks if the context is an AR
    """
    return IAnalysisRequest.providedBy(context)

def get_config(context, **kw):
    """Fetch the config dict from the Bika Setup for the given portal_type
    """
    # get the ID formatting config
    config_map = api.get_bika_setup().getIDFormatting()

    # allow portal_type override
    portal_type = get_type_id(context, **kw)

    # check if we have a config for the given portal_type
    for config in config_map:
        if config['portal_type'].lower() == portal_type.lower():
            return config

    # return a default config
    default_config = {
        'form': '%s-{seq}' % portal_type.lower(),
        'sequence_type': 'generated',
        'prefix': '%s' % portal_type.lower(),
    }
    return default_config
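
# The fallback config is derived from the type name alone. E.g., for a
# hypothetical portal_type "Client" without an entry in the ID formatting
# settings, get_config would return:
#
#   {'form': 'client-{seq}',
#    'sequence_type': 'generated',
#    'prefix': 'client'}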


def get_variables(context, **kw):
    """Prepares a dictionary of key->value pairs usable for ID formatting
    """
    # allow portal_type override
    portal_type = get_type_id(context, **kw)

    # The variables map holds the values that might get into the constructed id
    variables = {
        "context": context,
        "id": api.get_id(context),
        "portal_type": portal_type,
        "year": get_current_year(),
        "parent": api.get_parent(context),
        "seq": 0,
        "alpha": Alphanumber(0),
    }

    # Augment the variables map depending on the portal type
    if portal_type in AR_TYPES:
        now = DateTime()
        sampling_date = context.getSamplingDate()
        sampling_date = sampling_date and DT2dt(sampling_date) or DT2dt(now)
        date_sampled = context.getDateSampled()
        date_sampled = date_sampled and DT2dt(date_sampled) or DT2dt(now)
        test_count = 1

        variables.update({
            "clientId": context.getClientID(),
            "dateSampled": date_sampled,
            "samplingDate": sampling_date,
            "sampleType": context.getSampleType().getPrefix(),
            "test_count": test_count
        })

        # Partition
        if portal_type == "AnalysisRequestPartition":
            parent_ar = context.getParentAnalysisRequest()
            parent_ar_id = api.get_id(parent_ar)
            parent_base_id = strip_suffix(parent_ar_id)
            partition_count = get_partition_count(context)
            variables.update({
                "parent_analysisrequest": parent_ar,
                "parent_ar_id": parent_ar_id,
                "parent_base_id": parent_base_id,
                "partition_count": partition_count,
            })

        # Retest
        elif portal_type == "AnalysisRequestRetest":
            # Note: we use "parent" instead of "invalidated" for simplicity
            parent_ar = context.getInvalidated()
            parent_ar_id = api.get_id(parent_ar)
            parent_base_id = strip_suffix(parent_ar_id)
            # keep the full ID if the retracted AR is a partition
            if context.isPartition():
                parent_base_id = parent_ar_id
            retest_count = get_retest_count(context)
            test_count = test_count + retest_count
            variables.update({
                "parent_analysisrequest": parent_ar,
                "parent_ar_id": parent_ar_id,
                "parent_base_id": parent_base_id,
                "retest_count": retest_count,
                "test_count": test_count,
            })

        # Secondary
        elif portal_type == "AnalysisRequestSecondary":
            primary_ar = context.getPrimaryAnalysisRequest()
            primary_ar_id = api.get_id(primary_ar)
            parent_base_id = strip_suffix(primary_ar_id)
            secondary_count = get_secondary_count(context)
            variables.update({
                "parent_analysisrequest": primary_ar,
                "parent_ar_id": primary_ar_id,
                "parent_base_id": parent_base_id,
                "secondary_count": secondary_count,
            })

    elif portal_type == "ARReport":
        variables.update({
            "clientId": context.aq_parent.getClientID(),
        })

    return variables

def split(string, separator="-"):
    """ split a string on the given separator
    """
    if not isinstance(string, basestring):
        return []
    return string.split(separator)


def to_int(thing, default=0):
    """Convert a thing to an integer
    """
    try:
        return int(thing)
    except (TypeError, ValueError):
        return default
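
# Quick sanity examples for the two helpers above:
#
#   >>> split("AR-22-R01")
#   ['AR', '22', 'R01']
#   >>> split(None)
#   []
#   >>> to_int("7")
#   7
#   >>> to_int(None, default=3)
#   3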


def slice(string, separator="-", start=None, end=None):
    """Slice out a segment of a string, which is split on both the wildcards
    and the separator passed in, if any
    """
    # split by wildcards/keywords first
    # AR-{sampleType}-{parentId}{alpha:3a2d}
    segments = filter(None, re.split(r'(\{.+?\})', string))
    # ['AR-', '{sampleType}', '-', '{parentId}', '{alpha:3a2d}']
    if separator:
        # Keep track of singleton separators as empties
        # We need to do this to prevent duplicates later, when splitting
        segments = map(lambda seg: seg != separator and seg or "", segments)
        # ['AR-', '{sampleType}', '', '{parentId}', '{alpha:3a2d}']
        # Split each segment at the given separator
        segments = map(lambda seg: split(seg, separator), segments)
        # [['AR', ''], ['{sampleType}'], [''], ['{parentId}'], ['{alpha:3a2d}']]
        # Flatten the list
        segments = list(itertools.chain.from_iterable(segments))
        # ['AR', '', '{sampleType}', '', '{parentId}', '{alpha:3a2d}']
        # And replace empties with separator
        segments = map(lambda seg: seg != "" and seg or separator, segments)
        # ['AR', '-', '{sampleType}', '-', '{parentId}', '{alpha:3a2d}']

    # Get the start and end positions from the segments without separator
    cleaned_segments = filter(lambda seg: seg != separator, segments)
    start_pos = to_int(start, 0)
    # Note: "end" is not a position, but the number of elements to join!
    end_pos = to_int(end, len(cleaned_segments) - start_pos) + start_pos - 1

    # Map the positions against the segments with separator
    start = segments.index(cleaned_segments[start_pos])
    end = segments.index(cleaned_segments[end_pos]) + 1

    # Return all segments joined
    sliced_parts = segments[start:end]
    return "".join(sliced_parts)


def get_current_year():
    """Returns the current year as a two digit string
    """
    return DateTime().strftime("%Y")[2:]


def search_by_prefix(portal_type, prefix):
    """Returns brains which share the same portal_type and ID prefix
    """
    catalog = api.get_tool("uid_catalog")
    brains = catalog({"portal_type": portal_type})
    # Filter brains with the same ID prefix
    return filter(lambda brain: api.get_id(brain).startswith(prefix), brains)


def get_ids_with_prefix(portal_type, prefix):
    """Return a list of ids sharing the same portal type and prefix
    """
    brains = search_by_prefix(portal_type, prefix)
    ids = map(api.get_id, brains)
    return ids


def make_storage_key(portal_type, prefix=None):
    """Make a storage (dict-) key for the number generator
    """
    key = portal_type.lower()
    if prefix:
        key = "{}-{}".format(key, prefix)
    return key
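
# E.g. (with a made-up prefix):
#
#   >>> make_storage_key("AnalysisRequest")
#   'analysisrequest'
#   >>> make_storage_key("AnalysisRequest", prefix="WB")
#   'analysisrequest-WB'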


def get_seq_number_from_id(id, id_template, prefix, **kw):
    """Return the sequence number of the given ID
    """
    separator = kw.get("separator", "-")
    postfix = id.replace(prefix, "").strip(separator)
    postfix_segments = postfix.split(separator)
    seq_number = 0
    possible_seq_nums = filter(lambda n: n.isalnum(), postfix_segments)
    if possible_seq_nums:
        seq_number = possible_seq_nums[-1]

    # Check if this id has to be expressed as an alphanumeric number
    seq_number = get_alpha_or_number(seq_number, id_template)
    seq_number = to_int(seq_number)
    return seq_number
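
# E.g., for a made-up ID "WB-0012" generated from the template
# "{sampleType}-{seq:04d}" with prefix "WB":
#
#   >>> get_seq_number_from_id("WB-0012", "{sampleType}-{seq:04d}", "WB")
#   12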


def get_alpha_or_number(number, template):
    """Returns an Alphanumber that represents the number passed in, expressed
    as defined in the template. Otherwise, returns the number
    """
    match = re.match(r".*\{alpha:(\d+a\d+d)\}$", template.strip())
    if match and match.groups():
        format = match.groups()[0]
        return to_alpha(number, format)
    return number
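
# A template ending in an alpha segment switches to alphanumeric counting; the
# exact rendering comes from bika.lims.alphanumber, roughly:
#
#   >>> get_alpha_or_number(12, "WS-{seq:03d}")
#   12
#   >>> str(get_alpha_or_number(3, "WS-{alpha:2a3d}"))
#   'AA003'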


def get_counted_number(context, config, variables, **kw):
    """Compute the number for the sequence type "Counter"
    """
    # This "context" is defined by the user in the Setup and can actually be
    # anything. However, we assume it is something like "sample" or similar
    ctx = config.get("context")

    # get object behind the context name (falls back to the current context)
    obj = variables.get(ctx, context)

    # get the counter type, which is either "backreference" or "contained"
    counter_type = config.get("counter_type")

    # the counter reference is either the "relationship" for
    # "backreference" or the meta type for contained objects
    counter_reference = config.get("counter_reference")

    # This should be a list of existing items, including the current context
    # object
    seq_items = get_objects_in_sequence(obj, counter_type, counter_reference)

    number = len(seq_items)
    return number


def get_generated_number(context, config, variables, **kw):
    """Generate a new persistent number with the number generator for the
    sequence type "Generated"
    """
    # separator where to split the ID
    separator = kw.get('separator', '-')

    # allow portal_type override
    portal_type = get_type_id(context, **kw)

    # The ID format for string interpolation, e.g. WS-{seq:03d}
    id_template = config.get("form", "")

    # The split length defines where the key is split off from the value
    split_length = config.get("split_length", 1)

    # The prefix template is the static part of the ID
    prefix_template = slice(id_template, separator=separator, end=split_length)

    # get the number generator
    number_generator = getUtility(INumberGenerator)

    # generate the key for the number generator storage
    prefix = prefix_template.format(**variables)

    # normalize out any unicode characters like Ö, É, etc. from the prefix
    prefix = api.normalize_filename(prefix)

    # The key used for the storage
    key = make_storage_key(portal_type, prefix)

    # Handle flushed storage
    if key not in number_generator:
        max_num = 0
        existing = get_ids_with_prefix(portal_type, prefix)
        numbers = map(lambda id: get_seq_number_from_id(id, id_template, prefix), existing)
        # figure out the highest number in the sequence
        if numbers:
            max_num = max(numbers)
        # set the number generator
        logger.info("*** SEEDING Prefix '{}' to {}".format(prefix, max_num))
        number_generator.set_number(key, max_num)

    if not kw.get("dry_run", False):
        # Generate a new number
        # NOTE: Even when the number exceeds the given ID sequence format,
        # it will overflow gracefully, e.g.
        # >>> '{sampleId}-R{seq:03d}'.format(sampleId="Water", seq=999999)
        # 'Water-R999999'
        number = number_generator.generate_number(key=key)
    else:
        # => This allows us to "preview" the next generated ID in the UI
        # TODO: Show the user the next generated number somewhere in the UI
        number = number_generator.get(key, 1)

    # Return an int or Alphanumber
    return get_alpha_or_number(number, id_template)


def generateUniqueId(context, **kw):
    """ Generate pretty content IDs.
    """

    # get the config for this portal type from the system setup
    config = get_config(context, **kw)

    # get the variables map for later string interpolation
    variables = get_variables(context, **kw)

    # The newly generated sequence number
    number = 0

    # get the sequence type from the global config
    sequence_type = config.get("sequence_type", "generated")

    # Sequence Type is "Counter", so we use the length of the backreferences or
    # contained objects of the evaluated "context" defined in the config
    if sequence_type in ["counter"]:
        number = get_counted_number(context, config, variables, **kw)

    # Sequence Type is "Generated", so the ID is constructed according to the
    # configured split length
    if sequence_type in ["generated"]:
        number = get_generated_number(context, config, variables, **kw)

    # store the new sequence number to the variables map for str interpolation
    if isinstance(number, Alphanumber):
        variables["alpha"] = number
    variables["seq"] = to_int(number)

    # The ID formatting template from the user config, e.g. {sampleId}-R{seq:02d}
    id_template = config.get("form", "")

    # Interpolate the ID template
    try:
        new_id = id_template.format(**variables)
    except KeyError as e:
        logger.error('KeyError: {} not in id_template {}'.format(
            e, id_template))
        raise
    normalized_id = api.normalize_filename(new_id)
    logger.info("generateUniqueId: {}".format(normalized_id))

    return normalized_id
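
# The interpolation itself is plain str.format over the variables map, e.g.
# with a hypothetical template and values:
#
#   >>> "{sampleType}-{year}-{seq:04d}".format(sampleType="WB", year="19", seq=42)
#   'WB-19-0042'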


def renameAfterCreation(obj):
    """Rename the content after it was created/added
    """
    # Can't rename without a subtransaction commit when using portal_factory
    transaction.savepoint(optimistic=True)
    # The id returned should be normalized already
    new_id = None
    # Check if an ID server adapter exists for this content type; if so, the
    # new_id is taken from the adapter.
    for name, adapter in getAdapters((obj, ), IIdServer):
        if new_id:
            logger.warn(('More than one ID Generator Adapter found for '
                         'content type -> %s') % obj.portal_type)
        new_id = adapter.generate_id(obj.portal_type)
    if not new_id:
        new_id = generateUniqueId(obj)

    # TODO: This is a naive check just in the current folder
    # -> this should check globally for duplicate objects with the same prefix
    # N.B. a check like `search_by_prefix` each time would probably slow things
    # down too much!
    # -> A solution could be to store all IDs with a certain prefix in a storage
    parent = api.get_parent(obj)
    if new_id in parent.objectIds():
        # XXX We could do the check in a `while` loop and generate a new one.
        raise KeyError("The ID {} is already taken in the path {}".format(
            new_id, api.get_path(parent)))
    # rename the object to the new id
    parent.manage_renameObject(obj.id, new_id)

    return new_id