Passed
Push — master ( ae6efc...6ec3c6 )
by Konstantin
02:55
created

ocrd_utils.str.get_local_filename()   B

Complexity

Conditions 8

Size

Total Lines 22
Code Lines 14

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
eloc 14
dl 0
loc 22
rs 7.3333
c 0
b 0
f 0
cc 8
nop 2
"""
Utility functions for strings, paths and URL.
"""

import json
import re
from typing import List, Union
from warnings import warn

from numpy import array_split

from .constants import REGEX_FILE_ID, SPARKLINE_CHARS
from .deprecate import deprecation_warning

# Public API of this module; keep alphabetically sorted.
__all__ = [
    'assert_file_grp_cardinality',
    'concat_padded',
    'generate_range',
    'get_local_filename',
    'is_local_filename',
    'is_string',
    'make_file_id',
    'nth_url_segment',
    'parse_json_string_or_file',
    'parse_json_string_with_comments',
    'partition_list',
    'remove_non_path_from_url',
    'safe_filename',
    'sparkline',
]
def assert_file_grp_cardinality(grps, n, msg=None):
    """
    Assert that a string of comma-separated fileGrps contains exactly ``n`` entries.

    Arguments:
        grps (string|list): fileGrp names, as a list or a comma-separated string
        n (int): expected number of fileGrps
        msg (string): optional extra context appended to the error message

    Raises:
        AssertionError: if the number of fileGrps differs from ``n``
    """
    if isinstance(grps, str):
        grps = grps.split(',')
    # Raise explicitly instead of using the ``assert`` statement, so the
    # check is not silently stripped when running under ``python -O``.
    if len(grps) != n:
        raise AssertionError(
            "Expected exactly %d output file group%s%s, but '%s' has %d" % (
                n,
                '' if n == 1 else 's',
                ' (%s)' % msg if msg else '',
                grps,
                len(grps)
            ))
def concat_padded(base, *args):
    """
    Concatenate ``base`` and each further argument with underscores.

    String arguments are appended verbatim; any other argument is
    zero-padded to (at least) 4 digits via ``%04i``.
    """
    if not args:
        return base
    suffix = '_'.join(
        arg if isinstance(arg, str) else '%04i' % arg
        for arg in args)
    return '%s_%s' % (base, suffix)
def remove_non_path_from_url(url):
    """
    Remove everything from URL after path.

    Strips the query string, the fragment identifier and any trailing
    slashes.
    """
    for separator in ('?', '#'):
        url = url.partition(separator)[0]
    return url.rstrip('/')
def make_file_id(ocrd_file, output_file_grp):
    """
    Derive a new file ID for an output file from an existing input file ``ocrd_file``
    and the name of the output file's ``fileGrp/@USE``, ``output_file_grp``.

    If ``ocrd_file``'s ID contains the input file's fileGrp name, then replace it by
    ``output_file_grp``. Else if ``ocrd_file`` has a ``pageId`` but it is not contained
    in the ``ocrd_file.ID``, then concatenate ``output_file_grp`` and ``ocrd_file.pageId``.
    Otherwise concatenate ``output_file_grp`` with the ``ocrd_file.ID``.

    Note: ``make_file_id`` cannot guarantee that the new ID is unique within an actual
    :py:class:`ocrd_models.ocrd_mets.OcrdMets`.
    The caller is responsible for ensuring uniqueness of files to be added.
    Ultimately, ID conflicts will lead to :py:meth:`ocrd_models.ocrd_mets.OcrdMets.add_file`
    raising an exception.
    This can be avoided if all processors use ``make_file_id`` consistently for ID generation.

    Note: ``make_file_id`` generates page-specific IDs. For IDs representing page segments
    or ``pc:AlternativeImage`` files, the output of ``make_file_id`` may need to be
    concatenated with a unique string for that sub-page element, such as `".IMG"` or
    the segment ID.
    """
    # design goals behind this scheme:
    # - uniqueness (in spite of different METS and processor conventions)
    # - predictability (output name can be anticipated from the input name)
    # - stability (output at least as sorted/consistent as the input)
    # ... all this in spite of --page-id selection and --overwrite
    # (--overwrite should target the existing ID, and input vs output IDs
    #  should differ, except when overwriting the input fileGrp itself)
    new_id = ocrd_file.ID.replace(ocrd_file.fileGrp, output_file_grp)
    if new_id == ocrd_file.ID and output_file_grp != ocrd_file.fileGrp:
        # the fileGrp name was not part of the ID, so prefix instead
        if ocrd_file.pageId and ocrd_file.pageId not in ocrd_file.ID:
            new_id = output_file_grp + '_' + ocrd_file.pageId
        else:
            new_id = output_file_grp + '_' + ocrd_file.ID
    # coerce the result into a valid xml:id if necessary:
    # no colons, must not start with a non-letter, only word chars/dots/dashes
    if not REGEX_FILE_ID.fullmatch(new_id):
        new_id = new_id.replace(':', '_')
        new_id = re.sub(r'^([^a-zA-Z_])', r'id_\1', new_id)
        new_id = re.sub(r'[^\w.-]', r'', new_id)
    return new_id
def nth_url_segment(url, n=-1):
    """
    Return the n-th /-delimited segment of a URL-like string (default: the last).

    Arguments:
        url (string): URL-like string
        n (integer): index of segment, default: -1

    Returns an empty string if ``n`` is out of range.
    """
    # strip query, fragment identifier and trailing slashes first
    # (inlined from remove_non_path_from_url)
    path = url.split('?', 1)[0]
    path = path.split('#', 1)[0]
    path = re.sub(r"/+$", "", path)
    segments = path.split('/')
    try:
        return segments[n]
    except IndexError:
        return ''
def get_local_filename(url, start=None):
    """
    Return local filename, optionally relative to ``start``

    Arguments:
        url (string): filename or URL
        start (string): Base path to remove from filename (a trailing
            slash is implied). Raise an exception if not a prefix of url.

    Raises:
        ValueError: if ``url`` is a http(s) URL, or ``start`` is not a
            path prefix of ``url``
    """
    if url.startswith('https://') or url.startswith('http:'):
        raise ValueError("Can't determine local filename of http(s) URL")
    if url.startswith('file://'):
        url = url[len('file://'):]
    # Goobi/Kitodo produces those, they are always absolute
    if url.startswith('file:/'):
        url = url[len('file:'):]
    if start:
        # Normalize the prefix to end in '/' BEFORE the prefix check.
        # The original checked first and normalized afterwards, so e.g.
        # start='/a/b' matched url='/a/bfoo' and then silently stripped
        # a character of the basename ('oo' instead of an error).
        if not start.endswith('/'):
            start += '/'
        if not url.startswith(start):
            raise ValueError("Cannot remove prefix %s from url %s" % (start, url))
        url = url[len(start):]
    return url
def is_local_filename(url):
    """
    Whether a url is a local filename.
    """
    # deprecation_warning("Deprecated so we spot inconsistent URL/file handling")
    if url.startswith('file://'):
        return True
    return '://' not in url
def is_string(val):
    """
    Check whether ``val`` is an instance of ``str``.
    """
    return isinstance(val, str)
def parse_json_string_with_comments(val):
    """
    Parse a string of JSON interspersed with #-prefixed full-line comments.
    """
    # blank out the comment lines, then hand the remainder to the JSON parser
    without_comments = re.sub(r'^\s*#.*$', '', val, flags=re.MULTILINE)
    return json.loads(without_comments)
def parse_json_string_or_file(*values):    # pylint: disable=unused-argument
    """
    Parse each value as either the path to a JSON object or a literal JSON
    object, and merge the results into one dict.

    Empty strings are equivalent to '{}'

    Raises:
        ValueError: if a value parses to something other than a JSON
            object, or cannot be parsed at all
    """
    merged = {}
    for value in values:
        # blank values contribute nothing
        if re.fullmatch(r"\s*", value):
            continue
        error = None
        parsed = None
        try:
            try:
                # first interpretation: a path to a JSON file
                with open(value, 'r') as file_handle:
                    parsed = parse_json_string_with_comments(file_handle.read())
            except OSError:
                # fallback: a literal JSON string
                parsed = parse_json_string_with_comments(value.strip())
            if not isinstance(parsed, dict):
                error = ValueError("Not a valid JSON object: '%s' (parsed as '%s')" % (value, parsed))
        except json.decoder.JSONDecodeError as e:
            error = ValueError("Error parsing '%s': %s" % (value, e))
        if error:
            raise error       # pylint: disable=raising-bad-type
        merged.update(parsed)
    return merged
def safe_filename(url):
    """
    Sanitize input to be safely used as the basename of a local file.

    Every run of non-word characters is collapsed into a single underscore,
    so the result contains only ``[A-Za-z0-9_]`` (plus unicode letters).
    """
    # NOTE: [^\w]+ also replaces every '.', so the original follow-up
    # substitutions for leading/duplicate dots could never match (dead
    # code) and have been dropped.
    return re.sub(r'[^\w]+', '_', url)
def generate_range(start, end):
    """
    Generate a list of strings by incrementing the number part of ``start``
    until including ``end``.

    Arguments:
        start (string): first element of the range, e.g. ``page0001``
        end (string): last element of the range; only its (last) numeric
            part is relevant, e.g. ``page0005``

    Raises:
        ValueError: if ``start`` or ``end`` contains no numeric part

    Returns:
        list of strings, with the last numeric part of ``start`` counted up
        to (and including) the numeric part of ``end``, zero-padded to the
        original width
    """
    try:
        start_num = re.findall(r'\d+', start)[-1]
        end_num = re.findall(r'\d+', end)[-1]
    except IndexError:
        raise ValueError("Range '%s..%s': could not find numeric part" % (start, end))
    if start_num == end_num:
        # was: warn("Range '%s..%s': evaluates to the same number") --
        # the % (start, end) arguments were missing, so the raw placeholders
        # leaked into the warning text
        warn("Range '%s..%s': evaluates to the same number" % (start, end))
    # Substitute only the LAST numeric part (the one matched by findall
    # above); the previous str.replace call substituted EVERY occurrence,
    # mangling inputs like 'p1_1'.
    prefix, _, suffix = start.rpartition(start_num)
    width = len(start_num)
    return [prefix + str(i).zfill(width) + suffix
            for i in range(int(start_num), int(end_num) + 1)]
def partition_list(lst, chunks, chunk_index=None):
    """
    Partition a list into roughly equally-sized chunks

    Args:
        lst (list): list to partition
        chunks (int): number of chunks to generate (not per chunk!)

    Keyword Args:
        chunk_index (None|int): If provided, return only a list consisting of this chunk

    Returns:
        list(list())
    """
    if not lst:
        return []
    # numpy.array_split would happily return empty chunks when there are
    # more chunks than elements, which is problematic in the ocr-d scope
    if chunks > len(lst):
        raise ValueError("Amount of chunks bigger than list size")
    partitions = [chunk.tolist() for chunk in array_split(lst, chunks)]
    if chunk_index is None:
        return partitions
    return [partitions[chunk_index]]
def sparkline(values : List[int]) -> str:
    """
    Render a list of points with block characters

    Returns an empty string for empty input or for input containing
    anything but non-negative numbers: better not to output a sparkline
    than to cancel execution due to problematic input.
    """
    # (isinstance already rejects None, so no separate None check needed)
    if not values or any(not isinstance(x, (int, float)) or x < 0 for x in values):
        return ''
    max_value = max(values)
    if max_value == 0:
        # all values are 0: avoid ZeroDivisionError and render the lowest bar
        return SPARKLINE_CHARS[0] * len(values)
    max_mapping = len(SPARKLINE_CHARS) - 1
    # normalize to 0..1 and convert to index in SPARKLINE_CHARS
    mapped = [int(x / max_value * max_mapping) for x in values]
    return ''.join(SPARKLINE_CHARS[x] for x in mapped)