Passed
Pull Request — master (#623)
by Osma
02:31
created

annif.cli   F

Complexity

Total Complexity 61

Size/Duplication

Total Lines 622
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
wmc 61
eloc 447
dl 0
loc 622
rs 3.52
c 0
b 0
f 0

21 Functions

Rating   Name   Duplication   Size   Complexity  
A validate_backend_params() 0 6 2
A backend_param_option() 0 6 1
A set_project_config_file_path() 0 5 3
A generate_filter_batches() 0 9 3
A get_vocab() 0 12 2
A common_options() 0 9 1
A get_project() 0 11 2
A run_clear_project() 0 9 1
A open_documents() 0 26 5
A parse_backend_params() 0 10 2
A run_list_vocabs() 0 25 3
A run_list_projects() 0 23 2
A run_show_project() 0 17 1
A run_hyperopt() 0 41 2
A run_train() 0 36 3
B run_optimize() 0 74 7
A run_load_vocab() 0 30 4
A run_suggest() 0 27 2
A run_learn() 0 21 1
B run_index() 0 46 7
C run_eval() 0 100 7

How to fix   Complexity   

Complexity

Complex modules like annif.cli often do a lot of different things. To break such a module down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for functions that share the same prefixes or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.

1
"""Definitions for command-line (Click) commands for invoking Annif
2
operations and printing the results to console."""
3
4
5
import collections
6
import os.path
7
import re
8
import sys
9
import json
10
import click
11
import click_log
12
from flask import current_app
13
from flask.cli import FlaskGroup, ScriptInfo
14
import annif
15
import annif.corpus
16
import annif.parallel
17
import annif.project
18
import annif.registry
19
from annif.project import Access
20
from annif.suggestion import SuggestionFilter, ListSuggestionResult
21
from annif.exception import ConfigurationException, NotSupportedException
22
from annif.exception import NotInitializedException
23
from annif.util import metric_code
24
25
logger = annif.logger
26
click_log.basic_config(logger)
27
28
cli = FlaskGroup(create_app=annif.create_app, add_version_option=False)
29
cli = click.version_option(message='%(version)s')(cli)
30
31
32
def get_project(project_id):
    """
    Helper function to get a project by ID and bail out if it doesn't
    exist"""
    try:
        return annif.registry.get_project(project_id,
                                          min_access=Access.private)
    except ValueError:
        # f-string for consistency with get_vocab(); the old .format() call
        # used needlessly escaped quotes. The rendered message is unchanged.
        click.echo(
            f"No projects found with id '{project_id}'.",
            err=True)
        sys.exit(1)
43
44
45
def get_vocab(vocab_id):
    """
    Helper function to get a vocabulary by ID and bail out if it doesn't
    exist"""
    try:
        vocab = annif.registry.get_vocab(vocab_id,
                                         min_access=Access.private)
    except ValueError:
        # Unknown vocabulary: report on stderr and terminate the CLI.
        click.echo(
            f"No vocabularies found with the id '{vocab_id}'.",
            err=True)
        sys.exit(1)
    else:
        return vocab
57
58
59
def open_documents(paths, subject_index, vocab_lang, docs_limit):
    """Helper function to open a document corpus from a list of pathnames,
    each of which is either a TSV file or a directory of TXT files. For
    directories with subjects in TSV files, the given vocabulary language
    will be used to convert subject labels into URIs. The corpus will be
    returned as an instance of DocumentCorpus or LimitingDocumentCorpus."""

    def _open_single(path):
        """open one path and return it as a DocumentCorpus"""
        if not os.path.isdir(path):
            return annif.corpus.DocumentFile(path, subject_index)
        return annif.corpus.DocumentDirectory(path, subject_index,
                                              vocab_lang,
                                              require_subjects=True)

    if not paths:
        # No corpus given: fall back to the null device (empty corpus).
        logger.warning('Reading empty file')
        docs = _open_single(os.path.devnull)
    elif len(paths) == 1:
        docs = _open_single(paths[0])
    else:
        # Several paths are merged into a single combined corpus.
        docs = annif.corpus.CombinedCorpus(
            [_open_single(path) for path in paths])
    if docs_limit is None:
        return docs
    return annif.corpus.LimitingDocumentCorpus(docs, docs_limit)
85
86
87
def parse_backend_params(backend_param, project):
    """Parse a list of backend parameters given with the --backend-param
    option into a nested dict structure"""
    parsed = collections.defaultdict(dict)
    for beparam in backend_param:
        # Each value looks like "<backend>.<key>=<value>".
        backend, param = beparam.split('.', 1)
        key, val = param.split('=', 1)
        validate_backend_params(backend, beparam, project)
        parsed[backend][key] = val
    return parsed
97
98
99
def validate_backend_params(backend, beparam, project):
    """Ensure that a --backend-param option targets the backend configured
    for the project; raise ConfigurationException otherwise."""
    project_backend = project.config['backend']
    if backend == project_backend:
        return
    raise ConfigurationException(
        'The backend {} in CLI option "-b {}" not matching the project'
        ' backend {}.'
        .format(backend, beparam, project_backend))
105
106
107
BATCH_MAX_LIMIT = 15
108
109
110
def generate_filter_batches(subjects):
    """Build an OrderedDict mapping every (limit, threshold) parameter
    combination to a (SuggestionFilter, EvaluationBatch) pair."""
    import annif.eval
    batches = collections.OrderedDict()
    for limit in range(1, BATCH_MAX_LIMIT + 1):
        # thresholds 0.00, 0.05, ..., 0.95
        for step in range(20):
            threshold = step * 0.05
            batches[(limit, threshold)] = (
                SuggestionFilter(subjects, limit, threshold),
                annif.eval.EvaluationBatch(subjects))
    return batches
119
120
121
def set_project_config_file_path(ctx, param, value):
    """Override the default path or the path given in env by CLI option"""
    # Loading the Flask app is required before its config can be touched.
    app = ctx.ensure_object(ScriptInfo).load_app()
    with app.app_context():
        if value:
            current_app.config['PROJECTS_CONFIG_PATH'] = value
126
127
128
def common_options(f):
    """Decorator to add common options for all CLI commands"""
    projects_option = click.option(
        '-p', '--projects',
        help='Set path to project configuration file or directory',
        type=click.Path(dir_okay=True, exists=True),
        callback=set_project_config_file_path, expose_value=False,
        is_eager=True)
    # Apply the projects option first, then the verbosity option on top.
    return click_log.simple_verbosity_option(logger)(projects_option(f))
137
138
139
def backend_param_option(f):
    """Decorator to add an option for CLI commands to override BE parameters"""
    option = click.option(
        '--backend-param', '-b', multiple=True,
        help='Override backend parameter of the config file. ' +
        'Syntax: `-b <backend>.<parameter>=<value>`.')
    return option(f)
145
146
147
@cli.command('list-projects')
@common_options
@click_log.simple_verbosity_option(logger, default='ERROR')
def run_list_projects():
    """
    List available projects.
    \f
    Show a list of currently defined projects. Projects are defined in a
    configuration file, normally called ``projects.cfg``. See `Project
    configuration
    <https://github.com/NatLibFi/Annif/wiki/Project-configuration>`_
    for details.
    """

    # Fixed-width columns so the listing lines up as a table.
    template = "{0: <25}{1: <45}{2: <10}{3: <7}"
    header = template.format(
        "Project ID", "Project Name", "Language", "Trained")
    click.echo(header)
    click.echo("-" * len(header))
    projects = annif.registry.get_projects(min_access=Access.private)
    for proj in projects.values():
        row = template.format(
            proj.project_id, proj.name, proj.language, str(proj.is_trained))
        click.echo(row)
170
171
172
@cli.command('show-project')
@click.argument('project_id')
@common_options
def run_show_project(project_id):
    """
    Show information about a project.
    """

    proj = get_project(project_id)
    # One aligned "label: value" line per attribute.
    details = [
        f'Project ID:        {proj.project_id}',
        f'Project Name:      {proj.name}',
        f'Language:          {proj.language}',
        f'Vocabulary:        {proj.vocab.vocab_id}',
        f'Vocab language:    {proj.vocab_lang}',
        f'Access:            {proj.access.name}',
        f'Trained:           {proj.is_trained}',
        f'Modification time: {proj.modification_time}',
    ]
    for line in details:
        click.echo(line)
189
190
191
@cli.command('clear')
@click.argument('project_id')
@common_options
def run_clear_project(project_id):
    """
    Initialize the project to its original, untrained state.
    """
    # Look up the project (exits if unknown) and discard its trained model.
    get_project(project_id).remove_model_data()
200
201
202
@cli.command('list-vocabs')
@common_options
@click_log.simple_verbosity_option(logger, default='ERROR')
def run_list_vocabs():
    """
    List available vocabularies.
    """

    template = "{0: <20}{1: <20}{2: >10}  {3: <6}"
    header = template.format(
        "Vocabulary ID", "Languages", "Size", "Loaded")
    click.echo(header)
    click.echo("-" * len(header))
    vocabs = annif.registry.get_vocabs(min_access=Access.private)
    for vocab in vocabs.values():
        try:
            # Accessing languages/size raises if the vocab isn't loaded yet.
            row = template.format(
                vocab.vocab_id,
                ','.join(sorted(vocab.languages)),
                len(vocab),
                str(True))
        except NotInitializedException:
            row = template.format(vocab.vocab_id, '-', '-', str(False))
        click.echo(row)
227
228
229
@cli.command('load-vocab')
@click.argument('vocab_id')
@click.argument('subjectfile', type=click.Path(exists=True, dir_okay=False))
@click.option('--language', '-L', help='Language of subject file')
@click.option('--force', '-f', default=False, is_flag=True,
              help='Replace existing vocabulary completely ' +
                   'instead of updating it')
@common_options
def run_load_vocab(vocab_id, language, force, subjectfile):
    """
    Load a vocabulary from a subject file.
    """
    vocab = get_vocab(vocab_id)
    if annif.corpus.SubjectFileSKOS.is_rdf_file(subjectfile):
        # A SKOS/RDF serialization that rdflib can parse.
        subject_corpus = annif.corpus.SubjectFileSKOS(subjectfile)
        click.echo(f"Loading vocabulary from SKOS file {subjectfile}...")
    elif annif.corpus.SubjectFileCSV.is_csv_file(subjectfile):
        # CSV carries its own language information.
        subject_corpus = annif.corpus.SubjectFileCSV(subjectfile)
        click.echo(f"Loading vocabulary from CSV file {subjectfile}...")
    else:
        # Assume TSV, which requires an explicitly given language.
        if not language:
            click.echo("Please use --language option to set the language of " +
                       "a TSV vocabulary.", err=True)
            sys.exit(1)
        click.echo(f"Loading vocabulary from TSV file {subjectfile}...")
        subject_corpus = annif.corpus.SubjectFileTSV(subjectfile, language)
    vocab.load_vocabulary(subject_corpus, force=force)
259
260
261
@cli.command('train')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--cached/--no-cached', '-c/-C', default=False,
              help='Reuse preprocessed training data from previous run')
@click.option('--docs-limit', '-d', default=None,
              type=click.IntRange(0, None),
              help='Maximum number of documents to use')
@click.option('--jobs',
              '-j',
              default=0,
              help='Number of parallel jobs (0 means choose automatically)')
@backend_param_option
@common_options
def run_train(project_id, paths, cached, docs_limit, jobs, backend_param):
    """
    Train a project on a collection of documents.
    \f
    This will train the project using the documents from ``PATHS`` (directories
    or possibly gzipped TSV files) in a single batch operation. If ``--cached``
    is set, preprocessed training data from the previous run is reused instead
    of documents input; see `Reusing preprocessed training data
    <https://github.com/NatLibFi/Annif/wiki/
    Reusing-preprocessed-training-data>`_.
    """
    proj = get_project(project_id)
    backend_params = parse_backend_params(backend_param, proj)
    if not cached:
        documents = open_documents(paths, proj.subjects,
                                   proj.vocab_lang, docs_limit)
    else:
        # --cached reuses the previous run's data; giving paths is an error.
        if paths:
            raise click.UsageError(
                "Corpus paths cannot be given when using --cached option.")
        documents = 'cached'
    proj.train(documents, backend_params, jobs)
297
298
299
@cli.command('learn')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
              type=click.IntRange(0, None),
              help='Maximum number of documents to use')
@backend_param_option
@common_options
def run_learn(project_id, paths, docs_limit, backend_param):
    """
    Further train an existing project on a collection of documents.
    \f
    Similar to the ``train`` command. This will continue training an already
    trained project using the documents given by ``PATHS`` in a single batch
    operation. Not supported by all backends.
    """
    proj = get_project(project_id)
    params = parse_backend_params(backend_param, proj)
    corpus = open_documents(paths, proj.subjects,
                            proj.vocab_lang, docs_limit)
    proj.learn(corpus, params)
320
321
322
@cli.command('suggest')
@click.argument('project_id')
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@backend_param_option
@common_options
def run_suggest(project_id, limit, threshold, backend_param):
    """
    Suggest subjects for a single document from standard input.
    \f
    This will read a text document from standard input and suggest subjects for
    it.
    """
    project = get_project(project_id)
    text = sys.stdin.read()
    backend_params = parse_backend_params(backend_param, project)
    hit_filter = SuggestionFilter(project.subjects, limit, threshold)
    suggestions = hit_filter(project.suggest(text, backend_params))
    for hit in suggestions.as_list():
        subj = project.subjects[hit.subject_id]
        # Label and notation joined by tabs; empty components are dropped.
        labels = '\t'.join(
            filter(None, (subj.labels[project.vocab_lang], subj.notation)))
        click.echo("<{}>\t{}\t{}".format(subj.uri, labels, hit.score))
349
350
351
@cli.command('index')
@click.argument('project_id')
@click.argument('directory', type=click.Path(exists=True, file_okay=False))
@click.option(
    '--suffix',
    '-s',
    default='.annif',
    help='File name suffix for result files')
@click.option('--force/--no-force', '-f/-F', default=False,
              help='Force overwriting of existing result files')
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@backend_param_option
@common_options
def run_index(project_id, directory, suffix, force,
              limit, threshold, backend_param):
    """
    Index a directory with documents, suggesting subjects for each document.
    Write the results in TSV files with the given suffix (``.annif`` by
    default).
    """
    project = get_project(project_id)
    backend_params = parse_backend_params(backend_param, project)
    hit_filter = SuggestionFilter(project.subjects, limit, threshold)

    for docfilename, dummy_subjectfn in annif.corpus.DocumentDirectory(
            directory, project.subjects, project.vocab_lang,
            require_subjects=False):
        subjectfilename = re.sub(r'\.txt$', suffix, docfilename)
        # Check for an existing result file *before* reading the document,
        # so skipped documents are not needlessly read from disk.
        if os.path.exists(subjectfilename) and not force:
            click.echo(
                "Not overwriting {} (use --force to override)".format(
                    subjectfilename))
            continue
        with open(docfilename, encoding='utf-8-sig') as docfile:
            text = docfile.read()
        with open(subjectfilename, 'w', encoding='utf-8') as subjfile:
            results = project.suggest(text, backend_params)
            for hit in hit_filter(results).as_list():
                subj = project.subjects[hit.subject_id]
                # Same TSV format as the `suggest` command output.
                line = "<{}>\t{}\t{}".format(
                    subj.uri,
                    '\t'.join(filter(None, (subj.labels[project.vocab_lang],
                                            subj.notation))),
                    hit.score)
                click.echo(line, file=subjfile)
397
398
399
@cli.command('eval')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@click.option('--docs-limit', '-d', default=None,
              type=click.IntRange(0, None),
              help='Maximum number of documents to use')
@click.option('--metric', '-m', default=[], multiple=True,
              help='Metric to calculate (default: all)')
@click.option(
    '--metrics-file',
    '-M',
    type=click.File(
        'w',
        encoding='utf-8',
        errors='ignore',
        lazy=True),
    help="""Specify file in order to write evaluation metrics in JSON format.
    File directory must exist, existing file will be overwritten.""")
@click.option(
    '--results-file',
    '-r',
    type=click.File(
        'w',
        encoding='utf-8',
        errors='ignore',
        lazy=True),
    help="""Specify file in order to write non-aggregated results per subject.
    File directory must exist, existing file will be overwritten.""")
@click.option('--jobs',
              '-j',
              default=1,
              help='Number of parallel jobs (0 means all CPUs)')
@backend_param_option
@common_options
def run_eval(
        project_id,
        paths,
        limit,
        threshold,
        docs_limit,
        metric,
        metrics_file,
        results_file,
        jobs,
        backend_param):
    """
    Suggest subjects for documents and evaluate the results by comparing
    against a gold standard.
    \f
    With this command the documents from ``PATHS`` (directories or possibly
    gzipped TSV files) will be assigned subject suggestions and then
    statistical measures are calculated that quantify how well the suggested
    subjects match the gold-standard subjects in the documents.

    Normally the output is the list of the metrics calculated across documents.
    If ``--results-file <FILENAME>`` option is given, the metrics are
    calculated separately for each subject, and written to the given file.
    """

    project = get_project(project_id)
    backend_params = parse_backend_params(backend_param, project)

    # Deferred import: annif.eval is only needed by evaluation commands.
    import annif.eval
    eval_batch = annif.eval.EvaluationBatch(project.subjects)

    if results_file:
        # The file is lazy; probe it now so a bad path fails early.
        try:
            print('', end='', file=results_file)
            click.echo('Writing per subject evaluation results to {!s}'.format(
                results_file.name))
        except Exception as e:
            # Chain the original error so the traceback shows the cause.
            raise NotSupportedException(
                "cannot open results-file for writing: " + str(e)) from e
    docs = open_documents(paths, project.subjects,
                          project.vocab_lang, docs_limit)

    jobs, pool_class = annif.parallel.get_pool(jobs)

    project.initialize(parallel=True)
    psmap = annif.parallel.ProjectSuggestMap(
        project.registry, [project_id], backend_params, limit, threshold)

    with pool_class(jobs) as pool:
        for hits, subject_set in pool.imap_unordered(
                psmap.suggest, docs.documents):
            eval_batch.evaluate(hits[project_id],
                                subject_set)

    template = "{0:<30}\t{1}"
    metrics = eval_batch.results(metrics=metric,
                                 results_file=results_file,
                                 language=project.vocab_lang)
    # Distinct loop variable: the original code shadowed the --metric
    # CLI option (`metric`) here.
    for metric_name, score in metrics.items():
        click.echo(template.format(metric_name + ":", score))
    if metrics_file:
        json.dump(
            {metric_code(mname): val for mname, val in metrics.items()},
            metrics_file, indent=2)
499
500
501
@cli.command('optimize')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
              type=click.IntRange(0, None),
              help='Maximum number of documents to use')
@backend_param_option
@common_options
def run_optimize(project_id, paths, docs_limit, backend_param):
    """
    Suggest subjects for documents, testing multiple limits and thresholds.
    \f
    This command will use different limit (maximum number of subjects) and
    score threshold values when assigning subjects to each document given by
    ``PATHS`` and compare the results against the gold standard subjects in the
    documents. The output is a list of parameter combinations and their scores.
    From the output, you can determine the optimum limit and threshold
    parameters depending on which measure you want to target.
    """
    project = get_project(project_id)
    backend_params = parse_backend_params(backend_param, project)

    # One (SuggestionFilter, EvaluationBatch) pair per (limit, threshold)
    # combination; every document is scored against all of them.
    filter_batches = generate_filter_batches(project.subjects)

    ndocs = 0
    docs = open_documents(paths, project.subjects,
                          project.vocab_lang, docs_limit)
    for doc in docs.documents:
        raw_hits = project.suggest(doc.text, backend_params)
        # Pre-trim to the largest limit once; each filter below only narrows.
        hits = raw_hits.filter(project.subjects, limit=BATCH_MAX_LIMIT)
        # NOTE(review): assert is stripped under `python -O`; this is an
        # internal invariant check, not input validation.
        assert isinstance(hits, ListSuggestionResult), \
            "Optimize should only be done with ListSuggestionResult " + \
            "as it would be very slow with VectorSuggestionResult."
        for hit_filter, batch in filter_batches.values():
            batch.evaluate(hit_filter(hits), doc.subject_set)
        ndocs += 1

    click.echo("\t".join(('Limit', 'Thresh.', 'Prec.', 'Rec.', 'F1')))

    # Track, per metric, the best score seen and the parameters achieving it.
    # defaultdict(float) makes the first comparison succeed against 0.0.
    best_scores = collections.defaultdict(float)
    best_params = {}

    template = "{:d}\t{:.02f}\t{:.04f}\t{:.04f}\t{:.04f}"
    # Store the batches in a list that gets consumed along the way
    # This way GC will have a chance to reclaim the memory
    filter_batches = list(filter_batches.items())
    while filter_batches:
        params, filter_batch = filter_batches.pop(0)
        metrics = ['Precision (doc avg)',
                   'Recall (doc avg)',
                   'F1 score (doc avg)']
        results = filter_batch[1].results(metrics=metrics)
        for metric, score in results.items():
            # >= means later (higher-threshold) ties win over earlier ones.
            if score >= best_scores[metric]:
                best_scores[metric] = score
                best_params[metric] = params
        click.echo(
            template.format(
                params[0],
                params[1],
                results['Precision (doc avg)'],
                results['Recall (doc avg)'],
                results['F1 score (doc avg)']))

    click.echo()
    template2 = "Best {:>19}: {:.04f}\tLimit: {:d}\tThreshold: {:.02f}"
    # `metrics` deliberately leaks out of the while-loop above; the batch
    # list is never empty, so it is always defined here.
    for metric in metrics:
        click.echo(
            template2.format(
                metric,
                best_scores[metric],
                best_params[metric][0],
                best_params[metric][1]))
    click.echo("Documents evaluated:\t{}".format(ndocs))
575
576
577
@cli.command('hyperopt')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
              type=click.IntRange(0, None),
              help='Maximum number of documents to use')
@click.option('--trials', '-T', default=10, help='Number of trials')
@click.option('--jobs',
              '-j',
              default=1,
              help='Number of parallel runs (0 means all CPUs)')
@click.option('--metric', '-m', default='NDCG',
              help='Metric to optimize (default: NDCG)')
@click.option(
    '--results-file',
    '-r',
    type=click.File(
        'w',
        encoding='utf-8',
        errors='ignore',
        lazy=True),
    help="""Specify file path to write trial results as CSV.
    File directory must exist, existing file will be overwritten.""")
@common_options
def run_hyperopt(project_id, paths, docs_limit, trials, jobs, metric,
                 results_file):
    """
    Optimize the hyperparameters of a project using validation documents from
    ``PATHS``. Not supported by all backends. Output is a list of trial results
    and a report of the best performing parameters.
    """
    proj = get_project(project_id)
    corpus = open_documents(paths, proj.subjects,
                            proj.vocab_lang, docs_limit)
    click.echo(f"Looking for optimal hyperparameters using {trials} trials")
    # The backend returns a recommendation with the best score and the
    # corresponding configuration lines.
    recommendation = proj.hyperopt(corpus, trials, jobs, metric, results_file)
    click.echo(f"Got best {metric} score {recommendation.score:.4f} with:")
    click.echo("---")
    for line in recommendation.lines:
        click.echo(line)
    click.echo("---")
618
619
620
if __name__ == '__main__':
    # Allow invoking the CLI by running this module directly.
    cli()
622