annif.cli.run_suggest()   Grade: B

Complexity
    Conditions: 6

Size
    Total lines: 51
    Code lines:  38

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric    Value
cc        6
eloc      38
nop       8
dl        0
loc       51
rs        8.0346
c         0
b         0
f         0
How to fix: Long Method, Many Parameters

Long Method

Small methods are easier to understand, especially when combined with a good name. And when a method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.

Commonly applied refactorings include:

- Extract Method: move a coherent, nameable piece of the body into its own method.
- Decompose Conditional: extract the condition and each branch of a complicated conditional into separate, well-named methods.
- Replace Method with Method Object: when many local variables make extraction awkward, turn the method into its own class so the locals become fields.
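As a rough illustration (a sketch only, not code from annif.cli; the helper names are invented), Extract Method applied to a command body shaped like run_suggest's could pull each branch into a small, nameable helper, leaving the top-level body as a readable summary:

    import sys

    import click

    from annif import cli_util
    from annif.corpus import Document


    def _suggest_for_files(project, paths, limit, threshold, lang, backend_params):
        # The file-path branch of the original command body, now with a name.
        docs = cli_util.open_text_documents(paths, docs_limit=None)
        results = project.suggest_corpus(docs, backend_params).filter(limit, threshold)
        for suggestions, path in zip(results, paths):
            click.echo(f"Suggestions for {path}")
            cli_util.show_hits(suggestions, project, lang)


    def _suggest_for_stdin(project, metadata, limit, threshold, lang, backend_params):
        # The standard-input branch, including the --metadata handling.
        text = sys.stdin.read()
        doc_metadata = cli_util.parse_metadata(metadata)
        suggestions = project.suggest(
            [Document(text=text, metadata=doc_metadata)], backend_params
        ).filter(limit, threshold)[0]
        cli_util.show_hits(suggestions, project, lang)


    def run_suggest_body(project, paths, metadata, limit, threshold, lang, backend_params):
        # The remaining body states what the command does, branch by branch.
        if paths and not (len(paths) == 1 and paths[0] == "-"):
            _suggest_for_files(project, paths, limit, threshold, lang, backend_params)
        else:
            _suggest_for_stdin(project, metadata, limit, threshold, lang, backend_params)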

Many Parameters

Methods with many parameters are not only hard to understand; their parameter lists also tend to grow and become inconsistent as callers need more, or different, data.

There are several approaches to avoiding long parameter lists:

- Introduce Parameter Object: group parameters that naturally belong together into a single object.
- Preserve Whole Object: pass the whole object instead of extracting several of its values and passing them individually.
- Replace Parameter with Method Call: let the method obtain a value itself when it can, rather than receiving it as a parameter.
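For example, a parameter object could bundle the limit/threshold/language options that several commands in this module pass around as separate values. This is a hypothetical sketch; SuggestOptions and the helper functions below do not exist in annif.cli, and the defaults simply mirror the option defaults used by the commands (limit 10, threshold 0.0):

    from dataclasses import dataclass
    from typing import Optional


    @dataclass(frozen=True)
    class SuggestOptions:
        """Filtering options that suggest, index and index-file currently take as
        three separate parameters."""

        limit: int = 10
        threshold: float = 0.0
        language: Optional[str] = None


    def resolve_language(options: SuggestOptions, vocab_lang: str) -> str:
        # One place for the "fall back to the vocabulary language" rule that is
        # repeated in run_suggest, run_index and run_index_file.
        return options.language or vocab_lang


    def filter_results(results, options: SuggestOptions):
        # A single object travels through the call chain, so adding a new
        # filtering option later does not ripple through every signature.
        return results.filter(options.limit, options.threshold)

For reference, the full source of the analyzed module (annif/cli.py) follows.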

"""Definitions for command-line (Click) commands for invoking Annif
operations and printing the results to console."""

import collections
import importlib
import json
import os.path
import re
import sys

import click
import click_log
from flask.cli import FlaskGroup

import annif
import annif.parallel
import annif.project
import annif.registry
from annif import cli_util, hfh_util
from annif.corpus import Document, DocumentDirectory
from annif.exception import (
    NotInitializedException,
    NotSupportedException,
    OperationFailedException,
)
from annif.project import Access
from annif.simplemma_util import detect_language
from annif.util import metric_code, suggestion_to_dict

logger = annif.logger
click_log.basic_config(logger)

create_app = annif.create_flask_app
cli = FlaskGroup(
    create_app=create_app, add_default_commands=False, add_version_option=False
)
cli = click.version_option(message="%(version)s")(cli)
cli.params = [opt for opt in cli.params if opt.name not in ("env_file", "app")]


@cli.command("list-projects")
@cli_util.common_options
@click_log.simple_verbosity_option(logger, default="ERROR")
def run_list_projects():
    """
    List available projects.
    \f
    Show a list of currently defined projects. Projects are defined in a
    configuration file, normally called ``projects.cfg``. See `Project
    configuration
    <https://github.com/NatLibFi/Annif/wiki/Project-configuration>`_
    for details.
    """

    column_headings = (
        "Project ID",
        "Project Name",
        "Vocabulary ID",
        "Language",
        "Trained",
        "Modification time",
    )
    table = [
        (
            proj.project_id,
            proj.name,
            proj.vocab.vocab_id if proj.vocab_spec else "-",
            proj.language,
            str(proj.is_trained),
            cli_util.format_datetime(proj.modification_time),
        )
        for proj in annif.registry.get_projects(min_access=Access.private).values()
    ]
    template = cli_util.make_list_template(column_headings, *table)
    header = template.format(*column_headings)
    click.echo(header)
    click.echo("-" * len(header))
    for row in table:
        click.echo(template.format(*row))


@cli.command("show-project")
@cli_util.project_id
@cli_util.common_options
def run_show_project(project_id):
    """
    Show information about a project.
    """

    proj = cli_util.get_project(project_id)
    click.echo(f"Project ID:        {proj.project_id}")
    click.echo(f"Project Name:      {proj.name}")
    click.echo(f"Language:          {proj.language}")
    click.echo(f"Vocabulary:        {proj.vocab.vocab_id}")
    click.echo(f"Vocab language:    {proj.vocab_lang}")
    click.echo(f"Access:            {proj.access.name}")
    click.echo(f"Backend:           {proj.backend.name}")
    click.echo(f"Trained:           {proj.is_trained}")
    click.echo(f"Modification time: {cli_util.format_datetime(proj.modification_time)}")


@cli.command("clear")
@cli_util.project_id
@cli_util.common_options
def run_clear_project(project_id):
    """
    Initialize the project to its original, untrained state.
    """
    proj = cli_util.get_project(project_id)
    proj.remove_model_data()


@cli.command("list-vocabs")
@cli_util.common_options
@click_log.simple_verbosity_option(logger, default="ERROR")
def run_list_vocabs():
    """
    List available vocabularies.
    """

    column_headings = ("Vocabulary ID", "Languages", "Size", "Loaded")
    table = []
    for vocab in annif.registry.get_vocabs(min_access=Access.private).values():
        try:
            languages = ",".join(sorted(vocab.languages))
            size = len(vocab)
            loaded = True
        except NotInitializedException:
            languages = "-"
            size = "-"
            loaded = False
        row = (vocab.vocab_id, languages, str(size), str(loaded))
        table.append(row)

    template = cli_util.make_list_template(column_headings, *table)
    header = template.format(*column_headings)
    click.echo(header)
    click.echo("-" * len(header))
    for row in table:
        click.echo(template.format(*row))


@cli.command("load-vocab")
@click.argument("vocab_id", shell_complete=cli_util.complete_param)
@click.argument("vocab_file", type=click.Path(exists=True, dir_okay=False))
@click.option("--language", "-L", help="Language of TSV vocabulary file")
@click.option(
    "--force",
    "-f",
    default=False,
    is_flag=True,
    help="Replace existing vocabulary completely instead of updating it",
)
@cli_util.common_options
def run_load_vocab(vocab_id, language, force, vocab_file):
    """
    Load a vocabulary from a subject file.
    """
    vocab = cli_util.get_vocab(vocab_id)
    if annif.vocab.VocabFileSKOS.is_rdf_file(vocab_file):
        # SKOS/RDF file supported by rdflib
        vocab_file = annif.vocab.VocabFileSKOS(vocab_file)
        click.echo(f"Loading vocabulary from SKOS file {vocab_file}...")
    elif annif.vocab.VocabFileCSV.is_csv_file(vocab_file):
        # CSV file
        vocab_file = annif.vocab.VocabFileCSV(vocab_file)
        click.echo(f"Loading vocabulary from CSV file {vocab_file}...")
    else:
        # probably a TSV file - we need to know its language
        if not language:
            click.echo(
                "Please use --language option to set the language of a TSV vocabulary.",
                err=True,
            )
            sys.exit(1)
        click.echo(f"Loading vocabulary from TSV file {vocab_file}...")
        vocab_file = annif.vocab.VocabFileTSV(vocab_file, language)
    vocab.load_vocabulary(vocab_file, force=force)


@cli.command("train")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option(
    "--cached/--no-cached",
    "-c/-C",
    default=False,
    help="Reuse preprocessed training data from previous run",
)
@click.option(
    "--jobs",
    "-j",
    default=0,
    help="Number of parallel jobs (0 means choose automatically)",
)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_train(project_id, paths, cached, docs_limit, jobs, backend_param):
    """
    Train a project on a collection of documents.
    \f
    This will train the project using the documents from ``PATHS`` (directories
    or possibly gzipped TSV files) in a single batch operation. If ``--cached``
    is set, preprocessed training data from the previous run is reused instead
    of documents input; see `Reusing preprocessed training data
    <https://github.com/NatLibFi/Annif/wiki/
    Reusing-preprocessed-training-data>`_.
    """
    proj = cli_util.get_project(project_id)
    backend_params = cli_util.parse_backend_params(backend_param, proj)
    if cached:
        if len(paths) > 0:
            raise click.UsageError(
                "Corpus paths cannot be given when using --cached option."
            )
        documents = "cached"
    else:
        documents = cli_util.open_documents(
            paths, proj.subjects, proj.vocab_lang, docs_limit
        )
    proj.train(documents, backend_params, jobs)


@cli.command("learn")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_learn(project_id, paths, docs_limit, backend_param):
    """
    Further train an existing project on a collection of documents.
    \f
    Similar to the ``train`` command. This will continue training an already
    trained project using the documents given by ``PATHS`` in a single batch
    operation. Not supported by all backends.
    """
    proj = cli_util.get_project(project_id)
    backend_params = cli_util.parse_backend_params(backend_param, proj)
    documents = cli_util.open_documents(
        paths, proj.subjects, proj.vocab_lang, docs_limit
    )
    proj.learn(documents, backend_params)


@cli.command("suggest")
@cli_util.project_id
@click.argument(
    "paths", type=click.Path(dir_okay=False, exists=True, allow_dash=True), nargs=-1
)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option("--language", "-L", help="Language of subject labels")
@cli_util.docs_limit_option
@cli_util.backend_param_option
@click.option(
    "--metadata",
    "-D",
    multiple=True,
    help="Additional metadata for a document read from standard input. "
    + "Syntax: `-D <field>=<value>`.",
)
@cli_util.common_options
def run_suggest(
    project_id, paths, limit, threshold, language, backend_param, metadata, docs_limit
):
    """
    Suggest subjects for a single document from standard input (optionally
    with metadata) or for one or more document file(s) given its/their
    path(s).
    \f
    This will read a text document from standard input and suggest subjects for
    it, or if given path(s) to file(s), suggest subjects for it/them.
    """
    project = cli_util.get_project(project_id)
    lang = language or project.vocab_lang
    if lang not in project.vocab.languages:
        raise click.BadParameter(f'language "{lang}" not supported by vocabulary')
    backend_params = cli_util.parse_backend_params(backend_param, project)

    if paths and not (len(paths) == 1 and paths[0] == "-"):
        docs = cli_util.open_text_documents(paths, docs_limit)
        results = project.suggest_corpus(docs, backend_params).filter(limit, threshold)
        for (
            suggestions,
            path,
        ) in zip(results, paths):
            click.echo(f"Suggestions for {path}")
            cli_util.show_hits(suggestions, project, lang)
    else:
        text = sys.stdin.read()
        doc_metadata = cli_util.parse_metadata(metadata)
        suggestions = project.suggest(
            [Document(text=text, metadata=doc_metadata)], backend_params
        ).filter(limit, threshold)[0]
        cli_util.show_hits(suggestions, project, lang)


@cli.command("index")
@cli_util.project_id
@click.argument("directory", type=click.Path(exists=True, file_okay=False))
@click.option(
    "--suffix", "-s", default=".annif", help="File name suffix for result files"
)
@click.option(
    "--force/--no-force",
    "-f/-F",
    default=False,
    help="Force overwriting of existing result files",
)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option("--language", "-L", help="Language of subject labels")
@cli_util.backend_param_option
@cli_util.common_options
def run_index(
    project_id, directory, suffix, force, limit, threshold, language, backend_param
):
    """
    Index a directory with documents, suggesting subjects for each document.
    Write the results in TSV files with the given suffix (``.annif`` by
    default).
    """
    project = cli_util.get_project(project_id)
    lang = language or project.vocab_lang
    if lang not in project.vocab.languages:
        raise click.BadParameter(f'language "{lang}" not supported by vocabulary')
    backend_params = cli_util.parse_backend_params(backend_param, project)

    corpus = DocumentDirectory(directory, require_subjects=False)
    results = project.suggest_corpus(corpus, backend_params).filter(limit, threshold)

    for doc, suggestions in zip(corpus.documents, results):
        subjectfilename = re.sub(r"\.(txt|json)$", suffix, doc.file_path)
        if os.path.exists(subjectfilename) and not force:
            click.echo(
                "Not overwriting {} (use --force to override)".format(subjectfilename)
            )
            continue
        with open(subjectfilename, "w", encoding="utf-8") as subjfile:
            cli_util.show_hits(suggestions, project, lang, file=subjfile)


@cli.command("index-file")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True, dir_okay=False), nargs=-1)
@click.option(
    "--suffix", "-s", default=".annif.jsonl", help="File name suffix for result files"
)
@click.option(
    "--gzip/--no-gzip",
    "-z/-Z",
    "use_gzip",
    default=False,
    help="Gzip compress result files",
)
@click.option(
    "--output",
    "-O",
    type=click.Path(dir_okay=False, writable=True),
    default=None,
    help="Redirect all output to the given file (or '-' for stdout)",
)
@click.option(
    "--force/--no-force",
    "-f/-F",
    default=False,
    help="Force overwriting of existing result files",
)
@click.option(
    "--include-doc/--no-include-doc",
    "-i/-I",
    default=True,
    help="Include input documents in output",
)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option("--language", "-L", help="Language of subject labels")
@cli_util.backend_param_option
@cli_util.common_options
def run_index_file(
    project_id,
    paths,
    suffix,
    use_gzip,
    output,
    force,
    include_doc,
    limit,
    threshold,
    language,
    backend_param,
):
    """
    Index file(s) containing documents, suggesting subjects for each document.
    Write the results in JSONL files with the given suffix (``.annif.jsonl`` by
    default).
    """

    project = cli_util.get_project(project_id)
    lang = language or project.vocab_lang
    if lang not in project.vocab.languages:
        raise click.BadParameter(f'language "{lang}" not supported by vocabulary')
    backend_params = cli_util.parse_backend_params(backend_param, project)

    for path in paths:
        corpus = cli_util.open_doc_path(
            path, project.subjects, lang, require_subjects=False
        )
        results = project.suggest_corpus(corpus, backend_params).filter(
            limit, threshold
        )

        stream_cm = cli_util.get_output_stream(path, suffix, output, use_gzip, force)
        if stream_cm is None:
            continue

        with stream_cm as stream:
            for doc, suggestions in zip(corpus.documents, results):
                output_data = doc.as_dict(project.subjects, lang) if include_doc else {}
                output_data["results"] = [
                    suggestion_to_dict(suggestion, project.subjects, lang)
                    for suggestion in suggestions
                ]
                stream.write(json.dumps(output_data) + "\n")


@cli.command("eval")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option(
    "--metric",
    "-m",
    default=[],
    multiple=True,
    help="Metric to calculate (default: all)",
)
@click.option(
    "--metrics-file",
    "-M",
    type=click.File("w", encoding="utf-8", errors="ignore", lazy=True),
    help="""Specify file in order to write evaluation metrics in JSON format.
    File directory must exist, existing file will be overwritten.""",
)
@click.option(
    "--results-file",
    "-r",
    type=click.File("w", encoding="utf-8", errors="ignore", lazy=True),
    help="""Specify file in order to write non-aggregated results per subject.
    File directory must exist, existing file will be overwritten.""",
)
@click.option(
    "--jobs", "-j", default=1, help="Number of parallel jobs (0 means all CPUs)"
)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_eval(
    project_id,
    paths,
    limit,
    threshold,
    docs_limit,
    metric,
    metrics_file,
    results_file,
    jobs,
    backend_param,
):
    """
    Suggest subjects for documents and evaluate the results by comparing
    against a gold standard.
    \f
    With this command the documents from ``PATHS`` (directories or possibly
    gzipped TSV files) will be assigned subject suggestions and then
    statistical measures are calculated that quantify how well the suggested
    subjects match the gold-standard subjects in the documents.

    Normally the output is the list of the metrics calculated across documents.
    If ``--results-file <FILENAME>`` option is given, the metrics are
    calculated separately for each subject, and written to the given file.
    """

    project = cli_util.get_project(project_id)
    backend_params = cli_util.parse_backend_params(backend_param, project)

    import annif.eval

    eval_batch = annif.eval.EvaluationBatch(project.subjects)

    if results_file:
        try:
            print("", end="", file=results_file)
            click.echo(
                "Writing per subject evaluation results to {!s}".format(
                    results_file.name
                )
            )
        except Exception as e:
            raise NotSupportedException(
                "cannot open results-file for writing: " + str(e)
            )
    corpus = cli_util.open_documents(
        paths, project.subjects, project.vocab_lang, docs_limit
    )
    jobs, pool_class = annif.parallel.get_pool(jobs)

    project.initialize(parallel=True)
    psmap = annif.parallel.ProjectSuggestMap(
        project.registry, [project_id], backend_params, limit, threshold
    )

    with pool_class(jobs) as pool:
        for hit_sets, subject_sets in pool.imap_unordered(
            psmap.suggest_batch, corpus.doc_batches
        ):
            eval_batch.evaluate_many(hit_sets[project_id], subject_sets)

    template = "{0:<30}\t{1:{fmt_spec}}"
    metrics = eval_batch.results(
        metrics=metric, results_file=results_file, language=project.vocab_lang
    )
    for metric, score in metrics.items():
        if isinstance(score, int):
            fmt_spec = "d"
        elif isinstance(score, float):
            fmt_spec = ".04f"
        click.echo(template.format(metric + ":", score, fmt_spec=fmt_spec))
    if metrics_file:
        json.dump(
            {metric_code(mname): val for mname, val in metrics.items()},
            metrics_file,
            indent=2,
        )


@cli.command("run")
@click.option("--host", type=str, default="127.0.0.1")
@click.option("--port", type=int, default=5000)
@click.option("--log-level")
@click_log.simple_verbosity_option(logger, default="ERROR")
def run_app(**kwargs):
    """
    Run Annif in server mode for development.
    \f
    The server is for development purposes only.
    """
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    cxapp = annif.create_cx_app()
    cxapp.run(**kwargs)


FILTER_BATCH_MAX_LIMIT = 15
OPTIMIZE_METRICS = ["Precision (doc avg)", "Recall (doc avg)", "F1 score (doc avg)"]


@cli.command("optimize")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option(
    "--jobs", "-j", default=1, help="Number of parallel jobs (0 means all CPUs)"
)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_optimize(project_id, paths, jobs, docs_limit, backend_param):
    """
    Suggest subjects for documents, testing multiple limits and thresholds.
    \f
    This command will use different limit (maximum number of subjects) and
    score threshold values when assigning subjects to each document given by
    ``PATHS`` and compare the results against the gold standard subjects in the
    documents. The output is a list of parameter combinations and their scores.
    From the output, you can determine the optimum limit and threshold
    parameters depending on which measure you want to target.
    """
    project = cli_util.get_project(project_id)
    backend_params = cli_util.parse_backend_params(backend_param, project)
    filter_params = cli_util.generate_filter_params(FILTER_BATCH_MAX_LIMIT)

    import annif.eval

    corpus = cli_util.open_documents(
        paths, project.subjects, project.vocab_lang, docs_limit
    )

    jobs, pool_class = annif.parallel.get_pool(jobs)

    project.initialize(parallel=True)
    psmap = annif.parallel.ProjectSuggestMap(
        project.registry,
        [project_id],
        backend_params,
        limit=FILTER_BATCH_MAX_LIMIT,
        threshold=0.0,
    )

    ndocs = 0
    suggestion_batches = []
    subject_set_batches = []
    with pool_class(jobs) as pool:
        for suggestion_batch, subject_sets in pool.imap_unordered(
            psmap.suggest_batch, corpus.doc_batches
        ):
            ndocs += len(suggestion_batch[project_id])
            suggestion_batches.append(suggestion_batch[project_id])
            subject_set_batches.append(subject_sets)

    from annif.suggestion import SuggestionResults

    orig_suggestion_results = SuggestionResults(suggestion_batches)

    click.echo("\t".join(("Limit", "Thresh.", "Prec.", "Rec.", "F1")))

    best_scores = collections.defaultdict(float)
    best_params = {}

    template = "{:d}\t{:.02f}\t{:.04f}\t{:.04f}\t{:.04f}"
    import annif.eval

    for limit, threshold in filter_params:
        eval_batch = annif.eval.EvaluationBatch(project.subjects)
        filtered_results = orig_suggestion_results.filter(limit, threshold)
        for batch, subject_sets in zip(filtered_results.batches, subject_set_batches):
            eval_batch.evaluate_many(batch, subject_sets)
        results = eval_batch.results(metrics=OPTIMIZE_METRICS)
        for metric, score in results.items():
            if score >= best_scores[metric]:
                best_scores[metric] = score
                best_params[metric] = (limit, threshold)
        click.echo(
            template.format(
                limit,
                threshold,
                results["Precision (doc avg)"],
                results["Recall (doc avg)"],
                results["F1 score (doc avg)"],
            )
        )

    click.echo()
    template2 = "Best {:>19}: {:.04f}\tLimit: {:d}\tThreshold: {:.02f}"
    for metric in OPTIMIZE_METRICS:
        click.echo(
            template2.format(
                metric,
                best_scores[metric],
                best_params[metric][0],
                best_params[metric][1],
            )
        )
    click.echo("Documents evaluated:\t{}".format(ndocs))


@cli.command("hyperopt")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option("--trials", "-T", default=10, help="Number of trials")
@click.option(
    "--jobs", "-j", default=1, help="Number of parallel runs (0 means all CPUs)"
)
@click.option(
    "--metric", "-m", default="NDCG", help="Metric to optimize (default: NDCG)"
)
@click.option(
    "--results-file",
    "-r",
    type=click.File("w", encoding="utf-8", errors="ignore", lazy=True),
    help="""Specify file path to write trial results as TSV.
    File directory must exist, existing file will be overwritten.""",
)
@cli_util.docs_limit_option
@cli_util.common_options
def run_hyperopt(project_id, paths, docs_limit, trials, jobs, metric, results_file):
    """
    Optimize the hyperparameters of a project using validation documents from
    ``PATHS``. Not supported by all backends. Output is a list of trial results
    and a report of the best performing parameters.
    """
    proj = cli_util.get_project(project_id)
    documents = cli_util.open_documents(
        paths, proj.subjects, proj.vocab_lang, docs_limit
    )
    click.echo(f"Looking for optimal hyperparameters using {trials} trials")
    rec = proj.hyperopt(documents, trials, jobs, metric, results_file)
    click.echo(f"Got best {metric} score {rec.score:.4f} with:")
    click.echo("---")
    for line in rec.lines:
        click.echo(line)
    click.echo("---")


@cli.command("upload")
@click.argument("project_ids_pattern", shell_complete=cli_util.complete_param)
@click.argument("repo_id")
@click.option(
    "--token",
    help="""Authentication token, obtained from the Hugging Face Hub.
    Will default to the stored token.""",
)
@click.option(
    "--revision",
    help="""An optional git revision to commit from. Defaults to the head of the "main"
    branch.""",
)
@click.option(
    "--commit-message",
    help="""The summary / title / first line of the generated commit.""",
)
@click.option(
    "--modelcard/--no-modelcard",
    default=True,
    help="Update or create a Model Card with upload.",
)
@cli_util.common_options
def run_upload(
    project_ids_pattern, repo_id, token, revision, commit_message, modelcard
):
    """
    Upload selected projects and their vocabularies to a Hugging Face Hub repository.
    \f
    This command zips the project directories and vocabularies of the projects
    that match the given `project_ids_pattern` to archive files, and uploads the
    archives along with the project configurations to the specified Hugging Face
    Hub repository. An authentication token and commit message can be given with
    options. If the README.md does not exist in the repository it is
    created with default contents and metadata of the uploaded projects, if it exists,
    its metadata are updated as necessary.
    """
    from huggingface_hub import HfApi
    from huggingface_hub.utils import HfHubHTTPError, HFValidationError

    projects = hfh_util.get_matching_projects(project_ids_pattern)
    click.echo(f"Uploading project(s): {', '.join([p.project_id for p in projects])}")

    commit_message = (
        commit_message
        if commit_message is not None
        else f"Upload project(s) {project_ids_pattern} with Annif"
    )

    fobjs, operations = [], []
    try:
        fobjs, operations = hfh_util.prepare_commits(projects, repo_id, token)
        api = HfApi()
        api.create_commit(
            repo_id=repo_id,
            operations=operations,
            commit_message=commit_message,
            revision=revision,
            token=token,
        )
    except (HfHubHTTPError, HFValidationError) as err:
        raise OperationFailedException(str(err))
    else:
        if modelcard:
            hfh_util.upsert_modelcard(repo_id, projects, token, revision)
    finally:
        for fobj in fobjs:
            fobj.close()


@cli.command("download")
@click.argument("project_ids_pattern")
@click.argument("repo_id")
@click.option(
    "--token",
    help="""Authentication token, obtained from the Hugging Face Hub.
    Will default to the stored token.""",
)
@click.option(
    "--revision",
    help="""
    An optional Git revision id which can be a branch name, a tag, or a commit
    hash.
    """,
)
@click.option(
    "--force",
    "-f",
    default=False,
    is_flag=True,
    help="Replace an existing project/vocabulary/config with the downloaded one",
)
@click.option(
    "--trust-repo",
    default=False,
    is_flag=True,
    help="Allow download from the repository even when it has no entries in the cache",
)
@cli_util.common_options
def run_download(project_ids_pattern, repo_id, token, revision, force, trust_repo):
    """
    Download selected projects and their vocabularies from a Hugging Face Hub
    repository.
    \f
    This command downloads the project and vocabulary archives and the
    configuration files of the projects that match the given
    `project_ids_pattern` from the specified Hugging Face Hub repository and
    unzips the archives to `data/` directory and places the configuration files
    to `projects.d/` directory. An authentication token and revision can be given with
    options. If the repository hasn't been used for downloads previously
    (i.e., it doesn't appear in the Hugging Face Hub cache on local system), the
    `--trust-repo` option needs to be used.
    """

    hfh_util.check_is_download_allowed(trust_repo, repo_id)

    project_ids = hfh_util.get_matching_project_ids_from_hf_hub(
        project_ids_pattern, repo_id, token, revision
    )
    click.echo(f"Downloading project(s): {', '.join(project_ids)}")

    vocab_ids = set()
    for project_id in project_ids:
        project_zip_cache_path = hfh_util.download_from_hf_hub(
            f"projects/{project_id}.zip", repo_id, token, revision
        )
        hfh_util.unzip_archive(project_zip_cache_path, force)
        config_file_cache_path = hfh_util.download_from_hf_hub(
            f"{project_id}.cfg", repo_id, token, revision
        )
        vocab_ids.add(hfh_util.get_vocab_id_from_config(config_file_cache_path))
        hfh_util.copy_project_config(config_file_cache_path, force)

    for vocab_id in vocab_ids:
        vocab_zip_cache_path = hfh_util.download_from_hf_hub(
            f"vocabs/{vocab_id}.zip", repo_id, token, revision
        )
        hfh_util.unzip_archive(vocab_zip_cache_path, force)


@cli.command("completion")
@click.option("--bash", "shell", flag_value="bash")
@click.option("--zsh", "shell", flag_value="zsh")
@click.option("--fish", "shell", flag_value="fish")
def run_completion(shell):
    """Generate the script for tab-key autocompletion for the given shell. To enable the
    completion support in your current bash terminal session run\n
        source <(annif completion --bash)

    To enable the completion support in all new sessions first add the completion script
    in your home directory:\n
        annif completion --bash > ~/.annif-complete.bash

    Then make the script to be automatically sourced for new terminal sessions by adding
    the following to your ~/.bashrc file (or in some alternative startup file)\n
        source ~/.annif-complete.bash
    """

    if shell is None:
        raise click.UsageError("Shell not given, try --bash, --zsh or --fish")

    script = os.popen(f"_ANNIF_COMPLETE={shell}_source annif").read()
    click.echo(f"# Generated by Annif {importlib.metadata.version('annif')}")
    click.echo(script)


@cli.command("detect-language")
@click.argument("languages")
@click.argument(
    "paths", type=click.Path(dir_okay=False, exists=True, allow_dash=True), nargs=-1
)
def run_detect_language(languages, paths):
    """
    Detect the language of a single text document from standard input or for one or more
    document file(s) given its/their path(s).
    """

    langs = tuple(languages.split(","))

    def detect_language_and_show(text, languages):
        try:
            proportions = detect_language(text, languages)
        except ValueError as e:
            raise click.UsageError(e)
        for lang, score in proportions.items():
            if lang == "unk":
                lang = "?"
            click.echo(f"{lang}\t{score:.04f}")

    if paths and not (len(paths) == 1 and paths[0] == "-"):
        doclist = cli_util.open_text_documents(paths, docs_limit=None)
        for doc, path in zip(doclist.documents, paths):
            click.echo(f"Detected languages for {path}")
            detect_language_and_show(doc.text, langs)
    else:
        text = sys.stdin.read()
        detect_language_and_show(text, langs)


if __name__ == "__main__":
    cli()