Passed
Pull Request — main (#877)
by Osma
06:34 queued 03:38
created

annif.cli   F

Complexity

Total Complexity 75

Size/Duplication

Total Lines 869
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
eloc 568
dl 0
loc 869
rs 2.4
c 0
b 0
f 0
wmc 75

18 Functions

Rating   Name   Duplication   Size   Complexity  
A run_clear_project() 0 9 1
A run_train() 0 42 3
A run_load_vocab() 0 36 4
A run_learn() 0 20 1
A run_list_vocabs() 0 28 4
A run_list_projects() 0 39 3
A run_show_project() 0 18 1
B run_suggest() 0 51 6
B run_index() 0 43 6
A run_hyperopt() 0 36 2
A run_completion() 0 24 2
C run_optimize() 0 96 8
B run_download() 0 68 3
A run_app() 0 14 1
B run_upload() 0 68 6
B run_index_text() 0 54 7
B run_detect_language() 0 31 8
C run_eval() 0 108 9

How to fix   Complexity   

Complexity

Complex modules like annif.cli often do a lot of different things. To break such a module down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for functions that share the same prefixes or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.

1
"""Definitions for command-line (Click) commands for invoking Annif
2
operations and printing the results to console."""
3
4
import collections
import importlib
import importlib.metadata
import json
import os.path
import re
import sys

import click
import click_log
from flask.cli import FlaskGroup

import annif
import annif.parallel
import annif.project
import annif.registry
import annif.vocab
from annif import cli_util, hfh_util
from annif.corpus import Document, DocumentDirectory
from annif.exception import (
    NotInitializedException,
    NotSupportedException,
    OperationFailedException,
)
from annif.project import Access
from annif.simplemma_util import detect_language
from annif.util import metric_code, suggestion_to_dict
29
30
logger = annif.logger
31
click_log.basic_config(logger)
32
33
create_app = annif.create_flask_app
34
cli = FlaskGroup(
35
    create_app=create_app, add_default_commands=False, add_version_option=False
36
)
37
cli = click.version_option(message="%(version)s")(cli)
38
cli.params = [opt for opt in cli.params if opt.name not in ("env_file", "app")]
39
40
41
@cli.command("list-projects")
@cli_util.common_options
@click_log.simple_verbosity_option(logger, default="ERROR")
def run_list_projects():
    """
    List available projects.
    \f
    Show a list of currently defined projects. Projects are defined in a
    configuration file, normally called ``projects.cfg``. See `Project
    configuration
    <https://github.com/NatLibFi/Annif/wiki/Project-configuration>`_
    for details.
    """

    headings = (
        "Project ID",
        "Project Name",
        "Vocabulary ID",
        "Language",
        "Trained",
        "Modification time",
    )
    # Collect one row per project; all registered projects are shown,
    # including private ones.
    rows = []
    for proj in annif.registry.get_projects(min_access=Access.private).values():
        rows.append(
            (
                proj.project_id,
                proj.name,
                proj.vocab.vocab_id if proj.vocab_spec else "-",
                proj.language,
                str(proj.is_trained),
                cli_util.format_datetime(proj.modification_time),
            )
        )
    # Column widths are derived from the headings and all rows.
    row_format = cli_util.make_list_template(headings, *rows)
    heading_line = row_format.format(*headings)
    click.echo(heading_line)
    click.echo("-" * len(heading_line))
    for row in rows:
        click.echo(row_format.format(*row))
80
81
82
@cli.command("show-project")
@cli_util.project_id
@cli_util.common_options
def run_show_project(project_id):
    """
    Show information about a project.
    """

    proj = cli_util.get_project(project_id)
    # Each label (including the colon) is left-aligned in a 19-character
    # column, matching the original hand-padded output exactly.
    fields = (
        ("Project ID:", proj.project_id),
        ("Project Name:", proj.name),
        ("Language:", proj.language),
        ("Vocabulary:", proj.vocab.vocab_id),
        ("Vocab language:", proj.vocab_lang),
        ("Access:", proj.access.name),
        ("Backend:", proj.backend.name),
        ("Trained:", proj.is_trained),
        ("Modification time:", cli_util.format_datetime(proj.modification_time)),
    )
    for label, value in fields:
        click.echo(f"{label:<19}{value}")
100
101
102
@cli.command("clear")
@cli_util.project_id
@cli_util.common_options
def run_clear_project(project_id):
    """
    Initialize the project to its original, untrained state.
    """
    # Look up the project and remove its trained model data.
    cli_util.get_project(project_id).remove_model_data()
111
112
113
@cli.command("list-vocabs")
@cli_util.common_options
@click_log.simple_verbosity_option(logger, default="ERROR")
def run_list_vocabs():
    """
    List available vocabularies.
    """

    headings = ("Vocabulary ID", "Languages", "Size", "Loaded")
    rows = []
    for vocab in annif.registry.get_vocabs(min_access=Access.private).values():
        try:
            langs = ",".join(sorted(vocab.languages))
            size = len(vocab)
            loaded = True
        except NotInitializedException:
            # Vocabulary data has not been loaded; languages and size are
            # unknown at this point.
            langs = "-"
            size = "-"
            loaded = False
        rows.append((vocab.vocab_id, langs, str(size), str(loaded)))

    # Column widths are derived from the headings and all rows.
    row_format = cli_util.make_list_template(headings, *rows)
    heading_line = row_format.format(*headings)
    click.echo(heading_line)
    click.echo("-" * len(heading_line))
    for row in rows:
        click.echo(row_format.format(*row))
141
142
143
@cli.command("load-vocab")
@click.argument("vocab_id", shell_complete=cli_util.complete_param)
@click.argument("vocab_file", type=click.Path(exists=True, dir_okay=False))
@click.option("--language", "-L", help="Language of TSV vocabulary file")
@click.option(
    "--force",
    "-f",
    default=False,
    is_flag=True,
    help="Replace existing vocabulary completely instead of updating it",
)
@cli_util.common_options
def run_load_vocab(vocab_id, language, force, vocab_file):
    """
    Load a vocabulary from a subject file.
    """
    # The file format is detected from the file contents/extension: SKOS/RDF
    # and CSV are self-describing, while TSV needs an explicit --language.
    # FIX: echo the file *path* before wrapping it in a VocabFile object, so
    # all three branches print the path; previously the SKOS/CSV branches
    # rebound ``vocab_file`` first and echoed the wrapper object instead.
    vocab = cli_util.get_vocab(vocab_id)
    if annif.vocab.VocabFileSKOS.is_rdf_file(vocab_file):
        # SKOS/RDF file supported by rdflib
        click.echo(f"Loading vocabulary from SKOS file {vocab_file}...")
        subject_file = annif.vocab.VocabFileSKOS(vocab_file)
    elif annif.vocab.VocabFileCSV.is_csv_file(vocab_file):
        # CSV file
        click.echo(f"Loading vocabulary from CSV file {vocab_file}...")
        subject_file = annif.vocab.VocabFileCSV(vocab_file)
    else:
        # probably a TSV file - we need to know its language
        if not language:
            click.echo(
                "Please use --language option to set the language of a TSV vocabulary.",
                err=True,
            )
            sys.exit(1)
        click.echo(f"Loading vocabulary from TSV file {vocab_file}...")
        subject_file = annif.vocab.VocabFileTSV(vocab_file, language)
    # --force replaces the existing vocabulary instead of updating it
    vocab.load_vocabulary(subject_file, force=force)
179
180
181
@cli.command("train")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option(
    "--cached/--no-cached",
    "-c/-C",
    default=False,
    help="Reuse preprocessed training data from previous run",
)
@click.option(
    "--jobs",
    "-j",
    default=0,
    help="Number of parallel jobs (0 means choose automatically)",
)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_train(project_id, paths, cached, docs_limit, jobs, backend_param):
    """
    Train a project on a collection of documents.
    \f
    This will train the project using the documents from ``PATHS`` (directories
    or possibly gzipped TSV files) in a single batch operation. If ``--cached``
    is set, preprocessed training data from the previous run is reused instead
    of documents input; see `Reusing preprocessed training data
    <https://github.com/NatLibFi/Annif/wiki/
    Reusing-preprocessed-training-data>`_.
    """
    project = cli_util.get_project(project_id)
    backend_params = cli_util.parse_backend_params(backend_param, project)
    if not cached:
        documents = cli_util.open_documents(
            paths, project.subjects, project.vocab_lang, docs_limit
        )
    else:
        # --cached reuses the backend's preprocessed data from a previous
        # run, so explicit corpus paths are contradictory input.
        if paths:
            raise click.UsageError(
                "Corpus paths cannot be given when using --cached option."
            )
        documents = "cached"
    project.train(documents, backend_params, jobs)
223
224
225
@cli.command("learn")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_learn(project_id, paths, docs_limit, backend_param):
    """
    Further train an existing project on a collection of documents.
    \f
    Similar to the ``train`` command. This will continue training an already
    trained project using the documents given by ``PATHS`` in a single batch
    operation. Not supported by all backends.
    """
    project = cli_util.get_project(project_id)
    params = cli_util.parse_backend_params(backend_param, project)
    corpus = cli_util.open_documents(
        paths, project.subjects, project.vocab_lang, docs_limit
    )
    # Incremental (online) training on top of the existing model.
    project.learn(corpus, params)
245
246
247
@cli.command("suggest")
@cli_util.project_id
@click.argument(
    "paths", type=click.Path(dir_okay=False, exists=True, allow_dash=True), nargs=-1
)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option("--language", "-L", help="Language of subject labels")
@cli_util.docs_limit_option
@cli_util.backend_param_option
@click.option(
    "--metadata",
    "-D",
    multiple=True,
    help="Additional metadata for a document read from standard input. "
    + "Syntax: `-D <field>=<value>`.",
)
@cli_util.common_options
def run_suggest(
    project_id, paths, limit, threshold, language, backend_param, metadata, docs_limit
):
    """
    Suggest subjects for a single document from standard input (optionally
    with metadata) or for one or more document file(s) given its/their
    path(s).
    \f
    This will read a text document from standard input and suggest subjects for
    it, or if given path(s) to file(s), suggest subjects for it/them.
    """
    project = cli_util.get_project(project_id)
    # Subject labels are shown in the project's vocab language unless
    # overridden with --language.
    lang = language or project.vocab_lang
    if lang not in project.vocab.languages:
        raise click.BadParameter(f'language "{lang}" not supported by vocabulary')
    backend_params = cli_util.parse_backend_params(backend_param, project)

    # No paths, or the single pseudo-path "-", means read from stdin.
    read_stdin = not paths or (len(paths) == 1 and paths[0] == "-")
    if read_stdin:
        text = sys.stdin.read()
        doc_metadata = cli_util.parse_metadata(metadata)
        suggestions = project.suggest(
            [Document(text=text, metadata=doc_metadata)], backend_params
        ).filter(limit, threshold)[0]
        cli_util.show_hits(suggestions, project, lang)
    else:
        docs = cli_util.open_text_documents(paths, docs_limit)
        results = project.suggest_corpus(docs, backend_params).filter(limit, threshold)
        # Results come back in corpus order, so they pair up with the paths.
        for suggestions, path in zip(results, paths):
            click.echo(f"Suggestions for {path}")
            cli_util.show_hits(suggestions, project, lang)
298
299
300
@cli.command("index")
@cli_util.project_id
@click.argument("directory", type=click.Path(exists=True, file_okay=False))
@click.option(
    "--suffix", "-s", default=".annif", help="File name suffix for result files"
)
@click.option(
    "--force/--no-force",
    "-f/-F",
    default=False,
    help="Force overwriting of existing result files",
)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option("--language", "-L", help="Language of subject labels")
@cli_util.backend_param_option
@cli_util.common_options
def run_index(
    project_id, directory, suffix, force, limit, threshold, language, backend_param
):
    """
    Index a directory with documents, suggesting subjects for each document.
    Write the results in TSV files with the given suffix (``.annif`` by
    default).
    """
    project = cli_util.get_project(project_id)
    # Subject labels are written in the project's vocab language unless
    # overridden with --language.
    lang = language or project.vocab_lang
    if lang not in project.vocab.languages:
        raise click.BadParameter(f'language "{lang}" not supported by vocabulary')
    backend_params = cli_util.parse_backend_params(backend_param, project)

    corpus = DocumentDirectory(directory, require_subjects=False)
    results = project.suggest_corpus(corpus, backend_params).filter(limit, threshold)

    for doc, suggestions in zip(corpus.documents, results):
        # The result file sits next to the input, with .txt/.json replaced
        # by the chosen suffix.
        outfilename = re.sub(r"\.(txt|json)$", suffix, doc.file_path)
        if not force and os.path.exists(outfilename):
            click.echo(
                "Not overwriting {} (use --force to override)".format(outfilename)
            )
            continue
        with open(outfilename, "w", encoding="utf-8") as outfile:
            cli_util.show_hits(suggestions, project, lang, file=outfile)
343
344
345
@cli.command("index-text")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True, dir_okay=False), nargs=-1)
@click.option(
    "--suffix", "-s", default=".annif.jsonl", help="File name suffix for result files"
)
@click.option(
    "--force/--no-force",
    "-f/-F",
    default=False,
    help="Force overwriting of existing result files",
)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option("--language", "-L", help="Language of subject labels")
@cli_util.backend_param_option
@cli_util.common_options
def run_index_text(
    project_id, paths, suffix, force, limit, threshold, language, backend_param
):
    """
    Index a file with documents, suggesting subjects for each document.
    Write the results in JSONL files with the given suffix (``.annif.jsonl`` by
    default).
    """
    project = cli_util.get_project(project_id)
    # Subject labels are written in the project's vocab language unless
    # overridden with --language.
    lang = language or project.vocab_lang
    if lang not in project.vocab.languages:
        raise click.BadParameter(f'language "{lang}" not supported by vocabulary')
    backend_params = cli_util.parse_backend_params(backend_param, project)

    for path in paths:
        corpus = cli_util.open_doc_path(
            path, project.subjects, lang, require_subjects=False
        )
        suggestion_results = project.suggest_corpus(corpus, backend_params).filter(
            limit, threshold
        )

        # The result file sits next to the input, with .csv/.tsv/.jsonl
        # replaced by the chosen suffix.
        outfilename = re.sub(r"\.(csv|tsv|jsonl)$", suffix, path)
        if os.path.exists(outfilename) and not force:
            click.echo(
                "Not overwriting {} (use --force to override)".format(outfilename)
            )
            continue

        # One JSON object per input document, written as JSON Lines.
        with open(outfilename, "w", encoding="utf-8") as outfile:
            for doc, suggestions in zip(corpus.documents, suggestion_results):
                record = {
                    "results": [
                        suggestion_to_dict(suggestion, project.subjects, lang)
                        for suggestion in suggestions
                    ]
                }
                outfile.write(json.dumps(record) + "\n")
399
400
401
@cli.command("eval")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option("--limit", "-l", default=10, help="Maximum number of subjects")
@click.option("--threshold", "-t", default=0.0, help="Minimum score threshold")
@click.option(
    "--metric",
    "-m",
    default=[],
    multiple=True,
    help="Metric to calculate (default: all)",
)
@click.option(
    "--metrics-file",
    "-M",
    type=click.File("w", encoding="utf-8", errors="ignore", lazy=True),
    help="""Specify file in order to write evaluation metrics in JSON format.
    File directory must exist, existing file will be overwritten.""",
)
@click.option(
    "--results-file",
    "-r",
    type=click.File("w", encoding="utf-8", errors="ignore", lazy=True),
    help="""Specify file in order to write non-aggregated results per subject.
    File directory must exist, existing file will be overwritten.""",
)
@click.option(
    "--jobs", "-j", default=1, help="Number of parallel jobs (0 means all CPUs)"
)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_eval(
    project_id,
    paths,
    limit,
    threshold,
    docs_limit,
    metric,
    metrics_file,
    results_file,
    jobs,
    backend_param,
):
    """
    Suggest subjects for documents and evaluate the results by comparing
    against a gold standard.
    \f
    With this command the documents from ``PATHS`` (directories or possibly
    gzipped TSV files) will be assigned subject suggestions and then
    statistical measures are calculated that quantify how well the suggested
    subjects match the gold-standard subjects in the documents.

    Normally the output is the list of the metrics calculated across documents.
    If ``--results-file <FILENAME>`` option is given, the metrics are
    calculated separately for each subject, and written to the given file.
    """

    project = cli_util.get_project(project_id)
    backend_params = cli_util.parse_backend_params(backend_param, project)

    # Imported lazily: annif.eval pulls in heavier dependencies.
    import annif.eval

    eval_batch = annif.eval.EvaluationBatch(project.subjects)

    if results_file:
        # Fail early (before the expensive evaluation) if the per-subject
        # results file cannot be written to.
        try:
            print("", end="", file=results_file)
            click.echo(
                "Writing per subject evaluation results to {!s}".format(
                    results_file.name
                )
            )
        except Exception as e:
            raise NotSupportedException(
                "cannot open results-file for writing: " + str(e)
            ) from e
    corpus = cli_util.open_documents(
        paths, project.subjects, project.vocab_lang, docs_limit
    )
    jobs, pool_class = annif.parallel.get_pool(jobs)

    project.initialize(parallel=True)
    psmap = annif.parallel.ProjectSuggestMap(
        project.registry, [project_id], backend_params, limit, threshold
    )

    # Suggest in parallel batches and accumulate hits vs. gold standard.
    with pool_class(jobs) as pool:
        for hit_sets, subject_sets in pool.imap_unordered(
            psmap.suggest_batch, corpus.doc_batches
        ):
            eval_batch.evaluate_many(hit_sets[project_id], subject_sets)

    template = "{0:<30}\t{1:{fmt_spec}}"
    metrics = eval_batch.results(
        metrics=metric, results_file=results_file, language=project.vocab_lang
    )
    # FIX: use ``metric_name`` for the loop variable; the original reused
    # ``metric`` and silently shadowed the --metric parameter. Also give
    # fmt_spec a default so a non-numeric score cannot leave it unbound.
    for metric_name, score in metrics.items():
        if isinstance(score, int):
            fmt_spec = "d"
        elif isinstance(score, float):
            fmt_spec = ".04f"
        else:
            fmt_spec = ""  # default formatting for any other score type
        click.echo(template.format(metric_name + ":", score, fmt_spec=fmt_spec))
    if metrics_file:
        # Machine-readable copy of the metrics, keyed by metric code.
        json.dump(
            {metric_code(mname): val for mname, val in metrics.items()},
            metrics_file,
            indent=2,
        )
510
511
512
@cli.command("run")
@click.option("--host", type=str, default="127.0.0.1")
@click.option("--port", type=int, default=5000)
@click.option("--log-level")
@click_log.simple_verbosity_option(logger, default="ERROR")
def run_app(**kwargs):
    """
    Run Annif in server mode for development.
    \f
    The server is for development purposes only.
    """
    # Drop options the user did not set so the app's own defaults apply.
    run_args = {key: val for key, val in kwargs.items() if val is not None}
    cxapp = annif.create_cx_app()
    cxapp.run(**run_args)
526
527
528
FILTER_BATCH_MAX_LIMIT = 15
529
OPTIMIZE_METRICS = ["Precision (doc avg)", "Recall (doc avg)", "F1 score (doc avg)"]
530
531
532
@cli.command("optimize")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option(
    "--jobs", "-j", default=1, help="Number of parallel jobs (0 means all CPUs)"
)
@cli_util.docs_limit_option
@cli_util.backend_param_option
@cli_util.common_options
def run_optimize(project_id, paths, jobs, docs_limit, backend_param):
    """
    Suggest subjects for documents, testing multiple limits and thresholds.
    \f
    This command will use different limit (maximum number of subjects) and
    score threshold values when assigning subjects to each document given by
    ``PATHS`` and compare the results against the gold standard subjects in the
    documents. The output is a list of parameter combinations and their scores.
    From the output, you can determine the optimum limit and threshold
    parameters depending on which measure you want to target.
    """
    project = cli_util.get_project(project_id)
    backend_params = cli_util.parse_backend_params(backend_param, project)
    filter_params = cli_util.generate_filter_params(FILTER_BATCH_MAX_LIMIT)

    # Imported lazily: annif.eval pulls in heavier dependencies.
    # FIX: the original imported annif.eval twice in this function; the
    # redundant second import has been removed.
    import annif.eval

    corpus = cli_util.open_documents(
        paths, project.subjects, project.vocab_lang, docs_limit
    )

    jobs, pool_class = annif.parallel.get_pool(jobs)

    project.initialize(parallel=True)
    # Suggest once with the loosest filter; tighter (limit, threshold)
    # combinations are evaluated by re-filtering these results.
    psmap = annif.parallel.ProjectSuggestMap(
        project.registry,
        [project_id],
        backend_params,
        limit=FILTER_BATCH_MAX_LIMIT,
        threshold=0.0,
    )

    ndocs = 0
    suggestion_batches = []
    subject_set_batches = []
    with pool_class(jobs) as pool:
        for suggestion_batch, subject_sets in pool.imap_unordered(
            psmap.suggest_batch, corpus.doc_batches
        ):
            ndocs += len(suggestion_batch[project_id])
            suggestion_batches.append(suggestion_batch[project_id])
            subject_set_batches.append(subject_sets)

    from annif.suggestion import SuggestionResults

    orig_suggestion_results = SuggestionResults(suggestion_batches)

    click.echo("\t".join(("Limit", "Thresh.", "Prec.", "Rec.", "F1")))

    # Track the best score seen so far for each metric and the parameters
    # that produced it. defaultdict(float) starts every metric at 0.0.
    best_scores = collections.defaultdict(float)
    best_params = {}

    template = "{:d}\t{:.02f}\t{:.04f}\t{:.04f}\t{:.04f}"
    for limit, threshold in filter_params:
        eval_batch = annif.eval.EvaluationBatch(project.subjects)
        filtered_results = orig_suggestion_results.filter(limit, threshold)
        for batch, subject_sets in zip(filtered_results.batches, subject_set_batches):
            eval_batch.evaluate_many(batch, subject_sets)
        results = eval_batch.results(metrics=OPTIMIZE_METRICS)
        for metric, score in results.items():
            if score >= best_scores[metric]:
                best_scores[metric] = score
                best_params[metric] = (limit, threshold)
        click.echo(
            template.format(
                limit,
                threshold,
                results["Precision (doc avg)"],
                results["Recall (doc avg)"],
                results["F1 score (doc avg)"],
            )
        )

    click.echo()
    template2 = "Best {:>19}: {:.04f}\tLimit: {:d}\tThreshold: {:.02f}"
    for metric in OPTIMIZE_METRICS:
        click.echo(
            template2.format(
                metric,
                best_scores[metric],
                best_params[metric][0],
                best_params[metric][1],
            )
        )
    click.echo("Documents evaluated:\t{}".format(ndocs))
628
629
630
@cli.command("hyperopt")
@cli_util.project_id
@click.argument("paths", type=click.Path(exists=True), nargs=-1)
@click.option("--trials", "-T", default=10, help="Number of trials")
@click.option(
    "--jobs", "-j", default=1, help="Number of parallel runs (0 means all CPUs)"
)
@click.option(
    "--metric", "-m", default="NDCG", help="Metric to optimize (default: NDCG)"
)
@click.option(
    "--results-file",
    "-r",
    type=click.File("w", encoding="utf-8", errors="ignore", lazy=True),
    help="""Specify file path to write trial results as TSV.
    File directory must exist, existing file will be overwritten.""",
)
@cli_util.docs_limit_option
@cli_util.common_options
def run_hyperopt(project_id, paths, docs_limit, trials, jobs, metric, results_file):
    """
    Optimize the hyperparameters of a project using validation documents from
    ``PATHS``. Not supported by all backends. Output is a list of trial results
    and a report of the best performing parameters.
    """
    project = cli_util.get_project(project_id)
    corpus = cli_util.open_documents(
        paths, project.subjects, project.vocab_lang, docs_limit
    )
    click.echo(f"Looking for optimal hyperparameters using {trials} trials")
    # The backend runs the trials and returns its best recommendation.
    recommendation = project.hyperopt(corpus, trials, jobs, metric, results_file)
    click.echo(f"Got best {metric} score {recommendation.score:.4f} with:")
    click.echo("---")
    for line in recommendation.lines:
        click.echo(line)
    click.echo("---")
666
667
668
@cli.command("upload")
@click.argument("project_ids_pattern", shell_complete=cli_util.complete_param)
@click.argument("repo_id")
@click.option(
    "--token",
    help="""Authentication token, obtained from the Hugging Face Hub.
    Will default to the stored token.""",
)
@click.option(
    "--revision",
    help="""An optional git revision to commit from. Defaults to the head of the "main"
    branch.""",
)
@click.option(
    "--commit-message",
    help="""The summary / title / first line of the generated commit.""",
)
@click.option(
    "--modelcard/--no-modelcard",
    default=True,
    help="Update or create a Model Card with upload.",
)
@cli_util.common_options
def run_upload(
    project_ids_pattern, repo_id, token, revision, commit_message, modelcard
):
    """
    Upload selected projects and their vocabularies to a Hugging Face Hub repository.
    \f
    This command zips the project directories and vocabularies of the projects
    that match the given `project_ids_pattern` to archive files, and uploads the
    archives along with the project configurations to the specified Hugging Face
    Hub repository. An authentication token and commit message can be given with
    options. If the README.md does not exist in the repository it is
    created with default contents and metadata of the uploaded projects, if it exists,
    its metadata are updated as necessary.
    """
    # Imported lazily: huggingface_hub is only needed by upload/download.
    from huggingface_hub import HfApi
    from huggingface_hub.utils import HfHubHTTPError, HFValidationError

    projects = hfh_util.get_matching_projects(project_ids_pattern)
    click.echo(f"Uploading project(s): {', '.join([p.project_id for p in projects])}")

    # Use the given commit message, or fall back to a generated one.
    commit_message = (
        commit_message
        if commit_message is not None
        else f"Upload project(s) {project_ids_pattern} with Annif"
    )

    fobjs, operations = [], []
    try:
        # prepare_commits opens the archive file objects (fobjs) and builds
        # the commit operations; everything is pushed in a single commit.
        fobjs, operations = hfh_util.prepare_commits(projects, repo_id, token)
        api = HfApi()
        api.create_commit(
            repo_id=repo_id,
            operations=operations,
            commit_message=commit_message,
            revision=revision,
            token=token,
        )
    except (HfHubHTTPError, HFValidationError) as err:
        # Wrap Hub errors in Annif's own exception type.
        raise OperationFailedException(str(err))
    else:
        # Only touch the model card after a successful commit.
        if modelcard:
            hfh_util.upsert_modelcard(repo_id, projects, token, revision)
    finally:
        # Close the opened archive file objects even if the commit failed.
        for fobj in fobjs:
            fobj.close()
736
737
738
@cli.command("download")
@click.argument("project_ids_pattern")
@click.argument("repo_id")
@click.option(
    "--token",
    help="""Authentication token, obtained from the Hugging Face Hub.
    Will default to the stored token.""",
)
@click.option(
    "--revision",
    help="""
    An optional Git revision id which can be a branch name, a tag, or a commit
    hash.
    """,
)
@click.option(
    "--force",
    "-f",
    default=False,
    is_flag=True,
    help="Replace an existing project/vocabulary/config with the downloaded one",
)
@click.option(
    "--trust-repo",
    default=False,
    is_flag=True,
    help="Allow download from the repository even when it has no entries in the cache",
)
@cli_util.common_options
def run_download(project_ids_pattern, repo_id, token, revision, force, trust_repo):
    """
    Download selected projects and their vocabularies from a Hugging Face Hub
    repository.
    \f
    This command downloads the project and vocabulary archives and the
    configuration files of the projects that match the given
    `project_ids_pattern` from the specified Hugging Face Hub repository and
    unzips the archives to `data/` directory and places the configuration files
    to `projects.d/` directory. An authentication token and revision can be given with
    options. If the repository hasn’t been used for downloads previously
    (i.e., it doesn’t appear in the Hugging Face Hub cache on local system), the
    `--trust-repo` option needs to be used.
    """

    # Refuse to download from an unknown repository unless --trust-repo is set.
    hfh_util.check_is_download_allowed(trust_repo, repo_id)

    project_ids = hfh_util.get_matching_project_ids_from_hf_hub(
        project_ids_pattern, repo_id, token, revision
    )
    click.echo(f"Downloading project(s): {', '.join(project_ids)}")

    # Fetch each project archive and its config; collect the vocabularies
    # referenced by the configs so each is downloaded only once.
    vocab_ids = set()
    for project_id in project_ids:
        project_archive = hfh_util.download_from_hf_hub(
            f"projects/{project_id}.zip", repo_id, token, revision
        )
        hfh_util.unzip_archive(project_archive, force)
        config_path = hfh_util.download_from_hf_hub(
            f"{project_id}.cfg", repo_id, token, revision
        )
        vocab_ids.add(hfh_util.get_vocab_id_from_config(config_path))
        hfh_util.copy_project_config(config_path, force)

    for vocab_id in vocab_ids:
        vocab_archive = hfh_util.download_from_hf_hub(
            f"vocabs/{vocab_id}.zip", repo_id, token, revision
        )
        hfh_util.unzip_archive(vocab_archive, force)
806
807
808
@cli.command("completion")
@click.option("--bash", "shell", flag_value="bash")
@click.option("--zsh", "shell", flag_value="zsh")
@click.option("--fish", "shell", flag_value="fish")
def run_completion(shell):
    """Generate the script for tab-key autocompletion for the given shell. To enable the
    completion support in your current bash terminal session run\n
        source <(annif completion --bash)

    To enable the completion support in all new sessions first add the completion script
    in your home directory:\n
        annif completion --bash > ~/.annif-complete.bash

    Then make the script to be automatically sourced for new terminal sessions by adding
    the following to your ~/.bashrc file (or in some alternative startup file)\n
        source ~/.annif-complete.bash
    """

    if shell is None:
        raise click.UsageError("Shell not given, try --bash, --zsh or --fish")

    # FIX: import the submodule explicitly — a bare ``import importlib`` does
    # not guarantee that ``importlib.metadata`` is available as an attribute.
    import importlib.metadata

    # ``shell`` can only be bash/zsh/fish (flag_value), so the command string
    # is safe. Click emits the completion script when this env var is set.
    script = os.popen(f"_ANNIF_COMPLETE={shell}_source annif").read()
    click.echo(f"# Generated by Annif {importlib.metadata.version('annif')}")
    click.echo(script)
832
833
834
@cli.command("detect-language")
@click.argument("languages")
@click.argument(
    "paths", type=click.Path(dir_okay=False, exists=True, allow_dash=True), nargs=-1
)
def run_detect_language(languages, paths):
    """
    Detect the language of a single text document from standard input or for one or more
    document file(s) given its/their path(s).
    """

    # LANGUAGES is a comma-separated list of candidate language codes.
    langs = tuple(languages.split(","))

    def show_detected(text, candidate_langs):
        # detect_language raises ValueError on invalid language codes.
        try:
            proportions = detect_language(text, candidate_langs)
        except ValueError as err:
            raise click.UsageError(err)
        for lang, score in proportions.items():
            label = "?" if lang == "unk" else lang
            click.echo(f"{label}\t{score:.04f}")

    # No paths, or the single pseudo-path "-", means read from stdin.
    use_stdin = not paths or (len(paths) == 1 and paths[0] == "-")
    if use_stdin:
        show_detected(sys.stdin.read(), langs)
    else:
        doclist = cli_util.open_text_documents(paths, docs_limit=None)
        for doc, path in zip(doclist.documents, paths):
            click.echo(f"Detected languages for {path}")
            show_detected(doc.text, langs)
865
866
867
if __name__ == "__main__":
868
    cli()
869