Passed
Push — master (ea3d13...565e54) by Marek
93:33 (92:04 queued)

test_suite.parse_args()    Rating: B

Complexity
    Conditions        1

Size
    Total Lines       90
    Code Lines        62

Duplication
    Duplicated Lines  0
    Ratio             0 %

Importance
    Changes           0

Metric    Value
cc        1
eloc      62
nop       0
dl        0
loc       90
rs        8.2436
c         0
b         0
f         0
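
For orientation, size and complexity values like these can be approximated locally with a static-analysis library. Below is a minimal sketch using radon; this is an assumption for illustration only, since the analyzer behind this report and its exact metric definitions are not named, so the numbers may not match exactly.

# Hypothetical local check with radon (pip install radon); treat the output
# as an approximation of the size/complexity figures above.
from __future__ import print_function

from radon.complexity import cc_visit
from radon.raw import analyze

with open("test_suite.py") as source_file:  # assumed filename of the module below
    source = source_file.read()

raw = analyze(source)           # raw line counts for the whole module
print("total lines:", raw.loc)
print("code lines:", raw.sloc)

for block in cc_visit(source):  # per-function cyclomatic complexity
    if block.name == "parse_args":
        print("cyclomatic complexity:", block.complexity)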

How to fix: Long Method

Small methods make your code easier to understand, especially when combined with a good name. And when a method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for its name.

The most commonly applied refactoring here is Extract Method: move a coherent block of the long method into its own small, well-named method and call it from the original.
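
For instance, here is a minimal, purely illustrative sketch of Extract Method; the function names are made up for the example and not taken from the code under review. The commented block is pulled out into its own small function whose name is derived from the comment.

# Before: an explanatory comment marks a block that wants to be its own method.
def report(results):
    # compute the pass ratio
    passed = sum(1 for result in results if result == "pass")
    ratio = float(passed) / len(results) if results else 0.0
    print("pass ratio: {0:.0%}".format(ratio))


# After: the commented block becomes a method named after the comment.
def pass_ratio(results):
    passed = sum(1 for result in results if result == "pass")
    return float(passed) / len(results) if results else 0.0


def report(results):
    print("pass ratio: {0:.0%}".format(pass_ratio(results)))

The full source of the analyzed module, including the flagged parse_args() method, follows.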

#!/usr/bin/env python2
from __future__ import print_function

import argparse
import logging
import os
import os.path
import time
import sys

from ssg_test_suite.log import LogHelper
import ssg_test_suite.oscap
import ssg_test_suite.virt
import ssg_test_suite.profile
import ssg_test_suite.rule
from ssg_test_suite import xml_operations


def parse_args():
    parser = argparse.ArgumentParser()

    common_parser = argparse.ArgumentParser(add_help=False)
    common_parser.add_argument("--hypervisor",
                               dest="hypervisor",
                               metavar="HYPERVISOR",
                               default="qemu:///session",
                               help="libvirt hypervisor")
    common_parser.add_argument("--domain",
                               dest="domain_name",
                               metavar="DOMAIN",
                               required=True,
                               help=("Specify libvirt domain to be used as a test "
                                     "bed. This domain will get remediations "
                                     "applied in it, possibly making system "
                                     "unusable for a moment. Snapshot will be "
                                     "reverted immediately afterwards. "
                                     "Domain will be returned without changes"))
    common_parser.add_argument("--datastream",
                               dest="datastream",
                               metavar="DATASTREAM",
                               required=True,
                               help=("Path to the Source DataStream on this "
                                     "machine which is going to be tested"))
    common_parser.add_argument("--xccdf-id",
                               dest="xccdf_id",
                               metavar="REF-ID",
                               required=True,
                               help="Reference ID related to benchmark to be used."
                                    " Get one using 'oscap info <datastream>'.")
    common_parser.add_argument("--loglevel",
                               dest="loglevel",
                               metavar="LOGLEVEL",
                               default="INFO",
                               help="Default level of console output")
    common_parser.add_argument("--logdir",
                               dest="logdir",
                               metavar="LOGDIR",
                               default=None,
                               help="Directory to which all output is saved")

    common_parser.add_argument(
        "--remediate-using",
        dest="remediate_using",
        default="oscap",
        choices=ssg_test_suite.oscap.REMEDIATION_RULE_RUNNERS.keys(),
        help="What type of remediations to use - openscap online one, "
        "or remediation done by using remediation roles "
        "that are saved to disk beforehand.")

    subparsers = parser.add_subparsers(dest='subparser_name',
                                       help='Subcommands: profile, rule')

    parser_profile = subparsers.add_parser('profile',
                                           help=('Testing profile-based '
                                                 'remediation applied on already '
                                                 'installed machine'),
                                           parents=[common_parser])
    parser_profile.set_defaults(func=ssg_test_suite.profile.perform_profile_check)
    parser_rule = subparsers.add_parser('rule',
                                        help=('Testing remediations of particular '
                                              'rule for various situations - '
                                              'currently not supported '
                                              'by openscap!'),
                                        parents=[common_parser])
    parser_rule.set_defaults(func=ssg_test_suite.rule.perform_rule_check)

    parser_profile.add_argument("target",
                                nargs="+",
                                metavar="DSPROFILE",
                                help=("Profiles to be tested, 'ALL' means every "
                                      "profile of particular benchmark will be "
                                      "evaluated."))

    parser_rule.add_argument("target",
                             nargs="+",
                             metavar="RULE",
                             help=("Rule to be tested, 'ALL' means every "
                                   "rule-testing scenario will be evaluated. Each "
                                   "target is handled as a substring - so you can "
                                   "ask for subset of all rules this way. (If you "
                                   "type ipv6 as a target, all rules containing "
                                   "ipv6 within id will be performed."))
    parser_rule.add_argument("--dontclean",
                             dest="dont_clean",
                             action="store_true",
                             help="Do not remove html reports of successful runs")

    return parser.parse_args()


def main():
    options = parse_args()

    log = logging.getLogger()
    # this is general logger level - needs to be
    # debug otherwise it cuts silently everything
    log.setLevel(logging.DEBUG)

    try:
        bench_id = xml_operations.infer_benchmark_id_from_component_ref_id(
            options.datastream, options.xccdf_id)
        options.benchmark_id = bench_id
    except RuntimeError as exc:
        msg = "Error inferring benchmark ID: {}".format(str(exc))
        logging.error(msg)
        sys.exit(1)

    LogHelper.add_console_logger(log, options.loglevel)
    # logging dir needs to be created based on other options
    # thus we have to postprocess it
    if 'ALL' in options.target:
        options.target = ['ALL']
    if options.logdir is None:
        # default!
        prefix = options.subparser_name
        body = ""
        if 'ALL' in options.target:
            body = 'ALL'
        else:
            body = 'custom'

        date_string = time.strftime('%Y-%m-%d-%H%M', time.localtime())
        logging_dir = os.path.join(os.getcwd(),
                                   'logs',
                                   '{0}-{1}-{2}'.format(prefix,
                                                        body,
                                                        date_string))
        logging_dir = LogHelper.find_name(logging_dir)
    else:
        logging_dir = LogHelper.find_name(options.logdir)
    LogHelper.add_logging_dir(log, logging_dir)

    options.func(options)


if __name__ == "__main__":
    main()
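
Applied to the listing above, one way the Extract Method refactoring could look is sketched below. It reuses the module's existing imports; the helper names (_build_common_parser and so on) are illustrative assumptions, not identifiers from the project, and the repetitive add_argument calls are abbreviated in comments.

# Illustrative decomposition only; helper names are hypothetical and the
# shared options are abbreviated to keep the sketch short.
def _build_common_parser():
    common_parser = argparse.ArgumentParser(add_help=False)
    common_parser.add_argument("--hypervisor", dest="hypervisor",
                               metavar="HYPERVISOR", default="qemu:///session",
                               help="libvirt hypervisor")
    # ... the remaining shared options (--domain, --datastream, --xccdf-id,
    # --loglevel, --logdir, --remediate-using) would be added here as well
    return common_parser


def _add_profile_subcommand(subparsers, common_parser):
    parser_profile = subparsers.add_parser('profile', parents=[common_parser],
                                           help='Testing profile-based remediation')
    parser_profile.set_defaults(func=ssg_test_suite.profile.perform_profile_check)
    parser_profile.add_argument("target", nargs="+", metavar="DSPROFILE",
                                help="Profiles to be tested ('ALL' means every one)")


def _add_rule_subcommand(subparsers, common_parser):
    parser_rule = subparsers.add_parser('rule', parents=[common_parser],
                                        help='Testing remediations of particular rules')
    parser_rule.set_defaults(func=ssg_test_suite.rule.perform_rule_check)
    parser_rule.add_argument("target", nargs="+", metavar="RULE",
                             help="Rules to be tested ('ALL' means every scenario)")
    parser_rule.add_argument("--dontclean", dest="dont_clean", action="store_true",
                             help="Do not remove html reports of successful runs")


def parse_args():
    # The top-level method now only wires the pieces together.
    parser = argparse.ArgumentParser()
    common_parser = _build_common_parser()
    subparsers = parser.add_subparsers(dest='subparser_name',
                                       help='Subcommands: profile, rule')
    _add_profile_subcommand(subparsers, common_parser)
    _add_rule_subcommand(subparsers, common_parser)
    return parser.parse_args()

Each helper stays small enough to be named after the thing it configures, which is exactly the point of the hint above.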