exabgp.configuration.flow.parser   F
last analyzed

Complexity

Total Complexity 106

Size/Duplication

Total Lines 391
Duplicated Lines 5.63 %

Importance

Changes 0
Metric Value
eloc 283
dl 22
loc 391
rs 2
c 0
b 0
f 0
wmc 106

32 Functions

Rating   Name   Duplication   Size   Complexity  
A any_port() 0 3 2
A fragment() 0 3 2
A source_port() 0 3 2
A discard() 0 3 1
A packet_length() 0 3 2
A flow_label() 0 3 2
A destination_port() 0 3 2
A protocol() 0 3 2
A next_header() 0 3 2
A traffic_class() 0 3 2
A dscp() 0 3 2
A next_hop() 0 8 2
A rate_limit() 0 9 4
A icmp_code() 0 3 2
C _generic_condition() 0 33 11
A tcp_flags() 0 3 2
A accept() 0 2 1
A icmp_type() 0 3 2
A redirect_next_hop_ietf() 0 6 2
D _operator_numeric() 0 26 12
B redirect() 0 29 8
A destination() 11 13 4
A copy() 0 2 1
A _operator_binary() 0 10 4
A mark() 0 12 4
A redirect_next_hop() 0 2 1
A interface_set() 0 14 4
A flow() 0 2 1
C _interface_set() 0 29 10
A source() 11 13 4
A action() 0 10 3
A _value() 0 10 3

How to fix   Duplicated Code    Complexity   

Duplicated Code

Duplicate code is one of the most pungent code smells. A rule that is often used is to re-structure code once it is duplicated in three or more places.

Common duplication problems, and their corresponding solutions, are:

Complexity

 Tip:   Before tackling complexity, make sure that you eliminate any duplication first. This often can reduce the size of classes significantly.

Complex classes like exabgp.configuration.flow.parser often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.

1
from exabgp.protocol.ip import IP
2
from exabgp.protocol.ip import NoNextHop
3
from exabgp.protocol.family import AFI
4
5
from exabgp.bgp.message.open.asn import ASN
6
7
# from exabgp.bgp.message.update.nlri.flow import Flow
8
from exabgp.bgp.message.update.nlri.flow import BinaryOperator
9
from exabgp.bgp.message.update.nlri.flow import NumericOperator
10
from exabgp.bgp.message.update.nlri.flow import Flow4Source
11
from exabgp.bgp.message.update.nlri.flow import Flow4Destination
12
from exabgp.bgp.message.update.nlri.flow import Flow6Source
13
from exabgp.bgp.message.update.nlri.flow import Flow6Destination
14
from exabgp.bgp.message.update.nlri.flow import FlowSourcePort
15
from exabgp.bgp.message.update.nlri.flow import FlowDestinationPort
16
from exabgp.bgp.message.update.nlri.flow import FlowAnyPort
17
from exabgp.bgp.message.update.nlri.flow import FlowIPProtocol
18
from exabgp.bgp.message.update.nlri.flow import FlowNextHeader
19
from exabgp.bgp.message.update.nlri.flow import FlowTCPFlag
20
from exabgp.bgp.message.update.nlri.flow import FlowFragment
21
from exabgp.bgp.message.update.nlri.flow import FlowPacketLength
22
from exabgp.bgp.message.update.nlri.flow import FlowICMPType
23
from exabgp.bgp.message.update.nlri.flow import FlowICMPCode
24
from exabgp.bgp.message.update.nlri.flow import FlowDSCP
25
from exabgp.bgp.message.update.nlri.flow import FlowTrafficClass
26
from exabgp.bgp.message.update.nlri.flow import FlowFlowLabel
27
from exabgp.bgp.message.update.nlri import Flow
28
29
from exabgp.bgp.message.update.attribute import NextHop
30
from exabgp.bgp.message.update.attribute import NextHopSelf
31
from exabgp.bgp.message.update.attribute import Attributes
32
from exabgp.bgp.message.update.attribute.community.extended import TrafficRate
33
from exabgp.bgp.message.update.attribute.community.extended import TrafficAction
34
from exabgp.bgp.message.update.attribute.community.extended import TrafficRedirect
35
from exabgp.bgp.message.update.attribute.community.extended import TrafficRedirectASN4
36
from exabgp.bgp.message.update.attribute.community.extended import TrafficMark
37
from exabgp.bgp.message.update.attribute.community.extended import TrafficRedirectIPv6
38
from exabgp.bgp.message.update.attribute.community.extended import TrafficNextHopIPv4IETF
39
from exabgp.bgp.message.update.attribute.community.extended import TrafficNextHopIPv6IETF
40
from exabgp.bgp.message.update.attribute.community.extended import TrafficNextHopSimpson
41
42
from exabgp.bgp.message.update.attribute.community.extended import InterfaceSet
43
44
from exabgp.bgp.message.update.attribute.community.extended import ExtendedCommunities
45
from exabgp.bgp.message.update.attribute.community.extended import ExtendedCommunitiesIPv6
46
47
from exabgp.rib.change import Change
48
49
from exabgp.logger import log
50
51
52
def flow(tokeniser):
    """Start a new, empty flow route: a blank Flow NLRI with blank attributes.

    The tokeniser argument is accepted for parser-API uniformity but unused.
    """
    change = Change(Flow(), Attributes())
    return change
54
55
56
def source(tokeniser):
    """Parse a flow "source" match and yield the matching NLRI component.

    Accepts an IPv4 prefix ``a.b.c.d/mask``, an IPv6 prefix ``ip/mask``,
    or an IPv6 prefix with offset ``ip/mask/offset``.

    NOTE(review): a bare address with no ``/mask`` raises ValueError from
    the unpacking below — presumably callers always supply a mask; confirm.
    """
    data = tokeniser()
    if data.count('.') == 3 and data.count(':') == 0:
        # IPv4: pack the dotted-quad octets directly into raw bytes
        ip, netmask = data.split('/')
        raw = bytes(int(octet) for octet in ip.split('.'))
        yield Flow4Source(raw, int(netmask))
    elif data.count('/') == 1:
        # IPv6 without an explicit offset: the offset defaults to 0
        ip, netmask = data.split('/')
        yield Flow6Source(IP.pton(ip), int(netmask), 0)
    else:
        # IPv6 with an explicit offset: ip/netmask/offset
        ip, netmask, offset = data.split('/')
        yield Flow6Source(IP.pton(ip), int(netmask), int(offset))
69
70
71
def destination(tokeniser):
    """Parse a flow "destination" match and yield the matching NLRI component.

    Accepts an IPv4 prefix ``a.b.c.d/mask``, an IPv6 prefix ``ip/mask``,
    or an IPv6 prefix with offset ``ip/mask/offset``.

    NOTE(review): a bare address with no ``/mask`` raises ValueError from
    the unpacking below — presumably callers always supply a mask; confirm.
    """
    data = tokeniser()
    if data.count('.') == 3 and data.count(':') == 0:
        # IPv4: pack the dotted-quad octets directly into raw bytes
        ip, netmask = data.split('/')
        raw = bytes(int(octet) for octet in ip.split('.'))
        yield Flow4Destination(raw, int(netmask))
    elif data.count('/') == 1:
        # IPv6 without an explicit offset: the offset defaults to 0
        ip, netmask = data.split('/')
        yield Flow6Destination(IP.pton(ip), int(netmask), 0)
    else:
        # IPv6 with an explicit offset: ip/netmask/offset
        ip, netmask, offset = data.split('/')
        yield Flow6Destination(IP.pton(ip), int(netmask), int(offset))
84
85
86
# Expressions
87
88
89
def _operator_numeric(string):
90
    try:
91
        char = string[0].lower()
92
        if char == '=':
93
            return NumericOperator.EQ, string[1:]
94
        elif char == '>':
95
            operator = NumericOperator.GT
96
        elif char == '<':
97
            operator = NumericOperator.LT
98
        elif char == '!':
99
            if string.startswith('!='):
100
                return NumericOperator.NEQ, string[2:]
101
            raise ValueError('invalid operator syntax %s' % string)
102
        elif char == 't' and string.lower().startswith('true'):
103
            return NumericOperator.TRUE, string[4:]
104
        elif char == 'f' and string.lower().startswith('false'):
105
            return NumericOperator.FALSE, string[5:]
106
        else:
107
            return NumericOperator.EQ, string
108
        if string[1] == '=':
109
            operator += NumericOperator.EQ
110
            return operator, string[2:]
111
        else:
112
            return operator, string[1:]
113
    except IndexError:
114
        raise ValueError('Invalid expression (too short) %s' % string)
115
116
117
def _operator_binary(string):
118
    try:
119
        if string[0] == '=':
120
            return BinaryOperator.MATCH, string[1:]
121
        elif string[0] == '!':
122
            return BinaryOperator.NOT, string[1:]
123
        else:
124
            return BinaryOperator.INCLUDE, string
125
    except IndexError:
126
        raise ValueError('Invalid expression (too short) %s' % string)
127
128
129
def _value(string):
130
    ls = 0
131
    for c in string:
132
        if c not in [
133
            '&',
134
        ]:
135
            ls += 1
136
            continue
137
        break
138
    return string[:ls], string[ls:]
139
140
141
# parse [ content1 content2 content3 ]
# parse =80 or >80 or <25 or &>10<20
def _generic_condition(tokeniser, klass):
    """Parse one flow condition (or a bracketed list) into klass instances.

    Each token is an operator/value expression; expressions chained with
    '&' are AND-ed together by OR-ing BinaryOperator.AND into the next
    component's operator byte. Yields klass(operator, converted_value).
    Raises ValueError on an unknown chaining character or a trailing '&'.
    """
    # pick the operator parser matching the rule family of klass
    _operator = _operator_binary if klass.OPERATION == 'binary' else _operator_numeric
    data = tokeniser()
    # AND carries the chaining state: NOP for a fresh expression,
    # BinaryOperator.AND when the previous token ended with '&'
    AND = BinaryOperator.NOP
    if data == '[':
        # bracketed list: keep pulling tokens until the closing ']'
        data = tokeniser()
        while True:
            if data == ']':
                break
            operator, _ = _operator(data)
            value, data = _value(_)
            # XXX: should do a check that the rule is valid for the family
            yield klass(AND | operator, klass.converter(value))
            if data:
                # leftover text in the same token must be an '&' chain
                if data[0] != '&':
                    raise ValueError("Unknown binary operator %s" % data[0])
                AND = BinaryOperator.AND
                data = data[1:]
                if not data:
                    raise ValueError("Can not finish an expresion on an &")
            else:
                # token fully consumed: reset chaining and fetch the next
                AND = BinaryOperator.NOP
                data = tokeniser()
    else:
        # single token: consume expressions until the string is exhausted
        while data:
            operator, _ = _operator(data)
            value, data = _value(_)
            yield klass(operator | AND, klass.converter(value))
            if data:
                if data[0] != '&':
                    raise ValueError("Unknown binary operator %s" % data[0])
                AND = BinaryOperator.AND
                data = data[1:]
176
177
178
def any_port(tokeniser):
    """Yield FlowAnyPort components parsed from the next token(s)."""
    # delegate the whole generator instead of re-yielding item by item
    yield from _generic_condition(tokeniser, FlowAnyPort)
181
182
183
def source_port(tokeniser):
    """Yield FlowSourcePort components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowSourcePort)
186
187
188
def destination_port(tokeniser):
    """Yield FlowDestinationPort components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowDestinationPort)
191
192
193
def packet_length(tokeniser):
    """Yield FlowPacketLength components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowPacketLength)
196
197
198
def tcp_flags(tokeniser):
    """Yield FlowTCPFlag components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowTCPFlag)
201
202
203
def protocol(tokeniser):
    """Yield FlowIPProtocol components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowIPProtocol)
206
207
208
def next_header(tokeniser):
    """Yield FlowNextHeader components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowNextHeader)
211
212
213
def icmp_type(tokeniser):
    """Yield FlowICMPType components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowICMPType)
216
217
218
def icmp_code(tokeniser):
    """Yield FlowICMPCode components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowICMPCode)
221
222
223
def fragment(tokeniser):
    """Yield FlowFragment components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowFragment)
226
227
228
def dscp(tokeniser):
    """Yield FlowDSCP components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowDSCP)
231
232
233
def traffic_class(tokeniser):
    """Yield FlowTrafficClass components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowTrafficClass)
236
237
238
def flow_label(tokeniser):
    """Yield FlowFlowLabel components parsed from the next token(s)."""
    yield from _generic_condition(tokeniser, FlowFlowLabel)
241
242
243
def next_hop(tokeniser):
    """Parse a next-hop: the literal 'self' or an IP address.

    Returns a NextHopSelf (IPv4) for 'self', otherwise a NextHop attribute
    built from the parsed address.
    """
    value = tokeniser()

    if value.lower() == 'self':
        return NextHopSelf(AFI.ipv4)

    ip = IP.create(value)
    return NextHop(ip.top(), ip.pack())
251
252
253
def accept(tokeniser):
    """The 'accept' action adds no attribute; simply return None."""
    return None
255
256
257
def discard(tokeniser):
    """Build the 'discard' action: a traffic-rate of zero bytes per second.

    README: We are setting the ASN as zero as that what Juniper (and Arbor)
    did when we created a local flow route.
    """
    zero_rate = TrafficRate(ASN(0), 0)
    return ExtendedCommunities().add(zero_rate)
260
261
262
def rate_limit(tokeniser):
    """Build a 'rate-limit' action: a traffic-rate community in bytes/second.

    Values above 10^12 are clamped to 10^12 with a warning; values below
    9600 (but non-zero) also trigger a warning.

    README: We are setting the ASN as zero as that what Juniper (and Arbor)
    did when we created a local flow route.
    """
    speed = int(tokeniser())
    if speed < 9600 and speed != 0:
        log.warning("rate-limiting flow under 9600 bytes per seconds may not work", 'configuration')
    if speed > 1000000000000:
        # BUG FIX: log the value actually requested before clamping it;
        # the previous code clamped first, so the message always showed
        # the cap itself instead of the out-of-range input
        log.warning("rate-limiting changed for 1 000 000 000 000 bytes from %s" % speed, 'configuration')
        speed = 1000000000000
    return ExtendedCommunities().add(TrafficRate(ASN(0), speed))
271
272
273
def redirect(tokeniser):
    """Parse a flow 'redirect' action.

    Three accepted shapes:
      * a bare IP          -> redirect-to-nexthop community
      * ``asn:target``     -> redirect route-target community (ASN2 or ASN4)
      * ``ipv6:asn``       -> IPv6-address-specific redirect community

    Returns ``(next_hop, communities)``; raises ValueError for the
    deprecated ``ip:target`` form and for out-of-range numbers.
    """
    data = tokeniser()
    separators = data.count(':')

    if separators == 0:
        # a bare IP address: redirect traffic to that next-hop
        return IP.create(data), ExtendedCommunities().add(TrafficNextHopSimpson(False))

    if separators > 1:
        # IPv6 form: everything before the last ':' is the address
        pieces = data.split(':')
        ip = ':'.join(pieces[:-1])
        asn = int(pieces[-1])
        return IP.create(ip), ExtendedCommunities().add(TrafficRedirectIPv6(ip, asn))

    prefix, suffix = data.split(':', 1)
    if '.' in prefix:
        raise ValueError(
            'this format has been deprecated as it does not make sense and it is not supported by other vendors'
        )

    asn = int(prefix)
    route_target = int(suffix)

    if asn >= pow(2, 32):
        raise ValueError('asn is a 32 bits number, value too large %s' % asn)
    if asn >= pow(2, 16):
        # 4-byte ASN leaves only 16 bits for the route-target
        if route_target >= pow(2, 16):
            raise ValueError('asn is a 32 bits number, route target can only be 16 bit %s' % route_target)
        return NoNextHop, ExtendedCommunities().add(TrafficRedirectASN4(asn, route_target))
    if route_target >= pow(2, 32):
        raise ValueError('route target is a 32 bits number, value too large %s' % route_target)
    return NoNextHop, ExtendedCommunities().add(TrafficRedirect(asn, route_target))
302
303
304
def redirect_next_hop(tokeniser):
    """Build a redirect-to-nexthop community (Simpson draft variant)."""
    community = TrafficNextHopSimpson(False)
    return ExtendedCommunities().add(community)
306
307
308
def redirect_next_hop_ietf(tokeniser):
    """Build an IETF redirect-to-IP community for the given address.

    Uses the IPv4 or IPv6 extended-community container depending on the
    parsed address family.
    """
    ip = IP.create(tokeniser())
    if not ip.ipv4():
        return ExtendedCommunitiesIPv6().add(TrafficNextHopIPv6IETF(ip, False))
    return ExtendedCommunities().add(TrafficNextHopIPv4IETF(ip, False))
314
315
316
def copy(tokeniser):
    """Build a 'copy' action: redirect-to-nexthop with the copy flag set.

    Returns ``(ip, communities)``.
    """
    ip = IP.create(tokeniser())
    return ip, ExtendedCommunities().add(TrafficNextHopSimpson(True))
318
319
320
def mark(tokeniser):
    """Build a 'mark' action: a traffic-marking community with a DSCP value.

    The token must be a decimal number in 0..63; raises ValueError otherwise.
    """
    token = tokeniser()

    if not token.isdigit():
        raise ValueError('dscp is not a number')

    dscp_value = int(token)

    # DSCP is a six bit field
    if not 0 <= dscp_value <= 0b111111:
        raise ValueError('dscp is not a valid number')

    return ExtendedCommunities().add(TrafficMark(dscp_value))
332
333
334
def action(tokeniser):
    """Build a traffic-action community from a token naming its flags.

    The token is scanned for the substrings 'sample' and 'terminal';
    at least one must be present or ValueError is raised.
    """
    value = tokeniser()

    wants_sample = 'sample' in value
    wants_terminal = 'terminal' in value

    if not (wants_sample or wants_terminal):
        raise ValueError('invalid flow action')

    return ExtendedCommunities().add(TrafficAction(wants_sample, wants_terminal))
344
345
346
def _interface_set(data):
347
    if data.count(':') != 3:
348
        raise ValueError('not a valid format %s' % data)
349
350
    trans, direction, prefix, suffix = data.split(':', 3)
351
352
    if trans == 'transitive':
353
        trans = True
354
    elif trans == 'non-transitive':
355
        trans = False
356
    else:
357
        raise ValueError('Bad transitivity type %s, should be transitive or non-transitive' % trans)
358
    if prefix.count('.'):
359
        raise ValueError('a 32 bits number must be used, invalid value %s' % prefix)
360
    if direction == 'input':
361
        int_direction = 1
362
    elif direction == 'output':
363
        int_direction = 2
364
    elif direction == 'input-output':
365
        int_direction = 3
366
    else:
367
        raise ValueError('Bad direction %s, should be input, output or input-output' % direction)
368
    asn = int(prefix)
369
    route_target = int(suffix)
370
    if asn >= pow(2, 32):
371
        raise ValueError('asn can only be 32 bits, value too large %s' % asn)
372
    if route_target >= pow(2, 14):
373
        raise ValueError('group-id is a 14 bits number, value too large %s' % route_target)
374
    return InterfaceSet(trans, asn, route_target, int_direction)
375
376
377
def interface_set(tokeniser):
    """Parse one interface-set community, or a '[' ... ']' list of them.

    Returns an ExtendedCommunities holding every parsed InterfaceSet.
    """
    communities = ExtendedCommunities()

    token = tokeniser()
    if token != '[':
        communities.add(_interface_set(token))
        return communities

    # bracketed form: consume tokens until the closing bracket
    while True:
        token = tokeniser()
        if token == ']':
            break
        communities.add(_interface_set(token))

    return communities
391