# encoding: utf-8
"""
inet/parser.py

Created by Thomas Mangin on 2015-06-04.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""

from struct import pack
import sys

from exabgp.util import character
from exabgp.util import concat_bytes_i

from exabgp.protocol.ip import IP
from exabgp.protocol.ip import IPSelf
from exabgp.protocol.ip import IPRange
from exabgp.protocol.family import AFI
# from exabgp.protocol.family import SAFI

from exabgp.bgp.message import OUT
from exabgp.bgp.message.update.nlri import CIDR
from exabgp.bgp.message.update.nlri import INET
from exabgp.bgp.message.update.nlri import IPVPN

from exabgp.bgp.message.open import ASN
from exabgp.bgp.message.open import RouterID
from exabgp.bgp.message.update.attribute import Attribute
from exabgp.bgp.message.update.attribute import Attributes
from exabgp.bgp.message.update.attribute import NextHop
from exabgp.bgp.message.update.attribute import NextHopSelf
from exabgp.bgp.message.update.attribute import Origin
from exabgp.bgp.message.update.attribute import MED
from exabgp.bgp.message.update.attribute import ASPath
from exabgp.bgp.message.update.attribute import LocalPreference
from exabgp.bgp.message.update.attribute import AtomicAggregate
from exabgp.bgp.message.update.attribute import Aggregator
from exabgp.bgp.message.update.attribute import Aggregator4
from exabgp.bgp.message.update.attribute import OriginatorID
from exabgp.bgp.message.update.attribute import ClusterID
from exabgp.bgp.message.update.attribute import ClusterList
from exabgp.bgp.message.update.attribute import AIGP
from exabgp.bgp.message.update.attribute import GenericAttribute

from exabgp.bgp.message.update.attribute.community import Community
from exabgp.bgp.message.update.attribute.community import Communities
from exabgp.bgp.message.update.attribute.community import LargeCommunity
from exabgp.bgp.message.update.attribute.community import LargeCommunities
from exabgp.bgp.message.update.attribute.community import ExtendedCommunity
from exabgp.bgp.message.update.attribute.community import ExtendedCommunities

from exabgp.bgp.message.update.nlri.qualifier import PathInfo

from exabgp.rib.change import Change


if sys.version_info > (3,):
    long = int


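# prefix() reads one token such as '192.0.2.0/24' or '2001:db8::/32' and returns
# an IPRange; a missing mask defaults to /32, or /128 when the address contains
# ':' and is therefore IPv6.  Illustrative examples, assuming tokens as produced
# by the exabgp configuration tokeniser:
#   '192.0.2.0/24' -> IPRange for 192.0.2.0/24
#   '192.0.2.1'    -> IPRange for 192.0.2.1/32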
def prefix (tokeniser):
    # XXX: could raise
    ip = tokeniser()
    try:
        ip,mask = ip.split('/')
    except ValueError:
        mask = '32'
        if ':' in ip:
            mask = '128'

    tokeniser.afi = IP.toafi(ip)
    iprange = IPRange.create(ip,mask)

    if iprange.address() & iprange.mask.hostmask() != 0:
        raise ValueError('invalid network %s for netmask %s' % (ip,mask))

    return iprange


def path_information (tokeniser):
    pi = tokeniser()
    if pi.isdigit():
        return PathInfo(integer=int(pi))
    else:
        return PathInfo(ip=pi)


def next_hop (tokeniser):
    value = tokeniser()

    if value.lower() == 'self':
        return IPSelf(tokeniser.afi),NextHopSelf(tokeniser.afi)
    else:
        ip = IP.create(value)
        return ip,NextHop(ip.top())


# XXX: using OUT.UNSET should we use the following ?
# action = OUT.ANNOUNCE if tokeniser.announce else OUT.WITHDRAW

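# inet() and mpls() below wrap the parsed prefix in an NLRI (INET or IPVPN) and
# return it inside a Change holding an empty Attributes set; the configuration
# code is expected to populate the attributes afterwards.  A hedged sketch of
# the intended use (the real call sequence lives in the configuration code):
#   change = inet(tokeniser)              # e.g. for 'route 192.0.2.0/24 ...'
#   change.attributes.add(med(tokeniser))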
def inet (tokeniser):
    ipmask = prefix(tokeniser)
    inet = INET(
        afi=IP.toafi(ipmask.top()),
        safi=IP.tosafi(ipmask.top()),
        action=OUT.UNSET
    )
    inet.cidr = CIDR(ipmask.ton(),ipmask.mask)

    return Change(
        inet,
        Attributes()
    )


# XXX: using OUT.ANNOUNCE should we use the following ?
# action = OUT.ANNOUNCE if tokeniser.announce else OUT.WITHDRAW


def mpls (tokeniser):
    ipmask = prefix(tokeniser)
    mpls = IPVPN(
        afi=IP.toafi(ipmask.top()),
        safi=IP.tosafi(ipmask.top()),
        action=OUT.ANNOUNCE
    )
    mpls.cidr = CIDR(ipmask.ton(),ipmask.mask)

    return Change(
        mpls,
        Attributes()
    )

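# attribute() parses a raw, unknown BGP attribute written as
#   attribute [ 0xCODE 0xFLAG 0xDATA ]
# and returns a GenericAttribute; code and flag are hexadecimal numbers and data
# is an even-length hexadecimal string ('0x' alone gives empty data).  Example
# token stream (illustrative only): '[', '0x63', '0xc0', '0x01020304', ']'.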
def attribute (tokeniser):
    start = tokeniser()
    if start != '[':
        raise ValueError('invalid attribute, does not start with [')

    code = tokeniser().lower()
    if not code.startswith('0x'):
        raise ValueError('invalid attribute, code is not 0x hexadecimal')
    try:
        code = int(code,16)
    except ValueError:
        raise ValueError('invalid attribute, code is not 0x hexadecimal')

    flag = tokeniser().lower()
    if not flag.startswith('0x'):
        raise ValueError('invalid attribute, flag is not 0x hexadecimal')
    try:
        flag = int(flag,16)
    except ValueError:
        raise ValueError('invalid attribute, flag is not 0x hexadecimal')

    data = tokeniser().lower()
    if not data.startswith('0x'):
        raise ValueError('invalid attribute, data is not 0x hexadecimal')
    if len(data) % 2:
        raise ValueError('invalid attribute, data is not 0x hexadecimal')
    data = concat_bytes_i(character(int(data[_:_+2],16)) for _ in range(2,len(data),2))

    end = tokeniser()
    if end != ']':
        raise ValueError('invalid attribute, does not end with ]')

    return GenericAttribute(code,flag,data)

    # for ((ID,flag),klass) in six.iteritems(Attribute.registered_attributes):
    #     length = len(data)
    #     if code == ID and flag | Attribute.Flag.EXTENDED_LENGTH == klass.FLAG | Attribute.Flag.EXTENDED_LENGTH:
    #         # if length > 0xFF or flag & Attribute.Flag.EXTENDED_LENGTH:
    #         #     raw = pack('!BBH',flag,code,length & (0xFF-Attribute.Flag.EXTENDED_LENGTH)) + data
    #         # else:
    #         #     raw = pack('!BBB',flag,code,length) + data
    #         return klass.unpack(data,None)

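# aigp() builds an AIGP attribute from a decimal or 0x-prefixed metric; the
# bytes packed below form the AIGP TLV: type 1, a two-byte length of 11, then
# the 8-byte accumulated metric.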
def aigp (tokeniser):
    if not tokeniser.tokens:
        raise ValueError('aigp requires number (decimal or hexadecimal 0x prefixed)')
    value = tokeniser()
    base = 16 if value.lower().startswith('0x') else 10
    try:
        number = int(value,base)
    except ValueError:
        raise ValueError('aigp requires number (decimal or hexadecimal 0x prefixed)')

    return AIGP(b'\x01\x00\x0b' + pack('!Q',number))


def origin (tokeniser):
    value = tokeniser().lower()
    if value == 'igp':
        return Origin(Origin.IGP)
    if value == 'egp':
        return Origin(Origin.EGP)
    if value == 'incomplete':
        return Origin(Origin.INCOMPLETE)
    raise ValueError('unknown origin %s' % value)


def med (tokeniser):
    value = tokeniser()
    if not value.isdigit():
        raise ValueError('invalid MED %s' % value)
    return MED(int(value))

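# as_path() accepts a single ASN or a bracketed sequence; inside the brackets a
# nested '( ... )' or '[ ... ]' group is collected as the AS-SET part.
# Illustrative forms: '65001' or '[ 65001 65002 ( 65003 65004 ) ]'.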
def as_path (tokeniser):
    as_seq = []
    as_set = []
    value = tokeniser()
    inset = False
    try:
        if value == '[':
            while True:
                value = tokeniser()
                if value == ',':
                    continue
                if value in ('(','['):
                    inset = True
                    while True:
                        value = tokeniser()
                        if value in (')',']'):
                            break
                        as_set.append(ASN.from_string(value))
                if value == ')':
                    inset = False
                    continue
                if value == ']':
                    if inset:
                        inset = False
                        continue
                    break
                as_seq.append(ASN.from_string(value))
        else:
            as_seq.append(ASN.from_string(value))
    except ValueError:
        raise ValueError('could not parse as-path')
    return ASPath(as_seq,as_set)


def local_preference (tokeniser):
    value = tokeniser()
    if not value.isdigit():
        raise ValueError('invalid local preference %s' % value)
    return LocalPreference(int(value))


def atomic_aggregate (tokeniser):
    return AtomicAggregate()

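# aggregator() expects 'asn:router-id', optionally wrapped in parentheses, and
# copes with both spaced and unspaced forms, e.g. '( 65000:10.0.0.1 )' or
# '(65000:10.0.0.1)' (examples are illustrative).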
def aggregator (tokeniser):
    agg = tokeniser.tokeniser()
    eat = True if agg == '(' else False

    if eat:
        agg = tokeniser()
        if agg.endswith(')'):
            eat = False
            agg = agg[:-1]
    elif agg.startswith('('):
        if agg.endswith(')'):
            eat = False
            agg = agg[1:-1]
        else:
            eat = True
            agg = agg[1:]

    try:
        as_number, address = agg.split(':')
        local_as = ASN.from_string(as_number)
        local_address = RouterID(address)
    except (ValueError,IndexError):
        raise ValueError('invalid aggregator')

    if eat:
        if tokeniser() != ')':
            raise ValueError('invalid aggregator')

    return Aggregator(local_as,local_address)


def originator_id (tokeniser):
    value = tokeniser()
    if value.count('.') != 3:
        raise ValueError('invalid Originator ID %s' % value)
    if not all(_.isdigit() for _ in value.split('.')):
        raise ValueError('invalid Originator ID %s' % value)
    return OriginatorID(value)


def cluster_list (tokeniser):
    clusterids = []
    value = tokeniser()
    try:
        if value == '[':
            while True:
                value = tokeniser()
                if value == ']':
                    break
                clusterids.append(ClusterID(value))
        else:
            clusterids.append(ClusterID(value))
        if not clusterids:
            raise ValueError('no cluster-id in the cluster list')
        return ClusterList(clusterids)
    except ValueError:
        raise ValueError('invalid cluster list')


# XXX: Community does not cache anymore .. we SHOULD really do it !

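# _community() turns one textual community into a Community: an 'asn:value'
# pair such as '65000:666', a 32-bit number (decimal or 0x prefixed), or one of
# the well-known names handled below (no-export, no-advertise, blackhole, ...).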
def _community (value):
    separator = value.find(':')
    if separator > 0:
        prefix = value[:separator]
        suffix = value[separator+1:]

        if not prefix.isdigit() or not suffix.isdigit():
            raise ValueError('invalid community %s' % value)

        prefix, suffix = int(prefix), int(suffix)

        if prefix > Community.MAX:
            raise ValueError('invalid community %s (prefix too large)' % value)

        if suffix > Community.MAX:
            raise ValueError('invalid community %s (suffix too large)' % value)

        return Community(pack('!L',(prefix << 16) + suffix))

    elif value[:2].lower() == '0x':
        number = long(value,16)
        if number > Community.MAX:
            raise ValueError('invalid community %s (too large)' % value)
        return Community(pack('!L',number))

    else:
        low = value.lower()
        if low == 'no-export':
            return Community(Community.NO_EXPORT)
        elif low == 'no-advertise':
            return Community(Community.NO_ADVERTISE)
        elif low == 'no-export-subconfed':
            return Community(Community.NO_EXPORT_SUBCONFED)
        # no-peer is not a correct syntax but I am sure someone will make the mistake :)
        elif low == 'nopeer' or low == 'no-peer':
            return Community(Community.NO_PEER)
        elif low == 'blackhole':
            return Community(Community.BLACKHOLE)
        elif value.isdigit():
            number = int(value)
            if number > Community.MAX:
                raise ValueError('invalid community %s (too large)' % value)
            return Community(pack('!L',number))
        else:
            raise ValueError('invalid community name %s' % value)


def community (tokeniser):
    communities = Communities()

    value = tokeniser()
    if value == '[':
        while True:
            value = tokeniser()
            if value == ']':
                break
            communities.add(_community(value))
    else:
        communities.add(_community(value))

    return communities

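# _large_community() parses RFC 8092 style 'global:data1:data2' triples such as
# '65000:1:2', or a plain number (decimal or 0x prefixed) split into three
# 32-bit words.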
def _large_community (value):
    separator = value.find(':')
    if separator > 0:
        prefix, affix, suffix = value.split(':')

        if not all(map(lambda c: c.isdigit(), [prefix, affix, suffix])):
            raise ValueError('invalid community %s' % value)

        prefix, affix, suffix = map(int, [prefix, affix, suffix])

        for i in [prefix, affix, suffix]:
            if i > LargeCommunity.MAX:
                raise ValueError('invalid community %i in %s too large' % (i, value))

        return LargeCommunity(pack('!LLL', prefix, affix, suffix))

    elif value[:2].lower() == '0x':
        number = int(value,16)
        if number > LargeCommunity.MAX:
            raise ValueError('invalid large community %s (too large)' % value)
        return LargeCommunity(pack('!LLL', number >> 64, (number >> 32) & 0xFFFFFFFF, number & 0xFFFFFFFF))

    else:
        low = value.lower()
        if value.isdigit():
            number = int(value)
            if number > LargeCommunity.MAX:
                raise ValueError('invalid large community %s (too large)' % value)
            return LargeCommunity(pack('!LLL', number >> 64, (number >> 32) & 0xFFFFFFFF, number & 0xFFFFFFFF))
        else:
            raise ValueError('invalid large community name %s' % value)


def large_community (tokeniser):
    large_communities = LargeCommunities()

    value = tokeniser()
    if value == '[':
        while True:
            value = tokeniser()
            if value == ']':
                break
            lc = _large_community(value)
            if lc in large_communities.communities:
                continue
            large_communities.add(lc)
    else:
        large_communities.add(_large_community(value))

    return large_communities

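# The tables below map the textual extended-community types accepted by
# _extended_community() to their two-byte type/sub-type header and to the
# number of ':'-separated fields each type expects.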
_HEADER = {
    # header and subheader
    'target':              character(0x00)+character(0x02),
    'target4':             character(0x02)+character(0x02),
    'origin':              character(0x00)+character(0x03),
    'origin4':             character(0x02)+character(0x03),
    'redirect':            character(0x80)+character(0x08),
    'l2info':              character(0x80)+character(0x0A),
    'redirect-to-nexthop': character(0x08)+character(0x00),
    'bandwidth':           character(0x40)+character(0x04),
}

_SIZE = {
    'target': 2,
    'target4': 2,
    'origin': 2,
    'origin4': 2,
    'redirect': 2,
    'l2info': 4,
    'redirect-to-nexthop': 0,
    'bandwidth': 2,
}

_SIZE_H = 0xFFFF

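# _extended_community() accepts a raw 8-byte 0x value, the keyword
# 'redirect-to-nexthop', or 'type:field:...' text such as 'target:65000:100',
# 'origin:192.0.2.1:20' or 'l2info:19:0:1500:111' (values illustrative); a bare
# 'asn:value' pair defaults to a route-target.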
def _extended_community (value):
    if value[:2].lower() == '0x':
        # we could raise if the length is not 8 bytes (16 chars)
        if len(value) % 2:
            raise ValueError('invalid extended community %s' % value)
        raw = concat_bytes_i(character(int(value[_:_+2],16)) for _ in range(2,len(value),2))
        return ExtendedCommunity.unpack(raw)
    elif value.count(':'):
        components = value.split(':')
        command = 'target' if len(components) == 2 else components.pop(0)

        if command not in _HEADER:
            raise ValueError('invalid extended community %s (only origin,target or l2info are supported) ' % command)

        if len(components) != _SIZE[command]:
            raise ValueError('invalid extended community %s, expecting %d fields ' % (command,_SIZE[command]))

        header = _HEADER.get(command,None)

        if header is None:
            raise ValueError('unknown extended community %s' % command)

        if command == 'l2info':
            # encaps, control, mtu, site
            return ExtendedCommunity.unpack(header+pack('!BBHH',*[int(_) for _ in components]))

        _ga,_la = components
        ga,la = _ga.upper(),_la.upper()

        if command in ('target','origin'):
            # global admin, local admin
            if '.' in ga or '.' in la:
                gc = ga.count('.')
                lc = la.count('.')
                if gc == 0 and lc == 3:
                    # ASN first, IP second
                    return ExtendedCommunity.unpack(header+pack('!HBBBB',int(ga),*[int(_) for _ in la.split('.')]))
                if gc == 3 and lc == 0:
                    # IP first, ASN second
                    return ExtendedCommunity.unpack(header+pack('!BBBBH',*[int(_) for _ in ga.split('.')]+[int(la)]))

        iga = int(ga[:-1]) if 'L' in ga else int(ga)
        ila = int(la[:-1]) if 'L' in la else int(la)

        if command == 'target':
            if ga.endswith('L') or iga > _SIZE_H:
                return ExtendedCommunity.unpack(_HEADER['target4']+pack('!LH',iga,ila),None)
            else:
                return ExtendedCommunity.unpack(header+pack('!HI',iga,ila),None)

        if command == 'origin':
            if ga.endswith('L') or iga > _SIZE_H:
                return ExtendedCommunity.unpack(_HEADER['origin4']+pack('!LH',iga,ila),None)
            else:
                return ExtendedCommunity.unpack(header+pack('!HI',iga,ila),None)

        if command == 'target4':
            return ExtendedCommunity.unpack(_HEADER['target4']+pack('!LH',iga,ila),None)

        if command == 'origin4':
            return ExtendedCommunity.unpack(_HEADER['origin4']+pack('!LH',iga,ila),None)

        if command == 'redirect':
            return ExtendedCommunity.unpack(header+pack('!HL',iga,ila),None)

        if command == 'bandwidth':
            return ExtendedCommunity.unpack(_HEADER['bandwidth']+pack('!Hf',iga,ila),None)

        raise ValueError('invalid extended community %s' % command)
    elif value == 'redirect-to-nexthop':
        header = _HEADER[value]
        return ExtendedCommunity.unpack(header+pack('!HL',0,0),None)
    else:
        raise ValueError('invalid extended community %s - lc+gc' % value)


# The previous code was extracting the extended-community class from the
# attributes and adding to it.

def extended_community (tokeniser):
    communities = ExtendedCommunities()

    value = tokeniser()
    if value == '[':
        while True:
            value = tokeniser()
            if value == ']':
                break
            communities.add(_extended_community(value))
    else:
        communities.add(_extended_community(value))

    return communities


# Duck class, faking part of the Attribute interface
# We add this to routes when we need to split a route into smaller routes
# The value stored is the longer netmask we want to use
# As this is not a real BGP attribute this stays in the configuration file


def name (tokeniser):
    class Name (str):
        ID = Attribute.CODE.INTERNAL_NAME

    return Name(tokeniser())

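# split() stores the more-specific prefix length used to break an announced
# route into smaller routes; the configuration syntax is '/<number>', for
# example 'split /27' (example illustrative).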
def split (tokeniser):
    class Split (int):
        ID = Attribute.CODE.INTERNAL_SPLIT

    cidr = tokeniser()

    if not cidr or cidr[0] != '/':
        raise ValueError('split /<number>')

    size = cidr[1:]

    if not size.isdigit():
        raise ValueError('split /<number>')

    return Split(int(size))

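# watchdog() records the watchdog name attached to a route; 'announce' and
# 'withdraw' are refused as names, presumably to avoid colliding with the
# watchdog commands of the same name.  withdraw() simply tags the route with
# the internal withdraw marker.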
def watchdog (tokeniser):
    class Watchdog (str):
        ID = Attribute.CODE.INTERNAL_WATCHDOG

    command = tokeniser()
    if command.lower() in ['announce','withdraw']:
        raise ValueError('invalid watchdog name %s' % command)
    return Watchdog(command)


def withdraw (tokeniser=None):
    class Withdrawn (object):
        ID = Attribute.CODE.INTERNAL_WITHDRAW

    return Withdrawn()