Total Complexity | 353 |
Total Lines | 1936 |
Duplicated Lines | 1.24% |
Coverage | 94.05% |
Changes | 0 |
Duplicate code is one of the most pungent code smells. A common rule of thumb is to restructure code once it is duplicated in three or more places.
The typical fix is to extract the shared logic into a single helper and call it from each duplication site, as in the sketch below.
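As an illustration only, the two mirrored VLAN-action blocks flagged in _prepare_direct_uni_flows (lines 1103-1127 of the listing) could be collapsed into one helper. The helper name and its placement are assumptions made for this sketch, not part of the mef_eline NApp:

    def _set_direct_uni_vlan_actions(flow_mod_out, flow_mod_in, vlan_out,
                                     vlan_in, special_cases):
        # Sketch of a shared helper for the duplicated A->Z / Z->A blocks.
        # vlan_out is the VLAN of the destination UNI, vlan_in of the source UNI.
        if not isinstance(vlan_out, list) and vlan_out not in special_cases:
            flow_mod_out["actions"].insert(
                0, {"action_type": "set_vlan", "vlan_id": vlan_out}
            )
            if not vlan_in:
                flow_mod_out["actions"].insert(
                    0, {"action_type": "push_vlan", "tag_type": "c"}
                )
            if vlan_in == 0:
                flow_mod_in["actions"].insert(0, {"action_type": "pop_vlan"})
        elif vlan_in == 0 and vlan_out == "4096/4096":
            flow_mod_in["actions"].insert(0, {"action_type": "pop_vlan"})

Each call site would then reduce to a single call, e.g. _set_direct_uni_vlan_actions(flow_mod_az, flow_mod_za, vlan_z, vlan_a, self.special_cases) for the A->Z direction, and the mirrored arguments for Z->A.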
Complex classes like build.models.evc often do many different things. To break such a class down, we need to identify a cohesive component within it. A common way to find such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring, as in the sketch below. If the component makes sense as a subclass, Extract Subclass is also a candidate and is often faster.
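A minimal sketch of such an extraction, assuming a hypothetical UniTagAllocator class (its name and boundaries are illustrative, not part of the NApp) that groups the UNI tag handling currently spread over _use_uni_vlan, make_uni_vlan_available and remove_uni_tags:

    class UniTagAllocator:
        """Hypothetical component extracted from EVCBase for UNI tag handling."""

        def __init__(self, controller):
            self._controller = controller

        def use(self, uni):
            # Reserve the UNI tag on its interface (mirrors _use_uni_vlan).
            if uni.user_tag is None:
                return
            uni.interface.use_tags(
                self._controller, uni.user_tag.value, uni.user_tag.tag_type,
                use_lock=True, check_order=False,
            )

        def release(self, uni):
            # Return the UNI tag to the pool (mirrors make_uni_vlan_available).
            if uni.user_tag is None:
                return
            uni.interface.make_tags_available(
                self._controller, uni.user_tag.value, uni.user_tag.tag_type,
                use_lock=True, check_order=False,
            )

EVCBase would then hold a single UniTagAllocator instance and delegate to it, keeping the tag bookkeeping in one place and shrinking the class.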
1 | """Classes used in the main application.""" # pylint: disable=too-many-lines |
||
2 | 1 | import traceback |
|
3 | 1 | from collections import OrderedDict, defaultdict |
|
4 | 1 | from copy import deepcopy |
|
5 | 1 | from datetime import datetime |
|
6 | 1 | from operator import eq, ne |
|
7 | 1 | from threading import Lock |
|
8 | 1 | from typing import Union |
|
9 | 1 | from uuid import uuid4 |
|
10 | |||
11 | 1 | import httpx |
|
12 | 1 | from glom import glom |
|
13 | 1 | from tenacity import (retry, retry_if_exception_type, stop_after_attempt, |
|
14 | wait_combine, wait_fixed, wait_random) |
||
15 | |||
16 | 1 | from kytos.core import log |
|
17 | 1 | from kytos.core.common import EntityStatus, GenericEntity |
|
18 | 1 | from kytos.core.exceptions import KytosNoTagAvailableError, KytosTagError |
|
19 | 1 | from kytos.core.helpers import get_time, now |
|
20 | 1 | from kytos.core.interface import UNI, Interface, TAGRange |
|
21 | 1 | from kytos.core.link import Link |
|
22 | 1 | from kytos.core.retry import before_sleep |
|
23 | 1 | from kytos.core.tag_ranges import range_difference |
|
24 | 1 | from napps.kytos.mef_eline import controllers, settings |
|
25 | 1 | from napps.kytos.mef_eline.exceptions import (ActivationError, |
|
26 | DuplicatedNoTagUNI, |
||
27 | EVCPathNotInstalled, |
||
28 | FlowModException, InvalidPath) |
||
29 | 1 | from napps.kytos.mef_eline.utils import (_does_uni_affect_evc, |
|
30 | compare_endpoint_trace, |
||
31 | compare_uni_out_trace, emit_event, |
||
32 | make_uni_list, map_dl_vlan, |
||
33 | map_evc_event_content, |
||
34 | merge_flow_dicts) |
||
35 | |||
36 | 1 | from .path import DynamicPathManager, Path |
|
37 | |||
38 | |||
39 | 1 | class EVCBase(GenericEntity): |
|
40 | """Class to represent a circuit.""" |
||
41 | |||
42 | 1 | attributes_requiring_redeploy = [ |
|
43 | "primary_path", |
||
44 | "backup_path", |
||
45 | "dynamic_backup_path", |
||
46 | "queue_id", |
||
47 | "sb_priority", |
||
48 | "primary_constraints", |
||
49 | "secondary_constraints", |
||
50 | "uni_a", |
||
51 | "uni_z", |
||
52 | ] |
||
53 | 1 | required_attributes = ["name", "uni_a", "uni_z"] |
|
54 | |||
55 | 1 | updatable_attributes = { |
|
56 | "uni_a", |
||
57 | "uni_z", |
||
58 | "name", |
||
59 | "start_date", |
||
60 | "end_date", |
||
61 | "queue_id", |
||
62 | "bandwidth", |
||
63 | "primary_path", |
||
64 | "backup_path", |
||
65 | "dynamic_backup_path", |
||
66 | "primary_constraints", |
||
67 | "secondary_constraints", |
||
68 | "owner", |
||
69 | "sb_priority", |
||
70 | "service_level", |
||
71 | "circuit_scheduler", |
||
72 | "metadata", |
||
73 | "enabled", |
||
74 | "max_paths", |
||
75 | } |
||
76 | |||
77 | # pylint: disable=too-many-statements |
||
78 | 1 | def __init__(self, controller, **kwargs): |
|
79 | """Create an EVC instance with the provided parameters. |
||
80 | |||
81 | Args: |
||
82 | id(str): EVC identifier. If it is None, an ID will be generated. |
||
83 | Only the first 14 characters passed will be used. |
||
84 | name: represents an EVC name.(Required) |
||
85 | uni_a (UNI): Endpoint A for User Network Interface.(Required) |
||
86 | uni_z (UNI): Endpoint Z for User Network Interface.(Required) |
||
87 | start_date(datetime|str): Date when the EVC was registered. |
||
88 | Default is now(). |
||
89 | end_date(datetime|str): Final date when the EVC will be finished. |
||
90 | Default is None. |
||
91 | bandwidth(int): Bandwidth used by EVC instance. Default is 0. |
||
92 | primary_links(list): Primary links used by evc. Default is [] |
||
93 | backup_links(list): Backups links used by evc. Default is [] |
||
94 | current_path(list): Circuit being used at the moment if this is an |
||
95 | active circuit. Default is []. |
||
96 | failover_path(list): Path being used to provide EVC protection via |
||
97 | failover during link failures. Default is []. |
||
98 | primary_path(list): primary circuit offered to user IF one or more |
||
99 | links were provided. Default is []. |
||
100 | backup_path(list): backup circuit offered to the user IF one or |
||
101 | more links were provided. Default is []. |
||
102 | dynamic_backup_path(bool): Enable computing the backup path dynamically. |
||
103 | Default is False. |
||
104 | creation_time(datetime|str): datetime when the circuit should be |
||
105 | activated. default is now(). |
||
106 | enabled(Boolean): attribute to indicate the administrative state; |
||
107 | default is False. |
||
108 | active(Boolean): attribute to indicate the operational state; |
||
109 | default is False. |
||
110 | archived(Boolean): indicate the EVC has been deleted and is |
||
111 | archived; default is False. |
||
112 | owner(str): The EVC owner. Default is None. |
||
113 | sb_priority(int): Southbound priority provided in the request. |
||
114 | Default is None. |
||
115 | service_level(int): Service level provided. The higher the better. |
||
116 | Default is 0. |
||
117 | |||
118 | Raises: |
||
119 | ValueError: raised when object attributes are invalid. |
||
120 | |||
121 | """ |
||
122 | 1 | self._controller = controller |
|
123 | 1 | self._validate(**kwargs) |
|
124 | 1 | super().__init__() |
|
125 | |||
126 | # required attributes |
||
127 | 1 | self._id = kwargs.get("id", uuid4().hex)[:14] |
|
128 | 1 | self.uni_a: UNI = kwargs.get("uni_a") |
|
129 | 1 | self.uni_z: UNI = kwargs.get("uni_z") |
|
130 | 1 | self.name = kwargs.get("name") |
|
131 | |||
132 | # optional attributes |
||
133 | 1 | self.start_date = get_time(kwargs.get("start_date")) or now() |
|
134 | 1 | self.end_date = get_time(kwargs.get("end_date")) or None |
|
135 | 1 | self.queue_id = kwargs.get("queue_id", -1) |
|
136 | |||
137 | 1 | self.bandwidth = kwargs.get("bandwidth", 0) |
|
138 | 1 | self.primary_links = Path(kwargs.get("primary_links", [])) |
|
139 | 1 | self.backup_links = Path(kwargs.get("backup_links", [])) |
|
140 | 1 | self.current_path = Path(kwargs.get("current_path", [])) |
|
141 | 1 | self.failover_path = Path(kwargs.get("failover_path", [])) |
|
142 | 1 | self.primary_path = Path(kwargs.get("primary_path", [])) |
|
143 | 1 | self.backup_path = Path(kwargs.get("backup_path", [])) |
|
144 | 1 | self.dynamic_backup_path = kwargs.get("dynamic_backup_path", False) |
|
145 | 1 | self.primary_constraints = kwargs.get("primary_constraints", {}) |
|
146 | 1 | self.secondary_constraints = kwargs.get("secondary_constraints", {}) |
|
147 | 1 | self.creation_time = get_time(kwargs.get("creation_time")) or now() |
|
148 | 1 | self.owner = kwargs.get("owner", None) |
|
149 | 1 | self.sb_priority = kwargs.get("sb_priority", None) or kwargs.get( |
|
150 | "priority", None |
||
151 | ) |
||
152 | 1 | self.service_level = kwargs.get("service_level", 0) |
|
153 | 1 | self.circuit_scheduler = kwargs.get("circuit_scheduler", []) |
|
154 | 1 | self.flow_removed_at = get_time(kwargs.get("flow_removed_at")) or None |
|
155 | 1 | self.updated_at = get_time(kwargs.get("updated_at")) or now() |
|
156 | 1 | self.execution_rounds = kwargs.get("execution_rounds", 0) |
|
157 | 1 | self.current_links_cache = set() |
|
158 | 1 | self.primary_links_cache = set() |
|
159 | 1 | self.backup_links_cache = set() |
|
160 | 1 | self.old_path = Path([]) |
|
161 | 1 | self.max_paths = kwargs.get("max_paths", 2) |
|
162 | |||
163 | 1 | self.lock = Lock() |
|
164 | |||
165 | 1 | self.archived = kwargs.get("archived", False) |
|
166 | |||
167 | 1 | self.metadata = kwargs.get("metadata", {}) |
|
168 | |||
169 | 1 | self._mongo_controller = controllers.ELineController() |
|
170 | |||
171 | 1 | if kwargs.get("active", False): |
|
172 | 1 | self.activate() |
|
173 | else: |
||
174 | 1 | self.deactivate() |
|
175 | |||
176 | 1 | if kwargs.get("enabled", False): |
|
177 | 1 | self.enable() |
|
178 | else: |
||
179 | 1 | self.disable() |
|
180 | |||
181 | # datetime of user request for a EVC (or datetime when object was |
||
182 | # created) |
||
183 | 1 | self.request_time = kwargs.get("request_time", now()) |
|
184 | # dict with the user original request (input) |
||
185 | 1 | self._requested = kwargs |
|
186 | |||
187 | # Special cases: No tag, any, untagged |
||
188 | 1 | self.special_cases = {None, "4096/4096", 0} |
|
189 | 1 | self.table_group = kwargs.get("table_group") |
|
190 | |||
191 | 1 | def sync(self, keys: set = None): |
|
192 | """Sync this EVC in the MongoDB.""" |
||
193 | 1 | self.updated_at = now() |
|
194 | 1 | if keys: |
|
195 | 1 | self._mongo_controller.update_evc(self.as_dict(keys)) |
|
196 | 1 | return |
|
197 | 1 | self._mongo_controller.upsert_evc(self.as_dict()) |
|
198 | |||
199 | 1 | def _get_unis_use_tags(self, **kwargs) -> tuple[UNI, UNI]: |
|
200 | """Obtain both UNIs (uni_a, uni_z). |
||
201 | If a UNI is changing, verify tags""" |
||
202 | 1 | uni_a = kwargs.get("uni_a", None) |
|
203 | 1 | uni_a_flag = False |
|
204 | 1 | if uni_a and uni_a != self.uni_a: |
|
205 | 1 | uni_a_flag = True |
|
206 | 1 | self._use_uni_vlan(uni_a, uni_dif=self.uni_a) |
|
207 | |||
208 | 1 | uni_z = kwargs.get("uni_z", None) |
|
209 | 1 | if uni_z and uni_z != self.uni_z: |
|
210 | 1 | try: |
|
211 | 1 | self._use_uni_vlan(uni_z, uni_dif=self.uni_z) |
|
212 | 1 | self.make_uni_vlan_available(self.uni_z, uni_dif=uni_z) |
|
213 | 1 | except KytosTagError as err: |
|
214 | 1 | if uni_a_flag: |
|
215 | 1 | self.make_uni_vlan_available(uni_a, uni_dif=self.uni_a) |
|
216 | 1 | raise err |
|
217 | else: |
||
218 | 1 | uni_z = self.uni_z |
|
219 | |||
220 | 1 | if uni_a_flag: |
|
221 | 1 | self.make_uni_vlan_available(self.uni_a, uni_dif=uni_a) |
|
222 | else: |
||
223 | 1 | uni_a = self.uni_a |
|
224 | 1 | return uni_a, uni_z |
|
225 | |||
226 | 1 | def update(self, **kwargs): |
|
227 | """Update evc attributes. |
||
228 | |||
229 | This method will raise an error when trying to change the following |
||
230 | attributes: [creation_time, active, current_path, failover_path, |
||
231 | _id, archived] |
||
232 | [name, uni_a and uni_z] |
||
233 | |||
234 | Returns: |
||
235 | the values for the enable and redeploy attributes, if they exist, and None |
||
236 | otherwise |
||
237 | Raises: |
||
238 | ValueError: message with error detail. |
||
239 | |||
240 | """ |
||
241 | 1 | enable, redeploy = (None, None) |
|
242 | 1 | if not self._tag_lists_equal(**kwargs): |
|
243 | 1 | raise ValueError( |
|
244 | "UNI_A and UNI_Z tag lists should be the same." |
||
245 | ) |
||
246 | 1 | uni_a, uni_z = self._get_unis_use_tags(**kwargs) |
|
247 | 1 | self._validate_has_primary_or_dynamic( |
|
248 | primary_path=kwargs.get("primary_path"), |
||
249 | dynamic_backup_path=kwargs.get("dynamic_backup_path"), |
||
250 | uni_a=uni_a, |
||
251 | uni_z=uni_z, |
||
252 | ) |
||
253 | 1 | for attribute, value in kwargs.items(): |
|
254 | 1 | if attribute not in self.updatable_attributes: |
|
255 | 1 | raise ValueError(f"{attribute} can't be updated.") |
|
256 | 1 | if attribute in ("primary_path", "backup_path"): |
|
257 | 1 | try: |
|
258 | 1 | value.is_valid( |
|
259 | uni_a.interface.switch, uni_z.interface.switch |
||
260 | ) |
||
261 | 1 | except InvalidPath as exception: |
|
262 | 1 | raise ValueError( # pylint: disable=raise-missing-from |
|
263 | f"{attribute} is not a " f"valid path: {exception}" |
||
264 | ) |
||
265 | 1 | for attribute, value in kwargs.items(): |
|
266 | 1 | if attribute == "enabled": |
|
267 | 1 | if value: |
|
268 | 1 | self.enable() |
|
269 | else: |
||
270 | 1 | self.disable() |
|
271 | 1 | enable = value |
|
272 | else: |
||
273 | 1 | setattr(self, attribute, value) |
|
274 | 1 | if attribute in self.attributes_requiring_redeploy: |
|
275 | 1 | redeploy = True |
|
276 | 1 | self.sync(set(kwargs.keys())) |
|
277 | 1 | return enable, redeploy |
|
278 | |||
279 | 1 | def set_flow_removed_at(self): |
|
280 | """Update flow_removed_at attribute.""" |
||
281 | self.flow_removed_at = now() |
||
282 | |||
283 | 1 | def has_recent_removed_flow(self, setting=settings): |
|
284 | """Check if any flow has been removed from the evc""" |
||
285 | if self.flow_removed_at is None: |
||
286 | return False |
||
287 | res_seconds = (now() - self.flow_removed_at).seconds |
||
288 | return res_seconds < setting.TIME_RECENT_DELETED_FLOWS |
||
289 | |||
290 | 1 | def is_recent_updated(self, setting=settings): |
|
291 | """Check if the evc has been updated recently""" |
||
292 | res_seconds = (now() - self.updated_at).seconds |
||
293 | return res_seconds < setting.TIME_RECENT_UPDATED |
||
294 | |||
295 | 1 | def __repr__(self): |
|
296 | """Repr method.""" |
||
297 | 1 | return f"EVC({self._id}, {self.name})" |
|
298 | |||
299 | 1 | def _validate(self, **kwargs): |
|
300 | """Do Basic validations. |
||
301 | |||
302 | Verify required attributes: name, uni_a, uni_z |
||
303 | |||
304 | Raises: |
||
305 | ValueError: message with error detail. |
||
306 | |||
307 | """ |
||
308 | 1 | for attribute in self.required_attributes: |
|
309 | |||
310 | 1 | if attribute not in kwargs: |
|
311 | 1 | raise ValueError(f"{attribute} is required.") |
|
312 | |||
313 | 1 | if "uni" in attribute: |
|
314 | 1 | uni = kwargs.get(attribute) |
|
315 | 1 | if not isinstance(uni, UNI): |
|
316 | raise ValueError(f"{attribute} is an invalid UNI.") |
||
317 | |||
318 | 1 | def _tag_lists_equal(self, **kwargs): |
|
319 | """Verify that tag lists are the same.""" |
||
320 | 1 | uni_a = kwargs.get("uni_a") or self.uni_a |
|
321 | 1 | uni_z = kwargs.get("uni_z") or self.uni_z |
|
322 | 1 | uni_a_list = uni_z_list = False |
|
323 | 1 | if (uni_a.user_tag and isinstance(uni_a.user_tag, TAGRange)): |
|
324 | 1 | uni_a_list = True |
|
325 | 1 | if (uni_z.user_tag and isinstance(uni_z.user_tag, TAGRange)): |
|
326 | 1 | uni_z_list = True |
|
327 | 1 | if uni_a_list and uni_z_list: |
|
328 | 1 | return uni_a.user_tag.value == uni_z.user_tag.value |
|
329 | 1 | return uni_a_list == uni_z_list |
|
330 | |||
331 | 1 | def _validate_has_primary_or_dynamic( |
|
332 | self, |
||
333 | primary_path=None, |
||
334 | dynamic_backup_path=None, |
||
335 | uni_a=None, |
||
336 | uni_z=None, |
||
337 | ) -> None: |
||
338 | """Validate that it must have a primary path or allow dynamic paths.""" |
||
339 | 1 | primary_path = ( |
|
340 | primary_path |
||
341 | if primary_path is not None |
||
342 | else self.primary_path |
||
343 | ) |
||
344 | 1 | dynamic_backup_path = ( |
|
345 | dynamic_backup_path |
||
346 | if dynamic_backup_path is not None |
||
347 | else self.dynamic_backup_path |
||
348 | ) |
||
349 | 1 | uni_a = uni_a if uni_a is not None else self.uni_a |
|
350 | 1 | uni_z = uni_z if uni_z is not None else self.uni_z |
|
351 | 1 | if ( |
|
352 | not primary_path |
||
353 | and not dynamic_backup_path |
||
354 | and uni_a and uni_z |
||
355 | and uni_a.interface.switch != uni_z.interface.switch |
||
356 | ): |
||
357 | 1 | msg = "The EVC must have a primary path or allow dynamic paths." |
|
358 | 1 | raise ValueError(msg) |
|
359 | |||
360 | 1 | def __eq__(self, other): |
|
361 | """Override the default implementation.""" |
||
362 | 1 | if not isinstance(other, EVC): |
|
363 | return False |
||
364 | |||
365 | 1 | attrs_to_compare = ["name", "uni_a", "uni_z", "owner", "bandwidth"] |
|
366 | 1 | for attribute in attrs_to_compare: |
|
367 | 1 | if getattr(other, attribute) != getattr(self, attribute): |
|
368 | 1 | return False |
|
369 | 1 | return True |
|
370 | |||
371 | 1 | def is_intra_switch(self): |
|
372 | """Check if the UNIs are in the same switch.""" |
||
373 | 1 | return self.uni_a.interface.switch == self.uni_z.interface.switch |
|
374 | |||
375 | 1 | def check_no_tag_duplicate(self, other_uni: UNI): |
|
376 | """Check if a no tag UNI is duplicated.""" |
||
377 | 1 | if other_uni in (self.uni_a, self.uni_z): |
|
378 | 1 | msg = f"UNI with interface {other_uni.interface.id} is"\ |
|
379 | f" duplicated with {self}." |
||
380 | 1 | raise DuplicatedNoTagUNI(msg) |
|
381 | |||
382 | 1 | def as_dict(self, keys: set = None): |
|
383 | """Return a dictionary representing an EVC object. |
||
384 | keys: Only fields on this variable will be |
||
385 | returned in the dictionary""" |
||
386 | 1 | evc_dict = { |
|
387 | "id": self.id, |
||
388 | "name": self.name, |
||
389 | "uni_a": self.uni_a.as_dict(), |
||
390 | "uni_z": self.uni_z.as_dict(), |
||
391 | } |
||
392 | |||
393 | 1 | time_fmt = "%Y-%m-%dT%H:%M:%S" |
|
394 | |||
395 | 1 | evc_dict["start_date"] = self.start_date |
|
396 | 1 | if isinstance(self.start_date, datetime): |
|
397 | 1 | evc_dict["start_date"] = self.start_date.strftime(time_fmt) |
|
398 | |||
399 | 1 | evc_dict["end_date"] = self.end_date |
|
400 | 1 | if isinstance(self.end_date, datetime): |
|
401 | 1 | evc_dict["end_date"] = self.end_date.strftime(time_fmt) |
|
402 | |||
403 | 1 | evc_dict["queue_id"] = self.queue_id |
|
404 | 1 | evc_dict["bandwidth"] = self.bandwidth |
|
405 | 1 | evc_dict["primary_links"] = self.primary_links.as_dict() |
|
406 | 1 | evc_dict["backup_links"] = self.backup_links.as_dict() |
|
407 | 1 | evc_dict["current_path"] = self.current_path.as_dict() |
|
408 | 1 | evc_dict["failover_path"] = self.failover_path.as_dict() |
|
409 | 1 | evc_dict["primary_path"] = self.primary_path.as_dict() |
|
410 | 1 | evc_dict["backup_path"] = self.backup_path.as_dict() |
|
411 | 1 | evc_dict["dynamic_backup_path"] = self.dynamic_backup_path |
|
412 | 1 | evc_dict["metadata"] = self.metadata |
|
413 | |||
414 | 1 | evc_dict["request_time"] = self.request_time |
|
415 | 1 | if isinstance(self.request_time, datetime): |
|
416 | 1 | evc_dict["request_time"] = self.request_time.strftime(time_fmt) |
|
417 | |||
418 | 1 | time = self.creation_time.strftime(time_fmt) |
|
419 | 1 | evc_dict["creation_time"] = time |
|
420 | |||
421 | 1 | evc_dict["owner"] = self.owner |
|
422 | 1 | evc_dict["circuit_scheduler"] = [ |
|
423 | sc.as_dict() for sc in self.circuit_scheduler |
||
424 | ] |
||
425 | |||
426 | 1 | evc_dict["active"] = self.is_active() |
|
427 | 1 | evc_dict["enabled"] = self.is_enabled() |
|
428 | 1 | evc_dict["archived"] = self.archived |
|
429 | 1 | evc_dict["sb_priority"] = self.sb_priority |
|
430 | 1 | evc_dict["service_level"] = self.service_level |
|
431 | 1 | evc_dict["primary_constraints"] = self.primary_constraints |
|
432 | 1 | evc_dict["secondary_constraints"] = self.secondary_constraints |
|
433 | 1 | evc_dict["flow_removed_at"] = self.flow_removed_at |
|
434 | 1 | evc_dict["updated_at"] = self.updated_at |
|
435 | 1 | evc_dict["max_paths"] = self.max_paths |
|
436 | |||
437 | 1 | if keys: |
|
438 | 1 | selected = {} |
|
439 | 1 | for key in keys: |
|
440 | 1 | selected[key] = evc_dict[key] |
|
441 | 1 | selected["id"] = evc_dict["id"] |
|
442 | 1 | return selected |
|
443 | 1 | return evc_dict |
|
444 | |||
445 | 1 | @property |
|
446 | 1 | def id(self): # pylint: disable=invalid-name |
|
447 | """Return this EVC's ID.""" |
||
448 | 1 | return self._id |
|
449 | |||
450 | 1 | def archive(self): |
|
451 | """Archive this EVC on deletion.""" |
||
452 | 1 | self.archived = True |
|
453 | |||
454 | 1 | def _use_uni_vlan( |
|
455 | self, |
||
456 | uni: UNI, |
||
457 | uni_dif: Union[None, UNI] = None |
||
458 | ): |
||
459 | """Use tags from UNI""" |
||
460 | 1 | if uni.user_tag is None: |
|
461 | 1 | return |
|
462 | 1 | tag = uni.user_tag.value |
|
463 | 1 | tag_type = uni.user_tag.tag_type |
|
464 | 1 | if (uni_dif and isinstance(tag, list) and |
|
465 | isinstance(uni_dif.user_tag.value, list)): |
||
466 | 1 | tag = range_difference(tag, uni_dif.user_tag.value) |
|
467 | 1 | if not tag: |
|
468 | 1 | return |
|
469 | 1 | uni.interface.use_tags( |
|
470 | self._controller, tag, tag_type, use_lock=True, check_order=False |
||
471 | ) |
||
472 | |||
473 | 1 | def make_uni_vlan_available( |
|
474 | self, |
||
475 | uni: UNI, |
||
476 | uni_dif: Union[None, UNI] = None, |
||
477 | ): |
||
478 | """Make available tag from UNI""" |
||
479 | 1 | if uni.user_tag is None: |
|
480 | 1 | return |
|
481 | 1 | tag = uni.user_tag.value |
|
482 | 1 | tag_type = uni.user_tag.tag_type |
|
483 | 1 | if (uni_dif and isinstance(tag, list) and |
|
484 | isinstance(uni_dif.user_tag.value, list)): |
||
485 | 1 | tag = range_difference(tag, uni_dif.user_tag.value) |
|
486 | 1 | if not tag: |
|
487 | return |
||
488 | 1 | try: |
|
489 | 1 | conflict = uni.interface.make_tags_available( |
|
490 | self._controller, tag, tag_type, use_lock=True, |
||
491 | check_order=False |
||
492 | ) |
||
493 | 1 | except KytosTagError as err: |
|
494 | 1 | log.error(f"Error in {self}: {err}") |
|
495 | 1 | return |
|
496 | 1 | if conflict: |
|
497 | 1 | intf = uni.interface.id |
|
498 | 1 | log.warning(f"Tags {conflict} was already available in {intf}") |
|
499 | |||
500 | 1 | def remove_uni_tags(self): |
|
501 | """Remove both UNI usage of a tag""" |
||
502 | 1 | self.make_uni_vlan_available(self.uni_a) |
|
503 | 1 | self.make_uni_vlan_available(self.uni_z) |
|
504 | |||
505 | |||
506 | # pylint: disable=fixme, too-many-public-methods |
||
507 | 1 | class EVCDeploy(EVCBase): |
|
508 | """Class to handle the deploy procedures.""" |
||
509 | |||
510 | 1 | def create(self): |
|
511 | """Create a EVC.""" |
||
512 | |||
513 | 1 | def discover_new_paths(self): |
|
514 | """Discover new paths to satisfy this circuit and deploy it.""" |
||
515 | return DynamicPathManager.get_best_paths(self, self.max_paths, |
||
516 | **self.primary_constraints) |
||
517 | |||
518 | 1 | def get_failover_path_candidates(self): |
|
519 | """Get failover paths to satisfy this EVC.""" |
||
520 | # in the future we can return primary/backup paths as well |
||
521 | # we just have to properly handle link_up and failover paths |
||
522 | # if ( |
||
523 | # self.is_using_primary_path() and |
||
524 | # self.backup_path.status is EntityStatus.UP |
||
525 | # ): |
||
526 | # yield self.backup_path |
||
527 | 1 | return DynamicPathManager.get_disjoint_paths(self, self.current_path) |
|
528 | |||
529 | 1 | def change_path(self): |
|
530 | """Change EVC path.""" |
||
531 | |||
532 | 1 | def reprovision(self): |
|
533 | """Force the EVC (re-)provisioning.""" |
||
534 | |||
535 | 1 | def is_affected_by_link(self, link): |
|
536 | """Return True if this EVC has the given link on its current path.""" |
||
537 | 1 | return link in self.current_path |
|
538 | |||
539 | 1 | def link_affected_by_interface(self, interface): |
|
540 | """Return True if this EVC has the given link on its current path.""" |
||
541 | return self.current_path.link_affected_by_interface(interface) |
||
542 | |||
543 | 1 | def is_backup_path_affected_by_link(self, link): |
|
544 | """Return True if the backup path of this EVC uses the given link.""" |
||
545 | 1 | return link in self.backup_path |
|
546 | |||
547 | # pylint: disable=invalid-name |
||
548 | 1 | def is_primary_path_affected_by_link(self, link): |
|
549 | """Return True if the primary path of this EVC uses the given link.""" |
||
550 | 1 | return link in self.primary_path |
|
551 | |||
552 | 1 | def is_failover_path_affected_by_link(self, link): |
|
553 | """Return True if this EVC has the given link on its failover path.""" |
||
554 | 1 | return link in self.failover_path |
|
555 | |||
556 | 1 | def is_eligible_for_failover_path(self): |
|
557 | """Verify if this EVC is eligible for failover path (EP029)""" |
||
558 | # In the future this function can be augmented to consider |
||
559 | # primary/backup, primary/dynamic, and other path combinations |
||
560 | 1 | return ( |
|
561 | self.dynamic_backup_path and |
||
562 | not self.primary_path and not self.backup_path |
||
563 | ) |
||
564 | |||
565 | 1 | def is_using_primary_path(self): |
|
566 | """Verify if the current deployed path is self.primary_path.""" |
||
567 | 1 | return self.primary_path and (self.current_path == self.primary_path) |
|
568 | |||
569 | 1 | def is_using_backup_path(self): |
|
570 | """Verify if the current deployed path is self.backup_path.""" |
||
571 | 1 | return self.backup_path and (self.current_path == self.backup_path) |
|
572 | |||
573 | 1 | def is_using_dynamic_path(self): |
|
574 | """Verify if the current deployed path is a dynamic path.""" |
||
575 | 1 | if ( |
|
576 | self.current_path |
||
577 | and not self.is_using_primary_path() |
||
578 | and not self.is_using_backup_path() |
||
579 | and self.current_path.status == EntityStatus.UP |
||
580 | ): |
||
581 | return True |
||
582 | 1 | return False |
|
583 | |||
584 | 1 | def deploy_to_backup_path(self, old_path_dict: dict = None): |
|
585 | """Deploy the backup path into the datapaths of this circuit. |
||
586 | |||
587 | If the backup_path attribute is valid and up, this method will try to |
||
588 | deploy this backup_path. |
||
589 | |||
590 | If everything fails and dynamic_backup_path is True, then tries to |
||
591 | deploy a dynamic path. |
||
592 | """ |
||
593 | # TODO: Remove flows from current (cookies) |
||
594 | 1 | if self.is_using_backup_path(): |
|
595 | # TODO: Log to say that cannot move backup to backup |
||
596 | return True |
||
597 | |||
598 | 1 | success = False |
|
599 | 1 | if self.backup_path.status is EntityStatus.UP: |
|
600 | 1 | success = self.deploy_to_path(self.backup_path, old_path_dict) |
|
601 | |||
602 | 1 | if success: |
|
603 | 1 | return True |
|
604 | |||
605 | 1 | if self.dynamic_backup_path or self.is_intra_switch(): |
|
606 | 1 | return self.deploy_to_path(old_path_dict=old_path_dict) |
|
607 | |||
608 | return False |
||
609 | |||
610 | 1 | def deploy_to_primary_path(self, old_path_dict: dict = None): |
|
611 | """Deploy the primary path into the datapaths of this circuit. |
||
612 | |||
613 | If the primary_path attribute is valid and up, this method will try to |
||
614 | deploy this primary_path. |
||
615 | """ |
||
616 | # TODO: Remove flows from current (cookies) |
||
617 | 1 | if self.is_using_primary_path(): |
|
618 | # TODO: Log to say that cannot move primary to primary |
||
619 | return True |
||
620 | |||
621 | 1 | if self.primary_path.status is EntityStatus.UP: |
|
622 | 1 | return self.deploy_to_path(self.primary_path, old_path_dict) |
|
623 | return False |
||
624 | |||
625 | 1 | def deploy(self, old_path_dict: dict = None): |
|
626 | """Deploy EVC to best path. |
||
627 | |||
628 | Best path can be the primary path, if available. If not, the backup |
||
629 | path, and, if it is also not available, a dynamic path. |
||
630 | """ |
||
631 | 1 | if self.archived: |
|
632 | 1 | return False |
|
633 | 1 | self.enable() |
|
634 | 1 | success = self.deploy_to_primary_path(old_path_dict) |
|
635 | 1 | if not success: |
|
636 | 1 | success = self.deploy_to_backup_path(old_path_dict) |
|
637 | |||
638 | 1 | if success: |
|
639 | 1 | emit_event(self._controller, "deployed", |
|
640 | content=map_evc_event_content(self)) |
||
641 | 1 | return success |
|
642 | |||
643 | 1 | @staticmethod |
|
644 | 1 | def get_path_status(path): |
|
645 | """Check for the current status of a path. |
||
646 | |||
647 | If any link in this path is down, the path is considered down. |
||
648 | """ |
||
649 | 1 | if not path: |
|
650 | 1 | return EntityStatus.DISABLED |
|
651 | |||
652 | 1 | for link in path: |
|
653 | 1 | if link.status is not EntityStatus.UP: |
|
654 | 1 | return link.status |
|
655 | 1 | return EntityStatus.UP |
|
656 | |||
657 | # def discover_new_path(self): |
||
658 | # # TODO: discover a new path to satisfy this circuit and deploy |
||
659 | |||
660 | 1 | def remove(self): |
|
661 | """Remove EVC path and disable it.""" |
||
662 | 1 | self.remove_current_flows(sync=False) |
|
663 | 1 | self.remove_failover_flows(sync=False) |
|
664 | 1 | self.disable() |
|
665 | 1 | self.sync() |
|
666 | 1 | emit_event(self._controller, "undeployed", |
|
667 | content=map_evc_event_content(self)) |
||
668 | |||
669 | 1 | def remove_failover_flows(self, exclude_uni_switches=True, |
|
670 | force=True, sync=True) -> None: |
||
671 | """Remove failover_flows. |
||
672 | |||
673 | By default, it excludes UNI switches; if mef_eline has already |
||
674 | called remove_current_flows before, this minimizes the number |
||
675 | of FlowMods and IO. |
||
676 | """ |
||
677 | 1 | if not self.failover_path: |
|
678 | 1 | return |
|
679 | 1 | switches, cookie, excluded = set(), self.get_cookie(), set() |
|
680 | 1 | if exclude_uni_switches: |
|
681 | 1 | excluded.add(self.uni_a.interface.switch.id) |
|
682 | 1 | excluded.add(self.uni_z.interface.switch.id) |
|
683 | 1 | for link in self.failover_path: |
|
684 | 1 | if link.endpoint_a.switch.id not in excluded: |
|
685 | 1 | switches.add(link.endpoint_a.switch.id) |
|
686 | 1 | if link.endpoint_b.switch.id not in excluded: |
|
687 | 1 | switches.add(link.endpoint_b.switch.id) |
|
688 | 1 | flow_mods = { |
|
689 | "switches": list(switches), |
||
690 | "flows": [{ |
||
691 | "cookie": cookie, |
||
692 | "cookie_mask": int(0xffffffffffffffff), |
||
693 | "owner": "mef_eline", |
||
694 | }] |
||
695 | } |
||
696 | 1 | try: |
|
697 | 1 | self._send_flow_mods( |
|
698 | flow_mods, |
||
699 | "delete", |
||
700 | force=force, |
||
701 | ) |
||
702 | except FlowModException as err: |
||
703 | log.error(f"Error deleting {self} failover_path flows, {err}") |
||
704 | 1 | try: |
|
705 | 1 | self.failover_path.make_vlans_available(self._controller) |
|
706 | except KytosTagError as err: |
||
707 | log.error(f"Error removing {self} failover_path: {err}") |
||
708 | 1 | self.failover_path = Path([]) |
|
709 | 1 | if sync: |
|
710 | 1 | self.sync() |
|
711 | |||
712 | 1 | def remove_current_flows( |
|
713 | self, |
||
714 | current_path=None, |
||
715 | force=True, |
||
716 | sync=True, |
||
717 | return_path=False |
||
718 | ) -> dict[str, int]: |
||
719 | """Remove all flows from current path or path intended for |
||
720 | to become the current path, if one exists.""" |
||
721 | 1 | switches, old_path_dict = set(), {} |
|
722 | 1 | current_path = self.current_path if not current_path else current_path |
|
723 | 1 | if not current_path and not self.is_intra_switch(): |
|
724 | 1 | return {} |
|
725 | |||
726 | 1 | if return_path: |
|
727 | 1 | for link in self.current_path: |
|
728 | 1 | s_vlan = link.metadata.get("s_vlan") |
|
729 | 1 | if s_vlan: |
|
730 | 1 | old_path_dict[link.id] = s_vlan.value |
|
731 | |||
732 | 1 | for link in current_path: |
|
733 | 1 | switches.add(link.endpoint_a.switch.id) |
|
734 | 1 | switches.add(link.endpoint_b.switch.id) |
|
735 | 1 | switches.add(self.uni_a.interface.switch.id) |
|
736 | 1 | switches.add(self.uni_z.interface.switch.id) |
|
737 | 1 | flow_mods = { |
|
738 | "switches": list(switches), |
||
739 | "flows": [{ |
||
740 | "cookie": self.get_cookie(), |
||
741 | "cookie_mask": int(0xffffffffffffffff), |
||
742 | "owner": "mef_eline", |
||
743 | }] |
||
744 | } |
||
745 | |||
746 | 1 | try: |
|
747 | 1 | self._send_flow_mods(flow_mods, "delete", force=force) |
|
748 | 1 | except FlowModException as err: |
|
749 | 1 | log.error(f"Error deleting {self} current_path flows, {err}") |
|
750 | |||
751 | 1 | try: |
|
752 | 1 | current_path.make_vlans_available(self._controller) |
|
753 | 1 | except KytosTagError as err: |
|
754 | 1 | log.error(f"Error removing {self} current_path: {err}") |
|
755 | 1 | self.current_path = Path([]) |
|
756 | 1 | self.deactivate() |
|
757 | 1 | if sync: |
|
758 | 1 | self.sync() |
|
759 | 1 | return old_path_dict |
|
760 | |||
761 | 1 | def remove_path_flows( |
|
762 | self, path=None, force=True |
||
763 | ) -> dict[str, list[dict]]: |
||
764 | """Remove all flows from path, and return the removed flows.""" |
||
765 | 1 | dpid_flows_match: dict[str, dict] = defaultdict(lambda: {"flows": []}) |
|
766 | 1 | out_flows: dict[str, list[dict]] = defaultdict(list) |
|
767 | |||
768 | 1 | if not path: |
|
769 | 1 | return dpid_flows_match |
|
770 | |||
771 | 1 | try: |
|
772 | 1 | nni_flows = self._prepare_nni_flows(path) |
|
773 | # pylint: disable=broad-except |
||
774 | except Exception: |
||
775 | err = traceback.format_exc() |
||
776 | log.error(f"Fail to remove NNI failover flows for {self}: {err}") |
||
777 | nni_flows = {} |
||
778 | |||
779 | 1 | for dpid, flows in nni_flows.items(): |
|
780 | 1 | for flow in flows: |
|
781 | 1 | flow_mod = { |
|
782 | "cookie": flow["cookie"], |
||
783 | "match": flow["match"], |
||
784 | "owner": "mef_eline", |
||
785 | "cookie_mask": int(0xffffffffffffffff) |
||
786 | } |
||
787 | 1 | dpid_flows_match[dpid]["flows"].append(flow_mod) |
|
788 | 1 | out_flows[dpid].append(flow_mod) |
|
789 | |||
790 | 1 | try: |
|
791 | 1 | uni_flows = self._prepare_uni_flows(path, skip_in=True) |
|
792 | # pylint: disable=broad-except |
||
793 | except Exception: |
||
794 | err = traceback.format_exc() |
||
795 | log.error(f"Fail to remove UNI failover flows for {self}: {err}") |
||
796 | uni_flows = {} |
||
797 | |||
798 | 1 | for dpid, flows in uni_flows.items(): |
|
799 | 1 | for flow in flows: |
|
800 | 1 | flow_mod = { |
|
801 | "cookie": flow["cookie"], |
||
802 | "match": flow["match"], |
||
803 | "owner": "mef_eline", |
||
804 | "cookie_mask": int(0xffffffffffffffff) |
||
805 | } |
||
806 | 1 | dpid_flows_match[dpid]["flows"].append(flow_mod) |
|
807 | 1 | out_flows[dpid].append(flow_mod) |
|
808 | |||
809 | 1 | try: |
|
810 | 1 | self._send_flow_mods( |
|
811 | dpid_flows_match, 'delete', force=force, by_switch=True |
||
812 | ) |
||
813 | 1 | except FlowModException as err: |
|
814 | 1 | log.error( |
|
815 | f"Error deleting {self} path flows, path:{path}, error={err}" |
||
816 | ) |
||
817 | |||
818 | 1 | try: |
|
819 | 1 | path.make_vlans_available(self._controller) |
|
820 | except KytosTagError as err: |
||
821 | log.error(f"Error removing {self} path: {err}") |
||
822 | |||
823 | 1 | return out_flows |
|
824 | |||
825 | 1 | @staticmethod |
|
826 | 1 | def links_zipped(path=None): |
|
827 | """Return an iterator which yields pairs of links in order.""" |
||
828 | 1 | if not path: |
|
829 | 1 | return [] |
|
830 | 1 | return zip(path[:-1], path[1:]) |
|
831 | |||
832 | 1 | def should_deploy(self, path=None): |
|
833 | """Verify if the circuit should be deployed.""" |
||
834 | 1 | if not path: |
|
835 | 1 | log.debug("Path is empty.") |
|
836 | 1 | return False |
|
837 | |||
838 | 1 | if not self.is_enabled(): |
|
839 | 1 | log.debug(f"{self} is disabled.") |
|
840 | 1 | return False |
|
841 | |||
842 | 1 | if not self.is_active(): |
|
843 | 1 | log.debug(f"{self} will be deployed.") |
|
844 | 1 | return True |
|
845 | |||
846 | 1 | return False |
|
847 | |||
848 | 1 | @staticmethod |
|
849 | 1 | def is_uni_interface_active( |
|
850 | *interfaces: Interface |
||
851 | ) -> tuple[bool, dict]: |
||
852 | """Whether UNIs are active and their status & status_reason.""" |
||
853 | 1 | active = True |
|
854 | 1 | bad_interfaces = [ |
|
855 | interface |
||
856 | for interface in interfaces |
||
857 | if interface.status != EntityStatus.UP |
||
858 | ] |
||
859 | 1 | if bad_interfaces: |
|
860 | 1 | active = False |
|
861 | 1 | interfaces = bad_interfaces |
|
862 | 1 | return active, { |
|
863 | interface.id: { |
||
864 | 'status': interface.status.value, |
||
865 | 'status_reason': interface.status_reason, |
||
866 | } |
||
867 | for interface in interfaces |
||
868 | } |
||
869 | |||
870 | 1 | def try_to_activate(self) -> bool: |
|
871 | """Try to activate the EVC.""" |
||
872 | 1 | if self.is_intra_switch(): |
|
873 | 1 | return self._try_to_activate_intra_evc() |
|
874 | 1 | return self._try_to_activate_inter_evc() |
|
875 | |||
876 | 1 | def _try_to_activate_intra_evc(self) -> bool: |
|
877 | """Try to activate intra EVC.""" |
||
878 | 1 | intf_a, intf_z = self.uni_a.interface, self.uni_z.interface |
|
879 | 1 | is_active, reason = self.is_uni_interface_active(intf_a, intf_z) |
|
880 | 1 | if not is_active: |
|
881 | 1 | raise ActivationError( |
|
882 | f"Won't be able to activate {self} due to UNIs: {reason}" |
||
883 | ) |
||
884 | 1 | self.activate() |
|
885 | 1 | return True |
|
886 | |||
887 | 1 | def _try_to_activate_inter_evc(self) -> bool: |
|
888 | """Try to activate inter EVC.""" |
||
889 | 1 | intf_a, intf_z = self.uni_a.interface, self.uni_z.interface |
|
890 | 1 | is_active, reason = self.is_uni_interface_active(intf_a, intf_z) |
|
891 | 1 | if not is_active: |
|
892 | 1 | raise ActivationError( |
|
893 | f"Won't be able to activate {self} due to UNIs: {reason}" |
||
894 | ) |
||
895 | 1 | if self.current_path.status != EntityStatus.UP: |
|
896 | 1 | raise ActivationError( |
|
897 | f"Won't be able to activate {self} due to current_path " |
||
898 | f"status {self.current_path.status}" |
||
899 | ) |
||
900 | 1 | self.activate() |
|
901 | 1 | return True |
|
902 | |||
903 | # pylint: disable=too-many-branches, too-many-statements |
||
904 | 1 | def deploy_to_path(self, path=None, old_path_dict: dict = None): |
|
905 | """Install the flows for this circuit. |
||
906 | |||
907 | Procedures to deploy: |
||
908 | |||
909 | 0. Remove current flows installed |
||
910 | 1. Decide whether to deploy "path" or discover a new path |
||
911 | 2. Choose vlan |
||
912 | 3. Install NNI flows |
||
913 | 4. Install UNI flows |
||
914 | 5. Activate |
||
915 | 6. Update current_path |
||
916 | 7. Update link caches (primary, current, backup) |
||
917 | |||
918 | """ |
||
919 | 1 | self.remove_current_flows(sync=False) |
|
920 | 1 | use_path = path or Path([]) |
|
921 | 1 | if not old_path_dict: |
|
922 | 1 | old_path_dict = {} |
|
923 | 1 | tag_errors = [] |
|
924 | 1 | no_valid_path = False |
|
925 | 1 | if self.should_deploy(use_path): |
|
926 | 1 | try: |
|
927 | 1 | use_path.choose_vlans(self._controller, old_path_dict) |
|
928 | 1 | except KytosNoTagAvailableError as e: |
|
929 | 1 | tag_errors.append(str(e)) |
|
930 | 1 | use_path = None |
|
931 | else: |
||
932 | 1 | for use_path in self.discover_new_paths(): |
|
933 | 1 | if use_path is None: |
|
934 | 1 | no_valid_path = True |
|
935 | 1 | continue |
|
936 | 1 | try: |
|
937 | 1 | use_path.choose_vlans(self._controller, old_path_dict) |
|
938 | 1 | break |
|
939 | 1 | except KytosNoTagAvailableError as e: |
|
940 | 1 | tag_errors.append(str(e)) |
|
941 | else: |
||
942 | 1 | use_path = None |
|
943 | |||
944 | 1 | try: |
|
945 | 1 | if use_path: |
|
946 | 1 | self._install_flows(use_path) |
|
947 | 1 | elif self.is_intra_switch(): |
|
948 | 1 | use_path = Path() |
|
949 | 1 | self._install_direct_uni_flows() |
|
950 | else: |
||
951 | 1 | no_path_msg = "No available path was found." |
|
952 | 1 | if no_valid_path: |
|
953 | 1 | no_path_msg = "No valid path was found, "\ |
|
954 | "try increasing `max_paths`"\ |
||
955 | f" from {self.max_paths}." |
||
956 | 1 | msg = f"{self} was not deployed. {no_path_msg}" |
|
957 | 1 | if tag_errors: |
|
958 | 1 | msg = self.add_tag_errors(msg, tag_errors) |
|
959 | 1 | log.error(msg) |
|
960 | else: |
||
961 | 1 | log.warning(msg) |
|
962 | 1 | return False |
|
963 | 1 | except EVCPathNotInstalled as err: |
|
964 | 1 | log.error( |
|
965 | f"Error deploying EVC {self} when calling flow_manager: {err}" |
||
966 | ) |
||
967 | 1 | self.remove_current_flows(use_path, sync=True) |
|
968 | 1 | return False |
|
969 | |||
970 | 1 | self.current_path = use_path |
|
971 | 1 | msg = f"{self} was deployed." |
|
972 | 1 | try: |
|
973 | 1 | self.try_to_activate() |
|
974 | except ActivationError as exc: |
||
975 | msg = f"{msg} {str(exc)}" |
||
976 | 1 | self.sync() |
|
977 | 1 | log.info(msg) |
|
978 | 1 | return True |
|
979 | |||
980 | # pylint: disable=too-many-statements |
||
981 | 1 | def setup_failover_path(self, warn_if_not_path=True): |
|
982 | """Install flows for the failover path of this EVC. |
||
983 | |||
984 | Procedures to deploy: |
||
985 | |||
986 | 0. Remove flows currently installed for failover_path (if any) |
||
987 | 1. Discover a disjoint path from current_path |
||
988 | 2. Choose vlans |
||
989 | 3. Install NNI flows |
||
990 | 4. Install UNI egress flows |
||
991 | 5. Update failover_path |
||
992 | """ |
||
993 | # Intra-switch EVCs have no failover_path |
||
994 | 1 | if self.is_intra_switch(): |
|
995 | 1 | return False |
|
996 | |||
997 | # For now, only set up a failover path for totally dynamic EVCs |
||
998 | 1 | if not self.is_eligible_for_failover_path(): |
|
999 | 1 | return False |
|
1000 | |||
1001 | 1 | out_new_flows: dict[str, list[dict]] = {} |
|
1002 | 1 | reason = "" |
|
1003 | 1 | tag_errors = [] |
|
1004 | 1 | out_removed_flows = self.remove_path_flows(self.failover_path) |
|
1005 | 1 | self.failover_path = Path([]) |
|
1006 | |||
1007 | 1 | for use_path in self.get_failover_path_candidates(): |
|
1008 | 1 | if not use_path: |
|
1009 | 1 | continue |
|
1010 | 1 | try: |
|
1011 | 1 | use_path.choose_vlans(self._controller) |
|
1012 | 1 | break |
|
1013 | 1 | except KytosNoTagAvailableError as e: |
|
1014 | 1 | tag_errors.append(str(e)) |
|
1015 | else: |
||
1016 | 1 | use_path = Path([]) |
|
1017 | 1 | reason = "No available path was found" |
|
1018 | |||
1019 | 1 | try: |
|
1020 | 1 | if use_path: |
|
1021 | 1 | out_new_flows = self._install_flows( |
|
1022 | use_path, skip_in=True |
||
1023 | ) |
||
1024 | 1 | except EVCPathNotInstalled as err: |
|
1025 | 1 | reason = "Error deploying failover path" |
|
1026 | 1 | log.error( |
|
1027 | f"{reason} for {self}. FlowManager error: {err}" |
||
1028 | ) |
||
1029 | 1 | _rmed_flows = self.remove_path_flows(use_path) |
|
1030 | 1 | out_removed_flows = merge_flow_dicts( |
|
1031 | out_removed_flows, _rmed_flows |
||
1032 | ) |
||
1033 | 1 | use_path = Path([]) |
|
1034 | |||
1035 | 1 | self.failover_path = use_path |
|
1036 | 1 | self.sync() |
|
1037 | |||
1038 | 1 | if out_new_flows or out_removed_flows: |
|
1039 | 1 | emit_event(self._controller, "failover_deployed", content={ |
|
1040 | self.id: map_evc_event_content( |
||
1041 | self, |
||
1042 | flows=deepcopy(out_new_flows), |
||
1043 | removed_flows=deepcopy(out_removed_flows), |
||
1044 | error_reason=reason, |
||
1045 | current_path=self.current_path.as_dict(), |
||
1046 | ) |
||
1047 | }) |
||
1048 | |||
1049 | 1 | if not use_path: |
|
1050 | 1 | msg = f"Failover path for {self} was not deployed: {reason}." |
|
1051 | 1 | if tag_errors: |
|
1052 | 1 | msg = self.add_tag_errors(msg, tag_errors) |
|
1053 | 1 | log.error(msg) |
|
1054 | 1 | elif warn_if_not_path: |
|
1055 | 1 | log.warning(msg) |
|
1056 | 1 | return False |
|
1057 | 1 | log.info(f"Failover path for {self} was deployed.") |
|
1058 | 1 | return True |
|
1059 | |||
1060 | 1 | @staticmethod |
|
1061 | 1 | def add_tag_errors(msg: str, tag_errors: list): |
|
1062 | """Add to msg the tag errors ecountered when chossing path.""" |
||
1063 | 1 | path = ['path', 'paths'] |
|
1064 | 1 | was = ['was', 'were'] |
|
1065 | 1 | message = ['message', 'messages'] |
|
1066 | |||
1067 | # Choose either singular(0) or plural(1) words |
||
1068 | 1 | n = 1 |
|
1069 | 1 | if len(tag_errors) == 1: |
|
1070 | 1 | n = 0 |
|
1071 | |||
1072 | 1 | msg += f" {len(tag_errors)} {path[n]} {was[n]} rejected" |
|
1073 | 1 | msg += f" with {message[n]}: {tag_errors}" |
|
1074 | 1 | return msg |
|
1075 | |||
1076 | 1 | def get_failover_flows(self): |
|
1077 | """Return the flows needed to make the failover path active, i.e. the |
||
1078 | flows for ingress forwarding. |
||
1079 | |||
1080 | Return: |
||
1081 | dict: A dict of flows indexed by the switch_id will be returned, or |
||
1082 | an empty dict if no failover_path is available. |
||
1083 | """ |
||
1084 | 1 | if not self.failover_path: |
|
1085 | 1 | return {} |
|
1086 | 1 | return self._prepare_uni_flows(self.failover_path, skip_out=True) |
|
1087 | |||
1088 | # pylint: disable=too-many-branches |
||
1089 | 1 | def _prepare_direct_uni_flows(self): |
|
1090 | """Prepare flows connecting two UNIs for intra-switch EVC.""" |
||
1091 | 1 | vlan_a = self._get_value_from_uni_tag(self.uni_a) |
|
1092 | 1 | vlan_z = self._get_value_from_uni_tag(self.uni_z) |
|
1093 | |||
1094 | 1 | flow_mod_az = self._prepare_flow_mod( |
|
1095 | self.uni_a.interface, self.uni_z.interface, |
||
1096 | self.queue_id, vlan_a |
||
1097 | ) |
||
1098 | 1 | flow_mod_za = self._prepare_flow_mod( |
|
1099 | self.uni_z.interface, self.uni_a.interface, |
||
1100 | self.queue_id, vlan_z |
||
1101 | ) |
||
1102 | |||
1103 | 1 | if not isinstance(vlan_z, list) and vlan_z not in self.special_cases: |
|
|||
1104 | 1 | flow_mod_az["actions"].insert( |
|
1105 | 0, {"action_type": "set_vlan", "vlan_id": vlan_z} |
||
1106 | ) |
||
1107 | 1 | if not vlan_a: |
|
1108 | 1 | flow_mod_az["actions"].insert( |
|
1109 | 0, {"action_type": "push_vlan", "tag_type": "c"} |
||
1110 | ) |
||
1111 | 1 | if vlan_a == 0: |
|
1112 | 1 | flow_mod_za["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1113 | 1 | elif vlan_a == 0 and vlan_z == "4096/4096": |
|
1114 | 1 | flow_mod_za["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1115 | |||
1116 | 1 | if not isinstance(vlan_a, list) and vlan_a not in self.special_cases: |
1117 | 1 | flow_mod_za["actions"].insert( |
|
1118 | 0, {"action_type": "set_vlan", "vlan_id": vlan_a} |
||
1119 | ) |
||
1120 | 1 | if not vlan_z: |
|
1121 | 1 | flow_mod_za["actions"].insert( |
|
1122 | 0, {"action_type": "push_vlan", "tag_type": "c"} |
||
1123 | ) |
||
1124 | 1 | if vlan_z == 0: |
|
1125 | 1 | flow_mod_az["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1126 | 1 | elif vlan_a == "4096/4096" and vlan_z == 0: |
|
1127 | 1 | flow_mod_az["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1128 | |||
1129 | 1 | flows = [] |
|
1130 | 1 | if isinstance(vlan_a, list): |
|
1131 | 1 | for mask_a in vlan_a: |
|
1132 | 1 | flow_aux = deepcopy(flow_mod_az) |
|
1133 | 1 | flow_aux["match"]["dl_vlan"] = mask_a |
|
1134 | 1 | flows.append(flow_aux) |
|
1135 | else: |
||
1136 | 1 | if vlan_a is not None: |
|
1137 | 1 | flow_mod_az["match"]["dl_vlan"] = vlan_a |
|
1138 | 1 | flows.append(flow_mod_az) |
|
1139 | |||
1140 | 1 | if isinstance(vlan_z, list): |
|
1141 | 1 | for mask_z in vlan_z: |
|
1142 | 1 | flow_aux = deepcopy(flow_mod_za) |
|
1143 | 1 | flow_aux["match"]["dl_vlan"] = mask_z |
|
1144 | 1 | flows.append(flow_aux) |
|
1145 | else: |
||
1146 | 1 | if vlan_z is not None: |
|
1147 | 1 | flow_mod_za["match"]["dl_vlan"] = vlan_z |
|
1148 | 1 | flows.append(flow_mod_za) |
|
1149 | 1 | return ( |
|
1150 | self.uni_a.interface.switch.id, flows |
||
1151 | ) |
||
1152 | |||
1153 | 1 | def _install_direct_uni_flows(self): |
|
1154 | """Install flows connecting two UNIs. |
||
1155 | |||
1156 | This case happens when the circuit is between UNIs in the |
||
1157 | same switch. |
||
1158 | """ |
||
1159 | 1 | (dpid, flows) = self._prepare_direct_uni_flows() |
|
1160 | 1 | flow_mods = {"switches": [dpid], "flows": flows} |
|
1161 | 1 | try: |
|
1162 | 1 | self._send_flow_mods(flow_mods, "install") |
|
1163 | 1 | except FlowModException as err: |
|
1164 | 1 | raise EVCPathNotInstalled(str(err)) from err |
|
1165 | |||
1166 | 1 | def _prepare_nni_flows(self, path=None): |
|
1167 | """Prepare NNI flows.""" |
||
1168 | 1 | nni_flows = OrderedDict() |
|
1169 | 1 | previous = self.uni_a.interface.switch.dpid |
|
1170 | 1 | for incoming, outcoming in self.links_zipped(path): |
|
1171 | 1 | in_vlan = incoming.get_metadata("s_vlan").value |
|
1172 | 1 | out_vlan = outcoming.get_metadata("s_vlan").value |
|
1173 | 1 | in_endpoint = self.get_endpoint_by_id(incoming, previous, ne) |
|
1174 | 1 | out_endpoint = self.get_endpoint_by_id( |
|
1175 | outcoming, in_endpoint.switch.id, eq |
||
1176 | ) |
||
1177 | |||
1178 | 1 | flows = [] |
|
1179 | # Flow for one direction |
||
1180 | 1 | flows.append( |
|
1181 | self._prepare_nni_flow( |
||
1182 | in_endpoint, |
||
1183 | out_endpoint, |
||
1184 | in_vlan, |
||
1185 | out_vlan, |
||
1186 | queue_id=self.queue_id, |
||
1187 | ) |
||
1188 | ) |
||
1189 | |||
1190 | # Flow for the other direction |
||
1191 | 1 | flows.append( |
|
1192 | self._prepare_nni_flow( |
||
1193 | out_endpoint, |
||
1194 | in_endpoint, |
||
1195 | out_vlan, |
||
1196 | in_vlan, |
||
1197 | queue_id=self.queue_id, |
||
1198 | ) |
||
1199 | ) |
||
1200 | 1 | previous = in_endpoint.switch.id |
|
1201 | 1 | nni_flows[in_endpoint.switch.id] = flows |
|
1202 | 1 | return nni_flows |
|
1203 | |||
1204 | 1 | def _install_flows( |
|
1205 | self, path=None, skip_in=False, skip_out=False |
||
1206 | ) -> dict[str, list[dict]]: |
||
1207 | """Install uni and nni flows""" |
||
1208 | 1 | flows_by_switch = defaultdict(lambda: {"flows": []}) |
|
1209 | 1 | new_flows = defaultdict(list) |
|
1210 | 1 | for dpid, flows in self._prepare_nni_flows(path).items(): |
|
1211 | 1 | flows_by_switch[dpid]["flows"].extend(flows) |
|
1212 | 1 | new_flows[dpid].extend(flows) |
|
1213 | 1 | for dpid, flows in self._prepare_uni_flows( |
|
1214 | path, skip_in, skip_out |
||
1215 | ).items(): |
||
1216 | 1 | flows_by_switch[dpid]["flows"].extend(flows) |
|
1217 | 1 | new_flows[dpid].extend(flows) |
|
1218 | |||
1219 | 1 | try: |
|
1220 | 1 | self._send_flow_mods(flows_by_switch, "install", by_switch=True) |
|
1221 | 1 | except FlowModException as err: |
|
1222 | 1 | raise EVCPathNotInstalled(str(err)) from err |
|
1223 | |||
1224 | 1 | return new_flows |
|
1225 | |||
1226 | 1 | @staticmethod |
|
1227 | 1 | def _get_value_from_uni_tag(uni: UNI): |
|
1228 | """Returns the value from tag. In case of any and untagged |
||
1229 | it returns "4096/4096" and 0, respectively.""" |
||
1230 | 1 | special = {"any": "4096/4096", "untagged": 0} |
|
1231 | 1 | if uni.user_tag: |
|
1232 | 1 | value = uni.user_tag.value |
|
1233 | 1 | if isinstance(value, list): |
|
1234 | 1 | return uni.user_tag.mask_list |
|
1235 | 1 | return special.get(value, value) |
|
1236 | 1 | return None |
|
1237 | |||
1238 | # pylint: disable=too-many-locals |
||
1239 | 1 | def _prepare_uni_flows(self, path=None, skip_in=False, skip_out=False): |
|
1240 | """Prepare flows to install UNIs.""" |
||
1241 | 1 | uni_flows = {} |
|
1242 | 1 | if not path: |
|
1243 | log.info("install uni flows without path.") |
||
1244 | return uni_flows |
||
1245 | |||
1246 | # Determine VLANs |
||
1247 | 1 | in_vlan_a = self._get_value_from_uni_tag(self.uni_a) |
|
1248 | 1 | out_vlan_a = path[0].get_metadata("s_vlan").value |
|
1249 | |||
1250 | 1 | in_vlan_z = self._get_value_from_uni_tag(self.uni_z) |
|
1251 | 1 | out_vlan_z = path[-1].get_metadata("s_vlan").value |
|
1252 | |||
1253 | # Get endpoints from path |
||
1254 | 1 | endpoint_a = self.get_endpoint_by_id( |
|
1255 | path[0], self.uni_a.interface.switch.id, eq |
||
1256 | ) |
||
1257 | 1 | endpoint_z = self.get_endpoint_by_id( |
|
1258 | path[-1], self.uni_z.interface.switch.id, eq |
||
1259 | ) |
||
1260 | |||
1261 | # Flows for the first UNI |
||
1262 | 1 | flows_a = [] |
|
1263 | |||
1264 | # Flow for one direction, pushing the service tag |
||
1265 | 1 | if not skip_in: |
|
1266 | 1 | if isinstance(in_vlan_a, list): |
|
1267 | 1 | for in_mask_a in in_vlan_a: |
|
1268 | 1 | push_flow = self._prepare_push_flow( |
|
1269 | self.uni_a.interface, |
||
1270 | endpoint_a, |
||
1271 | in_mask_a, |
||
1272 | out_vlan_a, |
||
1273 | in_vlan_z, |
||
1274 | queue_id=self.queue_id, |
||
1275 | ) |
||
1276 | 1 | flows_a.append(push_flow) |
|
1277 | else: |
||
1278 | push_flow = self._prepare_push_flow( |
||
1279 | self.uni_a.interface, |
||
1280 | endpoint_a, |
||
1281 | in_vlan_a, |
||
1282 | out_vlan_a, |
||
1283 | in_vlan_z, |
||
1284 | queue_id=self.queue_id, |
||
1285 | ) |
||
1286 | flows_a.append(push_flow) |
||
1287 | |||
1288 | # Flow for the other direction, popping the service tag |
||
1289 | 1 | if not skip_out: |
|
1290 | 1 | pop_flow = self._prepare_pop_flow( |
|
1291 | endpoint_a, |
||
1292 | self.uni_a.interface, |
||
1293 | out_vlan_a, |
||
1294 | in_vlan_a, |
||
1295 | in_vlan_z, |
||
1296 | queue_id=self.queue_id, |
||
1297 | ) |
||
1298 | 1 | flows_a.append(pop_flow) |
|
1299 | |||
1300 | 1 | uni_flows[self.uni_a.interface.switch.id] = flows_a |
|
1301 | |||
1302 | # Flows for the second UNI |
||
1303 | 1 | flows_z = [] |
|
1304 | |||
1305 | # Flow for one direction, pushing the service tag |
||
1306 | 1 | if not skip_in: |
|
1307 | 1 | if isinstance(in_vlan_z, list): |
|
1308 | 1 | for in_mask_z in in_vlan_z: |
|
1309 | 1 | push_flow = self._prepare_push_flow( |
|
1310 | self.uni_z.interface, |
||
1311 | endpoint_z, |
||
1312 | in_mask_z, |
||
1313 | out_vlan_z, |
||
1314 | in_vlan_a, |
||
1315 | queue_id=self.queue_id, |
||
1316 | ) |
||
1317 | 1 | flows_z.append(push_flow) |
|
1318 | else: |
||
1319 | push_flow = self._prepare_push_flow( |
||
1320 | self.uni_z.interface, |
||
1321 | endpoint_z, |
||
1322 | in_vlan_z, |
||
1323 | out_vlan_z, |
||
1324 | in_vlan_a, |
||
1325 | queue_id=self.queue_id, |
||
1326 | ) |
||
1327 | flows_z.append(push_flow) |
||
1328 | |||
1329 | # Flow for the other direction, popping the service tag |
||
1330 | 1 | if not skip_out: |
|
1331 | 1 | pop_flow = self._prepare_pop_flow( |
|
1332 | endpoint_z, |
||
1333 | self.uni_z.interface, |
||
1334 | out_vlan_z, |
||
1335 | in_vlan_z, |
||
1336 | in_vlan_a, |
||
1337 | queue_id=self.queue_id, |
||
1338 | ) |
||
1339 | 1 | flows_z.append(pop_flow) |
|
1340 | |||
1341 | 1 | uni_flows[self.uni_z.interface.switch.id] = flows_z |
|
1342 | |||
1343 | 1 | return uni_flows |
|
1344 | |||
1345 | 1 | @staticmethod |
|
1346 | 1 | @retry( |
|
1347 | stop=stop_after_attempt(3), |
||
1348 | wait=wait_combine(wait_fixed(3), wait_random(min=2, max=7)), |
||
1349 | retry=retry_if_exception_type(FlowModException), |
||
1350 | before_sleep=before_sleep, |
||
1351 | reraise=True, |
||
1352 | ) |
||
1353 | 1 | def _send_flow_mods( |
|
1354 | data_content: dict, |
||
1355 | command="install", |
||
1356 | force=False, |
||
1357 | by_switch=False |
||
1358 | ): |
||
1359 | """Send a flow_mod list to a specific switch. |
||
1360 | |||
1361 | Args: |
||
1362 | data_content(dict): Python dictionary with the target switches |
||
1363 | and their flow_mods. |
||
1364 | command(str): Either 'install' (default) or 'delete'. |
||
1365 | force(bool): True to send via consistency check in case of errors. |
||
1366 | by_switch(bool): True to send to 'flows_by_switch' request instead. |
||
1367 | """ |
||
1368 | 1 | if by_switch: |
|
1369 | 1 | endpoint = f"{settings.MANAGER_URL}/flows_by_switch/?force={force}" |
|
1370 | else: |
||
1371 | 1 | endpoint = f"{settings.MANAGER_URL}/flows" |
|
1372 | 1 | data_content["force"] = force |
|
1373 | 1 | try: |
|
1374 | 1 | if command == "install": |
|
1375 | 1 | res = httpx.post(endpoint, json=data_content, timeout=30) |
|
1376 | 1 | elif command == "delete": |
|
1377 | 1 | res = httpx.request( |
|
1378 | "DELETE", endpoint, json=data_content, timeout=30 |
||
1379 | ) |
||
1380 | 1 | except httpx.RequestError as err: |
|
1381 | 1 | raise FlowModException(str(err)) from err |
|
1382 | 1 | if res.is_server_error or res.status_code >= 400: |
|
1383 | 1 | raise FlowModException(res.text) |
|
1384 | |||
1385 | 1 | def get_cookie(self): |
|
1386 | """Return the cookie integer from evc id.""" |
||
1387 | 1 | return int(self.id, 16) + (settings.COOKIE_PREFIX << 56) |
|
1388 | |||
1389 | 1 | @staticmethod |
|
1390 | 1 | def get_id_from_cookie(cookie): |
|
1391 | """Return the evc id given a cookie value.""" |
||
1392 | 1 | evc_id = cookie - (settings.COOKIE_PREFIX << 56) |
|
1393 | 1 | return f"{evc_id:x}".zfill(14) |
|
1394 | |||
1395 | 1 | def set_flow_table_group_id(self, flow_mod: dict, vlan) -> dict: |
|
1396 | """Set table_group and table_id""" |
||
1397 | 1 | table_group = "epl" if vlan is None else "evpl" |
|
1398 | 1 | flow_mod["table_group"] = table_group |
|
1399 | 1 | flow_mod["table_id"] = self.table_group[table_group] |
|
1400 | 1 | return flow_mod |
|
1401 | |||
1402 | 1 | @staticmethod |
|
1403 | 1 | def get_priority(vlan): |
|
1404 | """Return priority value depending on vlan value""" |
||
1405 | 1 | if isinstance(vlan, list): |
|
1406 | 1 | return settings.EVPL_SB_PRIORITY |
|
1407 | 1 | if vlan not in {None, "4096/4096", 0}: |
|
1408 | 1 | return settings.EVPL_SB_PRIORITY |
|
1409 | 1 | if vlan == 0: |
|
1410 | 1 | return settings.UNTAGGED_SB_PRIORITY |
|
1411 | 1 | if vlan == "4096/4096": |
|
1412 | 1 | return settings.ANY_SB_PRIORITY |
|
1413 | 1 | return settings.EPL_SB_PRIORITY |
|
1414 | |||
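The branch order above maps a UNI tag value to a southbound priority: a list (a TAGRange mask list) or any specific tag gets the EVPL priority, 0 maps to the untagged priority, "4096/4096" (match any tag) gets its own priority, and None falls through to the EPL priority. A quick standalone check of that mapping using stand-in values (the real constants live in settings):

EVPL, UNTAGGED, ANY, EPL = 20000, 20000, 15000, 10000  # illustrative values


def priority(vlan):
    if isinstance(vlan, list):            # TAGRange mask list -> EVPL
        return EVPL
    if vlan not in {None, "4096/4096", 0}:
        return EVPL                       # specific tag -> EVPL
    if vlan == 0:
        return UNTAGGED                   # untagged traffic
    if vlan == "4096/4096":
        return ANY                        # match any tag
    return EPL                            # no tag at all -> EPL


assert priority(["101/4095", 300]) == EVPL
assert priority(300) == EVPL
assert priority(0) == UNTAGGED
assert priority("4096/4096") == ANY
assert priority(None) == EPL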
1415 | 1 | def _prepare_flow_mod(self, in_interface, out_interface, |
|
1416 | queue_id=None, vlan=True): |
||
1417 | """Prepare a common flow mod.""" |
||
1418 | 1 | default_actions = [ |
|
1419 | {"action_type": "output", "port": out_interface.port_number} |
||
1420 | ] |
||
1421 | 1 | queue_id = settings.QUEUE_ID if queue_id == -1 else queue_id |
|
1422 | 1 | if queue_id is not None: |
|
1423 | 1 | default_actions.insert( |
|
1424 | 0, |
||
1425 | {"action_type": "set_queue", "queue_id": queue_id} |
||
1426 | ) |
||
1427 | |||
1428 | 1 | flow_mod = { |
|
1429 | "match": {"in_port": in_interface.port_number}, |
||
1430 | "cookie": self.get_cookie(), |
||
1431 | "actions": default_actions, |
||
1432 | "owner": "mef_eline", |
||
1433 | } |
||
1434 | |||
1435 | 1 | self.set_flow_table_group_id(flow_mod, vlan) |
|
1436 | 1 | if self.sb_priority: |
|
1437 | 1 | flow_mod["priority"] = self.sb_priority |
|
1438 | else: |
||
1439 | 1 | flow_mod["priority"] = self.get_priority(vlan) |
|
1440 | 1 | return flow_mod |
|
1441 | |||
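For reference, a sketch of the dictionary shape that _prepare_flow_mod builds before the UNI/NNI helpers add their VLAN matches and extra actions; every value below is hypothetical (cookie, ports, table_id and priority depend on the EVC and on settings):

flow_mod = {
    "match": {"in_port": 1},                           # in_interface.port_number
    "cookie": 0xaa0000a1b2c3d4e5,                      # get_cookie() of this EVC
    "actions": [
        {"action_type": "set_queue", "queue_id": 3},   # only when queue_id is set
        {"action_type": "output", "port": 2},          # out_interface.port_number
    ],
    "owner": "mef_eline",
    "table_group": "evpl",                             # "epl" when there is no VLAN
    "table_id": 0,                                     # resolved from self.table_group
    "priority": 20000,                                 # sb_priority or get_priority(vlan)
}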
1442 | 1 | def _prepare_nni_flow(self, *args, queue_id=None): |
|
1443 | """Create NNI flows.""" |
||
1444 | 1 | in_interface, out_interface, in_vlan, out_vlan = args |
|
1445 | 1 | flow_mod = self._prepare_flow_mod( |
|
1446 | in_interface, out_interface, queue_id |
||
1447 | ) |
||
1448 | 1 | flow_mod["match"]["dl_vlan"] = in_vlan |
|
1449 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": out_vlan} |
|
1450 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1451 | |||
1452 | 1 | return flow_mod |
|
1453 | |||
1454 | 1 | def _prepare_push_flow(self, *args, queue_id=None): |
|
1455 | """Prepare push flow. |
||
1456 | |||
1457 | Arguments: |
||
1458 | in_interface(Interface): Interface input. |
||
1459 | out_interface(Interface): Interface output. |
||
1460 | in_vlan(int,str,None): Vlan input. |
||
1461 | out_vlan(int): Vlan output. |
||
1462 | new_c_vlan(int,str,list,None): New client vlan. |
||
1463 | |||
1464 | Return: |
||
1465 | dict: A Python dictionary representing a FlowMod |
||
1466 | |||
1467 | """ |
||
1468 | # assign all arguments |
||
1469 | 1 | in_interface, out_interface, in_vlan, out_vlan, new_c_vlan = args |
|
1470 | 1 | vlan_pri = in_vlan if not isinstance(new_c_vlan, list) else new_c_vlan |
|
1471 | 1 | flow_mod = self._prepare_flow_mod( |
|
1472 | in_interface, out_interface, queue_id, vlan_pri |
||
1473 | ) |
||
1474 | # the service tag must be always pushed |
||
1475 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": out_vlan} |
|
1476 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1477 | |||
1478 | 1 | if ( |
|
1479 | not (in_vlan != new_c_vlan and isinstance(in_vlan, int) and |
||
1480 | isinstance(new_c_vlan, int)) |
||
1481 | ): |
||
1482 | # Push the service (S) VLAN header unless both in_vlan and |
||
1483 | # new_c_vlan are integers and differ from each other. |
||
1484 | 1 | new_action = {"action_type": "push_vlan", "tag_type": "s"} |
|
1485 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1486 | |||
1487 | 1 | if in_vlan is not None: |
|
1488 | # if in_vlan is set, it must be included in the match |
||
1489 | 1 | flow_mod["match"]["dl_vlan"] = in_vlan |
|
1490 | |||
1491 | 1 | if ( |
|
1492 | not isinstance(in_vlan, int) and isinstance(new_c_vlan, int) and |
||
1493 | new_c_vlan != 0 |
||
1494 | ): |
||
1495 | # new_c_vlan is an integer other than zero, so a set_vlan action is required |
||
1496 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": new_c_vlan} |
|
1497 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1498 | |||
1499 | 1 | if in_vlan == "4096/4096" and new_c_vlan == 0: |
|
1500 | # if in_vlan matches any tag and new_c_vlan is untagged (0), |
||
1501 | # a pop_vlan action is required |
||
1502 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1503 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1504 | |||
1505 | 1 | elif (not in_vlan and |
|
1506 | (not isinstance(new_c_vlan, list) and |
||
1507 | new_c_vlan not in self.special_cases)): |
||
1508 | # in_vlan is no-tag or untagged and new_c_vlan is a specific tag, |
||
1509 | # so a client (C) VLAN header must be pushed |
||
1510 | 1 | new_action = {"action_type": "push_vlan", "tag_type": "c"} |
|
1511 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1512 | |||
1513 | 1 | return flow_mod |
|
1514 | |||
1515 | 1 | def _prepare_pop_flow( |
|
1516 | self, |
||
1517 | in_interface: Interface, |
||
1518 | out_interface: Interface, |
||
1519 | out_vlan: int, |
||
1520 | in_vlan: Union[int, str, list, None], |
||
1521 | new_c_vlan: Union[int, str, list, None], |
||
1522 | queue_id=None, |
||
1523 | ): |
||
1524 | # pylint: disable=too-many-arguments |
||
1525 | """Prepare pop flow.""" |
||
1526 | 1 | flow_mod = self._prepare_flow_mod( |
|
1527 | in_interface, out_interface, queue_id |
||
1528 | ) |
||
1529 | 1 | flow_mod["match"]["dl_vlan"] = out_vlan |
|
1530 | 1 | if in_vlan == 0: |
|
1531 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1532 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1533 | 1 | elif ( |
|
1534 | in_vlan != new_c_vlan and isinstance(in_vlan, int) and |
||
1535 | isinstance(new_c_vlan, int) |
||
1536 | ): |
||
1537 | # If UNI VLANs are different and in_vlan is not 0 |
||
1538 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": in_vlan} |
|
1539 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1540 | else: |
||
1541 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1542 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1543 | 1 | return flow_mod |
|
1544 | |||
1545 | 1 | @staticmethod |
|
1546 | 1 | def run_bulk_sdntraces( |
|
1547 | uni_list: list[tuple[Interface, Union[str, int, None]]] |
||
1548 | ) -> dict: |
||
1549 | """Run SDN traces on control plane starting from EVC UNIs.""" |
||
1550 | 1 | endpoint = f"{settings.SDN_TRACE_CP_URL}/traces" |
|
1551 | 1 | data = [] |
|
1552 | 1 | for interface, tag_value in uni_list: |
|
1553 | 1 | data_uni = { |
|
1554 | "trace": { |
||
1555 | "switch": { |
||
1556 | "dpid": interface.switch.dpid, |
||
1557 | "in_port": interface.port_number, |
||
1558 | } |
||
1559 | } |
||
1560 | } |
||
1561 | 1 | if tag_value: |
|
1562 | 1 | uni_dl_vlan = map_dl_vlan(tag_value) |
|
1563 | 1 | if uni_dl_vlan: |
|
1564 | 1 | data_uni["trace"]["eth"] = { |
|
1565 | "dl_type": 0x8100, |
||
1566 | "dl_vlan": uni_dl_vlan, |
||
1567 | } |
||
1568 | 1 | data.append(data_uni) |
|
1569 | 1 | try: |
|
1570 | 1 | response = httpx.put(endpoint, json=data, timeout=30) |
|
1571 | 1 | except httpx.TimeoutException as exception: |
|
1572 | 1 | log.error(f"Request has timed out: {exception}") |
|
1573 | 1 | return {"result": []} |
|
1574 | 1 | if response.status_code >= 400: |
|
1575 | 1 | log.error(f"Failed to run sdntrace-cp: {response.text}") |
|
1576 | 1 | return {"result": []} |
|
1577 | 1 | return response.json() |
|
1578 | |||
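run_bulk_sdntraces PUTs one trace request per UNI to the sdntrace_cp traces endpoint; tagged UNIs also carry an "eth" block with the 802.1Q ethertype and the VLAN mapped by map_dl_vlan. A sketch of the request body for two UNIs, one tagged with VLAN 100 and one untagged (dpids and ports are illustrative):

payload = [
    {
        "trace": {
            "switch": {"dpid": "00:00:00:00:00:00:00:01", "in_port": 1},
            "eth": {"dl_type": 0x8100, "dl_vlan": 100},
        }
    },
    {
        "trace": {
            "switch": {"dpid": "00:00:00:00:00:00:00:02", "in_port": 7},
            # no "eth" block: untagged UNI, nothing mapped by map_dl_vlan
        }
    },
]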
1579 | # pylint: disable=too-many-return-statements, too-many-arguments |
||
1580 | 1 | @staticmethod |
|
1581 | 1 | def check_trace( |
|
1582 | evc_id: str, |
||
1583 | evc_name: str, |
||
1584 | tag_a: Union[None, int, str], |
||
1585 | tag_z: Union[None, int, str], |
||
1586 | interface_a: Interface, |
||
1587 | interface_z: Interface, |
||
1588 | current_path: list, |
||
1589 | trace_a: list, |
||
1590 | trace_z: list |
||
1591 | ) -> bool: |
||
1592 | """Auxiliar function to check an individual trace""" |
||
1593 | 1 | if ( |
|
1594 | len(trace_a) != len(current_path) + 1 |
||
1595 | or not compare_uni_out_trace(tag_z, interface_z, trace_a[-1]) |
||
1596 | ): |
||
1597 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1598 | f"Invalid trace from uni_a: {trace_a}") |
||
1599 | 1 | return False |
|
1600 | 1 | if ( |
|
1601 | len(trace_z) != len(current_path) + 1 |
||
1602 | or not compare_uni_out_trace(tag_a, interface_a, trace_z[-1]) |
||
1603 | ): |
||
1604 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1605 | f"Invalid trace from uni_z: {trace_z}") |
||
1606 | 1 | return False |
|
1607 | |||
1608 | 1 | if not current_path: |
|
1609 | return True |
||
1610 | |||
1611 | 1 | first_link, trace_path_begin, trace_path_end = current_path[0], [], [] |
|
1612 | 1 | if ( |
|
1613 | first_link.endpoint_a.switch.id == trace_a[0]["dpid"] |
||
1614 | ): |
||
1615 | 1 | trace_path_begin, trace_path_end = trace_a, trace_z |
|
1616 | 1 | elif ( |
|
1617 | first_link.endpoint_a.switch.id == trace_z[0]["dpid"] |
||
1618 | ): |
||
1619 | 1 | trace_path_begin, trace_path_end = trace_z, trace_a |
|
1620 | else: |
||
1621 | msg = ( |
||
1622 | f"first link {first_link} endpoint_a didn't match the first " |
||
1623 | f"step of trace_a {trace_a} or trace_z {trace_z}" |
||
1624 | ) |
||
1625 | log.warning(msg) |
||
1626 | return False |
||
1627 | |||
1628 | 1 | for link, trace1, trace2 in zip(current_path, |
|
1629 | trace_path_begin[1:], |
||
1630 | trace_path_end[:0:-1]): |
||
1631 | 1 | metadata_vlan = None |
|
1632 | 1 | if link.metadata: |
|
1633 | 1 | metadata_vlan = glom(link.metadata, 's_vlan.value') |
|
1634 | 1 | if compare_endpoint_trace( |
|
1635 | link.endpoint_a, |
||
1636 | metadata_vlan, |
||
1637 | trace2 |
||
1638 | ) is False: |
||
1639 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1640 | f"Invalid trace from uni_a: {trace_a}") |
||
1641 | 1 | return False |
|
1642 | 1 | if compare_endpoint_trace( |
|
1643 | link.endpoint_b, |
||
1644 | metadata_vlan, |
||
1645 | trace1 |
||
1646 | ) is False: |
||
1647 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1648 | f"Invalid trace from uni_z: {trace_z}") |
||
1649 | 1 | return False |
|
1650 | |||
1651 | 1 | return True |
|
1652 | |||
1653 | 1 | @staticmethod |
|
1654 | 1 | def check_range(circuit, traces: list) -> bool: |
|
1655 | """Check traces when for UNI with TAGRange""" |
||
1656 | 1 | check = True |
|
1657 | 1 | for i, mask in enumerate(circuit.uni_a.user_tag.mask_list): |
|
1658 | 1 | trace_a = traces[i*2] |
|
1659 | 1 | trace_z = traces[i*2+1] |
|
1660 | 1 | check &= EVCDeploy.check_trace( |
|
1661 | circuit.id, circuit.name, |
||
1662 | mask, mask, |
||
1663 | circuit.uni_a.interface, |
||
1664 | circuit.uni_z.interface, |
||
1665 | circuit.current_path, |
||
1666 | trace_a, trace_z, |
||
1667 | ) |
||
1668 | 1 | return check |
|
1669 | |||
1670 | 1 | @staticmethod |
|
1671 | 1 | def check_list_traces(list_circuits: list) -> dict: |
|
1672 | """Check if current_path is deployed comparing with SDN traces.""" |
||
1673 | 1 | if not list_circuits: |
|
1674 | 1 | return {} |
|
1675 | 1 | uni_list = make_uni_list(list_circuits) |
|
1676 | 1 | traces = EVCDeploy.run_bulk_sdntraces(uni_list)["result"] |
|
1677 | |||
1678 | 1 | if not traces: |
|
1679 | 1 | return {} |
|
1680 | |||
1681 | 1 | try: |
|
1682 | 1 | circuits_checked = {} |
|
1683 | 1 | i = 0 |
|
1684 | 1 | for circuit in list_circuits: |
|
1685 | 1 | if isinstance(circuit.uni_a.user_tag, TAGRange): |
|
1686 | 1 | length = len(circuit.uni_a.user_tag.mask_list) |
|
1687 | 1 | circuits_checked[circuit.id] = EVCDeploy.check_range( |
|
1688 | circuit, traces[i:i+length*2] |
||
1689 | ) |
||
1690 | 1 | i += length*2 |
|
1691 | else: |
||
1692 | 1 | trace_a = traces[i] |
|
1693 | 1 | trace_z = traces[i+1] |
|
1694 | 1 | tag_a = None |
|
1695 | 1 | if circuit.uni_a.user_tag: |
|
1696 | 1 | tag_a = circuit.uni_a.user_tag.value |
|
1697 | 1 | tag_z = None |
|
1698 | 1 | if circuit.uni_z.user_tag: |
|
1699 | 1 | tag_z = circuit.uni_z.user_tag.value |
|
1700 | 1 | circuits_checked[circuit.id] = EVCDeploy.check_trace( |
|
1701 | circuit.id, circuit.name, |
||
1702 | tag_a, tag_z, |
||
1703 | circuit.uni_a.interface, |
||
1704 | circuit.uni_z.interface, |
||
1705 | circuit.current_path, |
||
1706 | trace_a, trace_z |
||
1707 | ) |
||
1708 | 1 | i += 2 |
|
1709 | 1 | except IndexError as err: |
|
1710 | 1 | log.error( |
|
1711 | f"Bulk sdntraces returned fewer items than expected." |
||
1712 | f"Error = {err}" |
||
1713 | ) |
||
1714 | 1 | return {} |
|
1715 | |||
1716 | 1 | return circuits_checked |
|
1717 | |||
1718 | 1 | @staticmethod |
|
1719 | 1 | def get_endpoint_by_id( |
|
1720 | link: Link, |
||
1721 | id_: str, |
||
1722 | operator: Union[eq, ne] |
||
1723 | ) -> Interface: |
||
1724 | """Return endpoint from link |
||
1725 | either equal(eq) or not equal(ne) to id""" |
||
1726 | 1 | if operator(link.endpoint_a.switch.id, id_): |
|
1727 | 1 | return link.endpoint_a |
|
1728 | 1 | return link.endpoint_b |
|
1729 | |||
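get_endpoint_by_id selects one side of a link by comparing endpoint_a's switch id with the given operator: eq returns the endpoint sitting on that switch, ne returns the endpoint on the opposite side. A self-contained sketch with stand-in Link/Interface objects (namedtuples, not the kytos classes):

from collections import namedtuple
from operator import eq, ne

Endpoint = namedtuple("Endpoint", ["switch_id", "port"])
FakeLink = namedtuple("FakeLink", ["endpoint_a", "endpoint_b"])


def endpoint_by_id(link, id_, operator):
    # Same selection logic as get_endpoint_by_id above.
    if operator(link.endpoint_a.switch_id, id_):
        return link.endpoint_a
    return link.endpoint_b


link = FakeLink(Endpoint("sw1", 1), Endpoint("sw2", 2))
assert endpoint_by_id(link, "sw1", eq).port == 1   # endpoint on sw1
assert endpoint_by_id(link, "sw1", ne).port == 2   # endpoint not on sw1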
1730 | |||
1731 | 1 | class LinkProtection(EVCDeploy): |
|
1732 | """Class to handle link protection.""" |
||
1733 | |||
1734 | 1 | def is_affected_by_link(self, link=None): |
|
1735 | """Verify if the current path is affected by link down event.""" |
||
1736 | return self.current_path.is_affected_by_link(link) |
||
1737 | |||
1738 | 1 | def is_using_primary_path(self): |
|
1739 | """Verify if the current deployed path is self.primary_path.""" |
||
1740 | 1 | return self.current_path == self.primary_path |
|
1741 | |||
1742 | 1 | def is_using_backup_path(self): |
|
1743 | """Verify if the current deployed path is self.backup_path.""" |
||
1744 | 1 | return self.current_path == self.backup_path |
|
1745 | |||
1746 | 1 | def is_using_dynamic_path(self): |
|
1747 | """Verify if the current deployed path is dynamic.""" |
||
1748 | 1 | if ( |
|
1749 | self.current_path |
||
1750 | and not self.is_using_primary_path() |
||
1751 | and not self.is_using_backup_path() |
||
1752 | and self.current_path.status is EntityStatus.UP |
||
1753 | ): |
||
1754 | return True |
||
1755 | 1 | return False |
|
1756 | |||
1757 | 1 | def handle_link_up(self, link=None, interface=None): |
|
1758 | """Handle circuit when link up. |
||
1759 | |||
1760 | Args: |
||
1761 | link(Link): Link affected by link.up event. |
||
1762 | |||
1763 | """ |
||
1764 | 1 | condition_pairs = [ |
|
1765 | ( |
||
1766 | lambda me: me.is_using_primary_path(), |
||
1767 | lambda _: (True, 'nothing') |
||
1768 | ), |
||
1769 | ( |
||
1770 | lambda me: me.is_intra_switch(), |
||
1771 | lambda _: (True, 'nothing') |
||
1772 | ), |
||
1773 | ( |
||
1774 | lambda me: me.primary_path.is_affected_by_link(link), |
||
1775 | lambda me: (me.deploy_to_primary_path(), 'redeploy') |
||
1776 | ), |
||
1777 | # This special case is reached because the interface was previously |
||
1778 | # confirmed to be a UNI and both UNIs are UP |
||
1779 | ( |
||
1780 | lambda me: (me.primary_path.status == EntityStatus.UP |
||
1781 | and interface), |
||
1782 | lambda me: (me.deploy_to_primary_path(), 'redeploy') |
||
1783 | ), |
||
1784 | ( |
||
1785 | lambda me: (me.backup_path.status == EntityStatus.UP |
||
1786 | and interface), |
||
1787 | lambda me: (me.deploy_to_backup_path(), 'redeploy') |
||
1788 | ), |
||
1789 | # We tried to deploy the primary_path without success, and in |
||
1790 | # this case the backup is somehow up. Nothing to do. |
||
1791 | ( |
||
1792 | lambda me: me.is_using_backup_path(), |
||
1793 | lambda _: (True, 'nothing') |
||
1794 | ), |
||
1795 | ( |
||
1796 | lambda me: me.is_using_dynamic_path(), |
||
1797 | lambda _: (True, 'nothing') |
||
1798 | ), |
||
1799 | # In this case, the circuit is probably not in use and we can |
||
1800 | # move it to the backup path |
||
1801 | ( |
||
1802 | lambda me: me.backup_path.is_affected_by_link(link), |
||
1803 | lambda me: (me.deploy_to_backup_path(), 'redeploy') |
||
1804 | ), |
||
1805 | # In this case, the circuit is not being used and we should |
||
1806 | # try a dynamic path |
||
1807 | ( |
||
1808 | lambda me: me.dynamic_backup_path and not me.is_active(), |
||
1809 | lambda me: (me.deploy_to_path(), 'redeploy') |
||
1810 | ) |
||
1811 | ] |
||
1812 | 1 | for predicate, action in condition_pairs: |
|
1813 | 1 | if not predicate(self): |
|
1814 | 1 | continue |
|
1815 | 1 | success, success_type = action(self) |
|
1816 | 1 | if success: |
|
1817 | 1 | if success_type == 'redeploy': |
|
1818 | 1 | emit_event( |
|
1819 | self._controller, |
||
1820 | "redeployed_link_up", |
||
1821 | content=map_evc_event_content(self) |
||
1822 | ) |
||
1823 | 1 | return True |
|
1824 | 1 | return False |
|
1825 | |||
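handle_link_up is structured as a predicate/action table evaluated in priority order: the first predicate that matches has its action run, and a successful action short-circuits the handler, emitting a redeploy event when applicable. A minimal standalone sketch of that control flow, with a plain dict standing in for the EVC state:

def handle(state, condition_pairs):
    # Evaluate pairs in order; the first matching predicate whose action
    # succeeds decides the outcome, mirroring the loop above.
    for predicate, action in condition_pairs:
        if not predicate(state):
            continue
        success, success_type = action(state)
        if success:
            if success_type == "redeploy":
                state["events"].append("redeployed_link_up")
            return True
    return False


pairs = [
    (lambda s: s["using_primary"], lambda s: (True, "nothing")),
    (lambda s: s["primary_up"], lambda s: (s["deploy"](), "redeploy")),
]
state = {"using_primary": False, "primary_up": True,
         "deploy": lambda: True, "events": []}
assert handle(state, pairs) is True
assert state["events"] == ["redeployed_link_up"]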
1826 | 1 | def handle_link_down(self): |
|
1827 | """Handle circuit when link down. |
||
1828 | |||
1829 | Returns: |
||
1830 | bool: True if the re-deploy was successful, otherwise False. |
||
1831 | |||
1832 | """ |
||
1833 | 1 | success = False |
|
1834 | 1 | if self.is_using_primary_path(): |
|
1835 | 1 | success = self.deploy_to_backup_path() |
|
1836 | 1 | elif self.is_using_backup_path(): |
|
1837 | 1 | success = self.deploy_to_primary_path() |
|
1838 | |||
1839 | 1 | if not success and self.dynamic_backup_path: |
|
1840 | 1 | success = self.deploy_to_path() |
|
1841 | |||
1842 | 1 | if success: |
|
1843 | 1 | log.debug(f"{self} deployed after link down.") |
|
1844 | else: |
||
1845 | 1 | self.remove_current_flows(sync=False) |
|
1846 | 1 | self.deactivate() |
|
1847 | 1 | self.sync() |
|
1848 | 1 | log.debug(f"Failed to re-deploy {self} after link down.") |
|
1849 | |||
1850 | 1 | return success |
|
1851 | |||
1852 | 1 | def are_unis_active(self) -> bool: |
|
1853 | """Determine whether this EVC should be active""" |
||
1854 | 1 | interface_a = self.uni_a.interface |
|
1855 | 1 | interface_z = self.uni_z.interface |
|
1856 | 1 | active, _ = self.is_uni_interface_active(interface_a, interface_z) |
|
1857 | 1 | return active |
|
1858 | |||
1859 | 1 | def try_to_handle_uni_as_link_up(self, interface: Interface) -> bool: |
|
1860 | """Try to handle UNI as link_up to trigger deployment.""" |
||
1861 | if ( |
||
1862 | self.current_path.status != EntityStatus.UP |
||
1863 | and not self.is_intra_switch() |
||
1864 | ): |
||
1865 | succeeded = self.handle_link_up(interface=interface) |
||
1866 | if succeeded: |
||
1867 | msg = ( |
||
1868 | f"Activated {self} due to successful " |
||
1869 | f"deployment triggered by {interface}" |
||
1870 | ) |
||
1871 | else: |
||
1872 | msg = ( |
||
1873 | f"Couldn't activate {self} due to unsuccessful " |
||
1874 | f"deployment triggered by {interface}" |
||
1875 | ) |
||
1876 | log.info(msg) |
||
1877 | return True |
||
1878 | return False |
||
1879 | |||
1880 | 1 | def handle_interface_link_up(self, interface: Interface): |
|
1881 | """ |
||
1882 | Handler for interface link_up events |
||
1883 | """ |
||
1884 | 1 | if not _does_uni_affect_evc(self, interface, "up"): |
|
1885 | 1 | return |
|
1886 | 1 | if self.try_to_handle_uni_as_link_up(interface): |
|
1887 | return |
||
1888 | |||
1889 | 1 | interface_dicts = { |
|
1890 | interface.id: { |
||
1891 | 'status': interface.status.value, |
||
1892 | 'status_reason': interface.status_reason, |
||
1893 | } |
||
1894 | for interface in (self.uni_a.interface, self.uni_z.interface) |
||
1895 | } |
||
1896 | 1 | try: |
|
1897 | 1 | self.try_to_activate() |
|
1898 | 1 | log.info( |
|
1899 | f"Activating {self}. Interfaces: " |
||
1900 | f"{interface_dicts}." |
||
1901 | ) |
||
1902 | 1 | emit_event(self._controller, "uni_active_updated", |
|
1903 | content=map_evc_event_content(self)) |
||
1904 | 1 | self.sync() |
|
1905 | except ActivationError as exc: |
||
1906 | # In this context, an ActivationError isn't expected since the |
||
1907 | # activation pre-requisite states were checked, so it is handled as an error |
||
1908 | log.error(f"ActivationError: {str(exc)} when handling {interface}") |
||
1909 | |||
1910 | 1 | def handle_interface_link_down(self, interface): |
|
1911 | """ |
||
1912 | Handler for interface link_down events |
||
1913 | """ |
||
1914 | 1 | if not _does_uni_affect_evc(self, interface, "down"): |
|
1915 | 1 | return |
|
1916 | 1 | interface_dicts = { |
|
1917 | interface.id: { |
||
1918 | 'status': interface.status.value, |
||
1919 | 'status_reason': interface.status_reason, |
||
1920 | } |
||
1921 | for interface in (self.uni_a.interface, self.uni_z.interface) |
||
1922 | if interface.status != EntityStatus.UP |
||
1923 | } |
||
1924 | 1 | self.deactivate() |
|
1925 | 1 | log.info( |
|
1926 | f"Deactivating {self}. Interfaces: " |
||
1927 | f"{interface_dicts}." |
||
1928 | ) |
||
1929 | 1 | emit_event(self._controller, "uni_active_updated", |
|
1930 | content=map_evc_event_content(self)) |
||
1931 | 1 | self.sync() |
|
1932 | |||
1933 | |||
1934 | 1 | class EVC(LinkProtection): |
|
1935 | """Class that represents a E-Line Virtual Connection.""" |
||
1936 |