Total Complexity | 334
Total Lines | 1846
Duplicated Lines | 1.3%
Coverage | 94.6%
Changes | 0
Duplicate code is one of the most pungent code smells. A commonly used rule of thumb is to restructure code once it is duplicated in three or more places.
One common cure for duplication is to extract the repeated logic into a single shared helper, as illustrated by the sketch below.
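The following Python sketch is illustrative only: the helper name _apply_vlan_actions and its signature are assumptions, not part of napps.kytos.mef_eline, but it shows how the two near-duplicate VLAN-action blocks flagged later in _prepare_direct_uni_flows could collapse into one function.

# Hypothetical helper (name and signature are assumptions, not real code from
# the module): it captures the duplicated direction-specific VLAN handling.
def _apply_vlan_actions(out_flow, pop_flow, set_vlan, match_vlan, special_cases):
    """Insert the set/push/pop VLAN actions shared by both directions."""
    if not isinstance(set_vlan, list) and set_vlan not in special_cases:
        # A specific client VLAN must be set on the outgoing flow.
        out_flow["actions"].insert(
            0, {"action_type": "set_vlan", "vlan_id": set_vlan}
        )
        if not match_vlan:
            # No client tag is matched on this side, so push a c-tag first.
            out_flow["actions"].insert(
                0, {"action_type": "push_vlan", "tag_type": "c"}
            )
        if match_vlan == 0:
            pop_flow["actions"].insert(0, {"action_type": "pop_vlan"})
    elif match_vlan == 0 and set_vlan == "4096/4096":
        pop_flow["actions"].insert(0, {"action_type": "pop_vlan"})

# Both duplicated blocks would then become single calls, e.g.:
# _apply_vlan_actions(flow_mod_az, flow_mod_za, vlan_z, vlan_a, self.special_cases)
# _apply_vlan_actions(flow_mod_za, flow_mod_az, vlan_a, vlan_z, self.special_cases)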
Complex classes like build.models.evc often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring, as sketched below. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster.
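A minimal, hypothetical Python sketch of Extract Class (the FailoverState and Circuit names are illustrative, not taken from the real EVC classes): fields and methods sharing the failover_ prefix move into one cohesive component, and the original class delegates to it.

class FailoverState:
    """Groups the failover-related state that previously lived on the big class."""

    def __init__(self):
        self.path = []            # was: failover_path on the original class
        self.affected_at = None   # was: affected_by_link_at on the original class

    def is_affected_by_link(self, link):
        """Return True if the failover path uses the given link."""
        return link in self.path


class Circuit:
    """After the refactoring, the original class delegates to the component."""

    def __init__(self):
        self.failover = FailoverState()

    def is_failover_path_affected_by_link(self, link):
        # Thin delegation keeps the public API stable for existing callers.
        return self.failover.is_affected_by_link(link)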
1 | """Classes used in the main application.""" # pylint: disable=too-many-lines |
||
2 | 1 | import traceback |
|
3 | 1 | from collections import OrderedDict, defaultdict |
|
4 | 1 | from copy import deepcopy |
|
5 | 1 | from datetime import datetime |
|
6 | 1 | from operator import eq, ne |
|
7 | 1 | from threading import Lock |
|
8 | 1 | from typing import Union |
|
9 | 1 | from uuid import uuid4 |
|
10 | |||
11 | 1 | import httpx |
|
12 | 1 | from glom import glom |
|
13 | 1 | from tenacity import (retry, retry_if_exception_type, stop_after_attempt, |
|
14 | wait_combine, wait_fixed, wait_random) |
||
15 | |||
16 | 1 | from kytos.core import log |
|
17 | 1 | from kytos.core.common import EntityStatus, GenericEntity |
|
18 | 1 | from kytos.core.exceptions import KytosNoTagAvailableError, KytosTagError |
|
19 | 1 | from kytos.core.helpers import get_time, now |
|
20 | 1 | from kytos.core.interface import UNI, Interface, TAGRange |
|
21 | 1 | from kytos.core.link import Link |
|
22 | 1 | from kytos.core.retry import before_sleep |
|
23 | 1 | from kytos.core.tag_ranges import range_difference |
|
24 | 1 | from napps.kytos.mef_eline import controllers, settings |
|
25 | 1 | from napps.kytos.mef_eline.exceptions import (DuplicatedNoTagUNI, |
|
26 | EVCPathNotInstalled, |
||
27 | FlowModException, InvalidPath) |
||
28 | 1 | from napps.kytos.mef_eline.utils import (check_disabled_component, |
|
29 | compare_endpoint_trace, |
||
30 | compare_uni_out_trace, emit_event, |
||
31 | make_uni_list, map_dl_vlan, |
||
32 | map_evc_event_content, |
||
33 | merge_flow_dicts) |
||
34 | |||
35 | 1 | from .path import DynamicPathManager, Path |
|
36 | |||
37 | |||
38 | 1 | class EVCBase(GenericEntity): |
|
39 | """Class to represent a circuit.""" |
||
40 | |||
41 | 1 | attributes_requiring_redeploy = [ |
|
42 | "primary_path", |
||
43 | "backup_path", |
||
44 | "dynamic_backup_path", |
||
45 | "queue_id", |
||
46 | "sb_priority", |
||
47 | "primary_constraints", |
||
48 | "secondary_constraints", |
||
49 | "uni_a", |
||
50 | "uni_z", |
||
51 | ] |
||
52 | 1 | required_attributes = ["name", "uni_a", "uni_z"] |
|
53 | |||
54 | 1 | updatable_attributes = { |
|
55 | "uni_a", |
||
56 | "uni_z", |
||
57 | "name", |
||
58 | "start_date", |
||
59 | "end_date", |
||
60 | "queue_id", |
||
61 | "bandwidth", |
||
62 | "primary_path", |
||
63 | "backup_path", |
||
64 | "dynamic_backup_path", |
||
65 | "primary_constraints", |
||
66 | "secondary_constraints", |
||
67 | "owner", |
||
68 | "sb_priority", |
||
69 | "service_level", |
||
70 | "circuit_scheduler", |
||
71 | "metadata", |
||
72 | "enabled" |
||
73 | } |
||
74 | |||
75 | # pylint: disable=too-many-statements |
||
76 | 1 | def __init__(self, controller, **kwargs): |
|
77 | """Create an EVC instance with the provided parameters. |
||
78 | |||
79 | Args: |
||
80 | id(str): EVC identifier. If it is None, an ID will be generated. |
||
81 | Only the first 14 bytes passed will be used. |
||
82 | name: represents an EVC name. (Required) |
||
83 | uni_a (UNI): Endpoint A for User Network Interface.(Required) |
||
84 | uni_z (UNI): Endpoint Z for User Network Interface.(Required) |
||
85 | start_date(datetime|str): Date when the EVC was registered. |
||
86 | Default is now(). |
||
87 | end_date(datetime|str): Final date when the EVC will be finished. |
||
88 | Default is None. |
||
89 | bandwidth(int): Bandwidth used by EVC instance. Default is 0. |
||
90 | primary_links(list): Primary links used by the EVC. Default is []. |
||
91 | backup_links(list): Backup links used by the EVC. Default is []. |
||
92 | current_path(list): Circuit being used at the moment if this is an |
||
93 | active circuit. Default is []. |
||
94 | failover_path(list): Path being used to provide EVC protection via |
||
95 | failover during link failures. Default is []. |
||
96 | primary_path(list): primary circuit offered to user IF one or more |
||
97 | links were provided. Default is []. |
||
98 | backup_path(list): backup circuit offered to the user IF one or |
||
99 | more links were provided. Default is []. |
||
100 | dynamic_backup_path(bool): Enable computing the backup path |
||
101 | dynamically. Default is False. |
||
102 | creation_time(datetime|str): datetime when the circuit should be |
||
103 | activated. default is now(). |
||
104 | enabled(Boolean): attribute to indicate the administrative state; |
||
105 | default is False. |
||
106 | active(Boolean): attribute to indicate the operational state; |
||
107 | default is False. |
||
108 | archived(Boolean): indicate the EVC has been deleted and is |
||
109 | archived; default is False. |
||
110 | owner(str): The EVC owner. Default is None. |
||
111 | sb_priority(int): Southbound (flow) priority provided in the request. |
||
112 | Default is None. |
||
113 | service_level(int): Service level provided. The higher the better. |
||
114 | Default is 0. |
||
115 | |||
116 | Raises: |
||
117 | ValueError: raised when object attributes are invalid. |
||
118 | |||
119 | """ |
||
120 | 1 | self._controller = controller |
|
121 | 1 | self._validate(**kwargs) |
|
122 | 1 | super().__init__() |
|
123 | |||
124 | # required attributes |
||
125 | 1 | self._id = kwargs.get("id", uuid4().hex)[:14] |
|
126 | 1 | self.uni_a: UNI = kwargs.get("uni_a") |
|
127 | 1 | self.uni_z: UNI = kwargs.get("uni_z") |
|
128 | 1 | self.name = kwargs.get("name") |
|
129 | |||
130 | # optional attributes |
||
131 | 1 | self.start_date = get_time(kwargs.get("start_date")) or now() |
|
132 | 1 | self.end_date = get_time(kwargs.get("end_date")) or None |
|
133 | 1 | self.queue_id = kwargs.get("queue_id", -1) |
|
134 | |||
135 | 1 | self.bandwidth = kwargs.get("bandwidth", 0) |
|
136 | 1 | self.primary_links = Path(kwargs.get("primary_links", [])) |
|
137 | 1 | self.backup_links = Path(kwargs.get("backup_links", [])) |
|
138 | 1 | self.current_path = Path(kwargs.get("current_path", [])) |
|
139 | 1 | self.failover_path = Path(kwargs.get("failover_path", [])) |
|
140 | 1 | self.primary_path = Path(kwargs.get("primary_path", [])) |
|
141 | 1 | self.backup_path = Path(kwargs.get("backup_path", [])) |
|
142 | 1 | self.dynamic_backup_path = kwargs.get("dynamic_backup_path", False) |
|
143 | 1 | self.primary_constraints = kwargs.get("primary_constraints", {}) |
|
144 | 1 | self.secondary_constraints = kwargs.get("secondary_constraints", {}) |
|
145 | 1 | self.creation_time = get_time(kwargs.get("creation_time")) or now() |
|
146 | 1 | self.owner = kwargs.get("owner", None) |
|
147 | 1 | self.sb_priority = kwargs.get("sb_priority", None) or kwargs.get( |
|
148 | "priority", None |
||
149 | ) |
||
150 | 1 | self.service_level = kwargs.get("service_level", 0) |
|
151 | 1 | self.circuit_scheduler = kwargs.get("circuit_scheduler", []) |
|
152 | 1 | self.flow_removed_at = get_time(kwargs.get("flow_removed_at")) or None |
|
153 | 1 | self.updated_at = get_time(kwargs.get("updated_at")) or now() |
|
154 | 1 | self.execution_rounds = kwargs.get("execution_rounds", 0) |
|
155 | 1 | self.current_links_cache = set() |
|
156 | 1 | self.primary_links_cache = set() |
|
157 | 1 | self.backup_links_cache = set() |
|
158 | 1 | self.affected_by_link_at = get_time("0001-01-01T00:00:00") |
|
159 | 1 | self.old_path = Path([]) |
|
160 | |||
161 | 1 | self.lock = Lock() |
|
162 | |||
163 | 1 | self.archived = kwargs.get("archived", False) |
|
164 | |||
165 | 1 | self.metadata = kwargs.get("metadata", {}) |
|
166 | |||
167 | 1 | self._mongo_controller = controllers.ELineController() |
|
168 | |||
169 | 1 | if kwargs.get("active", False): |
|
170 | 1 | self.activate() |
|
171 | else: |
||
172 | 1 | self.deactivate() |
|
173 | |||
174 | 1 | if kwargs.get("enabled", False): |
|
175 | 1 | self.enable() |
|
176 | else: |
||
177 | 1 | self.disable() |
|
178 | |||
179 | # datetime of user request for a EVC (or datetime when object was |
||
180 | # created) |
||
181 | 1 | self.request_time = kwargs.get("request_time", now()) |
|
182 | # dict with the user original request (input) |
||
183 | 1 | self._requested = kwargs |
|
184 | |||
185 | # Special cases: No tag, any, untagged |
||
186 | 1 | self.special_cases = {None, "4096/4096", 0} |
|
187 | 1 | self.table_group = kwargs.get("table_group") |
|
188 | |||
189 | 1 | def sync(self, keys: set = None): |
|
190 | """Sync this EVC in the MongoDB.""" |
||
191 | 1 | self.updated_at = now() |
|
192 | 1 | if keys: |
|
193 | 1 | self._mongo_controller.update_evc(self.as_dict(keys)) |
|
194 | 1 | return |
|
195 | 1 | self._mongo_controller.upsert_evc(self.as_dict()) |
|
196 | |||
197 | 1 | def _get_unis_use_tags(self, **kwargs) -> tuple[UNI, UNI]: |
|
198 | """Obtain both UNIs (uni_a, uni_z). |
||
199 | If a UNI is changing, verify tags""" |
||
200 | 1 | uni_a = kwargs.get("uni_a", None) |
|
201 | 1 | uni_a_flag = False |
|
202 | 1 | if uni_a and uni_a != self.uni_a: |
|
203 | 1 | uni_a_flag = True |
|
204 | 1 | self._use_uni_vlan(uni_a, uni_dif=self.uni_a) |
|
205 | |||
206 | 1 | uni_z = kwargs.get("uni_z", None) |
|
207 | 1 | if uni_z and uni_z != self.uni_z: |
|
208 | 1 | try: |
|
209 | 1 | self._use_uni_vlan(uni_z, uni_dif=self.uni_z) |
|
210 | 1 | self.make_uni_vlan_available(self.uni_z, uni_dif=uni_z) |
|
211 | 1 | except KytosTagError as err: |
|
212 | 1 | if uni_a_flag: |
|
213 | 1 | self.make_uni_vlan_available(uni_a, uni_dif=self.uni_a) |
|
214 | 1 | raise err |
|
215 | else: |
||
216 | 1 | uni_z = self.uni_z |
|
217 | |||
218 | 1 | if uni_a_flag: |
|
219 | 1 | self.make_uni_vlan_available(self.uni_a, uni_dif=uni_a) |
|
220 | else: |
||
221 | 1 | uni_a = self.uni_a |
|
222 | 1 | return uni_a, uni_z |
|
223 | |||
224 | 1 | def update(self, **kwargs): |
|
225 | """Update evc attributes. |
||
226 | |||
227 | This method raises an error when trying to change the following |
||
228 | attributes: [creation_time, active, current_path, failover_path, |
||
229 | _id, archived] |
||
230 | [name, uni_a and uni_z] |
||
231 | |||
232 | Returns: |
||
233 | the values for the enable and redeploy attributes, if they exist, |
||
234 | and None otherwise |
||
235 | Raises: |
||
236 | ValueError: message with error detail. |
||
237 | |||
238 | """ |
||
239 | 1 | enable, redeploy = (None, None) |
|
240 | 1 | if not self._tag_lists_equal(**kwargs): |
|
241 | 1 | raise ValueError( |
|
242 | "UNI_A and UNI_Z tag lists should be the same." |
||
243 | ) |
||
244 | 1 | uni_a, uni_z = self._get_unis_use_tags(**kwargs) |
|
245 | 1 | check_disabled_component(uni_a, uni_z) |
|
246 | 1 | self._validate_has_primary_or_dynamic( |
|
247 | primary_path=kwargs.get("primary_path"), |
||
248 | dynamic_backup_path=kwargs.get("dynamic_backup_path"), |
||
249 | uni_a=uni_a, |
||
250 | uni_z=uni_z, |
||
251 | ) |
||
252 | 1 | for attribute, value in kwargs.items(): |
|
253 | 1 | if attribute not in self.updatable_attributes: |
|
254 | 1 | raise ValueError(f"{attribute} can't be updated.") |
|
255 | 1 | if attribute in ("primary_path", "backup_path"): |
|
256 | 1 | try: |
|
257 | 1 | value.is_valid( |
|
258 | uni_a.interface.switch, uni_z.interface.switch |
||
259 | ) |
||
260 | 1 | except InvalidPath as exception: |
|
261 | 1 | raise ValueError( # pylint: disable=raise-missing-from |
|
262 | f"{attribute} is not a " f"valid path: {exception}" |
||
263 | ) |
||
264 | 1 | for attribute, value in kwargs.items(): |
|
265 | 1 | if attribute == "enabled": |
|
266 | 1 | if value: |
|
267 | 1 | self.enable() |
|
268 | else: |
||
269 | 1 | self.disable() |
|
270 | 1 | enable = value |
|
271 | else: |
||
272 | 1 | setattr(self, attribute, value) |
|
273 | 1 | if attribute in self.attributes_requiring_redeploy: |
|
274 | 1 | redeploy = True |
|
275 | 1 | self.sync(set(kwargs.keys())) |
|
276 | 1 | return enable, redeploy |
|
277 | |||
278 | 1 | def set_flow_removed_at(self): |
|
279 | """Update flow_removed_at attribute.""" |
||
280 | self.flow_removed_at = now() |
||
281 | |||
282 | 1 | def has_recent_removed_flow(self, setting=settings): |
|
283 | """Check if any flow has been removed from the evc""" |
||
284 | if self.flow_removed_at is None: |
||
285 | return False |
||
286 | res_seconds = (now() - self.flow_removed_at).seconds |
||
287 | return res_seconds < setting.TIME_RECENT_DELETED_FLOWS |
||
288 | |||
289 | 1 | def is_recent_updated(self, setting=settings): |
|
290 | """Check if the evc has been updated recently""" |
||
291 | res_seconds = (now() - self.updated_at).seconds |
||
292 | return res_seconds < setting.TIME_RECENT_UPDATED |
||
293 | |||
294 | 1 | def __repr__(self): |
|
295 | """Repr method.""" |
||
296 | 1 | return f"EVC({self._id}, {self.name})" |
|
297 | |||
298 | 1 | def _validate(self, **kwargs): |
|
299 | """Do Basic validations. |
||
300 | |||
301 | Verify required attributes: name, uni_a, uni_z |
||
302 | |||
303 | Raises: |
||
304 | ValueError: message with error detail. |
||
305 | |||
306 | """ |
||
307 | 1 | for attribute in self.required_attributes: |
|
308 | |||
309 | 1 | if attribute not in kwargs: |
|
310 | 1 | raise ValueError(f"{attribute} is required.") |
|
311 | |||
312 | 1 | if "uni" in attribute: |
|
313 | 1 | uni = kwargs.get(attribute) |
|
314 | 1 | if not isinstance(uni, UNI): |
|
315 | raise ValueError(f"{attribute} is an invalid UNI.") |
||
316 | |||
317 | 1 | def _tag_lists_equal(self, **kwargs): |
|
318 | """Verify that tag lists are the same.""" |
||
319 | 1 | uni_a = kwargs.get("uni_a") or self.uni_a |
|
320 | 1 | uni_z = kwargs.get("uni_z") or self.uni_z |
|
321 | 1 | uni_a_list = uni_z_list = False |
|
322 | 1 | if (uni_a.user_tag and isinstance(uni_a.user_tag, TAGRange)): |
|
323 | 1 | uni_a_list = True |
|
324 | 1 | if (uni_z.user_tag and isinstance(uni_z.user_tag, TAGRange)): |
|
325 | 1 | uni_z_list = True |
|
326 | 1 | if uni_a_list and uni_z_list: |
|
327 | 1 | return uni_a.user_tag.value == uni_z.user_tag.value |
|
328 | 1 | return uni_a_list == uni_z_list |
|
329 | |||
330 | 1 | def _validate_has_primary_or_dynamic( |
|
331 | self, |
||
332 | primary_path=None, |
||
333 | dynamic_backup_path=None, |
||
334 | uni_a=None, |
||
335 | uni_z=None, |
||
336 | ) -> None: |
||
337 | """Validate that it must have a primary path or allow dynamic paths.""" |
||
338 | 1 | primary_path = ( |
|
339 | primary_path |
||
340 | if primary_path is not None |
||
341 | else self.primary_path |
||
342 | ) |
||
343 | 1 | dynamic_backup_path = ( |
|
344 | dynamic_backup_path |
||
345 | if dynamic_backup_path is not None |
||
346 | else self.dynamic_backup_path |
||
347 | ) |
||
348 | 1 | uni_a = uni_a if uni_a is not None else self.uni_a |
|
349 | 1 | uni_z = uni_z if uni_z is not None else self.uni_z |
|
350 | 1 | if ( |
|
351 | not primary_path |
||
352 | and not dynamic_backup_path |
||
353 | and uni_a and uni_z |
||
354 | and uni_a.interface.switch != uni_z.interface.switch |
||
355 | ): |
||
356 | 1 | msg = "The EVC must have a primary path or allow dynamic paths." |
|
357 | 1 | raise ValueError(msg) |
|
358 | |||
359 | 1 | def __eq__(self, other): |
|
360 | """Override the default implementation.""" |
||
361 | 1 | if not isinstance(other, EVC): |
|
362 | return False |
||
363 | |||
364 | 1 | attrs_to_compare = ["name", "uni_a", "uni_z", "owner", "bandwidth"] |
|
365 | 1 | for attribute in attrs_to_compare: |
|
366 | 1 | if getattr(other, attribute) != getattr(self, attribute): |
|
367 | 1 | return False |
|
368 | 1 | return True |
|
369 | |||
370 | 1 | def is_intra_switch(self): |
|
371 | """Check if the UNIs are in the same switch.""" |
||
372 | 1 | return self.uni_a.interface.switch == self.uni_z.interface.switch |
|
373 | |||
374 | 1 | def check_no_tag_duplicate(self, other_uni: UNI): |
|
375 | """Check if a no tag UNI is duplicated.""" |
||
376 | 1 | if other_uni in (self.uni_a, self.uni_z): |
|
377 | 1 | msg = f"UNI with interface {other_uni.interface.id} is"\ |
|
378 | f" duplicated with {self}." |
||
379 | 1 | raise DuplicatedNoTagUNI(msg) |
|
380 | |||
381 | 1 | def as_dict(self, keys: set = None): |
|
382 | """Return a dictionary representing an EVC object. |
||
383 | keys: Only fields on this variable will be |
||
384 | returned in the dictionary""" |
||
385 | 1 | evc_dict = { |
|
386 | "id": self.id, |
||
387 | "name": self.name, |
||
388 | "uni_a": self.uni_a.as_dict(), |
||
389 | "uni_z": self.uni_z.as_dict(), |
||
390 | } |
||
391 | |||
392 | 1 | time_fmt = "%Y-%m-%dT%H:%M:%S" |
|
393 | |||
394 | 1 | evc_dict["start_date"] = self.start_date |
|
395 | 1 | if isinstance(self.start_date, datetime): |
|
396 | 1 | evc_dict["start_date"] = self.start_date.strftime(time_fmt) |
|
397 | |||
398 | 1 | evc_dict["end_date"] = self.end_date |
|
399 | 1 | if isinstance(self.end_date, datetime): |
|
400 | 1 | evc_dict["end_date"] = self.end_date.strftime(time_fmt) |
|
401 | |||
402 | 1 | evc_dict["queue_id"] = self.queue_id |
|
403 | 1 | evc_dict["bandwidth"] = self.bandwidth |
|
404 | 1 | evc_dict["primary_links"] = self.primary_links.as_dict() |
|
405 | 1 | evc_dict["backup_links"] = self.backup_links.as_dict() |
|
406 | 1 | evc_dict["current_path"] = self.current_path.as_dict() |
|
407 | 1 | evc_dict["failover_path"] = self.failover_path.as_dict() |
|
408 | 1 | evc_dict["primary_path"] = self.primary_path.as_dict() |
|
409 | 1 | evc_dict["backup_path"] = self.backup_path.as_dict() |
|
410 | 1 | evc_dict["dynamic_backup_path"] = self.dynamic_backup_path |
|
411 | 1 | evc_dict["metadata"] = self.metadata |
|
412 | |||
413 | 1 | evc_dict["request_time"] = self.request_time |
|
414 | 1 | if isinstance(self.request_time, datetime): |
|
415 | 1 | evc_dict["request_time"] = self.request_time.strftime(time_fmt) |
|
416 | |||
417 | 1 | time = self.creation_time.strftime(time_fmt) |
|
418 | 1 | evc_dict["creation_time"] = time |
|
419 | |||
420 | 1 | evc_dict["owner"] = self.owner |
|
421 | 1 | evc_dict["circuit_scheduler"] = [ |
|
422 | sc.as_dict() for sc in self.circuit_scheduler |
||
423 | ] |
||
424 | |||
425 | 1 | evc_dict["active"] = self.is_active() |
|
426 | 1 | evc_dict["enabled"] = self.is_enabled() |
|
427 | 1 | evc_dict["archived"] = self.archived |
|
428 | 1 | evc_dict["sb_priority"] = self.sb_priority |
|
429 | 1 | evc_dict["service_level"] = self.service_level |
|
430 | 1 | evc_dict["primary_constraints"] = self.primary_constraints |
|
431 | 1 | evc_dict["secondary_constraints"] = self.secondary_constraints |
|
432 | 1 | evc_dict["flow_removed_at"] = self.flow_removed_at |
|
433 | 1 | evc_dict["updated_at"] = self.updated_at |
|
434 | |||
435 | 1 | if keys: |
|
436 | 1 | selected = {} |
|
437 | 1 | for key in keys: |
|
438 | 1 | selected[key] = evc_dict[key] |
|
439 | 1 | selected["id"] = evc_dict["id"] |
|
440 | 1 | return selected |
|
441 | 1 | return evc_dict |
|
442 | |||
443 | 1 | @property |
|
444 | 1 | def id(self): # pylint: disable=invalid-name |
|
445 | """Return this EVC's ID.""" |
||
446 | 1 | return self._id |
|
447 | |||
448 | 1 | def archive(self): |
|
449 | """Archive this EVC on deletion.""" |
||
450 | 1 | self.archived = True |
|
451 | |||
452 | 1 | def _use_uni_vlan( |
|
453 | self, |
||
454 | uni: UNI, |
||
455 | uni_dif: Union[None, UNI] = None |
||
456 | ): |
||
457 | """Use tags from UNI""" |
||
458 | 1 | if uni.user_tag is None: |
|
459 | 1 | return |
|
460 | 1 | tag = uni.user_tag.value |
|
461 | 1 | tag_type = uni.user_tag.tag_type |
|
462 | 1 | if (uni_dif and isinstance(tag, list) and |
|
463 | isinstance(uni_dif.user_tag.value, list)): |
||
464 | 1 | tag = range_difference(tag, uni_dif.user_tag.value) |
|
465 | 1 | if not tag: |
|
466 | 1 | return |
|
467 | 1 | uni.interface.use_tags( |
|
468 | self._controller, tag, tag_type, use_lock=True, check_order=False |
||
469 | ) |
||
470 | |||
471 | 1 | def make_uni_vlan_available( |
|
472 | self, |
||
473 | uni: UNI, |
||
474 | uni_dif: Union[None, UNI] = None, |
||
475 | ): |
||
476 | """Make available tag from UNI""" |
||
477 | 1 | if uni.user_tag is None: |
|
478 | 1 | return |
|
479 | 1 | tag = uni.user_tag.value |
|
480 | 1 | tag_type = uni.user_tag.tag_type |
|
481 | 1 | if (uni_dif and isinstance(tag, list) and |
|
482 | isinstance(uni_dif.user_tag.value, list)): |
||
483 | 1 | tag = range_difference(tag, uni_dif.user_tag.value) |
|
484 | 1 | if not tag: |
|
485 | return |
||
486 | 1 | try: |
|
487 | 1 | conflict = uni.interface.make_tags_available( |
|
488 | self._controller, tag, tag_type, use_lock=True, |
||
489 | check_order=False |
||
490 | ) |
||
491 | 1 | except KytosTagError as err: |
|
492 | 1 | log.error(f"Error in {self}: {err}") |
|
493 | 1 | return |
|
494 | 1 | if conflict: |
|
495 | 1 | intf = uni.interface.id |
|
496 | 1 | log.warning(f"Tags {conflict} were already available in {intf}") |
|
497 | |||
498 | 1 | def remove_uni_tags(self): |
|
499 | """Remove both UNI usage of a tag""" |
||
500 | 1 | self.make_uni_vlan_available(self.uni_a) |
|
501 | 1 | self.make_uni_vlan_available(self.uni_z) |
|
502 | |||
503 | |||
504 | # pylint: disable=fixme, too-many-public-methods |
||
505 | 1 | class EVCDeploy(EVCBase): |
|
506 | """Class to handle the deploy procedures.""" |
||
507 | |||
508 | 1 | def create(self): |
|
509 | """Create a EVC.""" |
||
510 | |||
511 | 1 | def discover_new_paths(self): |
|
512 | """Discover new paths to satisfy this circuit and deploy it.""" |
||
513 | return DynamicPathManager.get_best_paths(self, |
||
514 | **self.primary_constraints) |
||
515 | |||
516 | 1 | def get_failover_path_candidates(self): |
|
517 | """Get failover paths to satisfy this EVC.""" |
||
518 | # in the future we can return primary/backup paths as well |
||
519 | # we just have to properly handle link_up and failover paths |
||
520 | # if ( |
||
521 | # self.is_using_primary_path() and |
||
522 | # self.backup_path.status is EntityStatus.UP |
||
523 | # ): |
||
524 | # yield self.backup_path |
||
525 | 1 | return DynamicPathManager.get_disjoint_paths(self, self.current_path) |
|
526 | |||
527 | 1 | def change_path(self): |
|
528 | """Change EVC path.""" |
||
529 | |||
530 | 1 | def reprovision(self): |
|
531 | """Force the EVC (re-)provisioning.""" |
||
532 | |||
533 | 1 | def is_affected_by_link(self, link): |
|
534 | """Return True if this EVC has the given link on its current path.""" |
||
535 | 1 | return link in self.current_path |
|
536 | |||
537 | 1 | def link_affected_by_interface(self, interface): |
|
538 | """Return True if this EVC has the given link on its current path.""" |
||
539 | return self.current_path.link_affected_by_interface(interface) |
||
540 | |||
541 | 1 | def is_backup_path_affected_by_link(self, link): |
|
542 | """Return True if the backup path of this EVC uses the given link.""" |
||
543 | 1 | return link in self.backup_path |
|
544 | |||
545 | # pylint: disable=invalid-name |
||
546 | 1 | def is_primary_path_affected_by_link(self, link): |
|
547 | """Return True if the primary path of this EVC uses the given link.""" |
||
548 | 1 | return link in self.primary_path |
|
549 | |||
550 | 1 | def is_failover_path_affected_by_link(self, link): |
|
551 | """Return True if this EVC has the given link on its failover path.""" |
||
552 | 1 | return link in self.failover_path |
|
553 | |||
554 | 1 | def is_eligible_for_failover_path(self): |
|
555 | """Verify if this EVC is eligible for failover path (EP029)""" |
||
556 | # In the future this function can be augmented to consider |
||
557 | # primary/backup, primary/dynamic, and other path combinations |
||
558 | 1 | return ( |
|
559 | self.dynamic_backup_path and |
||
560 | not self.primary_path and not self.backup_path |
||
561 | ) |
||
562 | |||
563 | 1 | def is_using_primary_path(self): |
|
564 | """Verify if the current deployed path is self.primary_path.""" |
||
565 | 1 | return self.primary_path and (self.current_path == self.primary_path) |
|
566 | |||
567 | 1 | def is_using_backup_path(self): |
|
568 | """Verify if the current deployed path is self.backup_path.""" |
||
569 | 1 | return self.backup_path and (self.current_path == self.backup_path) |
|
570 | |||
571 | 1 | def is_using_dynamic_path(self): |
|
572 | """Verify if the current deployed path is a dynamic path.""" |
||
573 | 1 | if ( |
|
574 | self.current_path |
||
575 | and not self.is_using_primary_path() |
||
576 | and not self.is_using_backup_path() |
||
577 | and self.current_path.status == EntityStatus.UP |
||
578 | ): |
||
579 | return True |
||
580 | 1 | return False |
|
581 | |||
582 | 1 | def deploy_to_backup_path(self): |
|
583 | """Deploy the backup path into the datapaths of this circuit. |
||
584 | |||
585 | If the backup_path attribute is valid and up, this method will try to |
||
586 | deploy this backup_path. |
||
587 | |||
588 | If everything fails and dynamic_backup_path is True, then tries to |
||
589 | deploy a dynamic path. |
||
590 | """ |
||
591 | # TODO: Remove flows from current (cookies) |
||
592 | 1 | if self.is_using_backup_path(): |
|
593 | # TODO: Log to say that cannot move backup to backup |
||
594 | return True |
||
595 | |||
596 | 1 | success = False |
|
597 | 1 | if self.backup_path.status is EntityStatus.UP: |
|
598 | 1 | success = self.deploy_to_path(self.backup_path) |
|
599 | |||
600 | 1 | if success: |
|
601 | 1 | return True |
|
602 | |||
603 | 1 | if self.dynamic_backup_path or self.is_intra_switch(): |
|
604 | 1 | return self.deploy_to_path() |
|
605 | |||
606 | return False |
||
607 | |||
608 | 1 | def deploy_to_primary_path(self): |
|
609 | """Deploy the primary path into the datapaths of this circuit. |
||
610 | |||
611 | If the primary_path attribute is valid and up, this method will try to |
||
612 | deploy this primary_path. |
||
613 | """ |
||
614 | # TODO: Remove flows from current (cookies) |
||
615 | 1 | if self.is_using_primary_path(): |
|
616 | # TODO: Log to say that cannot move primary to primary |
||
617 | return True |
||
618 | |||
619 | 1 | if self.primary_path.status is EntityStatus.UP: |
|
620 | 1 | return self.deploy_to_path(self.primary_path) |
|
621 | 1 | return False |
|
622 | |||
623 | 1 | def deploy(self): |
|
624 | """Deploy EVC to best path. |
||
625 | |||
626 | Best path can be the primary path, if available. If not, the backup |
||
627 | path, and, if it is also not available, a dynamic path. |
||
628 | """ |
||
629 | 1 | if self.archived: |
|
630 | 1 | return False |
|
631 | 1 | self.enable() |
|
632 | 1 | success = self.deploy_to_primary_path() |
|
633 | 1 | if not success: |
|
634 | 1 | success = self.deploy_to_backup_path() |
|
635 | |||
636 | 1 | if success: |
|
637 | 1 | emit_event(self._controller, "deployed", |
|
638 | content=map_evc_event_content(self)) |
||
639 | 1 | return success |
|
640 | |||
641 | 1 | @staticmethod |
|
642 | 1 | def get_path_status(path): |
|
643 | """Check for the current status of a path. |
||
644 | |||
645 | If any link in this path is down, the path is considered down. |
||
646 | """ |
||
647 | 1 | if not path: |
|
648 | 1 | return EntityStatus.DISABLED |
|
649 | |||
650 | 1 | for link in path: |
|
651 | 1 | if link.status is not EntityStatus.UP: |
|
652 | 1 | return link.status |
|
653 | 1 | return EntityStatus.UP |
|
654 | |||
655 | # def discover_new_path(self): |
||
656 | # # TODO: discover a new path to satisfy this circuit and deploy |
||
657 | |||
658 | 1 | def remove(self): |
|
659 | """Remove EVC path and disable it.""" |
||
660 | 1 | self.remove_current_flows(sync=False) |
|
661 | 1 | self.remove_failover_flows(sync=False) |
|
662 | 1 | self.disable() |
|
663 | 1 | self.sync() |
|
664 | 1 | emit_event(self._controller, "undeployed", |
|
665 | content=map_evc_event_content(self)) |
||
666 | |||
667 | 1 | def remove_failover_flows(self, exclude_uni_switches=True, |
|
668 | force=True, sync=True) -> None: |
||
669 | """Remove failover_flows. |
||
670 | |||
671 | By default, it excludes UNI switches; if mef_eline has already |
||
672 | called remove_current_flows before, this minimizes the number |
||
673 | of FlowMods and IO. |
||
674 | """ |
||
675 | 1 | if not self.failover_path: |
|
676 | 1 | return |
|
677 | 1 | switches, cookie, excluded = set(), self.get_cookie(), set() |
|
678 | 1 | if exclude_uni_switches: |
|
679 | 1 | excluded.add(self.uni_a.interface.switch.id) |
|
680 | 1 | excluded.add(self.uni_z.interface.switch.id) |
|
681 | 1 | for link in self.failover_path: |
|
682 | 1 | if link.endpoint_a.switch.id not in excluded: |
|
683 | 1 | switches.add(link.endpoint_a.switch.id) |
|
684 | 1 | if link.endpoint_b.switch.id not in excluded: |
|
685 | 1 | switches.add(link.endpoint_b.switch.id) |
|
686 | 1 | flow_mods = { |
|
687 | "switches": list(switches), |
||
688 | "flows": [{ |
||
689 | "cookie": cookie, |
||
690 | "cookie_mask": int(0xffffffffffffffff), |
||
691 | "owner": "mef_eline", |
||
692 | }] |
||
693 | } |
||
694 | 1 | try: |
|
695 | 1 | self._send_flow_mods( |
|
696 | flow_mods, |
||
697 | "delete", |
||
698 | force=force, |
||
699 | ) |
||
700 | except FlowModException as err: |
||
701 | log.error(f"Error deleting {self} failover_path flows, {err}") |
||
702 | 1 | try: |
|
703 | 1 | self.failover_path.make_vlans_available(self._controller) |
|
704 | except KytosTagError as err: |
||
705 | log.error(f"Error removing {self} failover_path: {err}") |
||
706 | 1 | self.failover_path = Path([]) |
|
707 | 1 | if sync: |
|
708 | 1 | self.sync() |
|
709 | |||
710 | 1 | def remove_current_flows(self, force=True, sync=True): |
|
711 | """Remove all flows from current path or path intended for |
||
712 | the current path, if it exists.""" |
||
713 | 1 | switches = set() |
|
714 | |||
715 | 1 | if not self.current_path and not self.is_intra_switch(): |
|
716 | 1 | return |
|
717 | 1 | current_path = self.current_path |
|
718 | 1 | for link in current_path: |
|
719 | 1 | switches.add(link.endpoint_a.switch.id) |
|
720 | 1 | switches.add(link.endpoint_b.switch.id) |
|
721 | 1 | switches.add(self.uni_a.interface.switch.id) |
|
722 | 1 | switches.add(self.uni_z.interface.switch.id) |
|
723 | 1 | flow_mods = { |
|
724 | "switches": list(switches), |
||
725 | "flows": [{ |
||
726 | "cookie": self.get_cookie(), |
||
727 | "cookie_mask": int(0xffffffffffffffff), |
||
728 | "owner": "mef_eline", |
||
729 | }] |
||
730 | } |
||
731 | |||
732 | 1 | try: |
|
733 | 1 | self._send_flow_mods(flow_mods, "delete", force=force) |
|
734 | 1 | except FlowModException as err: |
|
735 | 1 | log.error(f"Error deleting {self} current_path flows, {err}") |
|
736 | |||
737 | 1 | try: |
|
738 | 1 | current_path.make_vlans_available(self._controller) |
|
739 | except KytosTagError as err: |
||
740 | log.error(f"Error removing {self} current_path: {err}") |
||
741 | 1 | self.current_path = Path([]) |
|
742 | 1 | self.deactivate() |
|
743 | 1 | if sync: |
|
744 | 1 | self.sync() |
|
745 | |||
746 | 1 | def remove_path_flows( |
|
747 | self, path=None, force=True |
||
748 | ) -> dict[str, list[dict]]: |
||
749 | """Remove all flows from path, and return the removed flows.""" |
||
750 | 1 | dpid_flows_match: dict[str, dict] = defaultdict(lambda: {"flows": []}) |
|
751 | 1 | out_flows: dict[str, list[dict]] = defaultdict(list) |
|
752 | |||
753 | 1 | if not path: |
|
754 | 1 | return dpid_flows_match |
|
755 | |||
756 | 1 | try: |
|
757 | 1 | nni_flows = self._prepare_nni_flows(path) |
|
758 | # pylint: disable=broad-except |
||
759 | except Exception: |
||
760 | err = traceback.format_exc().replace("\n", ", ") |
||
761 | log.error(f"Fail to remove NNI failover flows for {self}: {err}") |
||
762 | nni_flows = {} |
||
763 | |||
764 | 1 | for dpid, flows in nni_flows.items(): |
|
765 | 1 | for flow in flows: |
|
766 | 1 | flow_mod = { |
|
767 | "cookie": flow["cookie"], |
||
768 | "match": flow["match"], |
||
769 | "owner": "mef_eline", |
||
770 | "cookie_mask": int(0xffffffffffffffff) |
||
771 | } |
||
772 | 1 | dpid_flows_match[dpid]["flows"].append(flow_mod) |
|
773 | 1 | out_flows[dpid].append(flow_mod) |
|
774 | |||
775 | 1 | try: |
|
776 | 1 | uni_flows = self._prepare_uni_flows(path, skip_in=True) |
|
777 | # pylint: disable=broad-except |
||
778 | except Exception: |
||
779 | err = traceback.format_exc().replace("\n", ", ") |
||
780 | log.error(f"Fail to remove UNI failover flows for {self}: {err}") |
||
781 | uni_flows = {} |
||
782 | |||
783 | 1 | for dpid, flows in uni_flows.items(): |
|
784 | 1 | for flow in flows: |
|
785 | 1 | flow_mod = { |
|
786 | "cookie": flow["cookie"], |
||
787 | "match": flow["match"], |
||
788 | "owner": "mef_eline", |
||
789 | "cookie_mask": int(0xffffffffffffffff) |
||
790 | } |
||
791 | 1 | dpid_flows_match[dpid]["flows"].append(flow_mod) |
|
792 | 1 | out_flows[dpid].append(flow_mod) |
|
793 | |||
794 | 1 | try: |
|
795 | 1 | self._send_flow_mods( |
|
796 | dpid_flows_match, 'delete', force=force, by_switch=True |
||
797 | ) |
||
798 | 1 | except FlowModException as err: |
|
799 | 1 | log.error( |
|
800 | f"Error deleting {self} path flows, path:{path}, error={err}" |
||
801 | ) |
||
802 | |||
803 | 1 | try: |
|
804 | 1 | path.make_vlans_available(self._controller) |
|
805 | except KytosTagError as err: |
||
806 | log.error(f"Error removing {self} path: {err}") |
||
807 | |||
808 | 1 | return out_flows |
|
809 | |||
810 | 1 | @staticmethod |
|
811 | 1 | def links_zipped(path=None): |
|
812 | """Return an iterator which yields pairs of links in order.""" |
||
813 | 1 | if not path: |
|
814 | 1 | return [] |
|
815 | 1 | return zip(path[:-1], path[1:]) |
|
816 | |||
817 | 1 | def should_deploy(self, path=None): |
|
818 | """Verify if the circuit should be deployed.""" |
||
819 | 1 | if not path: |
|
820 | 1 | log.debug("Path is empty.") |
|
821 | 1 | return False |
|
822 | |||
823 | 1 | if not self.is_enabled(): |
|
824 | 1 | log.debug(f"{self} is disabled.") |
|
825 | 1 | return False |
|
826 | |||
827 | 1 | if not self.is_active(): |
|
828 | 1 | log.debug(f"{self} will be deployed.") |
|
829 | 1 | return True |
|
830 | |||
831 | 1 | return False |
|
832 | |||
833 | # pylint: disable=too-many-branches, too-many-statements |
||
834 | 1 | def deploy_to_path(self, path=None): |
|
835 | """Install the flows for this circuit. |
||
836 | |||
837 | Procedures to deploy: |
||
838 | |||
839 | 0. Remove current flows installed |
||
840 | 1. Decide if will deploy "path" or discover a new path |
||
841 | 2. Choose vlan |
||
842 | 3. Install NNI flows |
||
843 | 4. Install UNI flows |
||
844 | 5. Activate |
||
845 | 6. Update current_path |
||
846 | 7. Update links caches(primary, current, backup) |
||
847 | |||
848 | """ |
||
849 | 1 | self.remove_current_flows(sync=False) |
|
850 | 1 | use_path = path or Path([]) |
|
851 | 1 | tag_errors = [] |
|
852 | 1 | if self.should_deploy(use_path): |
|
853 | 1 | try: |
|
854 | 1 | use_path.choose_vlans(self._controller) |
|
855 | 1 | except KytosNoTagAvailableError as e: |
|
856 | 1 | tag_errors.append(str(e)) |
|
857 | 1 | use_path = None |
|
858 | else: |
||
859 | 1 | for use_path in self.discover_new_paths(): |
|
860 | 1 | if use_path is None: |
|
861 | continue |
||
862 | 1 | try: |
|
863 | 1 | use_path.choose_vlans(self._controller) |
|
864 | 1 | break |
|
865 | 1 | except KytosNoTagAvailableError as e: |
|
866 | 1 | tag_errors.append(str(e)) |
|
867 | else: |
||
868 | 1 | use_path = None |
|
869 | |||
870 | 1 | try: |
|
871 | 1 | if use_path: |
|
872 | 1 | self._install_flows(use_path) |
|
873 | 1 | elif self.is_intra_switch(): |
|
874 | 1 | use_path = Path() |
|
875 | 1 | self._install_direct_uni_flows() |
|
876 | else: |
||
877 | 1 | msg = f"{self} was not deployed. No available path was found." |
|
878 | 1 | if tag_errors: |
|
879 | 1 | msg = self.add_tag_errors(msg, tag_errors) |
|
880 | 1 | log.error(msg) |
|
881 | else: |
||
882 | 1 | log.warning(msg) |
|
883 | 1 | return False |
|
884 | 1 | except EVCPathNotInstalled as err: |
|
885 | 1 | log.error( |
|
886 | f"Error deploying EVC {self} when calling flow_manager: {err}" |
||
887 | ) |
||
888 | 1 | self.remove_current_flows(use_path, sync=True) |
|
889 | 1 | return False |
|
890 | 1 | self.activate() |
|
891 | 1 | self.current_path = use_path |
|
892 | 1 | self.sync() |
|
893 | 1 | log.info(f"{self} was deployed.") |
|
894 | 1 | return True |
|
895 | |||
896 | 1 | def try_setup_failover_path(self, wait=settings.DEPLOY_EVCS_INTERVAL): |
|
897 | """Try setup failover_path whenever possible.""" |
||
898 | 1 | if ( |
|
899 | self.failover_path or not self.current_path |
||
900 | or not self.is_active() |
||
901 | ): |
||
902 | 1 | return |
|
903 | 1 | if (now() - self.affected_by_link_at).seconds >= wait: |
|
904 | 1 | with self.lock: |
|
905 | 1 | self.setup_failover_path() |
|
906 | |||
907 | # pylint: disable=too-many-statements |
||
908 | 1 | def setup_failover_path(self): |
|
909 | """Install flows for the failover path of this EVC. |
||
910 | |||
911 | Procedures to deploy: |
||
912 | |||
913 | 0. Remove flows currently installed for failover_path (if any) |
||
914 | 1. Discover a disjoint path from current_path |
||
915 | 2. Choose vlans |
||
916 | 3. Install NNI flows |
||
917 | 4. Install UNI egress flows |
||
918 | 5. Update failover_path |
||
919 | """ |
||
920 | # Intra-switch EVCs have no failover_path |
||
921 | 1 | if self.is_intra_switch(): |
|
922 | 1 | return False |
|
923 | |||
924 | # For now, only setup failover_path for totally dynamic EVCs |
||
925 | 1 | if not self.is_eligible_for_failover_path(): |
|
926 | 1 | return False |
|
927 | |||
928 | 1 | out_new_flows: dict[str, list[dict]] = {} |
|
929 | 1 | reason = "" |
|
930 | 1 | tag_errors = [] |
|
931 | 1 | out_removed_flows = self.remove_path_flows(self.failover_path) |
|
932 | 1 | self.failover_path = Path([]) |
|
933 | |||
934 | 1 | for use_path in self.get_failover_path_candidates(): |
|
935 | 1 | if not use_path: |
|
936 | 1 | continue |
|
937 | 1 | try: |
|
938 | 1 | use_path.choose_vlans(self._controller) |
|
939 | 1 | break |
|
940 | 1 | except KytosNoTagAvailableError as e: |
|
941 | 1 | tag_errors.append(str(e)) |
|
942 | else: |
||
943 | 1 | use_path = Path([]) |
|
944 | 1 | reason = "No available path was found" |
|
945 | |||
946 | 1 | try: |
|
947 | 1 | if use_path: |
|
948 | 1 | out_new_flows = self._install_flows( |
|
949 | use_path, skip_in=True |
||
950 | ) |
||
951 | 1 | except EVCPathNotInstalled as err: |
|
952 | 1 | reason = "Error deploying failover path" |
|
953 | 1 | log.error( |
|
954 | f"{reason} for {self}. FlowManager error: {err}" |
||
955 | ) |
||
956 | 1 | _rmed_flows = self.remove_path_flows(use_path) |
|
957 | 1 | out_removed_flows = merge_flow_dicts( |
|
958 | out_removed_flows, _rmed_flows |
||
959 | ) |
||
960 | 1 | use_path = Path([]) |
|
961 | |||
962 | 1 | self.failover_path = use_path |
|
963 | 1 | self.sync() |
|
964 | |||
965 | 1 | if out_new_flows or out_removed_flows: |
|
966 | 1 | emit_event(self._controller, "failover_deployed", content={ |
|
967 | self.id: map_evc_event_content( |
||
968 | self, |
||
969 | flows=deepcopy(out_new_flows), |
||
970 | removed_flows=deepcopy(out_removed_flows), |
||
971 | error_reason=reason, |
||
972 | current_path=self.current_path.as_dict(), |
||
973 | ) |
||
974 | }) |
||
975 | |||
976 | 1 | if not use_path: |
|
977 | 1 | msg = f"Failover path for {self} was not deployed: {reason}." |
|
978 | 1 | if tag_errors: |
|
979 | 1 | msg = self.add_tag_errors(msg, tag_errors) |
|
980 | 1 | log.error(msg) |
|
981 | else: |
||
982 | 1 | log.warning(msg) |
|
983 | 1 | return False |
|
984 | 1 | log.info(f"Failover path for {self} was deployed.") |
|
985 | 1 | return True |
|
986 | |||
987 | 1 | @staticmethod |
|
988 | 1 | def add_tag_errors(msg: str, tag_errors: list): |
|
989 | """Add to msg the tag errors ecountered when chossing path.""" |
||
990 | 1 | path = ['path', 'paths'] |
|
991 | 1 | was = ['was', 'were'] |
|
992 | 1 | message = ['message', 'messages'] |
|
993 | |||
994 | # Choose either singular(0) or plural(1) words |
||
995 | 1 | n = 1 |
|
996 | 1 | if len(tag_errors) == 1: |
|
997 | 1 | n = 0 |
|
998 | |||
999 | 1 | msg += f" {len(tag_errors)} {path[n]} {was[n]} rejected" |
|
1000 | 1 | msg += f" with {message[n]}: {tag_errors}" |
|
1001 | 1 | return msg |
|
1002 | |||
1003 | 1 | def get_failover_flows(self): |
|
1004 | """Return the flows needed to make the failover path active, i.e. the |
||
1005 | flows for ingress forwarding. |
||
1006 | |||
1007 | Return: |
||
1008 | dict: A dict of flows indexed by the switch_id will be returned, or |
||
1009 | an empty dict if no failover_path is available. |
||
1010 | """ |
||
1011 | 1 | if not self.failover_path: |
|
1012 | 1 | return {} |
|
1013 | 1 | return self._prepare_uni_flows(self.failover_path, skip_out=True) |
|
1014 | |||
1015 | # pylint: disable=too-many-branches |
||
1016 | 1 | def _prepare_direct_uni_flows(self): |
|
1017 | """Prepare flows connecting two UNIs for intra-switch EVC.""" |
||
1018 | 1 | vlan_a = self._get_value_from_uni_tag(self.uni_a) |
|
1019 | 1 | vlan_z = self._get_value_from_uni_tag(self.uni_z) |
|
1020 | |||
1021 | 1 | flow_mod_az = self._prepare_flow_mod( |
|
1022 | self.uni_a.interface, self.uni_z.interface, |
||
1023 | self.queue_id, vlan_a |
||
1024 | ) |
||
1025 | 1 | flow_mod_za = self._prepare_flow_mod( |
|
1026 | self.uni_z.interface, self.uni_a.interface, |
||
1027 | self.queue_id, vlan_z |
||
1028 | ) |
||
1029 | |||
1030 | 1 | if not isinstance(vlan_z, list) and vlan_z not in self.special_cases: |
|
|||
1031 | 1 | flow_mod_az["actions"].insert( |
|
1032 | 0, {"action_type": "set_vlan", "vlan_id": vlan_z} |
||
1033 | ) |
||
1034 | 1 | if not vlan_a: |
|
1035 | 1 | flow_mod_az["actions"].insert( |
|
1036 | 0, {"action_type": "push_vlan", "tag_type": "c"} |
||
1037 | ) |
||
1038 | 1 | if vlan_a == 0: |
|
1039 | 1 | flow_mod_za["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1040 | 1 | elif vlan_a == 0 and vlan_z == "4096/4096": |
|
1041 | 1 | flow_mod_za["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1042 | |||
1043 | 1 | if not isinstance(vlan_a, list) and vlan_a not in self.special_cases: |
1044 | 1 | flow_mod_za["actions"].insert( |
|
1045 | 0, {"action_type": "set_vlan", "vlan_id": vlan_a} |
||
1046 | ) |
||
1047 | 1 | if not vlan_z: |
|
1048 | 1 | flow_mod_za["actions"].insert( |
|
1049 | 0, {"action_type": "push_vlan", "tag_type": "c"} |
||
1050 | ) |
||
1051 | 1 | if vlan_z == 0: |
|
1052 | 1 | flow_mod_az["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1053 | 1 | elif vlan_a == "4096/4096" and vlan_z == 0: |
|
1054 | 1 | flow_mod_az["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1055 | |||
1056 | 1 | flows = [] |
|
1057 | 1 | if isinstance(vlan_a, list): |
|
1058 | 1 | for mask_a in vlan_a: |
|
1059 | 1 | flow_aux = deepcopy(flow_mod_az) |
|
1060 | 1 | flow_aux["match"]["dl_vlan"] = mask_a |
|
1061 | 1 | flows.append(flow_aux) |
|
1062 | else: |
||
1063 | 1 | if vlan_a is not None: |
|
1064 | 1 | flow_mod_az["match"]["dl_vlan"] = vlan_a |
|
1065 | 1 | flows.append(flow_mod_az) |
|
1066 | |||
1067 | 1 | if isinstance(vlan_z, list): |
|
1068 | 1 | for mask_z in vlan_z: |
|
1069 | 1 | flow_aux = deepcopy(flow_mod_za) |
|
1070 | 1 | flow_aux["match"]["dl_vlan"] = mask_z |
|
1071 | 1 | flows.append(flow_aux) |
|
1072 | else: |
||
1073 | 1 | if vlan_z is not None: |
|
1074 | 1 | flow_mod_za["match"]["dl_vlan"] = vlan_z |
|
1075 | 1 | flows.append(flow_mod_za) |
|
1076 | 1 | return ( |
|
1077 | self.uni_a.interface.switch.id, flows |
||
1078 | ) |
||
1079 | |||
1080 | 1 | def _install_direct_uni_flows(self): |
|
1081 | """Install flows connecting two UNIs. |
||
1082 | |||
1083 | This case happens when the circuit is between UNIs in the |
||
1084 | same switch. |
||
1085 | """ |
||
1086 | 1 | (dpid, flows) = self._prepare_direct_uni_flows() |
|
1087 | 1 | flow_mods = {"switches": [dpid], "flows": flows} |
|
1088 | 1 | try: |
|
1089 | 1 | self._send_flow_mods(flow_mods, "install") |
|
1090 | 1 | except FlowModException as err: |
|
1091 | 1 | raise EVCPathNotInstalled(str(err)) from err |
|
1092 | |||
1093 | 1 | def _prepare_nni_flows(self, path=None): |
|
1094 | """Prepare NNI flows.""" |
||
1095 | 1 | nni_flows = OrderedDict() |
|
1096 | 1 | previous = self.uni_a.interface.switch.dpid |
|
1097 | 1 | for incoming, outcoming in self.links_zipped(path): |
|
1098 | 1 | in_vlan = incoming.get_metadata("s_vlan").value |
|
1099 | 1 | out_vlan = outcoming.get_metadata("s_vlan").value |
|
1100 | 1 | in_endpoint = self.get_endpoint_by_id(incoming, previous, ne) |
|
1101 | 1 | out_endpoint = self.get_endpoint_by_id( |
|
1102 | outcoming, in_endpoint.switch.id, eq |
||
1103 | ) |
||
1104 | |||
1105 | 1 | flows = [] |
|
1106 | # Flow for one direction |
||
1107 | 1 | flows.append( |
|
1108 | self._prepare_nni_flow( |
||
1109 | in_endpoint, |
||
1110 | out_endpoint, |
||
1111 | in_vlan, |
||
1112 | out_vlan, |
||
1113 | queue_id=self.queue_id, |
||
1114 | ) |
||
1115 | ) |
||
1116 | |||
1117 | # Flow for the other direction |
||
1118 | 1 | flows.append( |
|
1119 | self._prepare_nni_flow( |
||
1120 | out_endpoint, |
||
1121 | in_endpoint, |
||
1122 | out_vlan, |
||
1123 | in_vlan, |
||
1124 | queue_id=self.queue_id, |
||
1125 | ) |
||
1126 | ) |
||
1127 | 1 | previous = in_endpoint.switch.id |
|
1128 | 1 | nni_flows[in_endpoint.switch.id] = flows |
|
1129 | 1 | return nni_flows |
|
1130 | |||
1131 | 1 | def _install_flows( |
|
1132 | self, path=None, skip_in=False, skip_out=False |
||
1133 | ) -> dict[str, list[dict]]: |
||
1134 | """Install uni and nni flows""" |
||
1135 | 1 | flows_by_switch = defaultdict(lambda: {"flows": []}) |
|
1136 | 1 | new_flows = defaultdict(list) |
|
1137 | 1 | for dpid, flows in self._prepare_nni_flows(path).items(): |
|
1138 | 1 | flows_by_switch[dpid]["flows"].extend(flows) |
|
1139 | 1 | new_flows[dpid].extend(flows) |
|
1140 | 1 | for dpid, flows in self._prepare_uni_flows( |
|
1141 | path, skip_in, skip_out |
||
1142 | ).items(): |
||
1143 | 1 | flows_by_switch[dpid]["flows"].extend(flows) |
|
1144 | 1 | new_flows[dpid].extend(flows) |
|
1145 | |||
1146 | 1 | try: |
|
1147 | 1 | self._send_flow_mods(flows_by_switch, "install", by_switch=True) |
|
1148 | 1 | except FlowModException as err: |
|
1149 | 1 | raise EVCPathNotInstalled(str(err)) from err |
|
1150 | |||
1151 | 1 | return new_flows |
|
1152 | |||
1153 | 1 | @staticmethod |
|
1154 | 1 | def _get_value_from_uni_tag(uni: UNI): |
|
1155 | """Returns the value from tag. In case of any and untagged |
||
1156 | it should return 4096/4096 and 0 respectively""" |
||
1157 | 1 | special = {"any": "4096/4096", "untagged": 0} |
|
1158 | 1 | if uni.user_tag: |
|
1159 | 1 | value = uni.user_tag.value |
|
1160 | 1 | if isinstance(value, list): |
|
1161 | 1 | return uni.user_tag.mask_list |
|
1162 | 1 | return special.get(value, value) |
|
1163 | 1 | return None |
|
1164 | |||
1165 | # pylint: disable=too-many-locals |
||
1166 | 1 | def _prepare_uni_flows(self, path=None, skip_in=False, skip_out=False): |
|
1167 | """Prepare flows to install UNIs.""" |
||
1168 | 1 | uni_flows = {} |
|
1169 | 1 | if not path: |
|
1170 | log.info("install uni flows without path.") |
||
1171 | return uni_flows |
||
1172 | |||
1173 | # Determine VLANs |
||
1174 | 1 | in_vlan_a = self._get_value_from_uni_tag(self.uni_a) |
|
1175 | 1 | out_vlan_a = path[0].get_metadata("s_vlan").value |
|
1176 | |||
1177 | 1 | in_vlan_z = self._get_value_from_uni_tag(self.uni_z) |
|
1178 | 1 | out_vlan_z = path[-1].get_metadata("s_vlan").value |
|
1179 | |||
1180 | # Get endpoints from path |
||
1181 | 1 | endpoint_a = self.get_endpoint_by_id( |
|
1182 | path[0], self.uni_a.interface.switch.id, eq |
||
1183 | ) |
||
1184 | 1 | endpoint_z = self.get_endpoint_by_id( |
|
1185 | path[-1], self.uni_z.interface.switch.id, eq |
||
1186 | ) |
||
1187 | |||
1188 | # Flows for the first UNI |
||
1189 | 1 | flows_a = [] |
|
1190 | |||
1191 | # Flow for one direction, pushing the service tag |
||
1192 | 1 | if not skip_in: |
|
1193 | 1 | if isinstance(in_vlan_a, list): |
|
1194 | 1 | for in_mask_a in in_vlan_a: |
|
1195 | 1 | push_flow = self._prepare_push_flow( |
|
1196 | self.uni_a.interface, |
||
1197 | endpoint_a, |
||
1198 | in_mask_a, |
||
1199 | out_vlan_a, |
||
1200 | in_vlan_z, |
||
1201 | queue_id=self.queue_id, |
||
1202 | ) |
||
1203 | 1 | flows_a.append(push_flow) |
|
1204 | else: |
||
1205 | push_flow = self._prepare_push_flow( |
||
1206 | self.uni_a.interface, |
||
1207 | endpoint_a, |
||
1208 | in_vlan_a, |
||
1209 | out_vlan_a, |
||
1210 | in_vlan_z, |
||
1211 | queue_id=self.queue_id, |
||
1212 | ) |
||
1213 | flows_a.append(push_flow) |
||
1214 | |||
1215 | # Flow for the other direction, popping the service tag |
||
1216 | 1 | if not skip_out: |
|
1217 | 1 | pop_flow = self._prepare_pop_flow( |
|
1218 | endpoint_a, |
||
1219 | self.uni_a.interface, |
||
1220 | out_vlan_a, |
||
1221 | queue_id=self.queue_id, |
||
1222 | ) |
||
1223 | 1 | flows_a.append(pop_flow) |
|
1224 | |||
1225 | 1 | uni_flows[self.uni_a.interface.switch.id] = flows_a |
|
1226 | |||
1227 | # Flows for the second UNI |
||
1228 | 1 | flows_z = [] |
|
1229 | |||
1230 | # Flow for one direction, pushing the service tag |
||
1231 | 1 | if not skip_in: |
|
1232 | 1 | if isinstance(in_vlan_z, list): |
|
1233 | 1 | for in_mask_z in in_vlan_z: |
|
1234 | 1 | push_flow = self._prepare_push_flow( |
|
1235 | self.uni_z.interface, |
||
1236 | endpoint_z, |
||
1237 | in_mask_z, |
||
1238 | out_vlan_z, |
||
1239 | in_vlan_a, |
||
1240 | queue_id=self.queue_id, |
||
1241 | ) |
||
1242 | 1 | flows_z.append(push_flow) |
|
1243 | else: |
||
1244 | push_flow = self._prepare_push_flow( |
||
1245 | self.uni_z.interface, |
||
1246 | endpoint_z, |
||
1247 | in_vlan_z, |
||
1248 | out_vlan_z, |
||
1249 | in_vlan_a, |
||
1250 | queue_id=self.queue_id, |
||
1251 | ) |
||
1252 | flows_z.append(push_flow) |
||
1253 | |||
1254 | # Flow for the other direction, popping the service tag |
||
1255 | 1 | if not skip_out: |
|
1256 | 1 | pop_flow = self._prepare_pop_flow( |
|
1257 | endpoint_z, |
||
1258 | self.uni_z.interface, |
||
1259 | out_vlan_z, |
||
1260 | queue_id=self.queue_id, |
||
1261 | ) |
||
1262 | 1 | flows_z.append(pop_flow) |
|
1263 | |||
1264 | 1 | uni_flows[self.uni_z.interface.switch.id] = flows_z |
|
1265 | |||
1266 | 1 | return uni_flows |
|
1267 | |||
1268 | 1 | @staticmethod |
|
1269 | 1 | @retry( |
|
1270 | stop=stop_after_attempt(3), |
||
1271 | wait=wait_combine(wait_fixed(3), wait_random(min=2, max=7)), |
||
1272 | retry=retry_if_exception_type(FlowModException), |
||
1273 | before_sleep=before_sleep, |
||
1274 | reraise=True, |
||
1275 | ) |
||
1276 | 1 | def _send_flow_mods( |
|
1277 | data_content: dict, |
||
1278 | command="install", |
||
1279 | force=False, |
||
1280 | by_switch=False |
||
1281 | ): |
||
1282 | """Send a flow_mod list to a specific switch. |
||
1283 | |||
1284 | Args: |
||
1285 | data_content(dict): Python dictionary with the flow mods to send. |
||
1286 | command(str): 'install' (default) to add flows, or 'delete' to |
||
1287 | remove them. |
||
1288 | force(bool): True to send via consistency check in case of errors. |
||
1289 | by_switch(bool): True to send to 'flows_by_switch' request instead. |
||
1290 | """ |
||
1291 | 1 | if by_switch: |
|
1292 | 1 | endpoint = f"{settings.MANAGER_URL}/flows_by_switch/?force={force}" |
|
1293 | else: |
||
1294 | 1 | endpoint = f"{settings.MANAGER_URL}/flows" |
|
1295 | 1 | data_content["force"] = force |
|
1296 | 1 | try: |
|
1297 | 1 | if command == "install": |
|
1298 | 1 | res = httpx.post(endpoint, json=data_content, timeout=30) |
|
1299 | 1 | elif command == "delete": |
|
1300 | 1 | res = httpx.request( |
|
1301 | "DELETE", endpoint, json=data_content, timeout=30 |
||
1302 | ) |
||
1303 | 1 | except httpx.RequestError as err: |
|
1304 | 1 | raise FlowModException(str(err)) from err |
|
1305 | 1 | if res.is_server_error or res.status_code >= 400: |
|
1306 | 1 | raise FlowModException(res.text) |
|
1307 | |||
1308 | 1 | def get_cookie(self): |
|
1309 | """Return the cookie integer from evc id.""" |
||
1310 | 1 | return int(self.id, 16) + (settings.COOKIE_PREFIX << 56) |
|
1311 | |||
1312 | 1 | @staticmethod |
|
1313 | 1 | def get_id_from_cookie(cookie): |
|
1314 | """Return the evc id given a cookie value.""" |
||
1315 | 1 | evc_id = cookie - (settings.COOKIE_PREFIX << 56) |
|
1316 | 1 | return f"{evc_id:x}".zfill(14) |
|
1317 | |||
1318 | 1 | def set_flow_table_group_id(self, flow_mod: dict, vlan) -> dict: |
|
1319 | """Set table_group and table_id""" |
||
1320 | 1 | table_group = "epl" if vlan is None else "evpl" |
|
1321 | 1 | flow_mod["table_group"] = table_group |
|
1322 | 1 | flow_mod["table_id"] = self.table_group[table_group] |
|
1323 | 1 | return flow_mod |
|
1324 | |||
1325 | 1 | @staticmethod |
|
1326 | 1 | def get_priority(vlan): |
|
1327 | """Return priority value depending on vlan value""" |
||
1328 | 1 | if isinstance(vlan, list): |
|
1329 | 1 | return settings.EVPL_SB_PRIORITY |
|
1330 | 1 | if vlan not in {None, "4096/4096", 0}: |
|
1331 | 1 | return settings.EVPL_SB_PRIORITY |
|
1332 | 1 | if vlan == 0: |
|
1333 | 1 | return settings.UNTAGGED_SB_PRIORITY |
|
1334 | 1 | if vlan == "4096/4096": |
|
1335 | 1 | return settings.ANY_SB_PRIORITY |
|
1336 | 1 | return settings.EPL_SB_PRIORITY |
|
1337 | |||
1338 | 1 | def _prepare_flow_mod(self, in_interface, out_interface, |
|
1339 | queue_id=None, vlan=True): |
||
1340 | """Prepare a common flow mod.""" |
||
1341 | 1 | default_actions = [ |
|
1342 | {"action_type": "output", "port": out_interface.port_number} |
||
1343 | ] |
||
1344 | 1 | queue_id = settings.QUEUE_ID if queue_id == -1 else queue_id |
|
1345 | 1 | if queue_id is not None: |
|
1346 | 1 | default_actions.append( |
|
1347 | {"action_type": "set_queue", "queue_id": queue_id} |
||
1348 | ) |
||
1349 | |||
1350 | 1 | flow_mod = { |
|
1351 | "match": {"in_port": in_interface.port_number}, |
||
1352 | "cookie": self.get_cookie(), |
||
1353 | "actions": default_actions, |
||
1354 | "owner": "mef_eline", |
||
1355 | } |
||
1356 | |||
1357 | 1 | self.set_flow_table_group_id(flow_mod, vlan) |
|
1358 | 1 | if self.sb_priority: |
|
1359 | 1 | flow_mod["priority"] = self.sb_priority |
|
1360 | else: |
||
1361 | 1 | flow_mod["priority"] = self.get_priority(vlan) |
|
1362 | 1 | return flow_mod |
|
1363 | |||
1364 | 1 | def _prepare_nni_flow(self, *args, queue_id=None): |
|
1365 | """Create NNI flows.""" |
||
1366 | 1 | in_interface, out_interface, in_vlan, out_vlan = args |
|
1367 | 1 | flow_mod = self._prepare_flow_mod( |
|
1368 | in_interface, out_interface, queue_id |
||
1369 | ) |
||
1370 | 1 | flow_mod["match"]["dl_vlan"] = in_vlan |
|
1371 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": out_vlan} |
|
1372 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1373 | |||
1374 | 1 | return flow_mod |
|
1375 | |||
1376 | 1 | def _prepare_push_flow(self, *args, queue_id=None): |
|
1377 | """Prepare push flow. |
||
1378 | |||
1379 | Arguments: |
||
1380 | in_interface(str): Interface input. |
||
1381 | out_interface(str): Interface output. |
||
1382 | in_vlan(int,str,None): Vlan input. |
||
1383 | out_vlan(str): Vlan output. |
||
1384 | new_c_vlan(int,str,list,None): New client vlan. |
||
1385 | |||
1386 | Return: |
||
1387 | dict: A Python dictionary representing a FlowMod. |
||
1388 | |||
1389 | """ |
||
1390 | # assign all arguments |
||
1391 | 1 | in_interface, out_interface, in_vlan, out_vlan, new_c_vlan = args |
|
1392 | 1 | vlan_pri = in_vlan if not isinstance(new_c_vlan, list) else new_c_vlan |
|
1393 | 1 | flow_mod = self._prepare_flow_mod( |
|
1394 | in_interface, out_interface, queue_id, vlan_pri |
||
1395 | ) |
||
1396 | # the service tag must be always pushed |
||
1397 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": out_vlan} |
|
1398 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1399 | |||
1400 | 1 | new_action = {"action_type": "push_vlan", "tag_type": "s"} |
|
1401 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1402 | |||
1403 | 1 | if in_vlan is not None: |
|
1404 | # if in_vlan is set, it must be included in the match |
||
1405 | 1 | flow_mod["match"]["dl_vlan"] = in_vlan |
|
1406 | |||
1407 | 1 | if (not isinstance(new_c_vlan, list) and in_vlan != new_c_vlan and |
|
1408 | new_c_vlan not in self.special_cases): |
||
1409 | # new_c_vlan is a specific tag different from in_vlan, so a set action is required |
||
1410 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": new_c_vlan} |
|
1411 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1412 | |||
1413 | 1 | if in_vlan not in self.special_cases and new_c_vlan == 0: |
|
1414 | # in_vlan is a specific tag but new_c_vlan is 0 (untagged), |
||
1415 | # so a pop action is required |
||
1416 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1417 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1418 | |||
1419 | 1 | elif in_vlan == "4096/4096" and new_c_vlan == 0: |
|
1420 | # in_vlan matches any tag ("4096/4096") but new_c_vlan is 0 (untagged), |
||
1421 | # so a pop action is required |
||
1422 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1423 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1424 | |||
1425 | 1 | elif (not in_vlan and |
|
1426 | (not isinstance(new_c_vlan, list) and |
||
1427 | new_c_vlan not in self.special_cases)): |
||
1428 | # in_vlan is not set but new_c_vlan is a specific tag, so a client |
||
1429 | # VLAN tag must be pushed before its value is set |
||
1430 | 1 | new_action = {"action_type": "push_vlan", "tag_type": "c"} |
|
1431 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1432 | |||
1433 | 1 | return flow_mod |
|
1434 | |||
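Since every new action is inserted at position 0, the push-flow actions end up executing in reverse order of insertion. An illustrative result for hypothetical values in_vlan=100, new_c_vlan=200 and out_vlan=2 (service tag):

    # match: {"in_port": in_port, "dl_vlan": 100}
    # actions, executed top to bottom:
    #   {"action_type": "set_vlan", "vlan_id": 200}     rewrite the client tag
    #   {"action_type": "push_vlan", "tag_type": "s"}   push the service tag
    #   {"action_type": "set_vlan", "vlan_id": 2}       set the service tag value
    #   {"action_type": "output", "port": out_port}     forward along the path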
1435 | 1 | def _prepare_pop_flow( |
|
1436 | self, in_interface, out_interface, out_vlan, queue_id=None |
||
1437 | ): |
||
1438 | # pylint: disable=too-many-arguments |
||
1439 | """Prepare pop flow.""" |
||
1440 | 1 | flow_mod = self._prepare_flow_mod( |
|
1441 | in_interface, out_interface, queue_id |
||
1442 | ) |
||
1443 | 1 | flow_mod["match"]["dl_vlan"] = out_vlan |
|
1444 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1445 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1446 | 1 | return flow_mod |
|
1447 | |||
1448 | 1 | @staticmethod |
|
1449 | 1 | def run_bulk_sdntraces( |
|
1450 | uni_list: list[tuple[Interface, Union[str, int, None]]] |
||
1451 | ) -> dict: |
||
1452 | """Run SDN traces on control plane starting from EVC UNIs.""" |
||
1453 | 1 | endpoint = f"{settings.SDN_TRACE_CP_URL}/traces" |
|
1454 | 1 | data = [] |
|
1455 | 1 | for interface, tag_value in uni_list: |
|
1456 | 1 | data_uni = { |
|
1457 | "trace": { |
||
1458 | "switch": { |
||
1459 | "dpid": interface.switch.dpid, |
||
1460 | "in_port": interface.port_number, |
||
1461 | } |
||
1462 | } |
||
1463 | } |
||
1464 | 1 | if tag_value: |
|
1465 | 1 | uni_dl_vlan = map_dl_vlan(tag_value) |
|
1466 | 1 | if uni_dl_vlan: |
|
1467 | 1 | data_uni["trace"]["eth"] = { |
|
1468 | "dl_type": 0x8100, |
||
1469 | "dl_vlan": uni_dl_vlan, |
||
1470 | } |
||
1471 | 1 | data.append(data_uni) |
|
1472 | 1 | try: |
|
1473 | 1 | response = httpx.put(endpoint, json=data, timeout=30) |
|
1474 | 1 | except httpx.TimeoutException as exception: |
|
1475 | 1 | log.error(f"Request has timed out: {exception}") |
|
1476 | 1 | return {"result": []} |
|
1477 | 1 | if response.status_code >= 400: |
|
1478 | 1 | log.error(f"Failed to run sdntrace-cp: {response.text}") |
|
1479 | 1 | return {"result": []} |
|
1480 | 1 | return response.json() |
|
1481 | |||
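A sketch of the request body sent to the sdntrace_cp traces endpoint for two UNIs, one tagged with VLAN 100 (assuming map_dl_vlan(100) returns 100) and one untagged; dpids and ports are hypothetical:

    [
        {"trace": {"switch": {"dpid": "00:00:00:00:00:00:00:01", "in_port": 1},
                   "eth": {"dl_type": 0x8100, "dl_vlan": 100}}},
        {"trace": {"switch": {"dpid": "00:00:00:00:00:00:00:02", "in_port": 2}}},
    ]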
1482 | # pylint: disable=too-many-return-statements, too-many-arguments |
||
1483 | 1 | @staticmethod |
|
1484 | 1 | def check_trace( |
|
1485 | evc_id: str, |
||
1486 | evc_name: str, |
||
1487 | tag_a: Union[None, int, str], |
||
1488 | tag_z: Union[None, int, str], |
||
1489 | interface_a: Interface, |
||
1490 | interface_z: Interface, |
||
1491 | current_path: list, |
||
1492 | trace_a: list, |
||
1493 | trace_z: list |
||
1494 | ) -> bool: |
||
1495 | """Auxiliar function to check an individual trace""" |
||
1496 | 1 | if ( |
|
1497 | len(trace_a) != len(current_path) + 1 |
||
1498 | or not compare_uni_out_trace(tag_z, interface_z, trace_a[-1]) |
||
1499 | ): |
||
1500 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1501 | f"Invalid trace from uni_a: {trace_a}") |
||
1502 | 1 | return False |
|
1503 | 1 | if ( |
|
1504 | len(trace_z) != len(current_path) + 1 |
||
1505 | or not compare_uni_out_trace(tag_a, interface_a, trace_z[-1]) |
||
1506 | ): |
||
1507 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1508 | f"Invalid trace from uni_z: {trace_z}") |
||
1509 | 1 | return False |
|
1510 | |||
1511 | 1 | if not current_path: |
|
1512 | return True |
||
1513 | |||
1514 | 1 | first_link, trace_path_begin, trace_path_end = current_path[0], [], [] |
|
1515 | 1 | if ( |
|
1516 | first_link.endpoint_a.switch.id == trace_a[0]["dpid"] |
||
1517 | ): |
||
1518 | 1 | trace_path_begin, trace_path_end = trace_a, trace_z |
|
1519 | 1 | elif ( |
|
1520 | first_link.endpoint_a.switch.id == trace_z[0]["dpid"] |
||
1521 | ): |
||
1522 | 1 | trace_path_begin, trace_path_end = trace_z, trace_a |
|
1523 | else: |
||
1524 | msg = ( |
||
1525 | f"first link {first_link} endpoint_a didn't match the first " |
||
1526 | f"step of trace_a {trace_a} or trace_z {trace_z}" |
||
1527 | ) |
||
1528 | log.warning(msg) |
||
1529 | return False |
||
1530 | |||
1531 | 1 | for link, trace1, trace2 in zip(current_path, |
|
1532 | trace_path_begin[1:], |
||
1533 | trace_path_end[:0:-1]): |
||
1534 | 1 | metadata_vlan = None |
|
1535 | 1 | if link.metadata: |
|
1536 | 1 | metadata_vlan = glom(link.metadata, 's_vlan.value') |
|
1537 | 1 | if compare_endpoint_trace( |
|
1538 | link.endpoint_a, |
||
1539 | metadata_vlan, |
||
1540 | trace2 |
||
1541 | ) is False: |
||
1542 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1543 | f"Invalid trace from uni_a: {trace_a}") |
||
1544 | 1 | return False |
|
1545 | 1 | if compare_endpoint_trace( |
|
1546 | link.endpoint_b, |
||
1547 | metadata_vlan, |
||
1548 | trace1 |
||
1549 | ) is False: |
||
1550 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1551 | f"Invalid trace from uni_z: {trace_z}") |
||
1552 | 1 | return False |
|
1553 | |||
1554 | 1 | return True |
|
1555 | |||
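A quick way to read the length check above: a valid trace has one step per switch in the path, i.e. one more step than the number of links. Hypothetical two-link example:

    # current_path: sw1 --link1--> sw2 --link2--> sw3
    # valid trace_a (len(current_path) + 1 == 3 steps):
    #   [{"dpid": sw1_dpid, ...}, {"dpid": sw2_dpid, ...}, {"dpid": sw3_dpid, ...}]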
1556 | 1 | @staticmethod |
|
1557 | 1 | def check_range(circuit, traces: list) -> bool: |
|
1558 | """Check traces when for UNI with TAGRange""" |
||
1559 | 1 | check = True |
|
1560 | 1 | for i, mask in enumerate(circuit.uni_a.user_tag.mask_list): |
|
1561 | 1 | trace_a = traces[i*2] |
|
1562 | 1 | trace_z = traces[i*2+1] |
|
1563 | 1 | check &= EVCDeploy.check_trace( |
|
1564 | circuit.id, circuit.name, |
||
1565 | mask, mask, |
||
1566 | circuit.uni_a.interface, |
||
1567 | circuit.uni_z.interface, |
||
1568 | circuit.current_path, |
||
1569 | trace_a, trace_z, |
||
1570 | ) |
||
1571 | 1 | return check |
|
1572 | |||
1573 | 1 | @staticmethod |
|
1574 | 1 | def check_list_traces(list_circuits: list) -> dict: |
|
1575 | """Check if current_path is deployed comparing with SDN traces.""" |
||
1576 | 1 | if not list_circuits: |
|
1577 | 1 | return {} |
|
1578 | 1 | uni_list = make_uni_list(list_circuits) |
|
1579 | 1 | traces = EVCDeploy.run_bulk_sdntraces(uni_list)["result"] |
|
1580 | |||
1581 | 1 | if not traces: |
|
1582 | 1 | return {} |
|
1583 | |||
1584 | 1 | try: |
|
1585 | 1 | circuits_checked = {} |
|
1586 | 1 | i = 0 |
|
1587 | 1 | for circuit in list_circuits: |
|
1588 | 1 | if isinstance(circuit.uni_a.user_tag, TAGRange): |
|
1589 | 1 | length = len(circuit.uni_a.user_tag.mask_list) |
|
1590 | 1 | circuits_checked[circuit.id] = EVCDeploy.check_range( |
|
1591 | circuit, traces[i:i+length*2] |
||
1592 | ) |
||
1593 | 1 | i += length*2 |
|
1594 | else: |
||
1595 | 1 | trace_a = traces[i] |
|
1596 | 1 | trace_z = traces[i+1] |
|
1597 | 1 | tag_a = None |
|
1598 | 1 | if circuit.uni_a.user_tag: |
|
1599 | 1 | tag_a = circuit.uni_a.user_tag.value |
|
1600 | 1 | tag_z = None |
|
1601 | 1 | if circuit.uni_z.user_tag: |
|
1602 | 1 | tag_z = circuit.uni_z.user_tag.value |
|
1603 | 1 | circuits_checked[circuit.id] = EVCDeploy.check_trace( |
|
1604 | circuit.id, circuit.name, |
||
1605 | tag_a, tag_z, |
||
1606 | circuit.uni_a.interface, |
||
1607 | circuit.uni_z.interface, |
||
1608 | circuit.current_path, |
||
1609 | trace_a, trace_z |
||
1610 | ) |
||
1611 | 1 | i += 2 |
|
1612 | 1 | except IndexError as err: |
|
1613 | 1 | log.error( |
|
1614 | f"Bulk sdntraces returned fewer items than expected." |
||
1615 | f"Error = {err}" |
||
1616 | ) |
||
1617 | 1 | return {} |
|
1618 | |||
1619 | 1 | return circuits_checked |
|
1620 | |||
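How the flat traces list is consumed above, as a sketch: a circuit whose uni_a user tag is a TAGRange with N masks consumes 2*N consecutive traces, while any other circuit consumes 2 (uni_a then uni_z). The result maps EVC ids (hypothetical below) to whether their current_path matched:

    # {"16a76ed9c3f944": True, "7d5a4e8a2b1c33": False}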
1621 | 1 | @staticmethod |
|
1622 | 1 | def get_endpoint_by_id( |
|
1623 | link: Link, |
||
1624 | id_: str, |
||
1625 | operator: Union[eq, ne] |
||
1626 | ) -> Interface: |
||
1627 | """Return endpoint from link |
||
1628 | either equal(eq) or not equal(ne) to id""" |
||
1629 | 1 | if operator(link.endpoint_a.switch.id, id_): |
|
1630 | 1 | return link.endpoint_a |
|
1631 | 1 | return link.endpoint_b |
|
1632 | |||
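Usage sketch for get_endpoint_by_id, assuming link is a Link and dpid a switch id; eq and ne are the operator module functions imported at the top of this module:

    near_end = EVCDeploy.get_endpoint_by_id(link, dpid, eq)  # endpoint on the switch dpid
    far_end = EVCDeploy.get_endpoint_by_id(link, dpid, ne)   # endpoint on the other switch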
1633 | |||
1634 | 1 | class LinkProtection(EVCDeploy): |
|
1635 | """Class to handle link protection.""" |
||
1636 | |||
1637 | 1 | def is_affected_by_link(self, link=None): |
|
1638 | """Verify if the current path is affected by link down event.""" |
||
1639 | return self.current_path.is_affected_by_link(link) |
||
1640 | |||
1641 | 1 | def is_using_primary_path(self): |
|
1642 | """Verify if the current deployed path is self.primary_path.""" |
||
1643 | 1 | return self.current_path == self.primary_path |
|
1644 | |||
1645 | 1 | def is_using_backup_path(self): |
|
1646 | """Verify if the current deployed path is self.backup_path.""" |
||
1647 | 1 | return self.current_path == self.backup_path |
|
1648 | |||
1649 | 1 | def is_using_dynamic_path(self): |
|
1650 | """Verify if the current deployed path is dynamic.""" |
||
1651 | 1 | if ( |
|
1652 | self.current_path |
||
1653 | and not self.is_using_primary_path() |
||
1654 | and not self.is_using_backup_path() |
||
1655 | and self.current_path.status is EntityStatus.UP |
||
1656 | ): |
||
1657 | return True |
||
1658 | 1 | return False |
|
1659 | |||
1660 | 1 | def handle_link_up(self, link): |
|
1661 | """Handle circuit when link up. |
||
1662 | |||
1663 | Args: |
||
1664 | link(Link): Link affected by link.up event. |
||
1665 | |||
1666 | """ |
||
1667 | 1 | condition_pairs = [ |
|
1668 | ( |
||
1669 | lambda me: me.is_using_primary_path(), |
||
1670 | lambda _: (True, 'nothing') |
||
1671 | ), |
||
1672 | ( |
||
1673 | lambda me: me.is_intra_switch(), |
||
1674 | lambda _: (True, 'nothing') |
||
1675 | ), |
||
1676 | ( |
||
1677 | lambda me: me.primary_path.is_affected_by_link(link), |
||
1678 | lambda me: (me.deploy_to_primary_path(), 'redeploy') |
||
1679 | ), |
||
1680 | # We tried to deploy the primary_path without success, |
||
1681 | # and in this case the circuit is somehow up. Nothing to do. |
||
1682 | ( |
||
1683 | lambda me: me.is_using_backup_path(), |
||
1684 | lambda _: (True, 'nothing') |
||
1685 | ), |
||
1686 | ( |
||
1687 | lambda me: me.is_using_dynamic_path(), |
||
1688 | lambda _: (True, 'nothing') |
||
1689 | ), |
||
1690 | # In this case, probably the circuit is not being used and |
||
1691 | # we can move to backup |
||
1692 | ( |
||
1693 | lambda me: me.backup_path.is_affected_by_link(link), |
||
1694 | lambda me: (me.deploy_to_backup_path(), 'redeploy') |
||
1695 | ), |
||
1696 | # In this case, the circuit is not being used and we should |
||
1697 | # try a dynamic path |
||
1698 | ( |
||
1699 | lambda me: me.dynamic_backup_path and not me.is_active(), |
||
1700 | lambda me: (me.deploy_to_path(), 'redeploy') |
||
1701 | ) |
||
1702 | ] |
||
1703 | 1 | for predicate, action in condition_pairs: |
|
1704 | 1 | if not predicate(self): |
|
1705 | 1 | continue |
|
1706 | 1 | success, success_type = action(self) |
|
1707 | 1 | if success: |
|
1708 | 1 | if success_type == 'redeploy': |
|
1709 | 1 | emit_event( |
|
1710 | self._controller, |
||
1711 | "redeployed_link_up", |
||
1712 | content=map_evc_event_content(self) |
||
1713 | ) |
||
1714 | 1 | return True |
|
1715 | 1 | return False |
|
1716 | |||
1717 | 1 | def handle_link_down(self): |
|
1718 | """Handle circuit when link down. |
||
1719 | |||
1720 | Returns: |
||
1721 | bool: True if the re-deploy was successful, otherwise False. |
||
1722 | |||
1723 | """ |
||
1724 | 1 | success = False |
|
1725 | 1 | if self.is_using_primary_path(): |
|
1726 | 1 | success = self.deploy_to_backup_path() |
|
1727 | 1 | elif self.is_using_backup_path(): |
|
1728 | 1 | success = self.deploy_to_primary_path() |
|
1729 | |||
1730 | 1 | if not success and self.dynamic_backup_path: |
|
1731 | 1 | success = self.deploy_to_path() |
|
1732 | |||
1733 | 1 | if success: |
|
1734 | 1 | log.debug(f"{self} deployed after link down.") |
|
1735 | else: |
||
1736 | 1 | self.remove_current_flows(sync=False) |
|
1737 | 1 | self.deactivate() |
|
1738 | 1 | self.sync() |
|
1739 | 1 | log.debug(f"Failed to re-deploy {self} after link down.") |
|
1740 | |||
1741 | 1 | return success |
|
1742 | |||
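The failover order implemented by handle_link_down above, summarized as a sketch:

    # using primary_path -> try deploy_to_backup_path()
    # using backup_path  -> try deploy_to_primary_path()
    # still unsuccessful and dynamic_backup_path enabled -> deploy_to_path()
    # everything failed  -> remove current flows, deactivate and sync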
1743 | 1 | @staticmethod |
|
1744 | 1 | def get_interface_from_switch(uni: UNI, switches: dict) -> Interface: |
|
1745 | """Get interface from switch by uni""" |
||
1746 | 1 | switch = switches[uni.interface.switch.dpid] |
|
1747 | 1 | interface = switch.interfaces[uni.interface.port_number] |
|
1748 | 1 | return interface |
|
1749 | |||
1750 | 1 | def are_unis_active(self, switches: dict) -> bool: |
|
1751 | """Determine whether this EVC should be active""" |
||
1752 | 1 | interface_a = self.get_interface_from_switch(self.uni_a, switches) |
|
1753 | 1 | interface_z = self.get_interface_from_switch(self.uni_z, switches) |
|
1754 | 1 | active, _ = self.is_uni_interface_active(interface_a, interface_z) |
|
1755 | 1 | return active |
|
1756 | |||
1757 | 1 | @staticmethod |
|
1758 | 1 | def is_uni_interface_active( |
|
1759 | *interfaces: Interface |
||
1760 | ) -> tuple[bool, dict]: |
||
1761 | """Determine whether a UNI should be active""" |
||
1762 | 1 | active = True |
|
1763 | 1 | bad_interfaces = [ |
|
1764 | interface |
||
1765 | for interface in interfaces |
||
1766 | if interface.status != EntityStatus.UP |
||
1767 | ] |
||
1768 | 1 | if bad_interfaces: |
|
1769 | 1 | active = False |
|
1770 | 1 | interfaces = bad_interfaces |
|
1771 | 1 | return active, { |
|
1772 | interface.id: { |
||
1773 | 'status': interface.status.value, |
||
1774 | 'status_reason': interface.status_reason, |
||
1775 | } |
||
1776 | for interface in interfaces |
||
1777 | } |
||
1778 | |||
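Illustrative return values for is_uni_interface_active, with hypothetical interfaces intf_a and intf_z:

    # every interface UP:
    #   (True,  {intf_a.id: {"status": "UP", "status_reason": ...}, intf_z.id: {...}})
    # at least one interface not UP (only the non-UP ones are reported):
    #   (False, {intf_z.id: {"status": "DOWN", "status_reason": ...}})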
1779 | 1 | def handle_interface_link_up(self, interface: Interface): |
|
1780 | """ |
||
1781 | Handler for interface link_up events |
||
1782 | """ |
||
1783 | 1 | if self.is_active(): |
|
1784 | 1 | return |
|
1785 | 1 | interfaces = (self.uni_a.interface, self.uni_z.interface) |
|
1786 | 1 | if interface not in interfaces: |
|
1787 | return |
||
1788 | 1 | down_interfaces = [ |
|
1789 | interface |
||
1790 | for interface in interfaces |
||
1791 | if interface.status != EntityStatus.UP |
||
1792 | ] |
||
1793 | 1 | if down_interfaces: |
|
1794 | return |
||
1795 | 1 | interface_dicts = { |
|
1796 | interface.id: { |
||
1797 | 'status': interface.status.value, |
||
1798 | 'status_reason': interface.status_reason, |
||
1799 | } |
||
1800 | for interface in interfaces |
||
1801 | } |
||
1802 | 1 | self.activate() |
|
1803 | 1 | log.info( |
|
1804 | f"Activating {self}. Interfaces: " |
||
1805 | f"{interface_dicts}." |
||
1806 | ) |
||
1807 | 1 | emit_event(self._controller, "uni_active_updated", |
|
1808 | content=map_evc_event_content(self)) |
||
1809 | 1 | self.sync() |
|
1810 | |||
1811 | 1 | def handle_interface_link_down(self, interface): |
|
1812 | """ |
||
1813 | Handler for interface link_down events |
||
1814 | """ |
||
1815 | 1 | if not self.is_active(): |
|
1816 | 1 | return |
|
1817 | 1 | interfaces = (self.uni_a.interface, self.uni_z.interface) |
|
1818 | 1 | if interface not in interfaces: |
|
1819 | return |
||
1820 | 1 | down_interfaces = [ |
|
1821 | interface |
||
1822 | for interface in interfaces |
||
1823 | if interface.status != EntityStatus.UP |
||
1824 | ] |
||
1825 | 1 | if not down_interfaces: |
|
1826 | return |
||
1827 | 1 | interface_dicts = { |
|
1828 | interface.id: { |
||
1829 | 'status': interface.status.value, |
||
1830 | 'status_reason': interface.status_reason, |
||
1831 | } |
||
1832 | for interface in down_interfaces |
||
1833 | } |
||
1834 | 1 | self.deactivate() |
|
1835 | 1 | log.info( |
|
1836 | f"Deactivating {self}. Interfaces: " |
||
1837 | f"{interface_dicts}." |
||
1838 | ) |
||
1839 | 1 | emit_event(self._controller, "uni_active_updated", |
|
1840 | content=map_evc_event_content(self)) |
||
1841 | 1 | self.sync() |
|
1842 | |||
1843 | |||
1844 | 1 | class EVC(LinkProtection): |
|
1845 | """Class that represents a E-Line Virtual Connection.""" |
||
1846 |