Total Complexity | 353 |
Total Lines | 1920 |
Duplicated Lines | 1.25 % |
Coverage | 94.03% |
Changes | 0 |
Duplicate code is one of the most pungent code smells. A common rule of thumb is to restructure code once it is duplicated in three or more places.
The usual fix is to extract the duplicated logic into a single helper that every call site reuses; a minimal sketch follows.
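The sketch below is illustrative only: the names build_flows_duplicated, _build_flow, vlan_a and vlan_z are invented for this example and are not part of the analysed module. It shows two copy-pasted branches collapsing into one helper parameterised by the values that actually differ.

# Before: the same flow-building shape is copy-pasted once per direction.
def build_flows_duplicated(vlan_a, vlan_z):
    flows = []
    if vlan_a is not None:
        flows.append({"match": {"dl_vlan": vlan_a},
                      "actions": [{"action_type": "set_vlan", "vlan_id": vlan_z}]})
    if vlan_z is not None:
        flows.append({"match": {"dl_vlan": vlan_z},
                      "actions": [{"action_type": "set_vlan", "vlan_id": vlan_a}]})
    return flows

# After: the shared shape lives in one helper; each call site only supplies
# the (in_vlan, out_vlan) pair that differs.
def _build_flow(in_vlan, out_vlan):
    return {"match": {"dl_vlan": in_vlan},
            "actions": [{"action_type": "set_vlan", "vlan_id": out_vlan}]}

def build_flows(vlan_a, vlan_z):
    pairs = [(vlan_a, vlan_z), (vlan_z, vlan_a)]
    return [_build_flow(in_vlan, out_vlan)
            for in_vlan, out_vlan in pairs if in_vlan is not None]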
Complex classes like build.models.evc often do a lot of different things. To break such a class down, we need to identify a cohesive component within it. A common way to find such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster; a minimal sketch of Extract Class follows.
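A rough illustration only: the names UniTagPool and Circuit below are invented for this sketch and do not come from build.models.evc. Extract Class moves the cohesive fields and methods into their own component, and the original class keeps its public interface but delegates to it.

class UniTagPool:
    """Extracted component: owns the tag bookkeeping that used to be
    spread across the large class."""

    def __init__(self):
        self._in_use = set()

    def use(self, tag):
        # Reserve a tag; refuse if another circuit already holds it.
        if tag in self._in_use:
            raise ValueError(f"tag {tag} already in use")
        self._in_use.add(tag)

    def release(self, tag):
        # Releasing an unknown tag is tolerated.
        self._in_use.discard(tag)


class Circuit:
    """The formerly complex class delegates tag handling to the component."""

    def __init__(self, name):
        self.name = name
        self._tags = UniTagPool()

    def use_uni_vlan(self, tag):
        self._tags.use(tag)

    def make_uni_vlan_available(self, tag):
        self._tags.release(tag)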
1 | """Classes used in the main application.""" # pylint: disable=too-many-lines |
||
2 | 1 | import traceback |
|
3 | 1 | from collections import OrderedDict, defaultdict |
|
4 | 1 | from copy import deepcopy |
|
5 | 1 | from datetime import datetime |
|
6 | 1 | from operator import eq, ne |
|
7 | 1 | from threading import Lock |
|
8 | 1 | from typing import Union |
|
9 | 1 | from uuid import uuid4 |
|
10 | |||
11 | 1 | import httpx |
|
12 | 1 | from glom import glom |
|
13 | 1 | from tenacity import (retry, retry_if_exception_type, stop_after_attempt, |
|
14 | wait_combine, wait_fixed, wait_random) |
||
15 | |||
16 | 1 | from kytos.core import log |
|
17 | 1 | from kytos.core.common import EntityStatus, GenericEntity |
|
18 | 1 | from kytos.core.exceptions import KytosNoTagAvailableError, KytosTagError |
|
19 | 1 | from kytos.core.helpers import get_time, now |
|
20 | 1 | from kytos.core.interface import UNI, Interface, TAGRange |
|
21 | 1 | from kytos.core.link import Link |
|
22 | 1 | from kytos.core.retry import before_sleep |
|
23 | 1 | from kytos.core.tag_ranges import range_difference |
|
24 | 1 | from napps.kytos.mef_eline import controllers, settings |
|
25 | 1 | from napps.kytos.mef_eline.exceptions import (ActivationError, |
|
26 | DuplicatedNoTagUNI, |
||
27 | EVCPathNotInstalled, |
||
28 | FlowModException, InvalidPath) |
||
29 | 1 | from napps.kytos.mef_eline.utils import (_does_uni_affect_evc, |
|
30 | check_disabled_component, |
||
31 | compare_endpoint_trace, |
||
32 | compare_uni_out_trace, emit_event, |
||
33 | make_uni_list, map_dl_vlan, |
||
34 | map_evc_event_content, |
||
35 | merge_flow_dicts) |
||
36 | |||
37 | 1 | from .path import DynamicPathManager, Path |
|
38 | |||
39 | |||
40 | 1 | class EVCBase(GenericEntity): |
|
41 | """Class to represent a circuit.""" |
||
42 | |||
43 | 1 | attributes_requiring_redeploy = [ |
|
44 | "primary_path", |
||
45 | "backup_path", |
||
46 | "dynamic_backup_path", |
||
47 | "queue_id", |
||
48 | "sb_priority", |
||
49 | "primary_constraints", |
||
50 | "secondary_constraints", |
||
51 | "uni_a", |
||
52 | "uni_z", |
||
53 | ] |
||
54 | 1 | required_attributes = ["name", "uni_a", "uni_z"] |
|
55 | |||
56 | 1 | updatable_attributes = { |
|
57 | "uni_a", |
||
58 | "uni_z", |
||
59 | "name", |
||
60 | "start_date", |
||
61 | "end_date", |
||
62 | "queue_id", |
||
63 | "bandwidth", |
||
64 | "primary_path", |
||
65 | "backup_path", |
||
66 | "dynamic_backup_path", |
||
67 | "primary_constraints", |
||
68 | "secondary_constraints", |
||
69 | "owner", |
||
70 | "sb_priority", |
||
71 | "service_level", |
||
72 | "circuit_scheduler", |
||
73 | "metadata", |
||
74 | "enabled" |
||
75 | } |
||
76 | |||
77 | # pylint: disable=too-many-statements |
||
78 | 1 | def __init__(self, controller, **kwargs): |
|
79 | """Create an EVC instance with the provided parameters. |
||
80 | |||
81 | Args: |
||
82 | id(str): EVC identifier. If it is None, an ID will be generated. |
||
83 | Only the first 14 bytes passed will be used. |
||
84 | name: represents an EVC name.(Required) |
||
85 | uni_a (UNI): Endpoint A for User Network Interface.(Required) |
||
86 | uni_z (UNI): Endpoint Z for User Network Interface.(Required) |
||
87 | start_date(datetime|str): Date when the EVC was registered. |
||
88 | Default is now(). |
||
89 | end_date(datetime|str): Final date when the EVC will be finished. |
||
90 | Default is None. |
||
91 | bandwidth(int): Bandwidth used by EVC instance. Default is 0. |
||
92 | primary_links(list): Primary links used by evc. Default is [] |
||
93 | backup_links(list): Backups links used by evc. Default is [] |
||
94 | current_path(list): Circuit being used at the moment if this is an |
||
95 | active circuit. Default is []. |
||
96 | failover_path(list): Path being used to provide EVC protection via |
||
97 | failover during link failures. Default is []. |
||
98 | primary_path(list): primary circuit offered to user IF one or more |
||
99 | links were provided. Default is []. |
||
100 | backup_path(list): backup circuit offered to the user IF one or |
||
101 | more links were provided. Default is []. |
||
102 | dynamic_backup_path(bool): Enable computing the backup path dynamically. |
||
103 | Default is False. |
||
104 | creation_time(datetime|str): datetime when the circuit should be |
||
105 | activated. default is now(). |
||
106 | enabled(Boolean): attribute to indicate the administrative state; |
||
107 | default is False. |
||
108 | active(Boolean): attribute to indicate the operational state; |
||
109 | default is False. |
||
110 | archived(Boolean): indicate the EVC has been deleted and is |
||
111 | archived; default is False. |
||
112 | owner(str): The EVC owner. Default is None. |
||
113 | sb_priority(int): Service level provided in the request. |
||
114 | Default is None. |
||
115 | service_level(int): Service level provided. The higher the better. |
||
116 | Default is 0. |
||
117 | |||
118 | Raises: |
||
119 | ValueError: raised when object attributes are invalid. |
||
120 | |||
121 | """ |
||
122 | 1 | self._controller = controller |
|
123 | 1 | self._validate(**kwargs) |
|
124 | 1 | super().__init__() |
|
125 | |||
126 | # required attributes |
||
127 | 1 | self._id = kwargs.get("id", uuid4().hex)[:14] |
|
128 | 1 | self.uni_a: UNI = kwargs.get("uni_a") |
|
129 | 1 | self.uni_z: UNI = kwargs.get("uni_z") |
|
130 | 1 | self.name = kwargs.get("name") |
|
131 | |||
132 | # optional attributes |
||
133 | 1 | self.start_date = get_time(kwargs.get("start_date")) or now() |
|
134 | 1 | self.end_date = get_time(kwargs.get("end_date")) or None |
|
135 | 1 | self.queue_id = kwargs.get("queue_id", -1) |
|
136 | |||
137 | 1 | self.bandwidth = kwargs.get("bandwidth", 0) |
|
138 | 1 | self.primary_links = Path(kwargs.get("primary_links", [])) |
|
139 | 1 | self.backup_links = Path(kwargs.get("backup_links", [])) |
|
140 | 1 | self.current_path = Path(kwargs.get("current_path", [])) |
|
141 | 1 | self.failover_path = Path(kwargs.get("failover_path", [])) |
|
142 | 1 | self.primary_path = Path(kwargs.get("primary_path", [])) |
|
143 | 1 | self.backup_path = Path(kwargs.get("backup_path", [])) |
|
144 | 1 | self.dynamic_backup_path = kwargs.get("dynamic_backup_path", False) |
|
145 | 1 | self.primary_constraints = kwargs.get("primary_constraints", {}) |
|
146 | 1 | self.secondary_constraints = kwargs.get("secondary_constraints", {}) |
|
147 | 1 | self.creation_time = get_time(kwargs.get("creation_time")) or now() |
|
148 | 1 | self.owner = kwargs.get("owner", None) |
|
149 | 1 | self.sb_priority = kwargs.get("sb_priority", None) or kwargs.get( |
|
150 | "priority", None |
||
151 | ) |
||
152 | 1 | self.service_level = kwargs.get("service_level", 0) |
|
153 | 1 | self.circuit_scheduler = kwargs.get("circuit_scheduler", []) |
|
154 | 1 | self.flow_removed_at = get_time(kwargs.get("flow_removed_at")) or None |
|
155 | 1 | self.updated_at = get_time(kwargs.get("updated_at")) or now() |
|
156 | 1 | self.execution_rounds = kwargs.get("execution_rounds", 0) |
|
157 | 1 | self.current_links_cache = set() |
|
158 | 1 | self.primary_links_cache = set() |
|
159 | 1 | self.backup_links_cache = set() |
|
160 | 1 | self.affected_by_link_at = get_time("0001-01-01T00:00:00") |
|
161 | 1 | self.old_path = Path([]) |
|
162 | |||
163 | 1 | self.lock = Lock() |
|
164 | |||
165 | 1 | self.archived = kwargs.get("archived", False) |
|
166 | |||
167 | 1 | self.metadata = kwargs.get("metadata", {}) |
|
168 | |||
169 | 1 | self._mongo_controller = controllers.ELineController() |
|
170 | |||
171 | 1 | if kwargs.get("active", False): |
|
172 | 1 | self.activate() |
|
173 | else: |
||
174 | 1 | self.deactivate() |
|
175 | |||
176 | 1 | if kwargs.get("enabled", False): |
|
177 | 1 | self.enable() |
|
178 | else: |
||
179 | 1 | self.disable() |
|
180 | |||
181 | # datetime of user request for a EVC (or datetime when object was |
||
182 | # created) |
||
183 | 1 | self.request_time = kwargs.get("request_time", now()) |
|
184 | # dict with the user original request (input) |
||
185 | 1 | self._requested = kwargs |
|
186 | |||
187 | # Special cases: No tag, any, untagged |
||
188 | 1 | self.special_cases = {None, "4096/4096", 0} |
|
189 | 1 | self.table_group = kwargs.get("table_group") |
|
190 | |||
191 | 1 | def sync(self, keys: set = None): |
|
192 | """Sync this EVC in the MongoDB.""" |
||
193 | 1 | self.updated_at = now() |
|
194 | 1 | if keys: |
|
195 | 1 | self._mongo_controller.update_evc(self.as_dict(keys)) |
|
196 | 1 | return |
|
197 | 1 | self._mongo_controller.upsert_evc(self.as_dict()) |
|
198 | |||
199 | 1 | def _get_unis_use_tags(self, **kwargs) -> tuple[UNI, UNI]: |
|
200 | """Obtain both UNIs (uni_a, uni_z). |
||
201 | If a UNI is changing, verify tags""" |
||
202 | 1 | uni_a = kwargs.get("uni_a", None) |
|
203 | 1 | uni_a_flag = False |
|
204 | 1 | if uni_a and uni_a != self.uni_a: |
|
205 | 1 | uni_a_flag = True |
|
206 | 1 | self._use_uni_vlan(uni_a, uni_dif=self.uni_a) |
|
207 | |||
208 | 1 | uni_z = kwargs.get("uni_z", None) |
|
209 | 1 | if uni_z and uni_z != self.uni_z: |
|
210 | 1 | try: |
|
211 | 1 | self._use_uni_vlan(uni_z, uni_dif=self.uni_z) |
|
212 | 1 | self.make_uni_vlan_available(self.uni_z, uni_dif=uni_z) |
|
213 | 1 | except KytosTagError as err: |
|
214 | 1 | if uni_a_flag: |
|
215 | 1 | self.make_uni_vlan_available(uni_a, uni_dif=self.uni_a) |
|
216 | 1 | raise err |
|
217 | else: |
||
218 | 1 | uni_z = self.uni_z |
|
219 | |||
220 | 1 | if uni_a_flag: |
|
221 | 1 | self.make_uni_vlan_available(self.uni_a, uni_dif=uni_a) |
|
222 | else: |
||
223 | 1 | uni_a = self.uni_a |
|
224 | 1 | return uni_a, uni_z |
|
225 | |||
226 | 1 | def update(self, **kwargs): |
|
227 | """Update evc attributes. |
||
228 | |||
229 | This method will raise an error when trying to change the following |
||
230 | attributes: [creation_time, active, current_path, failover_path, |
||
231 | _id, archived] |
||
232 | [name, uni_a and uni_z] |
||
233 | |||
234 | Returns: |
||
235 | the values for enable and redeploy attributes if they exist, None |
||
236 | otherwise |
||
237 | Raises: |
||
238 | ValueError: message with error detail. |
||
239 | |||
240 | """ |
||
241 | 1 | enable, redeploy = (None, None) |
|
242 | 1 | if not self._tag_lists_equal(**kwargs): |
|
243 | 1 | raise ValueError( |
|
244 | "UNI_A and UNI_Z tag lists should be the same." |
||
245 | ) |
||
246 | 1 | uni_a, uni_z = self._get_unis_use_tags(**kwargs) |
|
247 | 1 | check_disabled_component(uni_a, uni_z) |
|
248 | 1 | self._validate_has_primary_or_dynamic( |
|
249 | primary_path=kwargs.get("primary_path"), |
||
250 | dynamic_backup_path=kwargs.get("dynamic_backup_path"), |
||
251 | uni_a=uni_a, |
||
252 | uni_z=uni_z, |
||
253 | ) |
||
254 | 1 | for attribute, value in kwargs.items(): |
|
255 | 1 | if attribute not in self.updatable_attributes: |
|
256 | 1 | raise ValueError(f"{attribute} can't be updated.") |
|
257 | 1 | if attribute in ("primary_path", "backup_path"): |
|
258 | 1 | try: |
|
259 | 1 | value.is_valid( |
|
260 | uni_a.interface.switch, uni_z.interface.switch |
||
261 | ) |
||
262 | 1 | except InvalidPath as exception: |
|
263 | 1 | raise ValueError( # pylint: disable=raise-missing-from |
|
264 | f"{attribute} is not a " f"valid path: {exception}" |
||
265 | ) |
||
266 | 1 | for attribute, value in kwargs.items(): |
|
267 | 1 | if attribute == "enabled": |
|
268 | 1 | if value: |
|
269 | 1 | self.enable() |
|
270 | else: |
||
271 | 1 | self.disable() |
|
272 | 1 | enable = value |
|
273 | else: |
||
274 | 1 | setattr(self, attribute, value) |
|
275 | 1 | if attribute in self.attributes_requiring_redeploy: |
|
276 | 1 | redeploy = True |
|
277 | 1 | self.sync(set(kwargs.keys())) |
|
278 | 1 | return enable, redeploy |
|
279 | |||
280 | 1 | def set_flow_removed_at(self): |
|
281 | """Update flow_removed_at attribute.""" |
||
282 | self.flow_removed_at = now() |
||
283 | |||
284 | 1 | def has_recent_removed_flow(self, setting=settings): |
|
285 | """Check if any flow has been removed from the evc""" |
||
286 | if self.flow_removed_at is None: |
||
287 | return False |
||
288 | res_seconds = (now() - self.flow_removed_at).seconds |
||
289 | return res_seconds < setting.TIME_RECENT_DELETED_FLOWS |
||
290 | |||
291 | 1 | def is_recent_updated(self, setting=settings): |
|
292 | """Check if the evc has been updated recently""" |
||
293 | res_seconds = (now() - self.updated_at).seconds |
||
294 | return res_seconds < setting.TIME_RECENT_UPDATED |
||
295 | |||
296 | 1 | def __repr__(self): |
|
297 | """Repr method.""" |
||
298 | 1 | return f"EVC({self._id}, {self.name})" |
|
299 | |||
300 | 1 | def _validate(self, **kwargs): |
|
301 | """Do Basic validations. |
||
302 | |||
303 | Verify required attributes: name, uni_a, uni_z |
||
304 | |||
305 | Raises: |
||
306 | ValueError: message with error detail. |
||
307 | |||
308 | """ |
||
309 | 1 | for attribute in self.required_attributes: |
|
310 | |||
311 | 1 | if attribute not in kwargs: |
|
312 | 1 | raise ValueError(f"{attribute} is required.") |
|
313 | |||
314 | 1 | if "uni" in attribute: |
|
315 | 1 | uni = kwargs.get(attribute) |
|
316 | 1 | if not isinstance(uni, UNI): |
|
317 | raise ValueError(f"{attribute} is an invalid UNI.") |
||
318 | |||
319 | 1 | def _tag_lists_equal(self, **kwargs): |
|
320 | """Verify that tag lists are the same.""" |
||
321 | 1 | uni_a = kwargs.get("uni_a") or self.uni_a |
|
322 | 1 | uni_z = kwargs.get("uni_z") or self.uni_z |
|
323 | 1 | uni_a_list = uni_z_list = False |
|
324 | 1 | if (uni_a.user_tag and isinstance(uni_a.user_tag, TAGRange)): |
|
325 | 1 | uni_a_list = True |
|
326 | 1 | if (uni_z.user_tag and isinstance(uni_z.user_tag, TAGRange)): |
|
327 | 1 | uni_z_list = True |
|
328 | 1 | if uni_a_list and uni_z_list: |
|
329 | 1 | return uni_a.user_tag.value == uni_z.user_tag.value |
|
330 | 1 | return uni_a_list == uni_z_list |
|
331 | |||
332 | 1 | def _validate_has_primary_or_dynamic( |
|
333 | self, |
||
334 | primary_path=None, |
||
335 | dynamic_backup_path=None, |
||
336 | uni_a=None, |
||
337 | uni_z=None, |
||
338 | ) -> None: |
||
339 | """Validate that it must have a primary path or allow dynamic paths.""" |
||
340 | 1 | primary_path = ( |
|
341 | primary_path |
||
342 | if primary_path is not None |
||
343 | else self.primary_path |
||
344 | ) |
||
345 | 1 | dynamic_backup_path = ( |
|
346 | dynamic_backup_path |
||
347 | if dynamic_backup_path is not None |
||
348 | else self.dynamic_backup_path |
||
349 | ) |
||
350 | 1 | uni_a = uni_a if uni_a is not None else self.uni_a |
|
351 | 1 | uni_z = uni_z if uni_z is not None else self.uni_z |
|
352 | 1 | if ( |
|
353 | not primary_path |
||
354 | and not dynamic_backup_path |
||
355 | and uni_a and uni_z |
||
356 | and uni_a.interface.switch != uni_z.interface.switch |
||
357 | ): |
||
358 | 1 | msg = "The EVC must have a primary path or allow dynamic paths." |
|
359 | 1 | raise ValueError(msg) |
|
360 | |||
361 | 1 | def __eq__(self, other): |
|
362 | """Override the default implementation.""" |
||
363 | 1 | if not isinstance(other, EVC): |
|
364 | return False |
||
365 | |||
366 | 1 | attrs_to_compare = ["name", "uni_a", "uni_z", "owner", "bandwidth"] |
|
367 | 1 | for attribute in attrs_to_compare: |
|
368 | 1 | if getattr(other, attribute) != getattr(self, attribute): |
|
369 | 1 | return False |
|
370 | 1 | return True |
|
371 | |||
372 | 1 | def is_intra_switch(self): |
|
373 | """Check if the UNIs are in the same switch.""" |
||
374 | 1 | return self.uni_a.interface.switch == self.uni_z.interface.switch |
|
375 | |||
376 | 1 | def check_no_tag_duplicate(self, other_uni: UNI): |
|
377 | """Check if a no tag UNI is duplicated.""" |
||
378 | 1 | if other_uni in (self.uni_a, self.uni_z): |
|
379 | 1 | msg = f"UNI with interface {other_uni.interface.id} is"\ |
|
380 | f" duplicated with {self}." |
||
381 | 1 | raise DuplicatedNoTagUNI(msg) |
|
382 | |||
383 | 1 | def as_dict(self, keys: set = None): |
|
384 | """Return a dictionary representing an EVC object. |
||
385 | keys: Only fields on this variable will be |
||
386 | returned in the dictionary""" |
||
387 | 1 | evc_dict = { |
|
388 | "id": self.id, |
||
389 | "name": self.name, |
||
390 | "uni_a": self.uni_a.as_dict(), |
||
391 | "uni_z": self.uni_z.as_dict(), |
||
392 | } |
||
393 | |||
394 | 1 | time_fmt = "%Y-%m-%dT%H:%M:%S" |
|
395 | |||
396 | 1 | evc_dict["start_date"] = self.start_date |
|
397 | 1 | if isinstance(self.start_date, datetime): |
|
398 | 1 | evc_dict["start_date"] = self.start_date.strftime(time_fmt) |
|
399 | |||
400 | 1 | evc_dict["end_date"] = self.end_date |
|
401 | 1 | if isinstance(self.end_date, datetime): |
|
402 | 1 | evc_dict["end_date"] = self.end_date.strftime(time_fmt) |
|
403 | |||
404 | 1 | evc_dict["queue_id"] = self.queue_id |
|
405 | 1 | evc_dict["bandwidth"] = self.bandwidth |
|
406 | 1 | evc_dict["primary_links"] = self.primary_links.as_dict() |
|
407 | 1 | evc_dict["backup_links"] = self.backup_links.as_dict() |
|
408 | 1 | evc_dict["current_path"] = self.current_path.as_dict() |
|
409 | 1 | evc_dict["failover_path"] = self.failover_path.as_dict() |
|
410 | 1 | evc_dict["primary_path"] = self.primary_path.as_dict() |
|
411 | 1 | evc_dict["backup_path"] = self.backup_path.as_dict() |
|
412 | 1 | evc_dict["dynamic_backup_path"] = self.dynamic_backup_path |
|
413 | 1 | evc_dict["metadata"] = self.metadata |
|
414 | |||
415 | 1 | evc_dict["request_time"] = self.request_time |
|
416 | 1 | if isinstance(self.request_time, datetime): |
|
417 | 1 | evc_dict["request_time"] = self.request_time.strftime(time_fmt) |
|
418 | |||
419 | 1 | time = self.creation_time.strftime(time_fmt) |
|
420 | 1 | evc_dict["creation_time"] = time |
|
421 | |||
422 | 1 | evc_dict["owner"] = self.owner |
|
423 | 1 | evc_dict["circuit_scheduler"] = [ |
|
424 | sc.as_dict() for sc in self.circuit_scheduler |
||
425 | ] |
||
426 | |||
427 | 1 | evc_dict["active"] = self.is_active() |
|
428 | 1 | evc_dict["enabled"] = self.is_enabled() |
|
429 | 1 | evc_dict["archived"] = self.archived |
|
430 | 1 | evc_dict["sb_priority"] = self.sb_priority |
|
431 | 1 | evc_dict["service_level"] = self.service_level |
|
432 | 1 | evc_dict["primary_constraints"] = self.primary_constraints |
|
433 | 1 | evc_dict["secondary_constraints"] = self.secondary_constraints |
|
434 | 1 | evc_dict["flow_removed_at"] = self.flow_removed_at |
|
435 | 1 | evc_dict["updated_at"] = self.updated_at |
|
436 | |||
437 | 1 | if keys: |
|
438 | 1 | selected = {} |
|
439 | 1 | for key in keys: |
|
440 | 1 | selected[key] = evc_dict[key] |
|
441 | 1 | selected["id"] = evc_dict["id"] |
|
442 | 1 | return selected |
|
443 | 1 | return evc_dict |
|
444 | |||
445 | 1 | @property |
|
446 | 1 | def id(self): # pylint: disable=invalid-name |
|
447 | """Return this EVC's ID.""" |
||
448 | 1 | return self._id |
|
449 | |||
450 | 1 | def archive(self): |
|
451 | """Archive this EVC on deletion.""" |
||
452 | 1 | self.archived = True |
|
453 | |||
454 | 1 | def _use_uni_vlan( |
|
455 | self, |
||
456 | uni: UNI, |
||
457 | uni_dif: Union[None, UNI] = None |
||
458 | ): |
||
459 | """Use tags from UNI""" |
||
460 | 1 | if uni.user_tag is None: |
|
461 | 1 | return |
|
462 | 1 | tag = uni.user_tag.value |
|
463 | 1 | tag_type = uni.user_tag.tag_type |
|
464 | 1 | if (uni_dif and isinstance(tag, list) and |
|
465 | isinstance(uni_dif.user_tag.value, list)): |
||
466 | 1 | tag = range_difference(tag, uni_dif.user_tag.value) |
|
467 | 1 | if not tag: |
|
468 | 1 | return |
|
469 | 1 | uni.interface.use_tags( |
|
470 | self._controller, tag, tag_type, use_lock=True, check_order=False |
||
471 | ) |
||
472 | |||
473 | 1 | def make_uni_vlan_available( |
|
474 | self, |
||
475 | uni: UNI, |
||
476 | uni_dif: Union[None, UNI] = None, |
||
477 | ): |
||
478 | """Make available tag from UNI""" |
||
479 | 1 | if uni.user_tag is None: |
|
480 | 1 | return |
|
481 | 1 | tag = uni.user_tag.value |
|
482 | 1 | tag_type = uni.user_tag.tag_type |
|
483 | 1 | if (uni_dif and isinstance(tag, list) and |
|
484 | isinstance(uni_dif.user_tag.value, list)): |
||
485 | 1 | tag = range_difference(tag, uni_dif.user_tag.value) |
|
486 | 1 | if not tag: |
|
487 | return |
||
488 | 1 | try: |
|
489 | 1 | conflict = uni.interface.make_tags_available( |
|
490 | self._controller, tag, tag_type, use_lock=True, |
||
491 | check_order=False |
||
492 | ) |
||
493 | 1 | except KytosTagError as err: |
|
494 | 1 | log.error(f"Error in {self}: {err}") |
|
495 | 1 | return |
|
496 | 1 | if conflict: |
|
497 | 1 | intf = uni.interface.id |
|
498 | 1 | log.warning(f"Tags {conflict} were already available in {intf}") |
|
499 | |||
500 | 1 | def remove_uni_tags(self): |
|
501 | """Remove both UNI usage of a tag""" |
||
502 | 1 | self.make_uni_vlan_available(self.uni_a) |
|
503 | 1 | self.make_uni_vlan_available(self.uni_z) |
|
504 | |||
505 | |||
506 | # pylint: disable=fixme, too-many-public-methods |
||
507 | 1 | class EVCDeploy(EVCBase): |
|
508 | """Class to handle the deploy procedures.""" |
||
509 | |||
510 | 1 | def create(self): |
|
511 | """Create a EVC.""" |
||
512 | |||
513 | 1 | def discover_new_paths(self): |
|
514 | """Discover new paths to satisfy this circuit and deploy it.""" |
||
515 | return DynamicPathManager.get_best_paths(self, |
||
516 | **self.primary_constraints) |
||
517 | |||
518 | 1 | def get_failover_path_candidates(self): |
|
519 | """Get failover paths to satisfy this EVC.""" |
||
520 | # in the future we can return primary/backup paths as well |
||
521 | # we just have to properly handle link_up and failover paths |
||
522 | # if ( |
||
523 | # self.is_using_primary_path() and |
||
524 | # self.backup_path.status is EntityStatus.UP |
||
525 | # ): |
||
526 | # yield self.backup_path |
||
527 | 1 | return DynamicPathManager.get_disjoint_paths(self, self.current_path) |
|
528 | |||
529 | 1 | def change_path(self): |
|
530 | """Change EVC path.""" |
||
531 | |||
532 | 1 | def reprovision(self): |
|
533 | """Force the EVC (re-)provisioning.""" |
||
534 | |||
535 | 1 | def is_affected_by_link(self, link): |
|
536 | """Return True if this EVC has the given link on its current path.""" |
||
537 | 1 | return link in self.current_path |
|
538 | |||
539 | 1 | def link_affected_by_interface(self, interface): |
|
540 | """Return True if this EVC has the given link on its current path.""" |
||
541 | return self.current_path.link_affected_by_interface(interface) |
||
542 | |||
543 | 1 | def is_backup_path_affected_by_link(self, link): |
|
544 | """Return True if the backup path of this EVC uses the given link.""" |
||
545 | 1 | return link in self.backup_path |
|
546 | |||
547 | # pylint: disable=invalid-name |
||
548 | 1 | def is_primary_path_affected_by_link(self, link): |
|
549 | """Return True if the primary path of this EVC uses the given link.""" |
||
550 | 1 | return link in self.primary_path |
|
551 | |||
552 | 1 | def is_failover_path_affected_by_link(self, link): |
|
553 | """Return True if this EVC has the given link on its failover path.""" |
||
554 | 1 | return link in self.failover_path |
|
555 | |||
556 | 1 | def is_eligible_for_failover_path(self): |
|
557 | """Verify if this EVC is eligible for failover path (EP029)""" |
||
558 | # In the future this function can be augmented to consider |
||
559 | # primary/backup, primary/dynamic, and other path combinations |
||
560 | 1 | return ( |
|
561 | self.dynamic_backup_path and |
||
562 | not self.primary_path and not self.backup_path |
||
563 | ) |
||
564 | |||
565 | 1 | def is_using_primary_path(self): |
|
566 | """Verify if the current deployed path is self.primary_path.""" |
||
567 | 1 | return self.primary_path and (self.current_path == self.primary_path) |
|
568 | |||
569 | 1 | def is_using_backup_path(self): |
|
570 | """Verify if the current deployed path is self.backup_path.""" |
||
571 | 1 | return self.backup_path and (self.current_path == self.backup_path) |
|
572 | |||
573 | 1 | def is_using_dynamic_path(self): |
|
574 | """Verify if the current deployed path is a dynamic path.""" |
||
575 | 1 | if ( |
|
576 | self.current_path |
||
577 | and not self.is_using_primary_path() |
||
578 | and not self.is_using_backup_path() |
||
579 | and self.current_path.status == EntityStatus.UP |
||
580 | ): |
||
581 | return True |
||
582 | 1 | return False |
|
583 | |||
584 | 1 | def deploy_to_backup_path(self, old_path_dict: dict = None): |
|
585 | """Deploy the backup path into the datapaths of this circuit. |
||
586 | |||
587 | If the backup_path attribute is valid and up, this method will try to |
||
588 | deploy this backup_path. |
||
589 | |||
590 | If everything fails and dynamic_backup_path is True, then tries to |
||
591 | deploy a dynamic path. |
||
592 | """ |
||
593 | # TODO: Remove flows from current (cookies) |
||
594 | 1 | if self.is_using_backup_path(): |
|
595 | # TODO: Log to say that cannot move backup to backup |
||
596 | return True |
||
597 | |||
598 | 1 | success = False |
|
599 | 1 | if self.backup_path.status is EntityStatus.UP: |
|
600 | 1 | success = self.deploy_to_path(self.backup_path, old_path_dict) |
|
601 | |||
602 | 1 | if success: |
|
603 | 1 | return True |
|
604 | |||
605 | 1 | if self.dynamic_backup_path or self.is_intra_switch(): |
|
606 | 1 | return self.deploy_to_path(old_path_dict=old_path_dict) |
|
607 | |||
608 | return False |
||
609 | |||
610 | 1 | def deploy_to_primary_path(self, old_path_dict: dict = None): |
|
611 | """Deploy the primary path into the datapaths of this circuit. |
||
612 | |||
613 | If the primary_path attribute is valid and up, this method will try to |
||
614 | deploy this primary_path. |
||
615 | """ |
||
616 | # TODO: Remove flows from current (cookies) |
||
617 | 1 | if self.is_using_primary_path(): |
|
618 | # TODO: Log to say that cannot move primary to primary |
||
619 | return True |
||
620 | |||
621 | 1 | if self.primary_path.status is EntityStatus.UP: |
|
622 | 1 | return self.deploy_to_path(self.primary_path, old_path_dict) |
|
623 | 1 | return False |
|
624 | |||
625 | 1 | def deploy(self, old_path_dict: dict = None): |
|
626 | """Deploy EVC to best path. |
||
627 | |||
628 | Best path can be the primary path, if available. If not, the backup |
||
629 | path, and, if it is also not available, a dynamic path. |
||
630 | """ |
||
631 | 1 | if self.archived: |
|
632 | 1 | return False |
|
633 | 1 | self.enable() |
|
634 | 1 | success = self.deploy_to_primary_path(old_path_dict) |
|
635 | 1 | if not success: |
|
636 | 1 | success = self.deploy_to_backup_path(old_path_dict) |
|
637 | |||
638 | 1 | if success: |
|
639 | 1 | emit_event(self._controller, "deployed", |
|
640 | content=map_evc_event_content(self)) |
||
641 | 1 | return success |
|
642 | |||
643 | 1 | @staticmethod |
|
644 | 1 | def get_path_status(path): |
|
645 | """Check for the current status of a path. |
||
646 | |||
647 | If any link in this path is down, the path is considered down. |
||
648 | """ |
||
649 | 1 | if not path: |
|
650 | 1 | return EntityStatus.DISABLED |
|
651 | |||
652 | 1 | for link in path: |
|
653 | 1 | if link.status is not EntityStatus.UP: |
|
654 | 1 | return link.status |
|
655 | 1 | return EntityStatus.UP |
|
656 | |||
657 | # def discover_new_path(self): |
||
658 | # # TODO: discover a new path to satisfy this circuit and deploy |
||
659 | |||
660 | 1 | def remove(self): |
|
661 | """Remove EVC path and disable it.""" |
||
662 | 1 | self.remove_current_flows(sync=False) |
|
663 | 1 | self.remove_failover_flows(sync=False) |
|
664 | 1 | self.disable() |
|
665 | 1 | self.sync() |
|
666 | 1 | emit_event(self._controller, "undeployed", |
|
667 | content=map_evc_event_content(self)) |
||
668 | |||
669 | 1 | def remove_failover_flows(self, exclude_uni_switches=True, |
|
670 | force=True, sync=True) -> None: |
||
671 | """Remove failover_flows. |
||
672 | |||
673 | By default, it will exclude UNI switches; if mef_eline has already |
||
674 | called remove_current_flows before, this minimizes the number |
||
675 | of FlowMods and IO. |
||
676 | """ |
||
677 | 1 | if not self.failover_path: |
|
678 | 1 | return |
|
679 | 1 | switches, cookie, excluded = set(), self.get_cookie(), set() |
|
680 | 1 | if exclude_uni_switches: |
|
681 | 1 | excluded.add(self.uni_a.interface.switch.id) |
|
682 | 1 | excluded.add(self.uni_z.interface.switch.id) |
|
683 | 1 | for link in self.failover_path: |
|
684 | 1 | if link.endpoint_a.switch.id not in excluded: |
|
685 | 1 | switches.add(link.endpoint_a.switch.id) |
|
686 | 1 | if link.endpoint_b.switch.id not in excluded: |
|
687 | 1 | switches.add(link.endpoint_b.switch.id) |
|
688 | 1 | flow_mods = { |
|
689 | "switches": list(switches), |
||
690 | "flows": [{ |
||
691 | "cookie": cookie, |
||
692 | "cookie_mask": int(0xffffffffffffffff), |
||
693 | "owner": "mef_eline", |
||
694 | }] |
||
695 | } |
||
696 | 1 | try: |
|
697 | 1 | self._send_flow_mods( |
|
698 | flow_mods, |
||
699 | "delete", |
||
700 | force=force, |
||
701 | ) |
||
702 | except FlowModException as err: |
||
703 | log.error(f"Error deleting {self} failover_path flows, {err}") |
||
704 | 1 | try: |
|
705 | 1 | self.failover_path.make_vlans_available(self._controller) |
|
706 | except KytosTagError as err: |
||
707 | log.error(f"Error removing {self} failover_path: {err}") |
||
708 | 1 | self.failover_path = Path([]) |
|
709 | 1 | if sync: |
|
710 | 1 | self.sync() |
|
711 | |||
712 | 1 | def remove_current_flows( |
|
713 | self, |
||
714 | current_path=None, |
||
715 | force=True, |
||
716 | sync=True, |
||
717 | return_path=False |
||
718 | ) -> dict[str, int]: |
||
719 | """Remove all flows from current path or path intended for |
||
720 | current path if exists.""" |
||
721 | 1 | switches, old_path_dict = set(), {} |
|
722 | 1 | current_path = self.current_path if not current_path else current_path |
|
723 | 1 | if not current_path and not self.is_intra_switch(): |
|
724 | 1 | return {} |
|
725 | |||
726 | 1 | if return_path: |
|
727 | 1 | for link in self.current_path: |
|
728 | 1 | s_vlan = link.metadata.get("s_vlan") |
|
729 | 1 | if s_vlan: |
|
730 | 1 | old_path_dict[link.id] = s_vlan.value |
|
731 | |||
732 | 1 | for link in current_path: |
|
733 | 1 | switches.add(link.endpoint_a.switch.id) |
|
734 | 1 | switches.add(link.endpoint_b.switch.id) |
|
735 | 1 | switches.add(self.uni_a.interface.switch.id) |
|
736 | 1 | switches.add(self.uni_z.interface.switch.id) |
|
737 | 1 | flow_mods = { |
|
738 | "switches": list(switches), |
||
739 | "flows": [{ |
||
740 | "cookie": self.get_cookie(), |
||
741 | "cookie_mask": int(0xffffffffffffffff), |
||
742 | "owner": "mef_eline", |
||
743 | }] |
||
744 | } |
||
745 | |||
746 | 1 | try: |
|
747 | 1 | self._send_flow_mods(flow_mods, "delete", force=force) |
|
748 | 1 | except FlowModException as err: |
|
749 | 1 | log.error(f"Error deleting {self} current_path flows, {err}") |
|
750 | |||
751 | 1 | try: |
|
752 | 1 | current_path.make_vlans_available(self._controller) |
|
753 | 1 | except KytosTagError as err: |
|
754 | 1 | log.error(f"Error removing {self} current_path: {err}") |
|
755 | 1 | self.current_path = Path([]) |
|
756 | 1 | self.deactivate() |
|
757 | 1 | if sync: |
|
758 | 1 | self.sync() |
|
759 | 1 | return old_path_dict |
|
760 | |||
761 | 1 | def remove_path_flows( |
|
762 | self, path=None, force=True |
||
763 | ) -> dict[str, list[dict]]: |
||
764 | """Remove all flows from path, and return the removed flows.""" |
||
765 | 1 | dpid_flows_match: dict[str, dict] = defaultdict(lambda: {"flows": []}) |
|
766 | 1 | out_flows: dict[str, list[dict]] = defaultdict(list) |
|
767 | |||
768 | 1 | if not path: |
|
769 | 1 | return dpid_flows_match |
|
770 | |||
771 | 1 | try: |
|
772 | 1 | nni_flows = self._prepare_nni_flows(path) |
|
773 | # pylint: disable=broad-except |
||
774 | except Exception: |
||
775 | err = traceback.format_exc().replace("\n", ", ") |
||
776 | log.error(f"Fail to remove NNI failover flows for {self}: {err}") |
||
777 | nni_flows = {} |
||
778 | |||
779 | 1 | for dpid, flows in nni_flows.items(): |
|
780 | 1 | for flow in flows: |
|
781 | 1 | flow_mod = { |
|
782 | "cookie": flow["cookie"], |
||
783 | "match": flow["match"], |
||
784 | "owner": "mef_eline", |
||
785 | "cookie_mask": int(0xffffffffffffffff) |
||
786 | } |
||
787 | 1 | dpid_flows_match[dpid]["flows"].append(flow_mod) |
|
788 | 1 | out_flows[dpid].append(flow_mod) |
|
789 | |||
790 | 1 | try: |
|
791 | 1 | uni_flows = self._prepare_uni_flows(path, skip_in=True) |
|
792 | # pylint: disable=broad-except |
||
793 | except Exception: |
||
794 | err = traceback.format_exc().replace("\n", ", ") |
||
795 | log.error(f"Fail to remove UNI failover flows for {self}: {err}") |
||
796 | uni_flows = {} |
||
797 | |||
798 | 1 | for dpid, flows in uni_flows.items(): |
|
799 | 1 | for flow in flows: |
|
800 | 1 | flow_mod = { |
|
801 | "cookie": flow["cookie"], |
||
802 | "match": flow["match"], |
||
803 | "owner": "mef_eline", |
||
804 | "cookie_mask": int(0xffffffffffffffff) |
||
805 | } |
||
806 | 1 | dpid_flows_match[dpid]["flows"].append(flow_mod) |
|
807 | 1 | out_flows[dpid].append(flow_mod) |
|
808 | |||
809 | 1 | try: |
|
810 | 1 | self._send_flow_mods( |
|
811 | dpid_flows_match, 'delete', force=force, by_switch=True |
||
812 | ) |
||
813 | 1 | except FlowModException as err: |
|
814 | 1 | log.error( |
|
815 | f"Error deleting {self} path flows, path:{path}, error={err}" |
||
816 | ) |
||
817 | |||
818 | 1 | try: |
|
819 | 1 | path.make_vlans_available(self._controller) |
|
820 | except KytosTagError as err: |
||
821 | log.error(f"Error removing {self} path: {err}") |
||
822 | |||
823 | 1 | return out_flows |
|
824 | |||
825 | 1 | @staticmethod |
|
826 | 1 | def links_zipped(path=None): |
|
827 | """Return an iterator which yields pairs of links in order.""" |
||
828 | 1 | if not path: |
|
829 | 1 | return [] |
|
830 | 1 | return zip(path[:-1], path[1:]) |
|
831 | |||
832 | 1 | def should_deploy(self, path=None): |
|
833 | """Verify if the circuit should be deployed.""" |
||
834 | 1 | if not path: |
|
835 | 1 | log.debug("Path is empty.") |
|
836 | 1 | return False |
|
837 | |||
838 | 1 | if not self.is_enabled(): |
|
839 | 1 | log.debug(f"{self} is disabled.") |
|
840 | 1 | return False |
|
841 | |||
842 | 1 | if not self.is_active(): |
|
843 | 1 | log.debug(f"{self} will be deployed.") |
|
844 | 1 | return True |
|
845 | |||
846 | 1 | return False |
|
847 | |||
848 | 1 | @staticmethod |
|
849 | 1 | def is_uni_interface_active( |
|
850 | *interfaces: Interface |
||
851 | ) -> tuple[bool, dict]: |
||
852 | """Whether UNIs are active and their status & status_reason.""" |
||
853 | 1 | active = True |
|
854 | 1 | bad_interfaces = [ |
|
855 | interface |
||
856 | for interface in interfaces |
||
857 | if interface.status != EntityStatus.UP |
||
858 | ] |
||
859 | 1 | if bad_interfaces: |
|
860 | 1 | active = False |
|
861 | 1 | interfaces = bad_interfaces |
|
862 | 1 | return active, { |
|
863 | interface.id: { |
||
864 | 'status': interface.status.value, |
||
865 | 'status_reason': interface.status_reason, |
||
866 | } |
||
867 | for interface in interfaces |
||
868 | } |
||
869 | |||
870 | 1 | def try_to_activate(self) -> bool: |
|
871 | """Try to activate the EVC.""" |
||
872 | 1 | if self.is_intra_switch(): |
|
873 | 1 | return self._try_to_activate_intra_evc() |
|
874 | 1 | return self._try_to_activate_inter_evc() |
|
875 | |||
876 | 1 | def _try_to_activate_intra_evc(self) -> bool: |
|
877 | """Try to activate intra EVC.""" |
||
878 | 1 | intf_a, intf_z = self.uni_a.interface, self.uni_z.interface |
|
879 | 1 | is_active, reason = self.is_uni_interface_active(intf_a, intf_z) |
|
880 | 1 | if not is_active: |
|
881 | 1 | raise ActivationError( |
|
882 | f"Won't be able to activate {self} due to UNIs: {reason}" |
||
883 | ) |
||
884 | 1 | self.activate() |
|
885 | 1 | return True |
|
886 | |||
887 | 1 | def _try_to_activate_inter_evc(self) -> bool: |
|
888 | """Try to activate inter EVC.""" |
||
889 | 1 | intf_a, intf_z = self.uni_a.interface, self.uni_z.interface |
|
890 | 1 | is_active, reason = self.is_uni_interface_active(intf_a, intf_z) |
|
891 | 1 | if not is_active: |
|
892 | 1 | raise ActivationError( |
|
893 | f"Won't be able to activate {self} due to UNIs: {reason}" |
||
894 | ) |
||
895 | 1 | if self.current_path.status != EntityStatus.UP: |
|
896 | 1 | raise ActivationError( |
|
897 | f"Won't be able to activate {self} due to current_path " |
||
898 | f"status {self.current_path.status}" |
||
899 | ) |
||
900 | 1 | self.activate() |
|
901 | 1 | return True |
|
902 | |||
903 | # pylint: disable=too-many-branches, too-many-statements |
||
904 | 1 | def deploy_to_path(self, path=None, old_path_dict: dict = None): |
|
905 | """Install the flows for this circuit. |
||
906 | |||
907 | Procedures to deploy: |
||
908 | |||
909 | 0. Remove current flows installed |
||
910 | 1. Decide if will deploy "path" or discover a new path |
||
911 | 2. Choose vlan |
||
912 | 3. Install NNI flows |
||
913 | 4. Install UNI flows |
||
914 | 5. Activate |
||
915 | 6. Update current_path |
||
916 | 7. Update links caches(primary, current, backup) |
||
917 | |||
918 | """ |
||
919 | 1 | self.remove_current_flows(sync=False) |
|
920 | 1 | use_path = path or Path([]) |
|
921 | 1 | if not old_path_dict: |
|
922 | 1 | old_path_dict = {} |
|
923 | 1 | tag_errors = [] |
|
924 | 1 | if self.should_deploy(use_path): |
|
925 | 1 | try: |
|
926 | 1 | use_path.choose_vlans(self._controller, old_path_dict) |
|
927 | 1 | except KytosNoTagAvailableError as e: |
|
928 | 1 | tag_errors.append(str(e)) |
|
929 | 1 | use_path = None |
|
930 | else: |
||
931 | 1 | for use_path in self.discover_new_paths(): |
|
932 | 1 | if use_path is None: |
|
933 | continue |
||
934 | 1 | try: |
|
935 | 1 | use_path.choose_vlans(self._controller, old_path_dict) |
|
936 | 1 | break |
|
937 | 1 | except KytosNoTagAvailableError as e: |
|
938 | 1 | tag_errors.append(str(e)) |
|
939 | else: |
||
940 | 1 | use_path = None |
|
941 | |||
942 | 1 | try: |
|
943 | 1 | if use_path: |
|
944 | 1 | self._install_flows(use_path) |
|
945 | 1 | elif self.is_intra_switch(): |
|
946 | 1 | use_path = Path() |
|
947 | 1 | self._install_direct_uni_flows() |
|
948 | else: |
||
949 | 1 | msg = f"{self} was not deployed. No available path was found." |
|
950 | 1 | if tag_errors: |
|
951 | 1 | msg = self.add_tag_errors(msg, tag_errors) |
|
952 | 1 | log.error(msg) |
|
953 | else: |
||
954 | 1 | log.warning(msg) |
|
955 | 1 | return False |
|
956 | 1 | except EVCPathNotInstalled as err: |
|
957 | 1 | log.error( |
|
958 | f"Error deploying EVC {self} when calling flow_manager: {err}" |
||
959 | ) |
||
960 | 1 | self.remove_current_flows(use_path, sync=True) |
|
961 | 1 | return False |
|
962 | |||
963 | 1 | self.current_path = use_path |
|
964 | 1 | msg = f"{self} was deployed." |
|
965 | 1 | try: |
|
966 | 1 | self.try_to_activate() |
|
967 | except ActivationError as exc: |
||
968 | msg = f"{msg} {str(exc)}" |
||
969 | 1 | self.sync() |
|
970 | 1 | log.info(msg) |
|
971 | 1 | return True |
|
972 | |||
973 | 1 | def try_setup_failover_path( |
|
974 | self, |
||
975 | wait=settings.DEPLOY_EVCS_INTERVAL, |
||
976 | warn_if_not_path=True |
||
977 | ): |
||
978 | """Try setup failover_path whenever possible.""" |
||
979 | 1 | if ( |
|
980 | self.failover_path or not self.current_path |
||
981 | or not self.is_active() |
||
982 | ): |
||
983 | 1 | return |
|
984 | 1 | if (now() - self.affected_by_link_at).seconds >= wait: |
|
985 | 1 | with self.lock: |
|
986 | 1 | self.setup_failover_path(warn_if_not_path) |
|
987 | |||
988 | # pylint: disable=too-many-statements |
||
989 | 1 | def setup_failover_path(self, warn_if_not_path=True): |
|
990 | """Install flows for the failover path of this EVC. |
||
991 | |||
992 | Procedures to deploy: |
||
993 | |||
994 | 0. Remove flows currently installed for failover_path (if any) |
||
995 | 1. Discover a disjoint path from current_path |
||
996 | 2. Choose vlans |
||
997 | 3. Install NNI flows |
||
998 | 4. Install UNI egress flows |
||
999 | 5. Update failover_path |
||
1000 | """ |
||
1001 | # Intra-switch EVCs have no failover_path |
||
1002 | 1 | if self.is_intra_switch(): |
|
1003 | 1 | return False |
|
1004 | |||
1005 | # For now, only set up the failover path for fully dynamic EVCs |
||
1006 | 1 | if not self.is_eligible_for_failover_path(): |
|
1007 | 1 | return False |
|
1008 | |||
1009 | 1 | out_new_flows: dict[str, list[dict]] = {} |
|
1010 | 1 | reason = "" |
|
1011 | 1 | tag_errors = [] |
|
1012 | 1 | out_removed_flows = self.remove_path_flows(self.failover_path) |
|
1013 | 1 | self.failover_path = Path([]) |
|
1014 | |||
1015 | 1 | for use_path in self.get_failover_path_candidates(): |
|
1016 | 1 | if not use_path: |
|
1017 | 1 | continue |
|
1018 | 1 | try: |
|
1019 | 1 | use_path.choose_vlans(self._controller) |
|
1020 | 1 | break |
|
1021 | 1 | except KytosNoTagAvailableError as e: |
|
1022 | 1 | tag_errors.append(str(e)) |
|
1023 | else: |
||
1024 | 1 | use_path = Path([]) |
|
1025 | 1 | reason = "No available path was found" |
|
1026 | |||
1027 | 1 | try: |
|
1028 | 1 | if use_path: |
|
1029 | 1 | out_new_flows = self._install_flows( |
|
1030 | use_path, skip_in=True |
||
1031 | ) |
||
1032 | 1 | except EVCPathNotInstalled as err: |
|
1033 | 1 | reason = "Error deploying failover path" |
|
1034 | 1 | log.error( |
|
1035 | f"{reason} for {self}. FlowManager error: {err}" |
||
1036 | ) |
||
1037 | 1 | _rmed_flows = self.remove_path_flows(use_path) |
|
1038 | 1 | out_removed_flows = merge_flow_dicts( |
|
1039 | out_removed_flows, _rmed_flows |
||
1040 | ) |
||
1041 | 1 | use_path = Path([]) |
|
1042 | |||
1043 | 1 | self.failover_path = use_path |
|
1044 | 1 | self.sync() |
|
1045 | |||
1046 | 1 | if out_new_flows or out_removed_flows: |
|
1047 | 1 | emit_event(self._controller, "failover_deployed", content={ |
|
1048 | self.id: map_evc_event_content( |
||
1049 | self, |
||
1050 | flows=deepcopy(out_new_flows), |
||
1051 | removed_flows=deepcopy(out_removed_flows), |
||
1052 | error_reason=reason, |
||
1053 | current_path=self.current_path.as_dict(), |
||
1054 | ) |
||
1055 | }) |
||
1056 | |||
1057 | 1 | if not use_path: |
|
1058 | 1 | msg = f"Failover path for {self} was not deployed: {reason}." |
|
1059 | 1 | if tag_errors: |
|
1060 | 1 | msg = self.add_tag_errors(msg, tag_errors) |
|
1061 | 1 | log.error(msg) |
|
1062 | 1 | elif warn_if_not_path: |
|
1063 | 1 | log.warning(msg) |
|
1064 | 1 | return False |
|
1065 | 1 | log.info(f"Failover path for {self} was deployed.") |
|
1066 | 1 | return True |
|
1067 | |||
1068 | 1 | @staticmethod |
|
1069 | 1 | def add_tag_errors(msg: str, tag_errors: list): |
|
1070 | """Add to msg the tag errors ecountered when chossing path.""" |
||
1071 | 1 | path = ['path', 'paths'] |
|
1072 | 1 | was = ['was', 'were'] |
|
1073 | 1 | message = ['message', 'messages'] |
|
1074 | |||
1075 | # Choose either singular(0) or plural(1) words |
||
1076 | 1 | n = 1 |
|
1077 | 1 | if len(tag_errors) == 1: |
|
1078 | 1 | n = 0 |
|
1079 | |||
1080 | 1 | msg += f" {len(tag_errors)} {path[n]} {was[n]} rejected" |
|
1081 | 1 | msg += f" with {message[n]}: {tag_errors}" |
|
1082 | 1 | return msg |
|
1083 | |||
1084 | 1 | def get_failover_flows(self): |
|
1085 | """Return the flows needed to make the failover path active, i.e. the |
||
1086 | flows for ingress forwarding. |
||
1087 | |||
1088 | Return: |
||
1089 | dict: A dict of flows indexed by the switch_id will be returned, or |
||
1090 | an empty dict if no failover_path is available. |
||
1091 | """ |
||
1092 | 1 | if not self.failover_path: |
|
1093 | 1 | return {} |
|
1094 | 1 | return self._prepare_uni_flows(self.failover_path, skip_out=True) |
|
1095 | |||
1096 | # pylint: disable=too-many-branches |
||
1097 | 1 | def _prepare_direct_uni_flows(self): |
|
1098 | """Prepare flows connecting two UNIs for intra-switch EVC.""" |
||
1099 | 1 | vlan_a = self._get_value_from_uni_tag(self.uni_a) |
|
1100 | 1 | vlan_z = self._get_value_from_uni_tag(self.uni_z) |
|
1101 | |||
1102 | 1 | flow_mod_az = self._prepare_flow_mod( |
|
1103 | self.uni_a.interface, self.uni_z.interface, |
||
1104 | self.queue_id, vlan_a |
||
1105 | ) |
||
1106 | 1 | flow_mod_za = self._prepare_flow_mod( |
|
1107 | self.uni_z.interface, self.uni_a.interface, |
||
1108 | self.queue_id, vlan_z |
||
1109 | ) |
||
1110 | |||
1111 | 1 | if not isinstance(vlan_z, list) and vlan_z not in self.special_cases: |
|
|||
1112 | 1 | flow_mod_az["actions"].insert( |
|
1113 | 0, {"action_type": "set_vlan", "vlan_id": vlan_z} |
||
1114 | ) |
||
1115 | 1 | if not vlan_a: |
|
1116 | 1 | flow_mod_az["actions"].insert( |
|
1117 | 0, {"action_type": "push_vlan", "tag_type": "c"} |
||
1118 | ) |
||
1119 | 1 | if vlan_a == 0: |
|
1120 | 1 | flow_mod_za["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1121 | 1 | elif vlan_a == 0 and vlan_z == "4096/4096": |
|
1122 | 1 | flow_mod_za["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1123 | |||
1124 | 1 | if not isinstance(vlan_a, list) and vlan_a not in self.special_cases: |
1125 | 1 | flow_mod_za["actions"].insert( |
|
1126 | 0, {"action_type": "set_vlan", "vlan_id": vlan_a} |
||
1127 | ) |
||
1128 | 1 | if not vlan_z: |
|
1129 | 1 | flow_mod_za["actions"].insert( |
|
1130 | 0, {"action_type": "push_vlan", "tag_type": "c"} |
||
1131 | ) |
||
1132 | 1 | if vlan_z == 0: |
|
1133 | 1 | flow_mod_az["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1134 | 1 | elif vlan_a == "4096/4096" and vlan_z == 0: |
|
1135 | 1 | flow_mod_az["actions"].insert(0, {"action_type": "pop_vlan"}) |
|
1136 | |||
1137 | 1 | flows = [] |
|
1138 | 1 | if isinstance(vlan_a, list): |
|
1139 | 1 | for mask_a in vlan_a: |
|
1140 | 1 | flow_aux = deepcopy(flow_mod_az) |
|
1141 | 1 | flow_aux["match"]["dl_vlan"] = mask_a |
|
1142 | 1 | flows.append(flow_aux) |
|
1143 | else: |
||
1144 | 1 | if vlan_a is not None: |
|
1145 | 1 | flow_mod_az["match"]["dl_vlan"] = vlan_a |
|
1146 | 1 | flows.append(flow_mod_az) |
|
1147 | |||
1148 | 1 | if isinstance(vlan_z, list): |
|
1149 | 1 | for mask_z in vlan_z: |
|
1150 | 1 | flow_aux = deepcopy(flow_mod_za) |
|
1151 | 1 | flow_aux["match"]["dl_vlan"] = mask_z |
|
1152 | 1 | flows.append(flow_aux) |
|
1153 | else: |
||
1154 | 1 | if vlan_z is not None: |
|
1155 | 1 | flow_mod_za["match"]["dl_vlan"] = vlan_z |
|
1156 | 1 | flows.append(flow_mod_za) |
|
1157 | 1 | return ( |
|
1158 | self.uni_a.interface.switch.id, flows |
||
1159 | ) |
||
1160 | |||
1161 | 1 | def _install_direct_uni_flows(self): |
|
1162 | """Install flows connecting two UNIs. |
||
1163 | |||
1164 | This case happens when the circuit is between UNIs in the |
||
1165 | same switch. |
||
1166 | """ |
||
1167 | 1 | (dpid, flows) = self._prepare_direct_uni_flows() |
|
1168 | 1 | flow_mods = {"switches": [dpid], "flows": flows} |
|
1169 | 1 | try: |
|
1170 | 1 | self._send_flow_mods(flow_mods, "install") |
|
1171 | 1 | except FlowModException as err: |
|
1172 | 1 | raise EVCPathNotInstalled(str(err)) from err |
|
1173 | |||
1174 | 1 | def _prepare_nni_flows(self, path=None): |
|
1175 | """Prepare NNI flows.""" |
||
1176 | 1 | nni_flows = OrderedDict() |
|
1177 | 1 | previous = self.uni_a.interface.switch.dpid |
|
1178 | 1 | for incoming, outcoming in self.links_zipped(path): |
|
1179 | 1 | in_vlan = incoming.get_metadata("s_vlan").value |
|
1180 | 1 | out_vlan = outcoming.get_metadata("s_vlan").value |
|
1181 | 1 | in_endpoint = self.get_endpoint_by_id(incoming, previous, ne) |
|
1182 | 1 | out_endpoint = self.get_endpoint_by_id( |
|
1183 | outcoming, in_endpoint.switch.id, eq |
||
1184 | ) |
||
1185 | |||
1186 | 1 | flows = [] |
|
1187 | # Flow for one direction |
||
1188 | 1 | flows.append( |
|
1189 | self._prepare_nni_flow( |
||
1190 | in_endpoint, |
||
1191 | out_endpoint, |
||
1192 | in_vlan, |
||
1193 | out_vlan, |
||
1194 | queue_id=self.queue_id, |
||
1195 | ) |
||
1196 | ) |
||
1197 | |||
1198 | # Flow for the other direction |
||
1199 | 1 | flows.append( |
|
1200 | self._prepare_nni_flow( |
||
1201 | out_endpoint, |
||
1202 | in_endpoint, |
||
1203 | out_vlan, |
||
1204 | in_vlan, |
||
1205 | queue_id=self.queue_id, |
||
1206 | ) |
||
1207 | ) |
||
1208 | 1 | previous = in_endpoint.switch.id |
|
1209 | 1 | nni_flows[in_endpoint.switch.id] = flows |
|
1210 | 1 | return nni_flows |
|
1211 | |||
1212 | 1 | def _install_flows( |
|
1213 | self, path=None, skip_in=False, skip_out=False |
||
1214 | ) -> dict[str, list[dict]]: |
||
1215 | """Install uni and nni flows""" |
||
1216 | 1 | flows_by_switch = defaultdict(lambda: {"flows": []}) |
|
1217 | 1 | new_flows = defaultdict(list) |
|
1218 | 1 | for dpid, flows in self._prepare_nni_flows(path).items(): |
|
1219 | 1 | flows_by_switch[dpid]["flows"].extend(flows) |
|
1220 | 1 | new_flows[dpid].extend(flows) |
|
1221 | 1 | for dpid, flows in self._prepare_uni_flows( |
|
1222 | path, skip_in, skip_out |
||
1223 | ).items(): |
||
1224 | 1 | flows_by_switch[dpid]["flows"].extend(flows) |
|
1225 | 1 | new_flows[dpid].extend(flows) |
|
1226 | |||
1227 | 1 | try: |
|
1228 | 1 | self._send_flow_mods(flows_by_switch, "install", by_switch=True) |
|
1229 | 1 | except FlowModException as err: |
|
1230 | 1 | raise EVCPathNotInstalled(str(err)) from err |
|
1231 | |||
1232 | 1 | return new_flows |
|
1233 | |||
1234 | 1 | @staticmethod |
|
1235 | 1 | def _get_value_from_uni_tag(uni: UNI): |
|
1236 | """Returns the value from tag. In case of any and untagged |
||
1237 | it should return 4096/4096 and 0 respectively""" |
||
1238 | 1 | special = {"any": "4096/4096", "untagged": 0} |
|
1239 | 1 | if uni.user_tag: |
|
1240 | 1 | value = uni.user_tag.value |
|
1241 | 1 | if isinstance(value, list): |
|
1242 | 1 | return uni.user_tag.mask_list |
|
1243 | 1 | return special.get(value, value) |
|
1244 | 1 | return None |
|
1245 | |||
1246 | # pylint: disable=too-many-locals |
||
1247 | 1 | def _prepare_uni_flows(self, path=None, skip_in=False, skip_out=False): |
|
1248 | """Prepare flows to install UNIs.""" |
||
1249 | 1 | uni_flows = {} |
|
1250 | 1 | if not path: |
|
1251 | log.info("install uni flows without path.") |
||
1252 | return uni_flows |
||
1253 | |||
1254 | # Determine VLANs |
||
1255 | 1 | in_vlan_a = self._get_value_from_uni_tag(self.uni_a) |
|
1256 | 1 | out_vlan_a = path[0].get_metadata("s_vlan").value |
|
1257 | |||
1258 | 1 | in_vlan_z = self._get_value_from_uni_tag(self.uni_z) |
|
1259 | 1 | out_vlan_z = path[-1].get_metadata("s_vlan").value |
|
1260 | |||
1261 | # Get endpoints from path |
||
1262 | 1 | endpoint_a = self.get_endpoint_by_id( |
|
1263 | path[0], self.uni_a.interface.switch.id, eq |
||
1264 | ) |
||
1265 | 1 | endpoint_z = self.get_endpoint_by_id( |
|
1266 | path[-1], self.uni_z.interface.switch.id, eq |
||
1267 | ) |
||
1268 | |||
1269 | # Flows for the first UNI |
||
1270 | 1 | flows_a = [] |
|
1271 | |||
1272 | # Flow for one direction, pushing the service tag |
||
1273 | 1 | if not skip_in: |
|
1274 | 1 | if isinstance(in_vlan_a, list): |
|
1275 | 1 | for in_mask_a in in_vlan_a: |
|
1276 | 1 | push_flow = self._prepare_push_flow( |
|
1277 | self.uni_a.interface, |
||
1278 | endpoint_a, |
||
1279 | in_mask_a, |
||
1280 | out_vlan_a, |
||
1281 | in_vlan_z, |
||
1282 | queue_id=self.queue_id, |
||
1283 | ) |
||
1284 | 1 | flows_a.append(push_flow) |
|
1285 | else: |
||
1286 | push_flow = self._prepare_push_flow( |
||
1287 | self.uni_a.interface, |
||
1288 | endpoint_a, |
||
1289 | in_vlan_a, |
||
1290 | out_vlan_a, |
||
1291 | in_vlan_z, |
||
1292 | queue_id=self.queue_id, |
||
1293 | ) |
||
1294 | flows_a.append(push_flow) |
||
1295 | |||
1296 | # Flow for the other direction, popping the service tag |
||
1297 | 1 | if not skip_out: |
|
1298 | 1 | pop_flow = self._prepare_pop_flow( |
|
1299 | endpoint_a, |
||
1300 | self.uni_a.interface, |
||
1301 | out_vlan_a, |
||
1302 | queue_id=self.queue_id, |
||
1303 | ) |
||
1304 | 1 | flows_a.append(pop_flow) |
|
1305 | |||
1306 | 1 | uni_flows[self.uni_a.interface.switch.id] = flows_a |
|
1307 | |||
1308 | # Flows for the second UNI |
||
1309 | 1 | flows_z = [] |
|
1310 | |||
1311 | # Flow for one direction, pushing the service tag |
||
1312 | 1 | if not skip_in: |
|
1313 | 1 | if isinstance(in_vlan_z, list): |
|
1314 | 1 | for in_mask_z in in_vlan_z: |
|
1315 | 1 | push_flow = self._prepare_push_flow( |
|
1316 | self.uni_z.interface, |
||
1317 | endpoint_z, |
||
1318 | in_mask_z, |
||
1319 | out_vlan_z, |
||
1320 | in_vlan_a, |
||
1321 | queue_id=self.queue_id, |
||
1322 | ) |
||
1323 | 1 | flows_z.append(push_flow) |
|
1324 | else: |
||
1325 | push_flow = self._prepare_push_flow( |
||
1326 | self.uni_z.interface, |
||
1327 | endpoint_z, |
||
1328 | in_vlan_z, |
||
1329 | out_vlan_z, |
||
1330 | in_vlan_a, |
||
1331 | queue_id=self.queue_id, |
||
1332 | ) |
||
1333 | flows_z.append(push_flow) |
||
1334 | |||
1335 | # Flow for the other direction, popping the service tag |
||
1336 | 1 | if not skip_out: |
|
1337 | 1 | pop_flow = self._prepare_pop_flow( |
|
1338 | endpoint_z, |
||
1339 | self.uni_z.interface, |
||
1340 | out_vlan_z, |
||
1341 | queue_id=self.queue_id, |
||
1342 | ) |
||
1343 | 1 | flows_z.append(pop_flow) |
|
1344 | |||
1345 | 1 | uni_flows[self.uni_z.interface.switch.id] = flows_z |
|
1346 | |||
1347 | 1 | return uni_flows |
|
1348 | |||
1349 | 1 | @staticmethod |
|
1350 | 1 | @retry( |
|
1351 | stop=stop_after_attempt(3), |
||
1352 | wait=wait_combine(wait_fixed(3), wait_random(min=2, max=7)), |
||
1353 | retry=retry_if_exception_type(FlowModException), |
||
1354 | before_sleep=before_sleep, |
||
1355 | reraise=True, |
||
1356 | ) |
||
1357 | 1 | def _send_flow_mods( |
|
1358 | data_content: dict, |
||
1359 | command="install", |
||
1360 | force=False, |
||
1361 | by_switch=False |
||
1362 | ): |
||
1363 | """Send a flow_mod list to a specific switch. |
||
1364 | |||
1365 | Args: |
||
1366 | data_content(dict): Switches and flows payload to be sent; its |
||
1367 | shape depends on the by_switch flag. |
||
1368 | command(str): 'install' (default) to add flows, 'delete' to remove them. |
||
1369 | force(bool): True to send via consistency check in case of errors. |
||
1370 | by_switch(bool): True to send to 'flows_by_switch' request instead. |
||
1371 | """ |
||
1372 | 1 | if by_switch: |
|
1373 | 1 | endpoint = f"{settings.MANAGER_URL}/flows_by_switch/?force={force}" |
|
1374 | else: |
||
1375 | 1 | endpoint = f"{settings.MANAGER_URL}/flows" |
|
1376 | 1 | data_content["force"] = force |
|
1377 | 1 | try: |
|
1378 | 1 | if command == "install": |
|
1379 | 1 | res = httpx.post(endpoint, json=data_content, timeout=30) |
|
1380 | 1 | elif command == "delete": |
|
1381 | 1 | res = httpx.request( |
|
1382 | "DELETE", endpoint, json=data_content, timeout=30 |
||
1383 | ) |
||
1384 | 1 | except httpx.RequestError as err: |
|
1385 | 1 | raise FlowModException(str(err)) from err |
|
1386 | 1 | if res.is_server_error or res.status_code >= 400: |
|
1387 | 1 | raise FlowModException(res.text) |
|
1388 | |||
1389 | 1 | def get_cookie(self): |
|
1390 | """Return the cookie integer from evc id.""" |
||
1391 | 1 | return int(self.id, 16) + (settings.COOKIE_PREFIX << 56) |
|
1392 | |||
1393 | 1 | @staticmethod |
|
1394 | 1 | def get_id_from_cookie(cookie): |
|
1395 | """Return the evc id given a cookie value.""" |
||
1396 | 1 | evc_id = cookie - (settings.COOKIE_PREFIX << 56) |
|
1397 | 1 | return f"{evc_id:x}".zfill(14) |
|
1398 | |||
1399 | 1 | def set_flow_table_group_id(self, flow_mod: dict, vlan) -> dict: |
|
1400 | """Set table_group and table_id""" |
||
1401 | 1 | table_group = "epl" if vlan is None else "evpl" |
|
1402 | 1 | flow_mod["table_group"] = table_group |
|
1403 | 1 | flow_mod["table_id"] = self.table_group[table_group] |
|
1404 | 1 | return flow_mod |
|
1405 | |||
1406 | 1 | @staticmethod |
|
1407 | 1 | def get_priority(vlan): |
|
1408 | """Return priority value depending on vlan value""" |
||
1409 | 1 | if isinstance(vlan, list): |
|
1410 | 1 | return settings.EVPL_SB_PRIORITY |
|
1411 | 1 | if vlan not in {None, "4096/4096", 0}: |
|
1412 | 1 | return settings.EVPL_SB_PRIORITY |
|
1413 | 1 | if vlan == 0: |
|
1414 | 1 | return settings.UNTAGGED_SB_PRIORITY |
|
1415 | 1 | if vlan == "4096/4096": |
|
1416 | 1 | return settings.ANY_SB_PRIORITY |
|
1417 | 1 | return settings.EPL_SB_PRIORITY |
|
1418 | |||
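To make the tag-kind to priority mapping above explicit, here is a condensed, equivalent sketch; the numeric values are placeholders, the real ones come from settings:

    EVPL_SB_PRIORITY, UNTAGGED_SB_PRIORITY = 20000, 20000  # placeholders
    ANY_SB_PRIORITY, EPL_SB_PRIORITY = 15000, 10000         # placeholders

    def priority_for(vlan):
        if isinstance(vlan, list):
            return EVPL_SB_PRIORITY        # TAG range masks
        if vlan not in {None, "4096/4096", 0}:
            return EVPL_SB_PRIORITY        # one specific tag
        if vlan == 0:
            return UNTAGGED_SB_PRIORITY    # untagged traffic
        if vlan == "4096/4096":
            return ANY_SB_PRIORITY         # match any tag
        return EPL_SB_PRIORITY             # no tag at all (EPL)

    assert priority_for(100) == EVPL_SB_PRIORITY
    assert priority_for(None) == EPL_SB_PRIORITY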
1419 | 1 | def _prepare_flow_mod(self, in_interface, out_interface, |
|
1420 | queue_id=None, vlan=True): |
||
1421 | """Prepare a common flow mod.""" |
||
1422 | 1 | default_actions = [ |
|
1423 | {"action_type": "output", "port": out_interface.port_number} |
||
1424 | ] |
||
1425 | 1 | queue_id = settings.QUEUE_ID if queue_id == -1 else queue_id |
|
1426 | 1 | if queue_id is not None: |
|
1427 | 1 | default_actions.append( |
|
1428 | {"action_type": "set_queue", "queue_id": queue_id} |
||
1429 | ) |
||
1430 | |||
1431 | 1 | flow_mod = { |
|
1432 | "match": {"in_port": in_interface.port_number}, |
||
1433 | "cookie": self.get_cookie(), |
||
1434 | "actions": default_actions, |
||
1435 | "owner": "mef_eline", |
||
1436 | } |
||
1437 | |||
1438 | 1 | self.set_flow_table_group_id(flow_mod, vlan) |
|
1439 | 1 | if self.sb_priority: |
|
1440 | 1 | flow_mod["priority"] = self.sb_priority |
|
1441 | else: |
||
1442 | 1 | flow_mod["priority"] = self.get_priority(vlan) |
|
1443 | 1 | return flow_mod |
|
1444 | |||
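For reference, a base flow mod produced by _prepare_flow_mod for a hypothetical EVC (in_port 1, out_port 2, queue_id 3, a specific client tag) would look roughly like this; the cookie, table_id and priority values are illustrative only:

    base_flow_mod = {
        "match": {"in_port": 1},
        "cookie": 0xAA0016A76AE61B2F,           # get_cookie() of the EVC id
        "actions": [
            {"action_type": "output", "port": 2},
            {"action_type": "set_queue", "queue_id": 3},
        ],
        "owner": "mef_eline",
        "table_group": "evpl",                  # "epl" when vlan is None
        "table_id": 0,                          # from self.table_group
        "priority": 20000,                      # sb_priority or get_priority(vlan)
    }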
1445 | 1 | def _prepare_nni_flow(self, *args, queue_id=None): |
|
1446 | """Create NNI flows.""" |
||
1447 | 1 | in_interface, out_interface, in_vlan, out_vlan = args |
|
1448 | 1 | flow_mod = self._prepare_flow_mod( |
|
1449 | in_interface, out_interface, queue_id |
||
1450 | ) |
||
1451 | 1 | flow_mod["match"]["dl_vlan"] = in_vlan |
|
1452 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": out_vlan} |
|
1453 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1454 | |||
1455 | 1 | return flow_mod |
|
1456 | |||
1457 | 1 | def _prepare_push_flow(self, *args, queue_id=None): |
|
1458 | """Prepare push flow. |
||
1459 | |||
1460 | Arguments: |
||
1461 | in_interface(Interface): Input interface. |
||
1462 | out_interface(Interface): Output interface. |
||
1463 | in_vlan(int,str,None): Input VLAN. |
||
1464 | out_vlan(int): Output service VLAN. |
||
1465 | new_c_vlan(int,str,list,None): New client VLAN. |
||
1466 | |||
1467 | Return: |
||
1468 | dict: A Python dictionary representing a FlowMod |
||
1469 | |||
1470 | """ |
||
1471 | # assign all arguments |
||
1472 | 1 | in_interface, out_interface, in_vlan, out_vlan, new_c_vlan = args |
|
1473 | 1 | vlan_pri = in_vlan if not isinstance(new_c_vlan, list) else new_c_vlan |
|
1474 | 1 | flow_mod = self._prepare_flow_mod( |
|
1475 | in_interface, out_interface, queue_id, vlan_pri |
||
1476 | ) |
||
1477 | # the service tag must always be pushed |
||
1478 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": out_vlan} |
|
1479 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1480 | |||
1481 | 1 | new_action = {"action_type": "push_vlan", "tag_type": "s"} |
|
1482 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1483 | |||
1484 | 1 | if in_vlan is not None: |
|
1485 | # if in_vlan is set, it must be included in the match |
||
1486 | 1 | flow_mod["match"]["dl_vlan"] = in_vlan |
|
1487 | |||
1488 | 1 | if (not isinstance(new_c_vlan, list) and in_vlan != new_c_vlan and |
|
1489 | new_c_vlan not in self.special_cases): |
||
1490 | # new_c_vlan is a specific tag different from in_vlan, a set action is required |
||
1491 | 1 | new_action = {"action_type": "set_vlan", "vlan_id": new_c_vlan} |
|
1492 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1493 | |||
1494 | 1 | if in_vlan not in self.special_cases and new_c_vlan == 0: |
|
1495 | # in_vlan is a specific tag but new_c_vlan is untagged (0), |
||
1496 | # so a pop action is required |
||
1497 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1498 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1499 | |||
1500 | 1 | elif in_vlan == "4096/4096" and new_c_vlan == 0: |
|
1501 | # in_vlan matches any tag but new_c_vlan is untagged (0), |
||
1502 | # so a pop action is required |
||
1503 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1504 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1505 | |||
1506 | 1 | elif (not in_vlan and |
|
1507 | (not isinstance(new_c_vlan, list) and |
||
1508 | new_c_vlan not in self.special_cases)): |
||
1509 | # in_vlan is not set and new_c_vlan is a specific tag, |
||
1510 | # so a client tag must be pushed before it is set |
||
1511 | 1 | new_action = {"action_type": "push_vlan", "tag_type": "c"} |
|
1512 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1513 | |||
1514 | 1 | return flow_mod |
|
1515 | |||
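Because each new action is inserted at index 0, the final action list reads in reverse insertion order. A sketch of the result for a hypothetical EVPL UNI where in_vlan=100, the service tag out_vlan=3000 and the remote client tag new_c_vlan=200, with no queue configured (port numbers are made up):

    # Rewrite the client tag, push the service tag, set it, then output.
    push_flow_match = {"in_port": 1, "dl_vlan": 100}
    push_flow_actions = [
        {"action_type": "set_vlan", "vlan_id": 200},    # new client tag
        {"action_type": "push_vlan", "tag_type": "s"},  # push service tag
        {"action_type": "set_vlan", "vlan_id": 3000},   # service VLAN value
        {"action_type": "output", "port": 2},           # forward on the path
    ]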
1516 | 1 | def _prepare_pop_flow( |
|
1517 | self, in_interface, out_interface, out_vlan, queue_id=None |
||
1518 | ): |
||
1519 | # pylint: disable=too-many-arguments |
||
1520 | """Prepare pop flow.""" |
||
1521 | 1 | flow_mod = self._prepare_flow_mod( |
|
1522 | in_interface, out_interface, queue_id |
||
1523 | ) |
||
1524 | 1 | flow_mod["match"]["dl_vlan"] = out_vlan |
|
1525 | 1 | new_action = {"action_type": "pop_vlan"} |
|
1526 | 1 | flow_mod["actions"].insert(0, new_action) |
|
1527 | 1 | return flow_mod |
|
1528 | |||
1529 | 1 | @staticmethod |
|
1530 | 1 | def run_bulk_sdntraces( |
|
1531 | uni_list: list[tuple[Interface, Union[str, int, None]]] |
||
1532 | ) -> dict: |
||
1533 | """Run SDN traces on control plane starting from EVC UNIs.""" |
||
1534 | 1 | endpoint = f"{settings.SDN_TRACE_CP_URL}/traces" |
|
1535 | 1 | data = [] |
|
1536 | 1 | for interface, tag_value in uni_list: |
|
1537 | 1 | data_uni = { |
|
1538 | "trace": { |
||
1539 | "switch": { |
||
1540 | "dpid": interface.switch.dpid, |
||
1541 | "in_port": interface.port_number, |
||
1542 | } |
||
1543 | } |
||
1544 | } |
||
1545 | 1 | if tag_value: |
|
1546 | 1 | uni_dl_vlan = map_dl_vlan(tag_value) |
|
1547 | 1 | if uni_dl_vlan: |
|
1548 | 1 | data_uni["trace"]["eth"] = { |
|
1549 | "dl_type": 0x8100, |
||
1550 | "dl_vlan": uni_dl_vlan, |
||
1551 | } |
||
1552 | 1 | data.append(data_uni) |
|
1553 | 1 | try: |
|
1554 | 1 | response = httpx.put(endpoint, json=data, timeout=30) |
|
1555 | 1 | except httpx.TimeoutException as exception: |
|
1556 | 1 | log.error(f"Request has timed out: {exception}") |
|
1557 | 1 | return {"result": []} |
|
1558 | 1 | if response.status_code >= 400: |
|
1559 | 1 | log.error(f"Failed to run sdntrace-cp: {response.text}") |
|
1560 | 1 | return {"result": []} |
|
1561 | 1 | return response.json() |
|
1562 | |||
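A sketch of the body run_bulk_sdntraces PUTs to sdntrace_cp's /traces endpoint for two UNIs, one with a usable tag and one without; the dpids and ports are made up:

    payload = [
        {   # tagged UNI: the trace starts with an 802.1Q header
            "trace": {
                "switch": {"dpid": "00:00:00:00:00:00:00:01", "in_port": 1},
                "eth": {"dl_type": 0x8100, "dl_vlan": 100},
            }
        },
        {   # untagged UNI (or a tag that map_dl_vlan() maps to a falsy value)
            "trace": {
                "switch": {"dpid": "00:00:00:00:00:00:00:02", "in_port": 2},
            }
        },
    ]
    # httpx.put(f"{SDN_TRACE_CP_URL}/traces", json=payload, timeout=30)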
1563 | # pylint: disable=too-many-return-statements, too-many-arguments |
||
1564 | 1 | @staticmethod |
|
1565 | 1 | def check_trace( |
|
1566 | evc_id: str, |
||
1567 | evc_name: str, |
||
1568 | tag_a: Union[None, int, str], |
||
1569 | tag_z: Union[None, int, str], |
||
1570 | interface_a: Interface, |
||
1571 | interface_z: Interface, |
||
1572 | current_path: list, |
||
1573 | trace_a: list, |
||
1574 | trace_z: list |
||
1575 | ) -> bool: |
||
1576 | """Auxiliar function to check an individual trace""" |
||
1577 | 1 | if ( |
|
1578 | len(trace_a) != len(current_path) + 1 |
||
1579 | or not compare_uni_out_trace(tag_z, interface_z, trace_a[-1]) |
||
1580 | ): |
||
1581 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1582 | f"Invalid trace from uni_a: {trace_a}") |
||
1583 | 1 | return False |
|
1584 | 1 | if ( |
|
1585 | len(trace_z) != len(current_path) + 1 |
||
1586 | or not compare_uni_out_trace(tag_a, interface_a, trace_z[-1]) |
||
1587 | ): |
||
1588 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1589 | f"Invalid trace from uni_z: {trace_z}") |
||
1590 | 1 | return False |
|
1591 | |||
1592 | 1 | if not current_path: |
|
1593 | return True |
||
1594 | |||
1595 | 1 | first_link, trace_path_begin, trace_path_end = current_path[0], [], [] |
|
1596 | 1 | if ( |
|
1597 | first_link.endpoint_a.switch.id == trace_a[0]["dpid"] |
||
1598 | ): |
||
1599 | 1 | trace_path_begin, trace_path_end = trace_a, trace_z |
|
1600 | 1 | elif ( |
|
1601 | first_link.endpoint_a.switch.id == trace_z[0]["dpid"] |
||
1602 | ): |
||
1603 | 1 | trace_path_begin, trace_path_end = trace_z, trace_a |
|
1604 | else: |
||
1605 | msg = ( |
||
1606 | f"first link {first_link} endpoint_a didn't match the first " |
||
1607 | f"step of trace_a {trace_a} or trace_z {trace_z}" |
||
1608 | ) |
||
1609 | log.warning(msg) |
||
1610 | return False |
||
1611 | |||
1612 | 1 | for link, trace1, trace2 in zip(current_path, |
|
1613 | trace_path_begin[1:], |
||
1614 | trace_path_end[:0:-1]): |
||
1615 | 1 | metadata_vlan = None |
|
1616 | 1 | if link.metadata: |
|
1617 | 1 | metadata_vlan = glom(link.metadata, 's_vlan.value') |
|
1618 | 1 | if compare_endpoint_trace( |
|
1619 | link.endpoint_a, |
||
1620 | metadata_vlan, |
||
1621 | trace2 |
||
1622 | ) is False: |
||
1623 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1624 | f"Invalid trace from uni_a: {trace_a}") |
||
1625 | 1 | return False |
|
1626 | 1 | if compare_endpoint_trace( |
|
1627 | link.endpoint_b, |
||
1628 | metadata_vlan, |
||
1629 | trace1 |
||
1630 | ) is False: |
||
1631 | 1 | log.warning(f"From EVC({evc_id}) named '{evc_name}'. " |
|
1632 | f"Invalid trace from uni_z: {trace_z}") |
||
1633 | 1 | return False |
|
1634 | |||
1635 | 1 | return True |
|
1636 | |||
1637 | 1 | @staticmethod |
|
1638 | 1 | def check_range(circuit, traces: list) -> bool: |
|
1639 | """Check traces when for UNI with TAGRange""" |
||
1640 | 1 | check = True |
|
1641 | 1 | for i, mask in enumerate(circuit.uni_a.user_tag.mask_list): |
|
1642 | 1 | trace_a = traces[i*2] |
|
1643 | 1 | trace_z = traces[i*2+1] |
|
1644 | 1 | check &= EVCDeploy.check_trace( |
|
1645 | circuit.id, circuit.name, |
||
1646 | mask, mask, |
||
1647 | circuit.uni_a.interface, |
||
1648 | circuit.uni_z.interface, |
||
1649 | circuit.current_path, |
||
1650 | trace_a, trace_z, |
||
1651 | ) |
||
1652 | 1 | return check |
|
1653 | |||
1654 | 1 | @staticmethod |
|
1655 | 1 | def check_list_traces(list_circuits: list) -> dict: |
|
1656 | """Check if current_path is deployed comparing with SDN traces.""" |
||
1657 | 1 | if not list_circuits: |
|
1658 | 1 | return {} |
|
1659 | 1 | uni_list = make_uni_list(list_circuits) |
|
1660 | 1 | traces = EVCDeploy.run_bulk_sdntraces(uni_list)["result"] |
|
1661 | |||
1662 | 1 | if not traces: |
|
1663 | 1 | return {} |
|
1664 | |||
1665 | 1 | try: |
|
1666 | 1 | circuits_checked = {} |
|
1667 | 1 | i = 0 |
|
1668 | 1 | for circuit in list_circuits: |
|
1669 | 1 | if isinstance(circuit.uni_a.user_tag, TAGRange): |
|
1670 | 1 | length = len(circuit.uni_a.user_tag.mask_list) |
|
1671 | 1 | circuits_checked[circuit.id] = EVCDeploy.check_range( |
|
1672 | circuit, traces[i:i+length*2] |
||
1673 | ) |
||
1674 | 1 | i += length*2 |
|
1675 | else: |
||
1676 | 1 | trace_a = traces[i] |
|
1677 | 1 | trace_z = traces[i+1] |
|
1678 | 1 | tag_a = None |
|
1679 | 1 | if circuit.uni_a.user_tag: |
|
1680 | 1 | tag_a = circuit.uni_a.user_tag.value |
|
1681 | 1 | tag_z = None |
|
1682 | 1 | if circuit.uni_z.user_tag: |
|
1683 | 1 | tag_z = circuit.uni_z.user_tag.value |
|
1684 | 1 | circuits_checked[circuit.id] = EVCDeploy.check_trace( |
|
1685 | circuit.id, circuit.name, |
||
1686 | tag_a, tag_z, |
||
1687 | circuit.uni_a.interface, |
||
1688 | circuit.uni_z.interface, |
||
1689 | circuit.current_path, |
||
1690 | trace_a, trace_z |
||
1691 | ) |
||
1692 | 1 | i += 2 |
|
1693 | 1 | except IndexError as err: |
|
1694 | 1 | log.error( |
|
1695 | f"Bulk sdntraces returned fewer items than expected." |
||
1696 | f"Error = {err}" |
||
1697 | ) |
||
1698 | 1 | return {} |
|
1699 | |||
1700 | 1 | return circuits_checked |
|
1701 | |||
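check_list_traces consumes the flat trace list in the same order the UNIs were submitted: two traces per plain circuit, and 2 * len(mask_list) traces per circuit whose uni_a carries a TAGRange. A small sketch of that bookkeeping with three hypothetical circuits, the second one holding a three-mask range:

    offsets, i = [], 0
    for n_masks in (None, 3, None):      # None means "no TAGRange on uni_a"
        length = 2 * n_masks if n_masks else 2
        offsets.append((i, i + length))  # slice of traces for this circuit
        i += length
    assert offsets == [(0, 2), (2, 8), (8, 10)]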
1702 | 1 | @staticmethod |
|
1703 | 1 | def get_endpoint_by_id( |
|
1704 | link: Link, |
||
1705 | id_: str, |
||
1706 | operator: Union[eq, ne] |
||
1707 | ) -> Interface: |
||
1708 | """Return endpoint from link |
||
1709 | either equal(eq) or not equal(ne) to id""" |
||
1710 | 1 | if operator(link.endpoint_a.switch.id, id_): |
|
1711 | 1 | return link.endpoint_a |
|
1712 | 1 | return link.endpoint_b |
|
1713 | |||
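A usage sketch of get_endpoint_by_id: with operator.eq it returns the endpoint sitting on the given switch, with operator.ne the endpoint on the opposite side. The Link/Interface objects below are simplified stand-ins, not the real kytos classes:

    from operator import eq, ne
    from types import SimpleNamespace

    link = SimpleNamespace(
        endpoint_a=SimpleNamespace(
            switch=SimpleNamespace(id="00:00:00:00:00:00:00:01"), name="s1-eth1"),
        endpoint_b=SimpleNamespace(
            switch=SimpleNamespace(id="00:00:00:00:00:00:00:02"), name="s2-eth1"),
    )

    def get_endpoint_by_id(link, id_, operator):
        if operator(link.endpoint_a.switch.id, id_):
            return link.endpoint_a
        return link.endpoint_b

    dpid1 = "00:00:00:00:00:00:00:01"
    assert get_endpoint_by_id(link, dpid1, eq).name == "s1-eth1"  # on dpid1
    assert get_endpoint_by_id(link, dpid1, ne).name == "s2-eth1"  # not on dpid1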
1714 | |||
1715 | 1 | class LinkProtection(EVCDeploy): |
|
1716 | """Class to handle link protection.""" |
||
1717 | |||
1718 | 1 | def is_affected_by_link(self, link=None): |
|
1719 | """Verify if the current path is affected by link down event.""" |
||
1720 | return self.current_path.is_affected_by_link(link) |
||
1721 | |||
1722 | 1 | def is_using_primary_path(self): |
|
1723 | """Verify if the current deployed path is self.primary_path.""" |
||
1724 | 1 | return self.current_path == self.primary_path |
|
1725 | |||
1726 | 1 | def is_using_backup_path(self): |
|
1727 | """Verify if the current deployed path is self.backup_path.""" |
||
1728 | 1 | return self.current_path == self.backup_path |
|
1729 | |||
1730 | 1 | def is_using_dynamic_path(self): |
|
1731 | """Verify if the current deployed path is dynamic.""" |
||
1732 | 1 | if ( |
|
1733 | self.current_path |
||
1734 | and not self.is_using_primary_path() |
||
1735 | and not self.is_using_backup_path() |
||
1736 | and self.current_path.status is EntityStatus.UP |
||
1737 | ): |
||
1738 | return True |
||
1739 | 1 | return False |
|
1740 | |||
1741 | 1 | def handle_link_up(self, link=None, interface=None): |
|
1742 | """Handle circuit when link up. |
||
1743 | |||
1744 | Args: |
||
1745 | link(Link): Link affected by link.up event. |
||
1746 | |||
1747 | """ |
||
1748 | 1 | condition_pairs = [ |
|
1749 | ( |
||
1750 | lambda me: me.is_using_primary_path(), |
||
1751 | lambda _: (True, 'nothing') |
||
1752 | ), |
||
1753 | ( |
||
1754 | lambda me: me.is_intra_switch(), |
||
1755 | lambda _: (True, 'nothing') |
||
1756 | ), |
||
1757 | ( |
||
1758 | lambda me: me.primary_path.is_affected_by_link(link), |
||
1759 | lambda me: (me.deploy_to_primary_path(), 'redeploy') |
||
1760 | ), |
||
1761 | # For this special case, it reached this point because interface |
||
1762 | # was previously confirmed to be a UNI and both UNIs are UP |
||
1763 | ( |
||
1764 | lambda me: (me.primary_path.status == EntityStatus.UP |
||
1765 | and interface), |
||
1766 | lambda me: (me.deploy_to_primary_path(), 'redeploy') |
||
1767 | ), |
||
1768 | ( |
||
1769 | lambda me: (me.backup_path.status == EntityStatus.UP |
||
1770 | and interface), |
||
1771 | lambda me: (me.deploy_to_backup_path(), 'redeploy') |
||
1772 | ), |
||
1773 | # We tried to deploy(primary_path) without success. |
||
1774 | # And in this case it is somehow up. Nothing to do. |
||
1775 | ( |
||
1776 | lambda me: me.is_using_backup_path(), |
||
1777 | lambda _: (True, 'nothing') |
||
1778 | ), |
||
1779 | ( |
||
1780 | lambda me: me.is_using_dynamic_path(), |
||
1781 | lambda _: (True, 'nothing') |
||
1782 | ), |
||
1783 | # In this case, probably the circuit is not being used and |
||
1784 | # we can move to backup |
||
1785 | ( |
||
1786 | lambda me: me.backup_path.is_affected_by_link(link), |
||
1787 | lambda me: (me.deploy_to_backup_path(), 'redeploy') |
||
1788 | ), |
||
1789 | # In this case, the circuit is not being used and we should |
||
1790 | # try a dynamic path |
||
1791 | ( |
||
1792 | lambda me: me.dynamic_backup_path and not me.is_active(), |
||
1793 | lambda me: (me.deploy_to_path(), 'redeploy') |
||
1794 | ) |
||
1795 | ] |
||
1796 | 1 | for predicate, action in condition_pairs: |
|
1797 | 1 | if not predicate(self): |
|
1798 | 1 | continue |
|
1799 | 1 | success, success_type = action(self) |
|
1800 | 1 | if success: |
|
1801 | 1 | if success_type == 'redeploy': |
|
1802 | 1 | emit_event( |
|
1803 | self._controller, |
||
1804 | "redeployed_link_up", |
||
1805 | content=map_evc_event_content(self) |
||
1806 | ) |
||
1807 | 1 | return True |
|
1808 | 1 | return False |
|
1809 | |||
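handle_link_up is a first-match dispatch table: predicates run in order, the first one that holds executes its action, and a 'redeploy' result additionally emits an event. A generic sketch of the pattern with made-up conditions:

    def dispatch(obj, condition_pairs):
        """Run the first action whose predicate holds; report what happened."""
        for predicate, action in condition_pairs:
            if not predicate(obj):
                continue
            success, success_type = action(obj)
            if success:
                return True, success_type
        return False, None

    pairs = [
        (lambda o: o["using_primary"], lambda o: (True, "nothing")),
        (lambda o: o["backup_up"], lambda o: (True, "redeploy")),
    ]
    assert dispatch({"using_primary": False, "backup_up": True}, pairs) == \
        (True, "redeploy")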
1810 | 1 | def handle_link_down(self): |
|
1811 | """Handle circuit when link down. |
||
1812 | |||
1813 | Returns: |
||
1814 | bool: True if the re-deploy was successful, otherwise False. |
||
1815 | |||
1816 | """ |
||
1817 | 1 | success = False |
|
1818 | 1 | if self.is_using_primary_path(): |
|
1819 | 1 | success = self.deploy_to_backup_path() |
|
1820 | 1 | elif self.is_using_backup_path(): |
|
1821 | 1 | success = self.deploy_to_primary_path() |
|
1822 | |||
1823 | 1 | if not success and self.dynamic_backup_path: |
|
1824 | 1 | success = self.deploy_to_path() |
|
1825 | |||
1826 | 1 | if success: |
|
1827 | 1 | log.debug(f"{self} deployed after link down.") |
|
1828 | else: |
||
1829 | 1 | self.remove_current_flows(sync=False) |
|
1830 | 1 | self.deactivate() |
|
1831 | 1 | self.sync() |
|
1832 | 1 | log.debug(f"Failed to re-deploy {self} after link down.") |
|
1833 | |||
1834 | 1 | return success |
|
1835 | |||
1836 | 1 | def are_unis_active(self) -> bool: |
|
1837 | """Determine whether this EVC should be active""" |
||
1838 | 1 | interface_a = self.uni_a.interface |
|
1839 | 1 | interface_z = self.uni_z.interface |
|
1840 | 1 | active, _ = self.is_uni_interface_active(interface_a, interface_z) |
|
1841 | 1 | return active |
|
1842 | |||
1843 | 1 | def try_to_handle_uni_as_link_up(self, interface: Interface) -> bool: |
|
1844 | """Try to handle UNI as link_up to trigger deployment.""" |
||
1845 | if ( |
||
1846 | self.current_path.status != EntityStatus.UP |
||
1847 | and not self.is_intra_switch() |
||
1848 | ): |
||
1849 | succeeded = self.handle_link_up(interface=interface) |
||
1850 | if succeeded: |
||
1851 | msg = ( |
||
1852 | f"Activated {self} due to successful " |
||
1853 | f"deployment triggered by {interface}" |
||
1854 | ) |
||
1855 | else: |
||
1856 | msg = ( |
||
1857 | f"Couldn't activate {self} due to unsuccessful " |
||
1858 | f"deployment triggered by {interface}" |
||
1859 | ) |
||
1860 | log.info(msg) |
||
1861 | return True |
||
1862 | return False |
||
1863 | |||
1864 | 1 | def handle_interface_link_up(self, interface: Interface): |
|
1865 | """ |
||
1866 | Handler for interface link_up events |
||
1867 | """ |
||
1868 | 1 | if not _does_uni_affect_evc(self, interface, "up"): |
|
1869 | 1 | return |
|
1870 | 1 | if self.try_to_handle_uni_as_link_up(interface): |
|
1871 | return |
||
1872 | |||
1873 | 1 | interface_dicts = { |
|
1874 | interface.id: { |
||
1875 | 'status': interface.status.value, |
||
1876 | 'status_reason': interface.status_reason, |
||
1877 | } |
||
1878 | for interface in (self.uni_a.interface, self.uni_z.interface) |
||
1879 | } |
||
1880 | 1 | try: |
|
1881 | 1 | self.try_to_activate() |
|
1882 | 1 | log.info( |
|
1883 | f"Activating {self}. Interfaces: " |
||
1884 | f"{interface_dicts}." |
||
1885 | ) |
||
1886 | 1 | emit_event(self._controller, "uni_active_updated", |
|
1887 | content=map_evc_event_content(self)) |
||
1888 | 1 | self.sync() |
|
1889 | except ActivationError as exc: |
||
1890 | # In this context, an ActivationError isn't expected since the |
||
1891 | # activation prerequisite states were checked, so it's handled as an error |
||
1892 | log.error(f"ActivationError: {str(exc)} when handling {interface}") |
||
1893 | |||
1894 | 1 | def handle_interface_link_down(self, interface): |
|
1895 | """ |
||
1896 | Handler for interface link_down events |
||
1897 | """ |
||
1898 | 1 | if not _does_uni_affect_evc(self, interface, "down"): |
|
1899 | 1 | return |
|
1900 | 1 | interface_dicts = { |
|
1901 | interface.id: { |
||
1902 | 'status': interface.status.value, |
||
1903 | 'status_reason': interface.status_reason, |
||
1904 | } |
||
1905 | for interface in (self.uni_a.interface, self.uni_z.interface) |
||
1906 | if interface.status != EntityStatus.UP |
||
1907 | } |
||
1908 | 1 | self.deactivate() |
|
1909 | 1 | log.info( |
|
1910 | f"Deactivating {self}. Interfaces: " |
||
1911 | f"{interface_dicts}." |
||
1912 | ) |
||
1913 | 1 | emit_event(self._controller, "uni_active_updated", |
|
1914 | content=map_evc_event_content(self)) |
||
1915 | 1 | self.sync() |
|
1916 | |||
1917 | |||
1918 | 1 | class EVC(LinkProtection): |
|
1919 | """Class that represents a E-Line Virtual Connection.""" |
||
1920 |