diff --git a/.gitignore b/.gitignore index 3a33e1c3..ed2c6c8f 100644 --- a/.gitignore +++ b/.gitignore @@ -150,3 +150,8 @@ fabric_cf/actor/test/*.graphml secrets/kafkacat2-ca1-signed.pem secrets/kafkacat1-ca1-signed.pem secrets/... +neo4j/RENC.graphml +neo4j/Network-dev.graphml +.DS_Store +*.log* +*.avsc diff --git a/Dockerfile-auth b/Dockerfile-auth index ec955fac..302680c0 100644 --- a/Dockerfile-auth +++ b/Dockerfile-auth @@ -1,7 +1,7 @@ FROM python:3.11.0 MAINTAINER Komal Thareja -ARG HANDLERS_VER=1.6.3 +ARG HANDLERS_VER=1.7.1 RUN mkdir -p /usr/src/app WORKDIR /usr/src/app @@ -11,6 +11,7 @@ EXPOSE 11000 RUN apt-get update RUN apt-get install cron -y +RUN apt-get install sshpass -y COPY docker-entrypoint.sh /usr/src/app/ COPY fabric_cf /usr/src/app/fabric_cf diff --git a/fabric_cf/__init__.py b/fabric_cf/__init__.py index 709fa443..1f6dc025 100644 --- a/fabric_cf/__init__.py +++ b/fabric_cf/__init__.py @@ -1,2 +1,2 @@ -__version__ = "1.6.2" +__version__ = "1.7.0" __VERSION__ = __version__ diff --git a/fabric_cf/actor/core/apis/abc_actor_container.py b/fabric_cf/actor/core/apis/abc_actor_container.py index 9c41c7a9..dbd997b7 100644 --- a/fabric_cf/actor/core/apis/abc_actor_container.py +++ b/fabric_cf/actor/core/apis/abc_actor_container.py @@ -152,3 +152,12 @@ def remove_actor_database(self, *, actor_name: str): Remove Actor Database @params actor_name: actor name """ + + @abstractmethod + def get_actor(self) -> ABCActorMixin: + """ + Return Actor + + @return Actor + """ + pass diff --git a/fabric_cf/actor/core/apis/abc_actor_management_object.py b/fabric_cf/actor/core/apis/abc_actor_management_object.py index 3e2abbbf..9e8a807f 100644 --- a/fabric_cf/actor/core/apis/abc_actor_management_object.py +++ b/fabric_cf/actor/core/apis/abc_actor_management_object.py @@ -26,9 +26,9 @@ from __future__ import annotations from abc import abstractmethod +from datetime import datetime from typing import TYPE_CHECKING, Tuple, Dict, List -from fabric_mb.message_bus.messages.poa_avro import PoaAvro from fabric_mb.message_bus.messages.result_avro import ResultAvro from fabric_mb.message_bus.messages.result_delegation_avro import ResultDelegationAvro from fabric_mb.message_bus.messages.result_poa_avro import ResultPoaAvro @@ -38,6 +38,7 @@ from fabric_mb.message_bus.messages.result_slice_avro import ResultSliceAvro from fabric_mb.message_bus.messages.result_string_avro import ResultStringAvro from fabric_mb.message_bus.messages.slice_avro import SliceAvro +from fim.user import GraphFormat from fabric_cf.actor.core.apis.abc_management_object import ABCManagementObject from fabric_cf.actor.core.container.maintenance import Site @@ -237,7 +238,8 @@ def get_sites(self, *, caller: AuthToken, site: str) -> ResultSitesAvro: def get_reservations(self, *, caller: AuthToken, states: List[int] = None, slice_id: ID = None, rid: ID = None, oidc_claim_sub: str = None, email: str = None, rid_list: List[str] = None, type: str = None, - site: str = None, node_id: str = None) -> ResultReservationAvro: + site: str = None, node_id: str = None, + host: str = None, ip_subnet: str = None) -> ResultReservationAvro: """ Get Reservations @param states states @@ -251,12 +253,16 @@ def get_reservations(self, *, caller: AuthToken, states: List[int] = None, @param node_id node id Obtains all reservations with error information in case of failure @param caller caller + @param host host + @param ip_subnet ip subnet + @return returns list of the reservations """ def get_slices(self, *, slice_id: ID, caller: AuthToken, slice_name: str = 
None, email: str = None, states: List[int] = None, project: str = None, limit: int = None, - offset: int = None, user_id: str = None) -> ResultSliceAvro: + offset: int = None, user_id: str = None, search: str = None, + exact_match: bool = False) -> ResultSliceAvro: """ Obtains all slices. @param slice_id slice id @@ -268,13 +274,73 @@ def get_slices(self, *, slice_id: ID, caller: AuthToken, slice_name: str = None, @param offset offset @param caller caller @param user_id user_id + @param search: search term applied + @param exact_match: Exact Match for Search term @return returns list of slices """ + @abstractmethod + def increment_metrics(self, *, project_id: str, oidc_sub: str, slice_count: int = 1) -> bool: + """ + Add or update metrics + + @param project_id project id + @param oidc_sub oidc sub + @param slice_count slice_count + + @return true or false + + @throws Exception in case of error + """ + + @abstractmethod + def get_metrics(self, *, project_id: str, oidc_sub: str, excluded_projects: List[str] = None) -> list: + """ + Get metrics + + @param project_id project id + @param oidc_sub oidc sub + @param excluded_projects excluded_projects + + @return list of metric information + + @throws Exception in case of error + """ + + def get_slice_count(self, *, caller: AuthToken, email: str = None, states: List[int] = None, + project: str = None, user_id: str = None, excluded_projects: List[str] = None) -> int: + """ + Obtains Slice count matching the filter criteria. + + @param email email + @param project project id + @param states slice states + @param caller caller + @param user_id user_id + @param excluded_projects excluded_projects + @return returns number of slices + """ + def remove_slice(self, *, slice_id: ID, caller: AuthToken) -> ResultAvro: """ Removes the specified slice @param slice_id slice id @param caller caller @return true for success; false otherwise + """ + + def build_broker_query_model(self, level_0_broker_query_model: str, level: int, + graph_format: GraphFormat = GraphFormat.GRAPHML, + start: datetime = None, end: datetime = None, includes: str = None, + excludes: str = None) -> str: + """ + Build the BQM Model using current usage + @param level_0_broker_query_model Capacity Model + @param level: level of details + @param graph_format: Graph Format + @param start: start time + @param end: end time + @param includes: comma separated lists of sites to include + @param excludes: comma separated lists of sites to exclude + @return BQM """ \ No newline at end of file diff --git a/fabric_cf/actor/core/apis/abc_actor_mixin.py b/fabric_cf/actor/core/apis/abc_actor_mixin.py index f6183c64..68453c72 100644 --- a/fabric_cf/actor/core/apis/abc_actor_mixin.py +++ b/fabric_cf/actor/core/apis/abc_actor_mixin.py @@ -38,6 +38,7 @@ from fabric_cf.actor.core.apis.abc_tick import ABCTick if TYPE_CHECKING: + from fabric_cf.actor.core.kernel.slice_state_machine import SliceState from fabric_cf.actor.core.apis.abc_actor_event import ABCActorEvent from fabric_cf.actor.core.apis.abc_actor_proxy import ABCActorProxy from fabric_cf.actor.core.apis.abc_base_plugin import ABCBasePlugin @@ -431,12 +432,13 @@ def register_slice(self, *, slice_object: ABCSlice): """ @abstractmethod - def modify_slice(self, *, slice_object: ABCSlice): + def modify_slice(self, *, slice_object: ABCSlice, new_state: SliceState): """ Modify the slice registered with the actor. 
Moves the slice into Modifying State Args: slice_object: slice_object + new_state: new_state Raises: Exception in case of error """ diff --git a/fabric_cf/actor/core/apis/abc_client_actor_management_object.py b/fabric_cf/actor/core/apis/abc_client_actor_management_object.py index 79b03420..d7e9a14b 100644 --- a/fabric_cf/actor/core/apis/abc_client_actor_management_object.py +++ b/fabric_cf/actor/core/apis/abc_client_actor_management_object.py @@ -128,7 +128,9 @@ def update_broker(self, *, broker: ProxyAvro, caller: AuthToken) -> ResultAvro: @abstractmethod def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str, - level: int, graph_format: GraphFormat) -> ResultBrokerQueryModelAvro: + level: int, graph_format: GraphFormat, start: datetime = None, + end: datetime = None, includes: str = None, + excludes: str = None) -> ResultBrokerQueryModelAvro: """ Get Pool Info @param broker : broker ID @@ -136,7 +138,12 @@ def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str @param id_token: str @param level: level of details @param graph_format: Graph Format - @return pool information + @param start: start time + @param end: end time + @param includes: comma separated lists of sites to include + @param excludes: comma separated lists of sites to exclude + + @return resource information """ @abstractmethod diff --git a/fabric_cf/actor/core/apis/abc_database.py b/fabric_cf/actor/core/apis/abc_database.py index d972f28d..fd6a0245 100644 --- a/fabric_cf/actor/core/apis/abc_database.py +++ b/fabric_cf/actor/core/apis/abc_database.py @@ -159,9 +159,9 @@ def update_slice(self, *, slice_object: ABCSlice): @abstractmethod def get_reservations(self, *, slice_id: ID = None, graph_node_id: str = None, project_id: str = None, - email: str = None, oidc_sub: str = None, rid: ID = None, - states: list[int] = None, site: str = None, - rsv_type: list[str] = None) -> List[ABCReservationMixin]: + email: str = None, oidc_sub: str = None, rid: ID = None, states: list[int] = None, + site: str = None, rsv_type: list[str] = None, start: datetime = None, + end: datetime = None, ip_subnet: str = None, host: str = None) -> List[ABCReservationMixin]: """ Retrieves the reservations. @@ -172,13 +172,22 @@ def get_reservations(self, *, slice_id: ID = None, graph_node_id: str = None, pr @abstractmethod def get_components(self, *, node_id: str, states: list[int], rsv_type: list[str], component: str = None, - bdf: str = None) -> Dict[str, List[str]]: + bdf: str = None, start: datetime = None, end: datetime = None, + excludes: List[str] = None) -> Dict[str, List[str]]: """ - Retrieves the components. + Returns components matching the search criteria + @param node_id: Worker Node ID to which components belong + @param states: list of states used to find reservations + @param rsv_type: type of reservations + @param component: component name + @param bdf: Component's PCI address + @param start: start time + @param end: end time + @param excludes: Excludes the list of reservations - @return list of components + NOTE# For P4 switches; node_id=node+renc-p4-sw component=ip+192.168.11.8 bdf=p1 - @throws Exception in case of error + @return Dictionary with component name as the key and value as list of associated PCI addresses in use. 
""" @abstractmethod @@ -194,7 +203,8 @@ def get_client_reservations(self, *, slice_id: ID = None) -> List[ABCReservation @abstractmethod def get_slices(self, *, slice_id: ID = None, slice_name: str = None, project_id: str = None, email: str = None, states: list[int] = None, oidc_sub: str = None, slc_type: List[SliceTypes] = None, - limit: int = None, offset: int = None, lease_end: datetime = None) -> List[ABCSlice] or None: + limit: int = None, offset: int = None, lease_end: datetime = None, + search: str = None, exact_match: bool = False) -> List[ABCSlice] or None: """ Retrieves the specified slices. @@ -208,12 +218,58 @@ def get_slices(self, *, slice_id: ID = None, slice_name: str = None, project_id: @param limit limit @param offset offset @param lease_end lease_end + @param search: search term applied + @param exact_match: Exact Match for Search term @return list of slices @throws Exception in case of error """ + @abstractmethod + def get_slice_count(self, *, project_id: str = None, email: str = None, states: list[int] = None, + oidc_sub: str = None, slc_type: List[SliceTypes] = None, + excluded_projects: List[str] = None) -> int: + """ + Retrieves the slices count. + + @param project_id project id + @param email email + @param states states + @param oidc_sub oidc sub + @param slc_type slice type + @param excluded_projects excluded_projects + + @return number of slices matching the filter criteria + + @throws Exception in case of error + """ + + @abstractmethod + def increment_metrics(self, *, project_id: str, oidc_sub: str, slice_count: int = 1) -> bool: + """ + Add or Update Metrics + + @param project_id project id + @param oidc_sub oidc sub + @param slice_count slice_count + + @return true or false + + @throws Exception in case of error + """ + + @abstractmethod + def get_metrics(self, *, project_id: str, oidc_sub: str, excluded_projects: List[str] = None) -> list: + """ + Get Metrics + @param project_id: project id + @param oidc_sub: user id + @param excluded_projects: list of project ids to exclude + + @return list of metrics + """ + @abstractmethod def initialize(self): """ diff --git a/fabric_cf/actor/core/apis/abc_mgmt_actor.py b/fabric_cf/actor/core/apis/abc_mgmt_actor.py index eef1505a..daf17a32 100644 --- a/fabric_cf/actor/core/apis/abc_mgmt_actor.py +++ b/fabric_cf/actor/core/apis/abc_mgmt_actor.py @@ -29,7 +29,6 @@ from typing import TYPE_CHECKING, List, Tuple, Dict from fabric_mb.message_bus.messages.delegation_avro import DelegationAvro -from fabric_mb.message_bus.messages.poa_avro import PoaAvro from fabric_mb.message_bus.messages.poa_info_avro import PoaInfoAvro from fabric_mb.message_bus.messages.site_avro import SiteAvro @@ -47,7 +46,7 @@ class ABCMgmtActor(ABCComponent): @abstractmethod def get_slices(self, *, slice_id: ID = None, slice_name: str = None, email: str = None, project: str = None, states: List[int] = None, limit: int = None, offset: int = None, - user_id: str = None) -> List[SliceAvro] or None: + user_id: str = None, search: str = None, exact_match: bool = False) -> List[SliceAvro] or None: """ Obtains all slices. 
@param slice_id slice id @@ -58,9 +57,52 @@ def get_slices(self, *, slice_id: ID = None, slice_name: str = None, email: str @param project project id @param states slice states @param limit limit @param offset offset @param user_id user_id + @param search: search term applied + @param exact_match: Exact Match for Search term @return returns list of slices """ + def increment_metrics(self, *, project_id: str, oidc_sub: str, slice_count: int = 1) -> bool: + """ + Add or update metrics + + @param project_id project id + @param oidc_sub oidc sub + @param slice_count slice_count + + @return true or false + + @throws Exception in case of error + """ + raise NotImplementedError + + def get_metrics(self, *, project_id: str, oidc_sub: str, excluded_projects: List[str] = None) -> list: + """ + Get metrics + + @param project_id project id + @param oidc_sub oidc sub + @param excluded_projects excluded_projects + + @return list of metric information + + @throws Exception in case of error + """ + raise NotImplementedError + + def get_slice_count(self, *, email: str = None, project: str = None, states: List[int] = None, + user_id: str = None, excluded_projects: List[str] = None) -> int: + """ + Obtains slice count. + @param email email + @param project project id + @param states slice states + @param user_id user_id + @param excluded_projects excluded_projects + @return returns number of slices + """ + raise NotImplementedError + @abstractmethod def add_slice(self, *, slice_obj: SliceAvro) -> ID: """ @@ -108,7 +150,8 @@ def accept_update_slice(self, *, slice_id: ID) -> bool: @abstractmethod def get_reservations(self, *, states: List[int] = None, slice_id: ID = None, rid: ID = None, oidc_claim_sub: str = None, email: str = None, rid_list: List[str] = None, - type: str = None, site: str = None, node_id: str = None) -> List[ReservationMng]: + type: str = None, site: str = None, node_id: str = None, + host: str = None, ip_subnet: str = None) -> List[ReservationMng]: """ Get Reservations @param states states @@ -120,6 +163,8 @@ def get_reservations(self, *, states: List[int] = None, slice_id: ID = None, @param type type of reservations like NodeSliver/NetworkServiceSliver @param site site @param node_id node id + @param ip_subnet ip subnet + @param host host Obtains all reservations @return returns list of the reservations """ diff --git a/fabric_cf/actor/core/apis/abc_mgmt_client_actor.py b/fabric_cf/actor/core/apis/abc_mgmt_client_actor.py index c4197a29..01a3473d 100644 --- a/fabric_cf/actor/core/apis/abc_mgmt_client_actor.py +++ b/fabric_cf/actor/core/apis/abc_mgmt_client_actor.py @@ -29,6 +29,9 @@ from datetime import datetime from typing import TYPE_CHECKING, List +from fabric_cf.actor.core.common.exceptions import ManageException + +from fabric_cf.actor.core.common.constants import Constants from fabric_mb.message_bus.messages.delegation_avro import DelegationAvro from fabric_mb.message_bus.messages.broker_query_model_avro import BrokerQueryModelAvro from fabric_mb.message_bus.messages.reservation_predecessor_avro import ReservationPredecessorAvro @@ -49,7 +52,6 @@ class ABCMgmtClientActor(ABCComponent): """ Implements base class for Management Interface for a Client Actor """ - @abstractmethod def add_reservation(self, *, reservation: TicketReservationAvro) -> ID: """ Adds the reservation to the actor's state and returns the assigned reservation ID.
@@ -58,8 +60,8 @@ def add_reservation(self, *, reservation: TicketReservationAvro) -> ID: @param reservation reservation @return null on failure, assigned reservation ID otherwise """ + raise ManageException(Constants.NOT_IMPLEMENTED) - @abstractmethod def add_reservations(self, *, reservations: List[ReservationMng])->list: """ Adds all reservations to the actor's state and returns the assigned reservation ID. @@ -69,8 +71,8 @@ def add_reservations(self, *, reservations: List[ReservationMng])->list: @param reservations reservation @return null on failure, list of assigned ReservationIDs on success. """ + raise ManageException(Constants.NOT_IMPLEMENTED) - @abstractmethod def demand_reservation_rid(self, *, rid: ID) -> bool: """ Demands the specified reservation. @@ -78,8 +80,8 @@ def demand_reservation_rid(self, *, rid: ID) -> bool: @param rid reservation id @return true for success; false otherwise """ + raise ManageException(Constants.NOT_IMPLEMENTED) - @abstractmethod def demand_reservation(self, *, reservation: ReservationMng) -> bool: """ Updates the reservation and issues a demand for it. @@ -89,6 +91,7 @@ def demand_reservation(self, *, reservation: ReservationMng) -> bool: @param reservation reservation @return true for success; false otherwise """ + raise ManageException(Constants.NOT_IMPLEMENTED) @abstractmethod def get_brokers(self, *, broker: ID = None, id_token: str = None) -> List[ProxyAvro]: @@ -115,19 +118,40 @@ def update_broker(self, *, broker: ProxyAvro) -> bool: @return true for sucess; false otherwise """ - @abstractmethod - def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, - graph_format: GraphFormat) -> BrokerQueryModelAvro: + def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, graph_format: GraphFormat, + start: datetime = None, end: datetime = None, + includes: str = None, excludes: str = None) -> BrokerQueryModelAvro: """ Obtains the resources available at the specified broker @param broker broker @param id_token identity token generated by Credmgr @param level: level of details @param graph_format: Graph Format + @param start: start time + @param end: end time + @param includes: comma separated lists of sites to include + @param excludes: comma separated lists of sites to exclude @return BQM """ + raise ManageException(Constants.NOT_IMPLEMENTED) + + def build_broker_query_model(self, level_0_broker_query_model: str, level: int, + graph_format: GraphFormat = GraphFormat.GRAPHML, + start: datetime = None, end: datetime = None, + includes: str = None, excludes: str = None) -> str: + """ + Build the BQM Model using current usage + @param level_0_broker_query_model Capacity Model + @param level: level of details + @param graph_format: Graph Format + @param start: start time + @param end: end time + @param includes: comma separated lists of sites to include + @param excludes: comma separated lists of sites to exclude + @return BQM + """ + raise ManageException(Constants.NOT_IMPLEMENTED) - @abstractmethod def extend_reservation(self, *, reservation: ID, new_end_time: datetime, sliver: BaseSliver, dependencies: List[ReservationPredecessorAvro] = None) -> bool: """ @@ -138,8 +162,8 @@ def extend_reservation(self, *, reservation: ID, new_end_time: datetime, sliver: @param dependencies: Dependency reservations @return true for success and false for failure """ + raise ManageException(Constants.NOT_IMPLEMENTED) - @abstractmethod def claim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: """ Claims delegations 
exported by the specified broker @param did reservation id @return reservation """ + raise ManageException(Constants.NOT_IMPLEMENTED) - @abstractmethod def reclaim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: """ Reclaim delegations exported by the specified broker @@ -156,3 +180,4 @@ def reclaim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: @param did reservation id @return reservation """ + raise ManageException(Constants.NOT_IMPLEMENTED) diff --git a/fabric_cf/actor/core/apis/abc_reservation_mixin.py b/fabric_cf/actor/core/apis/abc_reservation_mixin.py index 72a55180..04a2a480 100644 --- a/fabric_cf/actor/core/apis/abc_reservation_mixin.py +++ b/fabric_cf/actor/core/apis/abc_reservation_mixin.py @@ -29,6 +29,8 @@ from enum import Enum from typing import TYPE_CHECKING +from fabric_cf.actor.core.time.term import Term + from fabric_cf.actor.core.apis.abc_reservation_resources import ABCReservationResources from fabric_cf.actor.core.apis.abc_reservation_status import ABCReservationStatus @@ -541,5 +543,13 @@ def poa_info(self, *, incoming: Poa): """ Process POA response + @throws Exception in case of error + """ + + @abstractmethod + def get_term(self) -> Term: + """ + Return Term + @throws Exception in case of error """ \ No newline at end of file diff --git a/fabric_cf/actor/core/common/constants.py b/fabric_cf/actor/core/common/constants.py index 1eb3cd25..a1735b4c 100644 --- a/fabric_cf/actor/core/common/constants.py +++ b/fabric_cf/actor/core/common/constants.py @@ -123,6 +123,7 @@ class Constants: PUBLISH_INTERVAL = "publish-interval" REFRESH_INTERVAL = "refresh-interval" DELEGATION = "delegation" + LOCAL_BQM = "local" PROPERTY_CLASS_NAME = "ObjectClassName" PROPERTY_MODULE_NAME = "ModuleName" @@ -198,6 +199,7 @@ class Constants: STATE_FILE_LOCATION = '/tmp/fabric_actor.tmp' MAINT_PROJECT_ID = 'maint.project.id' INFRASTRUCTURE_PROJECT_ID = "infrastructure.project.id" + TOTAL_SLICE_COUNT_SEED = "total_slice_count_seed" ELASTIC_TIME = "request.elasticTime" ELASTIC_SIZE = "request.elasticSize" @@ -210,7 +212,11 @@ class Constants: QUERY_DETAIL_LEVEL = "query.detail.level" BROKER_QUERY_MODEL = "bqm" BROKER_QUERY_MODEL_FORMAT = "bqm.format" + START = "start" + END = "end" POOL_TYPE = "neo4j" + INCLUDES = "includes" + EXCLUDES = "excludes" UNIT_MODIFY_PROP_MESSAGE_SUFFIX = ".message" UNIT_MODIFY_PROP_CODE_SUFFIX = ".code" diff --git a/fabric_cf/actor/core/common/event_logger.py b/fabric_cf/actor/core/common/event_logger.py index 79047fc7..9908fda0 100644 --- a/fabric_cf/actor/core/common/event_logger.py +++ b/fabric_cf/actor/core/common/event_logger.py @@ -29,6 +29,7 @@ from fabric_mb.message_bus.messages.slice_avro import SliceAvro from fim.logging.log_collector import LogCollector from fim.slivers.base_sliver import BaseSliver +from fim.slivers.network_node import NodeSliver from fim.user.topology import ExperimentTopology from fabric_cf.actor.core.common.constants import Constants @@ -98,7 +99,7 @@ def log_sliver_event(self, *, slice_object: SliceAvro, sliver: BaseSliver, verb: owner = slice_object.get_owner() log_message = f"CFEL Sliver event slc:{slice_object.get_slice_id()} " \ - f"slvr:{sliver.get_reservation_info().reservation_id} of " \ + f"slvr:{sliver.get_reservation_info().reservation_id}/{sliver.get_name()} of " \ f"type {sliver.get_type()} {verb} " \ f"by prj:{slice_object.get_project_id()} usr:{owner.get_oidc_sub_claim()}" \ f":{owner.get_email()}" @@
-112,6 +113,9 @@ def log_sliver_event(self, *, slice_object: SliceAvro, sliver: BaseSliver, verb: log_message += f" {str(lc)}" + if isinstance(sliver, NodeSliver) and sliver.get_image_ref(): + log_message += f" image:{sliver.get_image_ref()}" + self.logger.info(log_message) except Exception as e: traceback.print_exc() diff --git a/fabric_cf/actor/core/core/actor.py b/fabric_cf/actor/core/core/actor.py index c3cde772..95a79702 100644 --- a/fabric_cf/actor/core/core/actor.py +++ b/fabric_cf/actor/core/core/actor.py @@ -28,9 +28,6 @@ from typing import List, Dict from fabric_cf.actor.core.common.constants import Constants -from fabric_mb.message_bus.messages.poa_avro import PoaAvro -from fabric_mb.message_bus.messages.poa_info_avro import PoaInfoAvro - from fabric_cf.actor.boot.configuration import ActorConfig from fabric_cf.actor.core.apis.abc_delegation import ABCDelegation from fabric_cf.actor.core.apis.abc_policy import ABCPolicy @@ -379,12 +376,14 @@ def recover(self): client_slices = self.plugin.get_database().get_slices(slc_type=[SliceTypes.ClientSlice, SliceTypes.BrokerClientSlice], states=[SliceState.Configuring.value, - SliceState.Nascent.value, - SliceState.StableOK.value, - SliceState.StableError.value, - SliceState.Modifying.value, - SliceState.ModifyOK.value, - SliceState.ModifyError.value]) + SliceState.Nascent.value, + SliceState.StableOK.value, + SliceState.StableError.value, + SliceState.Modifying.value, + SliceState.ModifyOK.value, + SliceState.ModifyError.value, + SliceState.AllocatedOK.value, + SliceState.AllocatedError.value]) self.logger.debug("Found {} client slices".format(len(client_slices))) self.recover_slices(slices=client_slices) self.logger.debug("Recovery of client slices complete") @@ -659,8 +658,8 @@ def register(self, *, reservation: ABCReservationMixin): def register_slice(self, *, slice_object: ABCSlice): self.wrapper.register_slice(slice_object=slice_object) - def modify_slice(self, *, slice_object: ABCSlice): - self.wrapper.modify_slice(slice_object=slice_object) + def modify_slice(self, *, slice_object: ABCSlice, new_state: SliceState): + self.wrapper.modify_slice(slice_object=slice_object, new_state=new_state) def delete_slice(self, *, slice_id: ID): self.wrapper.delete_slice(slice_id=slice_id) diff --git a/fabric_cf/actor/core/core/broker_policy.py b/fabric_cf/actor/core/core/broker_policy.py index 7a5dd5d6..85b40a1a 100644 --- a/fabric_cf/actor/core/core/broker_policy.py +++ b/fabric_cf/actor/core/core/broker_policy.py @@ -25,6 +25,7 @@ # Author: Komal Thareja (kthare10@renci.org) from __future__ import annotations +from datetime import datetime from typing import TYPE_CHECKING from fim.graph.abc_property_graph import GraphFormat @@ -127,16 +128,31 @@ def get_client_id(self, *, reservation: ABCServerReservation) -> ID: return reservation.get_client_auth_token().get_guid() @staticmethod - def get_broker_query_model_query(*, level: int, bqm_format: GraphFormat = GraphFormat.GRAPHML) -> dict: + def get_broker_query_model_query(*, level: int, bqm_format: GraphFormat = GraphFormat.GRAPHML, + start: datetime = None, end: datetime = None, + includes: str = None, excludes: str = None) -> dict: """ Return dictionary representing query :param level: Graph Level :param bqm_format: Graph Format + :param start: start time + :param end: end time + :param includes: comma separated lists of sites to include + :param excludes: comma separated lists of sites to exclude + :return dictionary representing the query """ properties = {Constants.QUERY_ACTION: 
Constants.QUERY_ACTION_DISCOVER_BQM, Constants.QUERY_DETAIL_LEVEL: str(level), - Constants.BROKER_QUERY_MODEL_FORMAT: str(bqm_format.value)} + Constants.BROKER_QUERY_MODEL_FORMAT: str(bqm_format.value),} + if start: + properties[Constants.START] = start.strftime(Constants.LEASE_TIME_FORMAT) + if end: + properties[Constants.END] = end.strftime(Constants.LEASE_TIME_FORMAT) + if includes: + properties[Constants.INCLUDES] = includes + if excludes: + properties[Constants.EXCLUDES] = excludes return properties @staticmethod diff --git a/fabric_cf/actor/core/core/controller.py b/fabric_cf/actor/core/core/controller.py index 65f721ce..0ffcfecf 100644 --- a/fabric_cf/actor/core/core/controller.py +++ b/fabric_cf/actor/core/core/controller.py @@ -30,6 +30,8 @@ import traceback from typing import TYPE_CHECKING +from fabric_cf.actor.fim.plugins.broker.aggregate_bqm_plugin import AggregatedBQMPlugin +from fim.pluggable import PluggableRegistry, PluggableType from fim.slivers.base_sliver import BaseSliver from fabric_cf.actor.boot.configuration import ActorConfig @@ -83,6 +85,7 @@ def __init__(self, *, identity: AuthToken = None, clock: ActorClock = None): self.asm_update_thread = AsmUpdateThread(name=f"{self.get_name()}-asm-thread", logger=self.logger) self.thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix=self.__class__.__name__) + self.pluggable_registry = PluggableRegistry() def __getstate__(self): state = self.__dict__.copy() @@ -107,6 +110,9 @@ def __getstate__(self): del state['asm_update_thread'] del state['event_processors'] del state['thread_pool'] + if hasattr(self, 'pluggable_registry'): + del state['pluggable_registry'] + return state def __setstate__(self, state): @@ -132,12 +138,15 @@ def __setstate__(self, state): self.event_processors = {} self.thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix=self.__class__.__name__) + self.pluggable_registry = PluggableRegistry() def set_logger(self, logger): super(Controller, self).set_logger(logger=logger) self.asm_update_thread.set_logger(logger=logger) def start(self): + self.pluggable_registry.register_pluggable(t=PluggableType.Broker, p=AggregatedBQMPlugin, actor=self, + logger=self.logger) self.asm_update_thread.set_logger(logger=self.logger) self.asm_update_thread.start() super(Controller, self).start() diff --git a/fabric_cf/actor/core/core/inventory_slice_manager.py b/fabric_cf/actor/core/core/inventory_slice_manager.py index bb82a7cb..8c53aaa9 100644 --- a/fabric_cf/actor/core/core/inventory_slice_manager.py +++ b/fabric_cf/actor/core/core/inventory_slice_manager.py @@ -85,7 +85,7 @@ def create_inventory_slice(self, *, slice_id: ID, name: str, rtype: ResourceType result.code = InventorySliceManagerError.ErrorInvalidArguments return result try: - temp = self.db.get_slices(slice_id=str(slice_id)) + temp = self.db.get_slices(slice_id=slice_id) if temp is not None and len(temp) > 0: result.code = InventorySliceManagerError.ErrorPoolExists @@ -133,7 +133,7 @@ def remove_inventory_slice(self, *, slice_id: ID, rtype: ResourceType): @param slice_id pool id @param rtype resource type """ - slices = self.db.get_slices(slice_id=str(slice_id)) + slices = self.db.get_slices(slice_id=slice_id) if slices is not None and len(slices) == 1: slice_obj = slices[0] diff --git a/fabric_cf/actor/core/core/policy.py b/fabric_cf/actor/core/core/policy.py index c882b96c..a7f8e563 100644 --- a/fabric_cf/actor/core/core/policy.py +++ b/fabric_cf/actor/core/core/policy.py @@ -121,6 +121,9 @@ def 
initialize(self, *, config: ActorConfig): if self.clock is None: raise ActorException("Missing clock") + if config and config.get_policy() and config.get_policy().get_properties(): + self.set_properties(properties=config.get_policy().get_properties()) + self.initialized = True def internal_error(self, *, message: str): diff --git a/fabric_cf/actor/core/kernel/broker_reservation.py b/fabric_cf/actor/core/kernel/broker_reservation.py index 24976d7b..c54538d8 100644 --- a/fabric_cf/actor/core/kernel/broker_reservation.py +++ b/fabric_cf/actor/core/kernel/broker_reservation.py @@ -319,6 +319,8 @@ def probe_pending(self): self.transition(prefix="Recover from Extend Failure", state=ReservationStates.Ticketed, pending=ReservationPendingStates.None_) self.extend_failure = False + self.update_data.clear(clear_fail=True) + self.error_message = "" else: if self.pending_state == ReservationPendingStates.Ticketing: # Check for a pending ticket operation that may have completed @@ -559,7 +561,9 @@ def handle_failed_rpc(self, *, failed: FailedRPC): super().handle_failed_rpc(failed=failed) def fail_extend(self, *, message: str, exception: Exception = None): + self.logger.debug(f"Failed Extend: {message}") self.extend_failure = True + self.notified_failed = False super().fail(message=message, exception=exception) diff --git a/fabric_cf/actor/core/kernel/kernel.py b/fabric_cf/actor/core/kernel/kernel.py index 92bcdac1..4d15883e 100644 --- a/fabric_cf/actor/core/kernel/kernel.py +++ b/fabric_cf/actor/core/kernel/kernel.py @@ -26,6 +26,7 @@ import threading import time import traceback + from typing import List, Dict from fabric_cf.actor.core.apis.abc_base_plugin import ABCBasePlugin @@ -583,7 +584,7 @@ def __probe_pending_slices(self, *, slice_obj: ABCSlice): state_changed, slice_state = slice_obj.transition_slice(operation=SliceStateMachine.REEVALUATE) if state_changed: slice_obj.set_dirty() - if slice_state == SliceState.Closing: + if slice_state in [SliceState.Dead, SliceState.Closing]: slice_avro = Translate.translate_slice_to_avro(slice_obj=slice_obj) EventLoggerSingleton.get().log_slice_event(slice_object=slice_avro, action=ActionId.delete) self.plugin.get_database().update_slice(slice_object=slice_obj) @@ -871,10 +872,11 @@ def register_reservation(self, *, reservation: ABCReservationMixin): finally: reservation.unlock() - def modify_slice(self, *, slice_object: ABCSlice): + def modify_slice(self, *, slice_object: ABCSlice, new_state: SliceState): """ Modify the specified slice with the kernel. 
@param slice_object slice_object + @param new_state new_state @throws Exception in case of failure """ if slice_object is None: @@ -890,7 +892,11 @@ def modify_slice(self, *, slice_object: ABCSlice): if not real.is_dead_or_closing(): real.set_config_properties(value=slice_object.get_config_properties()) # Transition slice to Configuring state - real.transition_slice(operation=SliceStateMachine.MODIFY) + if new_state == SliceState.Modifying: + operation = SliceStateMachine.MODIFY + else: + operation = SliceStateMachine.RENEW + real.transition_slice(operation=operation) real.set_graph_id(graph_id=slice_object.get_graph_id()) real.set_dirty() self.plugin.get_database().update_slice(slice_object=real) diff --git a/fabric_cf/actor/core/kernel/kernel_wrapper.py b/fabric_cf/actor/core/kernel/kernel_wrapper.py index 86c8166b..ca01f761 100644 --- a/fabric_cf/actor/core/kernel/kernel_wrapper.py +++ b/fabric_cf/actor/core/kernel/kernel_wrapper.py @@ -28,6 +28,7 @@ from datetime import datetime from typing import List, Dict +from fabric_cf.actor.core.kernel.slice_state_machine import SliceState from fabric_mb.message_bus.messages.poa_info_avro import PoaInfoAvro from fim.slivers.base_sliver import BaseSliver @@ -769,16 +770,17 @@ def register_delegation(self, *, delegation: ABCDelegation): self.kernel.register_delegation(delegation=delegation) - def modify_slice(self, *, slice_object: ABCSlice): + def modify_slice(self, *, slice_object: ABCSlice, new_state: SliceState): """ Modify the slice registered with the kernel @param slice_object slice_object + @param new_state new_state @throws Exception in case of error """ if slice_object is None or slice_object.get_slice_id() is None or not isinstance(slice_object, ABCSlice): raise KernelException("Invalid argument {}".format(slice_object)) - self.kernel.modify_slice(slice_object=slice_object) + self.kernel.modify_slice(slice_object=slice_object, new_state=new_state) def delete_slice(self, *, slice_id: ID): """ diff --git a/fabric_cf/actor/core/kernel/reservation_client.py b/fabric_cf/actor/core/kernel/reservation_client.py index 1e7a6982..7e0769a9 100644 --- a/fabric_cf/actor/core/kernel/reservation_client.py +++ b/fabric_cf/actor/core/kernel/reservation_client.py @@ -25,6 +25,7 @@ # Author: Komal Thareja (kthare10@renci.org) from __future__ import annotations +import datetime import json import re import threading @@ -304,6 +305,12 @@ def absorb_ticket_update(self, *, incoming: ABCReservationMixin, update_data: Up self.resources.update(reservation=self, resource_set=incoming.get_resources()) self.logger.debug("absorb_update: {}".format(incoming)) + # Clear error message from previous Extend operations + self.error_message = "" + + #if self.resources.get_sliver().reservation_info: + # self.resources.get_sliver().reservation_info.error_message = self.error_message + self.policy.update_ticket_complete(reservation=self) def accept_lease_update(self, *, incoming: ABCReservationMixin, update_data: UpdateData) -> bool: @@ -422,6 +429,10 @@ def approve_redeem(self): @return true if approved; false otherwise """ approved = True + now = datetime.datetime.now(datetime.timezone.utc) + if self.requested_term and self.requested_term.get_start_time() > now: + self.logger.debug(f"Future Reservation : {self}!") + return False for pred_state in self.redeem_predecessors.values(): if pred_state.get_reservation() is None or \ @@ -500,34 +511,45 @@ def prepare_ticket(self, extend: bool = False): parent_res = pred_state.get_reservation() if Constants.PEERED in value1: + 
self.logger.debug(f"KOMAL --- Node MAP:{ifs.get_node_map()} Result: {result}") if parent_res is not None: ns_sliver = parent_res.get_resources().get_sliver() # component_name contains => Peered:: al2s_ifs = ns_sliver.interface_info.interfaces.get(result[2]) - ifs.labels = Labels.update(ifs.labels, vlan=al2s_ifs.labels.vlan) - ifs.set_node_map(node_map=(Constants.PEERED, value2)) + if al2s_ifs: + ifs.labels = Labels.update(ifs.labels, vlan=al2s_ifs.labels.vlan) + ifs.set_node_map(node_map=(Constants.PEERED, value2)) + else: + msg = f"Could not determine al2s_ifs: {al2s_ifs} result: {result}" + self.logger.error(msg) + self.fail(message=msg) continue if parent_res is not None and (parent_res.is_ticketed() or parent_res.is_active()): node_sliver = parent_res.get_resources().get_sliver() - component = node_sliver.attached_components_info.get_device(name=value1) - graph_id, bqm_component_id = component.get_node_map() - graph_id, node_id = node_sliver.get_node_map() - ifs.set_node_map(node_map=(node_id, bqm_component_id)) - - # For shared NICs grab the MAC & VLAN from corresponding Interface Sliver - # maintained in the Parent Reservation Sliver - if component.get_type() == ComponentType.SharedNIC: - parent_res_ifs_sliver = FimHelper.get_site_interface_sliver(component=component, - local_name=ifs.get_labels().local_name) - parent_labs = parent_res_ifs_sliver.get_label_allocations() - - if component.get_model() == Constants.OPENSTACK_VNIC_MODEL: - ifs.labels = Labels.update(ifs.labels, mac=parent_labs.mac, bdf=parent_labs.bdf, - instance_parent=f"{parent_res.get_reservation_id()}-{node_sliver.get_name()}") - else: - ifs.labels = Labels.update(ifs.labels, mac=parent_labs.mac, vlan=parent_labs.vlan, - bdf=parent_labs.bdf) + # P4 Switch + if node_sliver.get_type() == NodeType.Switch: + graph_id, node_id = node_sliver.get_node_map() + ifs.set_node_map(node_map=(str(NodeType.Switch), node_id)) + else: + component = node_sliver.attached_components_info.get_device(name=value1) + graph_id, bqm_component_id = component.get_node_map() + graph_id, node_id = node_sliver.get_node_map() + ifs.set_node_map(node_map=(node_id, bqm_component_id)) + + # For shared NICs grab the MAC & VLAN from corresponding Interface Sliver + # maintained in the Parent Reservation Sliver + if component.get_type() == ComponentType.SharedNIC: + parent_res_ifs_sliver = FimHelper.get_site_interface_sliver(component=component, + local_name=ifs.get_labels().local_name) + parent_labs = parent_res_ifs_sliver.get_label_allocations() + + if component.get_model() == Constants.OPENSTACK_VNIC_MODEL: + ifs.labels = Labels.update(ifs.labels, mac=parent_labs.mac, bdf=parent_labs.bdf, + instance_parent=f"{parent_res.get_reservation_id()}-{node_sliver.get_name()}") + else: + ifs.labels = Labels.update(ifs.labels, mac=parent_labs.mac, vlan=parent_labs.vlan, + bdf=parent_labs.bdf) self.logger.trace(f"Updated Network Res# {self.get_reservation_id()} {sliver}") @@ -607,9 +629,9 @@ def can_renew(self) -> bool: return self.last_ticket_update.successful() - def clear_notice(self, clear_fail: bool=False): - self.last_ticket_update.clear() - self.last_lease_update.clear() + def clear_notice(self, clear_fail: bool = False): + self.last_ticket_update.clear(clear_fail=clear_fail) + self.last_lease_update.clear(clear_fail=clear_fail) def do_relinquish(self): """ @@ -864,9 +886,11 @@ def get_last_ticket_update(self) -> str: if self.last_ticket_update is not None: if self.last_ticket_update.get_message() is not None and self.last_ticket_update.get_message() != "": 
result += f"{self.last_ticket_update.get_message()}, " - ev = self.last_ticket_update.get_events() - if ev is not None and ev != "": - result += f"events: {ev}, " + # Include events only in case of failure + if self.last_ticket_update.is_failed(): + ev = self.last_ticket_update.get_events() + if ev is not None and ev != "": + result += f"events: {ev}, " result = result[:-2] return result @@ -875,9 +899,11 @@ def get_last_lease_update(self) -> str: if self.last_lease_update is not None: if self.last_lease_update.get_message() is not None and self.last_lease_update.get_message() != "": result += f"{self.last_lease_update.get_message()}, " - ev = self.last_lease_update.get_events() - if ev is not None and ev != "": - result += f"events: {ev}, " + # Include events only in case of failure + if self.last_lease_update.is_failed(): + ev = self.last_lease_update.get_events() + if ev is not None and ev != "": + result += f"events: {ev}, " result = result[:-2] return result diff --git a/fabric_cf/actor/core/kernel/reservation_states.py b/fabric_cf/actor/core/kernel/reservation_states.py index 5d471fb5..a2aa8b7c 100644 --- a/fabric_cf/actor/core/kernel/reservation_states.py +++ b/fabric_cf/actor/core/kernel/reservation_states.py @@ -23,9 +23,22 @@ # # # Author: Komal Thareja (kthare10@renci.org) +import enum from enum import Enum +class ReservationOperation(enum.Enum): + Create = enum.auto(), + Modify = enum.auto(), + Extend = enum.auto() + + def __repr__(self): + return self.name + + def __str__(self): + return self.name + + class ReservationStates(Enum): """ Reservation states @@ -63,7 +76,7 @@ def translate(state_name: str): elif state_name.lower() == ReservationStates.Failed.name.lower(): return ReservationStates.Failed elif state_name.lower() == ReservationStates.CloseFail.name.lower(): - return ReservationStates.Failed + return ReservationStates.CloseFail else: return ReservationStates.Unknown diff --git a/fabric_cf/actor/core/kernel/slice.py b/fabric_cf/actor/core/kernel/slice.py index 6816ccfd..593dda76 100644 --- a/fabric_cf/actor/core/kernel/slice.py +++ b/fabric_cf/actor/core/kernel/slice.py @@ -268,7 +268,18 @@ def is_dirty(self) -> bool: return self.dirty def transition_slice(self, *, operation: SliceOperation) -> Tuple[bool, SliceState]: - return self.state_machine.transition_slice(operation=operation, reservations=self.reservations) + status, new_state = self.state_machine.transition_slice(operation=operation, reservations=self.reservations) + if status and new_state in [SliceState.StableOK, SliceState.StableError]: + new_end = None + for r in self.reservations.values(): + term = r.get_term() + if not new_end or (term and term.get_end_time() > new_end): + new_end = term.get_end_time() + + if new_end: + self.lease_end = new_end + + return status, new_state def is_stable_ok(self) -> bool: state_changed, slice_state = self.transition_slice(operation=SliceStateMachine.REEVALUATE) @@ -289,7 +300,7 @@ def is_stable_error(self) -> bool: def is_stable(self) -> bool: state_changed, slice_state = self.transition_slice(operation=SliceStateMachine.REEVALUATE) - if slice_state == SliceState.StableError or slice_state == SliceState.StableOk: + if slice_state in [SliceState.StableError, SliceState.StableOK]: return True return False @@ -313,7 +324,23 @@ def is_modify_error(self) -> bool: def is_modified(self) -> bool: state_changed, slice_state = self.transition_slice(operation=SliceStateMachine.REEVALUATE) - if slice_state == SliceState.ModifyError or slice_state == SliceState.ModifyOK: + if 
slice_state in [SliceState.ModifyError, SliceState.ModifyOK]: + return True + + return False + + def is_allocated_error(self) -> bool: + state_changed, slice_state = self.transition_slice(operation=SliceStateMachine.REEVALUATE) + + if slice_state == SliceState.AllocatedError: + return True + + return False + + def is_allocated(self) -> bool: + state_changed, slice_state = self.transition_slice(operation=SliceStateMachine.REEVALUATE) + + if slice_state in [SliceState.AllocatedOK, SliceState.AllocatedError]: return True return False @@ -321,7 +348,7 @@ def is_modified(self) -> bool: def is_dead_or_closing(self) -> bool: state_changed, slice_state = self.transition_slice(operation=SliceStateMachine.REEVALUATE) - if slice_state == SliceState.Dead or slice_state == SliceState.Closing: + if slice_state in [SliceState.Dead, SliceState.Closing]: return True return False diff --git a/fabric_cf/actor/core/kernel/slice_state_machine.py b/fabric_cf/actor/core/kernel/slice_state_machine.py index 3ed346f9..fa2d7e70 100644 --- a/fabric_cf/actor/core/kernel/slice_state_machine.py +++ b/fabric_cf/actor/core/kernel/slice_state_machine.py @@ -44,6 +44,8 @@ class SliceState(Enum): Modifying = enum.auto() ModifyError = enum.auto() ModifyOK = enum.auto() + AllocatedError = enum.auto() + AllocatedOK = enum.auto() All = enum.auto() # used only for querying def __str__(self): @@ -102,6 +104,10 @@ def translate(state_name: str): return SliceState.Closing elif state_name.lower() == SliceState.Dead.name.lower(): return SliceState.Dead + elif state_name.lower() == SliceState.AllocatedOK.name.lower(): + return SliceState.Closing + elif state_name.lower() == SliceState.AllocatedError.name.lower(): + return SliceState.Dead else: return SliceState.All @@ -117,6 +123,12 @@ def is_stable(*, state) -> bool: return True return False + @staticmethod + def is_allocated(*, state) -> bool: + if state == SliceState.AllocatedOK or state == SliceState.AllocatedError: + return True + return False + @staticmethod def is_modified(*, state) -> bool: if state == SliceState.ModifyOK or state == SliceState.ModifyError: @@ -127,6 +139,7 @@ def is_modified(*, state) -> bool: class SliceCommand(Enum): Create = enum.auto() Modify = enum.auto() + Renew = enum.auto() Delete = enum.auto() Reevaluate = enum.auto() ModifyAccept = enum.auto() @@ -183,18 +196,23 @@ def has_state_other_than(self, *states) -> bool: class SliceStateMachine: CREATE = SliceOperation(SliceCommand.Create, SliceState.Nascent) - MODIFY = SliceOperation(SliceCommand.Modify, SliceState.StableOK, SliceState.StableError, SliceState.Configuring) + MODIFY = SliceOperation(SliceCommand.Modify, SliceState.StableOK, SliceState.StableError, SliceState.Configuring, + SliceState.AllocatedOK, SliceState.AllocatedError) + + RENEW = SliceOperation(SliceCommand.Renew, SliceState.StableOK, SliceState.StableError, SliceState.AllocatedOK, + SliceState.ModifyOK, SliceState.ModifyError, SliceState.AllocatedError) MODIFY_ACCEPT = SliceOperation(SliceCommand.ModifyAccept, SliceState.ModifyOK, SliceState.ModifyError, - SliceState.Modifying) + SliceState.Modifying, SliceState.AllocatedOK, SliceState.AllocatedError) DELETE = SliceOperation(SliceCommand.Delete, SliceState.Nascent, SliceState.StableOK, SliceState.StableError, SliceState.Configuring, SliceState.Modifying, SliceState.ModifyOK, SliceState.ModifyError, - SliceState.Dead) + SliceState.Dead, SliceState.AllocatedOK, SliceState.AllocatedError) REEVALUATE = SliceOperation(SliceCommand.Reevaluate, SliceState.Nascent, SliceState.StableOK, 
SliceState.StableError, SliceState.Configuring, SliceState.Dead, SliceState.Closing, - SliceState.Modifying, SliceState.ModifyError, SliceState.ModifyOK) + SliceState.Modifying, SliceState.ModifyError, SliceState.ModifyOK, + SliceState.AllocatedError, SliceState.AllocatedOK) def __init__(self, *, slice_id: ID): self.slice_guid = slice_id @@ -233,14 +251,24 @@ def transition_slice(self, *, operation: SliceOperation, reservations: Reservati if operation.command == SliceCommand.Create: self.state = SliceState.Configuring + elif operation.command == SliceCommand.Renew: + self.state = SliceState.Configuring + elif operation.command == SliceCommand.Modify: self.state = SliceState.Modifying elif operation.command == SliceCommand.ModifyAccept: if self.state == SliceState.ModifyError: - self.state = SliceState.StableError + if self.last_state in [SliceState.AllocatedOK, SliceState.AllocatedError]: + self.state = SliceState.AllocatedError + else: + self.state = SliceState.StableError + elif self.state == SliceState.ModifyOK: - self.state = SliceState.StableOK + if self.last_state in [SliceState.AllocatedOK, SliceState.AllocatedError]: + self.state = SliceState.AllocatedOK + else: + self.state = SliceState.StableOK elif operation.command == SliceCommand.Delete: if self.state != SliceState.Dead: @@ -266,7 +294,36 @@ def transition_slice(self, *, operation: SliceOperation, reservations: Reservati if not has_error and r.get_error_message() is not None and len(r.get_error_message()) > 0: has_error = True - if self.state == SliceState.Nascent or self.state == SliceState.Configuring: + if self.state in [SliceState.Nascent, SliceState.Configuring]: + if not bins.has_state_other_than(ReservationStates.Active, ReservationStates.Closed, + ReservationStates.CloseFail): + if not has_error: + self.state = SliceState.StableOK + else: + self.state = SliceState.StableError + + if (not bins.has_state_other_than(ReservationStates.Active, ReservationStates.Failed, + ReservationStates.Closed, ReservationStates.CloseFail)) and \ + bins.has_state(s=ReservationStates.Failed): + self.state = SliceState.StableError + + if not bins.has_state_other_than(ReservationStates.Ticketed, ReservationStates.Closed, + ReservationStates.CloseFail): + if not has_error: + self.state = SliceState.AllocatedOK + else: + self.state = SliceState.AllocatedError + + if (not bins.has_state_other_than(ReservationStates.Ticketed, ReservationStates.Failed, + ReservationStates.Closed, ReservationStates.CloseFail)) and \ + bins.has_state(s=ReservationStates.Failed): + self.state = SliceState.AllocatedError + + if not bins.has_state_other_than(ReservationStates.Closed, ReservationStates.CloseWait, + ReservationStates.Failed, ReservationStates.CloseFail): + self.state = SliceState.Closing + + if self.state in [SliceState.AllocatedOK, SliceState.AllocatedError]: if not bins.has_state_other_than(ReservationStates.Active, ReservationStates.Closed, ReservationStates.CloseFail): if not has_error: @@ -296,12 +353,24 @@ def transition_slice(self, *, operation: SliceOperation, reservations: Reservati bins.has_state(s=ReservationStates.Failed): self.state = SliceState.ModifyError + if not bins.has_state_other_than(ReservationStates.Ticketed, ReservationStates.Closed, + ReservationStates.CloseFail): + if has_error: + self.state = SliceState.ModifyError + else: + self.state = SliceState.ModifyOK + + if (not bins.has_state_other_than(ReservationStates.Ticketed, ReservationStates.Failed, + ReservationStates.Closed, ReservationStates.CloseFail)) and \ + 
bins.has_state(s=ReservationStates.Failed): + self.state = SliceState.ModifyError + if not bins.has_state_other_than(ReservationStates.Closed, ReservationStates.CloseWait, ReservationStates.Failed, ReservationStates.CloseFail): self.state = SliceState.Closing - elif self.state == SliceState.StableError or self.state == SliceState.StableOK or \ - self.state == SliceState.ModifyError or self.state == SliceState.ModifyOK: + elif self.state in [SliceState.StableError, SliceState.StableOK, SliceState.ModifyError, + SliceState.ModifyOK, SliceState.AllocatedError, SliceState.AllocatedOK]: if not bins.has_state_other_than(ReservationStates.Closed, ReservationStates.CloseWait, ReservationStates.Failed, ReservationStates.CloseFail): self.state = SliceState.Dead @@ -326,4 +395,4 @@ def get_state(self) -> SliceState: return self.state def clear(self): - self.state = SliceState.Nascent \ No newline at end of file + self.state = SliceState.Nascent diff --git a/fabric_cf/actor/core/manage/actor_management_object.py b/fabric_cf/actor/core/manage/actor_management_object.py index e08df666..8f304837 100644 --- a/fabric_cf/actor/core/manage/actor_management_object.py +++ b/fabric_cf/actor/core/manage/actor_management_object.py @@ -25,11 +25,11 @@ # Author: Komal Thareja (kthare10@renci.org) from __future__ import annotations +import traceback from datetime import datetime, timezone from typing import TYPE_CHECKING, List, Dict, Tuple -from fabric_mb.message_bus.messages.poa_avro import PoaAvro -from fabric_mb.message_bus.messages.poa_info_avro import PoaInfoAvro +from fabric_cf.actor.fim.fim_helper import FimHelper from fabric_mb.message_bus.messages.reservation_mng import ReservationMng from fabric_mb.message_bus.messages.result_delegation_avro import ResultDelegationAvro from fabric_mb.message_bus.messages.result_poa_avro import ResultPoaAvro @@ -40,6 +40,7 @@ from fabric_mb.message_bus.messages.result_avro import ResultAvro from fabric_mb.message_bus.messages.result_slice_avro import ResultSliceAvro from fabric_mb.message_bus.messages.slice_avro import SliceAvro +from fim.user import GraphFormat from fabric_cf.actor.core.apis.abc_actor_runnable import ABCActorRunnable from fabric_cf.actor.core.common.constants import Constants, ErrorCodes @@ -92,7 +93,6 @@ def save(self) -> dict: return properties def recover(self): - actor_name = None if Constants.PROPERTY_ACTOR_NAME in self.serial: actor_name = self.serial[Constants.PROPERTY_ACTOR_NAME] else: @@ -131,14 +131,14 @@ def make_local_db_object(self, *, actor: ABCActorMixin): def set_actor(self, *, actor: ABCActorMixin): if self.actor is None: self.actor = actor - #self.db = actor.get_plugin().get_database() self.logger = actor.get_logger() self.id = actor.get_guid() self.make_local_db_object(actor=actor) def get_slices(self, *, slice_id: ID, caller: AuthToken, slice_name: str = None, email: str = None, states: List[int] = None, project: str = None, limit: int = None, - offset: int = None, user_id: str = None) -> ResultSliceAvro: + offset: int = None, user_id: str = None, search: str = None, + exact_match: bool = False) -> ResultSliceAvro: result = ResultSliceAvro() result.status = ResultAvro() @@ -152,7 +152,7 @@ def get_slices(self, *, slice_id: ID, caller: AuthToken, slice_name: str = None, try: slice_list = self.db.get_slices(slice_id=slice_id, slice_name=slice_name, email=email, states=states, project_id=project, limit=limit, offset=offset, - oidc_sub=user_id) + oidc_sub=user_id, search=search, exact_match=exact_match) except Exception as e: 
self.logger.error("getSlices:db access {}".format(e)) result.status.set_code(ErrorCodes.ErrorDatabaseError.value) @@ -170,6 +170,26 @@ def get_slices(self, *, slice_id: ID, caller: AuthToken, slice_name: str = None, result.status = ManagementObject.set_exception_details(result=result.status, e=e) return result + def get_metrics(self, *, project_id: str, oidc_sub: str, excluded_projects: List[str] = None) -> list: + try: + return self.db.get_metrics(project_id=project_id, oidc_sub=oidc_sub, excluded_projects=excluded_projects) + except Exception as e: + self.logger.error("get_metrics {}".format(e)) + + def increment_metrics(self, *, project_id: str, oidc_sub: str, slice_count: int = 1) -> bool: + try: + return self.db.increment_metrics(project_id=project_id, oidc_sub=oidc_sub, slice_count=slice_count) + except Exception as e: + self.logger.error("add_or_update_metrics {}".format(e)) + + def get_slice_count(self, *, caller: AuthToken, email: str = None, states: List[int] = None, + project: str = None, user_id: str = None, excluded_projects: List[str] = None) -> int: + try: + return self.db.get_slice_count(email=email, states=states, project_id=project, oidc_sub=user_id) + except Exception as e: + self.logger.error("get_slice_count {}".format(e)) + return -1 + def add_slice(self, *, slice_obj: SliceAvro, caller: AuthToken) -> ResultStringAvro: result = ResultStringAvro() result.status = ResultAvro() @@ -190,7 +210,7 @@ def add_slice(self, *, slice_obj: SliceAvro, caller: AuthToken) -> ResultStringA slice_obj_new.set_graph_id(graph_id=slice_obj.graph_id) slice_obj_new.set_config_properties(value=slice_obj.get_config_properties()) slice_obj_new.set_lease_end(lease_end=slice_obj.get_lease_end()) - slice_obj_new.set_lease_start(lease_start=datetime.now(timezone.utc)) + slice_obj_new.set_lease_start(lease_start=slice_obj.get_lease_start()) if slice_obj.get_inventory(): slice_obj_new.set_inventory(value=True) @@ -327,7 +347,8 @@ def run(self): try: if modify_state: slice_object = Translate.translate_slice(slice_avro=slice_mng) - self.actor.modify_slice(slice_object=slice_object) + from fabric_cf.actor.core.kernel.slice_state_machine import SliceState + self.actor.modify_slice(slice_object=slice_object, new_state=SliceState(slice_mng.get_state())) else: slice_obj = self.actor.get_slice(slice_id=slice_id) if slice_obj is None: @@ -409,7 +430,8 @@ def get_sites(self, *, caller: AuthToken, site: str) -> ResultSitesAvro: def get_reservations(self, *, caller: AuthToken, states: List[int] = None, slice_id: ID = None, rid: ID = None, oidc_claim_sub: str = None, email: str = None, rid_list: List[str] = None, type: str = None, - site: str = None, node_id: str = None) -> ResultReservationAvro: + site: str = None, node_id: str = None, host: str = None, + ip_subnet: str = None) -> ResultReservationAvro: result = ResultReservationAvro() result.status = ResultAvro() @@ -429,7 +451,7 @@ def get_reservations(self, *, caller: AuthToken, states: List[int] = None, else: res_list = self.db.get_reservations(slice_id=slice_id, rid=rid, email=email, states=states, rsv_type=rsv_type, site=site, - graph_node_id=node_id) + graph_node_id=node_id, host=host, ip_subnet=ip_subnet) except Exception as e: self.logger.error("getReservations:db access {}".format(e)) result.status.set_code(ErrorCodes.ErrorDatabaseError.value) @@ -439,10 +461,11 @@ def get_reservations(self, *, caller: AuthToken, states: List[int] = None, if res_list is not None: result.reservations = [] for r in res_list: - slice_id = r.get_slice_id() - slice_obj = 
self.get_slice_by_guid(guid=slice_id) + r_slice_id = r.get_slice_id() + slice_obj = self.get_slice_by_guid(guid=r_slice_id) r.restore(actor=self.actor, slice_obj=slice_obj) - rr = Converter.fill_reservation(reservation=r, full=True) + full = True if slice_id or rid else False + rr = Converter.fill_reservation(reservation=r, full=full) result.reservations.append(rr) except ReservationNotFoundException as e: self.logger.error("getReservations: {}".format(e)) @@ -859,3 +882,15 @@ def get_poas(self, *, caller: AuthToken, states: List[int] = None, result.status = ManagementObject.set_exception_details(result=result.status, e=e) return result + + def build_broker_query_model(self, level_0_broker_query_model: str, level: int, + graph_format: GraphFormat = GraphFormat.GRAPHML, + start: datetime = None, end: datetime = None, includes: str = None, + excludes: str = None) -> str: + try: + return FimHelper.build_broker_query_model(db=self.db, level_0_broker_query_model=level_0_broker_query_model, + level=level, graph_format=graph_format, start=start, + end=end, includes=includes, excludes=excludes) + except Exception as e: + self.logger.error(f"Exception occurred build_broker_query_model e: {e}") + self.logger.error(traceback.format_exc()) \ No newline at end of file diff --git a/fabric_cf/actor/core/manage/broker_management_object.py b/fabric_cf/actor/core/manage/broker_management_object.py index be4c00e3..04b95402 100644 --- a/fabric_cf/actor/core/manage/broker_management_object.py +++ b/fabric_cf/actor/core/manage/broker_management_object.py @@ -98,9 +98,12 @@ def update_broker(self, *, broker: ProxyAvro, caller: AuthToken) -> ResultAvro: return self.client_helper.update_broker(broker=broker, caller=caller) def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str, - level: int, graph_format: GraphFormat) -> ResultBrokerQueryModelAvro: + level: int, graph_format: GraphFormat, start: datetime = None, + end: datetime = None, includes: str = None, + excludes: str = None) -> ResultBrokerQueryModelAvro: return self.client_helper.get_broker_query_model(broker=broker, caller=caller, id_token=id_token, level=level, - graph_format=graph_format) + graph_format=graph_format, start=start, end=end, + includes=includes, excludes=excludes) def add_reservation(self, *, reservation: TicketReservationAvro, caller: AuthToken) -> ResultStringAvro: return self.client_helper.add_reservation(reservation=reservation, caller=caller) diff --git a/fabric_cf/actor/core/manage/client_actor_management_object_helper.py b/fabric_cf/actor/core/manage/client_actor_management_object_helper.py index 9d38573c..662b5608 100644 --- a/fabric_cf/actor/core/manage/client_actor_management_object_helper.py +++ b/fabric_cf/actor/core/manage/client_actor_management_object_helper.py @@ -152,7 +152,9 @@ def update_broker(self, *, broker: ProxyAvro, caller: AuthToken) -> ResultAvro: return result def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str, - level: int, graph_format: GraphFormat) -> ResultBrokerQueryModelAvro: + level: int, graph_format: GraphFormat, start: datetime = None, + end: datetime = None, includes: str = None, + excludes: str = None) -> ResultBrokerQueryModelAvro: result = ResultBrokerQueryModelAvro() result.status = ResultAvro() @@ -165,7 +167,9 @@ def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str b = self.client.get_broker(guid=broker) if b is not None: - request = BrokerPolicy.get_broker_query_model_query(level=level, bqm_format=graph_format) + 
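The broker query model entry points above now carry an optional time window (start/end) and site include/exclude lists from the management API down into FimHelper.build_broker_query_model. A hedged usage sketch: `mgmt` and `broker_id` are placeholders for a management proxy exposing the extended get_broker_query_model signature shown in this diff, and the site names are purely illustrative; only the GraphFormat import matches the `from fim.user import GraphFormat` line added above.

```python
from datetime import datetime, timedelta, timezone

from fim.user import GraphFormat


def query_future_capacity(mgmt, broker_id, id_token: str) -> str:
    """Ask the broker for a level-1 GraphML model covering the next two weeks,
    restricted to a couple of sites (site names here are examples only)."""
    start = datetime.now(timezone.utc)
    end = start + timedelta(days=14)
    model = mgmt.get_broker_query_model(broker=broker_id, id_token=id_token,
                                        level=1, graph_format=GraphFormat.GRAPHML,
                                        start=start, end=end,
                                        includes="RENC,UKY", excludes=None)
    return model
```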
request = BrokerPolicy.get_broker_query_model_query(level=level, bqm_format=graph_format, + start=start, end=end, includes=includes, + excludes=excludes) response = ManagementUtils.query(actor=self.client, actor_proxy=b, query=request) result.model = Translate.translate_to_broker_query_model(query_response=response, level=level) else: @@ -458,13 +462,16 @@ def run(self): dependencies=redeem_dep_res_list) return result - + ''' # Process Extend for Renew synchronously if new_end_time is not None: result = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client)) # Process Extend for Modify asynchronously else: self.client.execute_on_actor_thread(runnable=Runner(actor=self.client)) + ''' + # Always Process Extend asynchronously + self.client.execute_on_actor_thread(runnable=Runner(actor=self.client)) except Exception as e: self.logger.error("extend_reservation {}".format(e)) diff --git a/fabric_cf/actor/core/manage/controller_management_object.py b/fabric_cf/actor/core/manage/controller_management_object.py index 5d36c735..ae89609d 100644 --- a/fabric_cf/actor/core/manage/controller_management_object.py +++ b/fabric_cf/actor/core/manage/controller_management_object.py @@ -102,9 +102,12 @@ def update_broker(self, *, broker: ProxyAvro, caller: AuthToken) -> ResultAvro: return self.client_helper.update_broker(broker=broker, caller=caller) def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str, - level: int, graph_format: GraphFormat) -> ResultBrokerQueryModelAvro: + level: int, graph_format: GraphFormat, start: datetime = None, + end: datetime = None, includes: str = None, + excludes: str = None) -> ResultBrokerQueryModelAvro: return self.client_helper.get_broker_query_model(broker=broker, caller=caller, id_token=id_token, level=level, - graph_format=graph_format) + graph_format=graph_format, start=start, end=end, + includes=includes, excludes=excludes) def add_reservation(self, *, reservation: TicketReservationAvro, caller: AuthToken) -> ResultStringAvro: return self.client_helper.add_reservation(reservation=reservation, caller=caller) diff --git a/fabric_cf/actor/core/manage/kafka/kafka_actor.py b/fabric_cf/actor/core/manage/kafka/kafka_actor.py index 10ede622..129d7ee5 100644 --- a/fabric_cf/actor/core/manage/kafka/kafka_actor.py +++ b/fabric_cf/actor/core/manage/kafka/kafka_actor.py @@ -33,7 +33,6 @@ from fabric_mb.message_bus.messages.get_delegations_avro import GetDelegationsAvro from fabric_mb.message_bus.messages.get_sites_request_avro import GetSitesRequestAvro from fabric_mb.message_bus.messages.maintenance_request_avro import MaintenanceRequestAvro -from fabric_mb.message_bus.messages.poa_avro import PoaAvro from fabric_mb.message_bus.messages.poa_info_avro import PoaInfoAvro from fabric_mb.message_bus.messages.remove_delegation_avro import RemoveDelegationAvro from fabric_mb.message_bus.messages.reservation_state_avro import ReservationStateAvro @@ -79,7 +78,7 @@ def toggle_maintenance_mode(self, actor_guid: str, callback_topic: str, sites: L def get_slices(self, *, slice_id: ID = None, slice_name: str = None, email: str = None, project: str = None, states: List[int] = None, limit: int = None, offset: int = None, - user_id: str = None) -> List[SliceAvro] or None: + user_id: str = None, search: str = None, exact_match: bool = False) -> List[SliceAvro] or None: request = GetSlicesRequestAvro() request = self.fill_request_by_id_message(request=request, email=email, slice_id=slice_id, slice_name=slice_name, states=states) @@ -132,11 
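The change above drops the synchronous path that was used for renew-style extends and always hands the Runner to the actor thread asynchronously. A minimal sketch of the difference between the two dispatch styles, using a plain ThreadPoolExecutor as a stand-in for the actor's event thread:

```python
from concurrent.futures import ThreadPoolExecutor


def extend_reservation_stub() -> str:
    # Placeholder for the Runner.run() body that performs the extend.
    return "extended"


executor = ThreadPoolExecutor(max_workers=1)  # stand-in for the actor thread

# Old behaviour for renew: submit and block until the extend completes.
result = executor.submit(extend_reservation_stub).result()

# New behaviour for every extend: submit and return immediately;
# completion is observed later through reservation state updates.
future = executor.submit(extend_reservation_stub)

executor.shutdown(wait=True)
print(result, future.result())
```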
+131,12 @@ def delete_slice(self, *, slice_id: ID) -> bool: def get_reservations(self, *, states: List[int] = None, slice_id: ID = None, rid: ID = None, oidc_claim_sub: str = None, email: str = None, rid_list: List[str] = None, - type: str = None, site: str = None, node_id: str = None) -> List[ReservationMng]: + type: str = None, site: str = None, node_id: str = None, + host: str = None, ip_subnet: str = None) -> List[ReservationMng]: request = GetReservationsRequestAvro() request = self.fill_request_by_id_message(request=request, slice_id=slice_id, states=states, email=email, rid=rid, - type=type, site=site) + type=type, site=site, host=host, ip_subnet=ip_subnet) status, response = self.send_request(request) if status.code == 0: diff --git a/fabric_cf/actor/core/manage/kafka/kafka_broker.py b/fabric_cf/actor/core/manage/kafka/kafka_broker.py index ca67d55d..39f66936 100644 --- a/fabric_cf/actor/core/manage/kafka/kafka_broker.py +++ b/fabric_cf/actor/core/manage/kafka/kafka_broker.py @@ -79,8 +79,6 @@ def add_reservations(self, *, reservations: list) -> List[TicketReservationAvro] if status.code == 0: return response.result - return None - def demand_reservation(self, *, reservation: ReservationMng) -> bool: request = DemandReservationAvro() request.guid = str(self.management_id) @@ -110,10 +108,10 @@ def get_brokers(self, *, broker: ID = None, id_token: str = None) -> List[ProxyA if status.code == 0: return response.proxies - return None - def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, - graph_format: GraphFormat) -> BrokerQueryModelAvro: + def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, graph_format: GraphFormat, + start: datetime = None, end: datetime = None, + includes: str = None, excludes: str = None) -> BrokerQueryModelAvro: request = GetBrokerQueryModelRequestAvro() request.id_token = id_token request.guid = str(self.management_id) @@ -121,13 +119,14 @@ def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, request.message_id = str(ID()) request.callback_topic = self.callback_topic request.broker_id = str(broker) - request.level = level - request.graph_format = graph_format.value + request.set_level(value=level) + request.set_graph_format(graph_format=graph_format.value) + request.set_start(start=start) + request.set_end(end=end) status, response = self.send_request(request) if status.code == 0: return response.model - return None def extend_reservation(self, *, reservation: ID, new_end_time: datetime, sliver: BaseSliver, dependencies: List[ReservationPredecessorAvro] = None) -> bool: @@ -159,8 +158,6 @@ def claim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: if status.code == 0 and response.delegations is not None and len(response.delegations) > 0: return next(iter(response.delegations)) - return None - def reclaim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: request = ReclaimResourcesAvro() request.guid = str(self.management_id) @@ -175,8 +172,6 @@ def reclaim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: if status.code == 0 and response.delegations is not None and len(response.delegations) > 0: return next(iter(response.delegations)) - return None - def clone(self): return KafkaBroker(guid=self.management_id, kafka_topic=self.kafka_topic, diff --git a/fabric_cf/actor/core/manage/kafka/kafka_controller.py b/fabric_cf/actor/core/manage/kafka/kafka_controller.py index 4aa0c19f..6dbc0a02 100644 --- a/fabric_cf/actor/core/manage/kafka/kafka_controller.py +++ 
b/fabric_cf/actor/core/manage/kafka/kafka_controller.py @@ -71,17 +71,6 @@ def get_brokers(self, *, broker: ID = None, id_token: str = None) -> List[ProxyA if status.code == 0: return response.proxies - return None - - def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, - graph_format: GraphFormat) -> BrokerQueryModelAvro: - raise ManageException(Constants.NOT_IMPLEMENTED) - - def claim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: - raise ManageException(Constants.NOT_IMPLEMENTED) - - def reclaim_delegations(self, *, broker: ID, did: ID) -> DelegationAvro: - raise ManageException(Constants.NOT_IMPLEMENTED) def get_reservation_units(self, *, rid: ID, id_token: str = None) -> List[UnitAvro]: request = GetReservationUnitsRequestAvro() @@ -90,23 +79,6 @@ def get_reservation_units(self, *, rid: ID, id_token: str = None) -> List[UnitAv if status.code == 0: return response.units - return None - - def add_reservation(self, *, reservation: TicketReservationAvro) -> ID: - raise ManageException(Constants.NOT_IMPLEMENTED) - - def add_reservations(self, *, reservations: List[ReservationMng]) ->list: - raise ManageException(Constants.NOT_IMPLEMENTED) - - def demand_reservation(self, *, reservation: ReservationMng) -> bool: - raise ManageException(Constants.NOT_IMPLEMENTED) - - def demand_reservation_rid(self, *, rid: ID) -> bool: - raise ManageException(Constants.NOT_IMPLEMENTED) - - def extend_reservation(self, *, reservation: ID, new_end_time: datetime, sliver: BaseSliver, - dependencies: List[ReservationPredecessorAvro] = None) -> bool: - raise ManageException(Constants.NOT_IMPLEMENTED) def modify_reservation(self, *, rid: ID, modify_properties: dict) -> bool: raise ManageException(Constants.NOT_IMPLEMENTED) diff --git a/fabric_cf/actor/core/manage/kafka/kafka_mgmt_message_processor.py b/fabric_cf/actor/core/manage/kafka/kafka_mgmt_message_processor.py index 8f21059d..975772b5 100644 --- a/fabric_cf/actor/core/manage/kafka/kafka_mgmt_message_processor.py +++ b/fabric_cf/actor/core/manage/kafka/kafka_mgmt_message_processor.py @@ -48,8 +48,7 @@ def __init__(self, *, consumer_conf: dict, key_schema_location, value_schema_loc super(KafkaMgmtMessageProcessor, self).__init__(consumer_conf=consumer_conf, key_schema_location=key_schema_location, value_schema_location=value_schema_location, - topics=topics, batch_size=batch_size, logger=logger, - sync=sync) + topics=topics, batch_size=batch_size, logger=logger) self.thread_lock = threading.Lock() self.thread = None self.messages = {} diff --git a/fabric_cf/actor/core/manage/kafka/kafka_proxy.py b/fabric_cf/actor/core/manage/kafka/kafka_proxy.py index c6bc9501..a45c76cc 100644 --- a/fabric_cf/actor/core/manage/kafka/kafka_proxy.py +++ b/fabric_cf/actor/core/manage/kafka/kafka_proxy.py @@ -93,7 +93,7 @@ def get_type_id(self) -> str: def fill_request_by_id_message(self, request: RequestByIdRecord, id_token: str = None, email: str = None, slice_id: ID = None, slice_name: str = None, states: List[int] = None, rid: ID = None, delegation_id: str = None, broker_id: ID = None, type: str = None, - site: str = None): + site: str = None, host: str = None, ip_subnet: str = None): request.guid = str(self.management_id) request.auth = self.auth request.callback_topic = self.callback_topic @@ -104,6 +104,8 @@ def fill_request_by_id_message(self, request: RequestByIdRecord, id_token: str = request.delegation_id = delegation_id request.site = site request.type = type + request.ip_subnet = ip_subnet + request.host = host if slice_id is not 
None: request.slice_id = str(slice_id) if rid is not None: diff --git a/fabric_cf/actor/core/manage/kafka/services/kafka_actor_service.py b/fabric_cf/actor/core/manage/kafka/services/kafka_actor_service.py index 15dc58d8..2d576a98 100644 --- a/fabric_cf/actor/core/manage/kafka/services/kafka_actor_service.py +++ b/fabric_cf/actor/core/manage/kafka/services/kafka_actor_service.py @@ -283,7 +283,8 @@ def get_reservations(self, *, request: GetReservationsRequestAvro) -> ResultRese result = mo.get_reservations(caller=auth, states=request.get_states(), slice_id=slice_id, rid=rid, email=request.get_email(), type=request.get_type(), - site=request.get_site()) + site=request.get_site(), ip_subnet=request.get_ip_subnet(), + host=request.get_host()) except Exception as e: result.status.set_code(ErrorCodes.ErrorInternalError.value) diff --git a/fabric_cf/actor/core/manage/local/local_actor.py b/fabric_cf/actor/core/manage/local/local_actor.py index c462630d..ae80b11e 100644 --- a/fabric_cf/actor/core/manage/local/local_actor.py +++ b/fabric_cf/actor/core/manage/local/local_actor.py @@ -57,7 +57,7 @@ def __init__(self, *, manager: ManagementObject, auth: AuthToken): def get_slices(self, *, slice_id: ID = None, slice_name: str = None, email: str = None, project: str = None, states: List[int] = None, limit: int = None, offset: int = None, - user_id: str = None) -> List[SliceAvro] or None: + user_id: str = None, search: str = None, exact_match: bool = False) -> List[SliceAvro] or None: self.clear_last() try: result = self.manager.get_slices(slice_id=slice_id, caller=self.auth, states=states, @@ -72,6 +72,30 @@ def get_slices(self, *, slice_id: ID = None, slice_name: str = None, email: str return None + def increment_metrics(self, *, project_id: str, oidc_sub: str, slice_count: int = 1) -> bool: + try: + return self.manager.increment_metrics(project_id=project_id, oidc_sub=oidc_sub, slice_count=slice_count) + except Exception as e: + self.on_exception(e=e, traceback_str=traceback.format_exc()) + return False + + def get_metrics(self, *, project_id: str, oidc_sub: str, excluded_projects: List[str] = None) -> list: + try: + return self.manager.get_metrics(project_id=project_id, oidc_sub=oidc_sub, + excluded_projects=excluded_projects) + except Exception as e: + self.on_exception(e=e, traceback_str=traceback.format_exc()) + + def get_slice_count(self, *, email: str = None, project: str = None, states: List[int] = None, + user_id: str = None, excluded_projects: List[str] = None) -> int: + try: + return self.manager.get_slice_count(caller=self.auth, states=states, email=email, project=project, + user_id=user_id, excluded_projects=excluded_projects) + except Exception as e: + self.on_exception(e=e, traceback_str=traceback.format_exc()) + + return -1 + def remove_slice(self, *, slice_id: ID) -> bool: self.clear_last() try: @@ -86,12 +110,14 @@ def remove_slice(self, *, slice_id: ID) -> bool: def get_reservations(self, *, states: List[int] = None, slice_id: ID = None, rid: ID = None, oidc_claim_sub: str = None, email: str = None, rid_list: List[str] = None, - type: str = None, site: str = None, node_id: str = None) -> List[ReservationMng]: + type: str = None, site: str = None, node_id: str = None, + host: str = None, ip_subnet: str = None) -> List[ReservationMng]: self.clear_last() try: result = self.manager.get_reservations(caller=self.auth, states=states, slice_id=slice_id, rid=rid, oidc_claim_sub=oidc_claim_sub, email=email, rid_list=rid_list, - type=type, site=site, node_id=node_id) + type=type, site=site, 
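host and ip_subnet are threaded as optional filters from the Kafka request record through the management object down to the database query above. The sketch below shows the filtering those parameters imply, applied to plain dictionaries instead of the real reservation records (the actual filtering happens in SQL inside the database plugin; the host names below are illustrative):

```python
from typing import Dict, List, Optional


def filter_reservations(rows: List[Dict], host: Optional[str] = None,
                        ip_subnet: Optional[str] = None) -> List[Dict]:
    """Keep only reservations matching the optional host / ip_subnet filters."""
    result = rows
    if host is not None:
        result = [r for r in result if r.get("host") == host]
    if ip_subnet is not None:
        result = [r for r in result if r.get("ip_subnet") == ip_subnet]
    return result


rows = [
    {"rid": "r1", "host": "renc-w1", "ip_subnet": "10.128.0.0/24"},
    {"rid": "r2", "host": "renc-w2", "ip_subnet": "10.128.1.0/24"},
]
print(filter_reservations(rows, host="renc-w1"))   # -> only r1
```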
node_id=node_id, host=host, + ip_subnet=ip_subnet) self.last_status = result.status if result.status.get_code() == 0: @@ -307,4 +333,4 @@ def get_poas(self, *, states: List[int] = None, slice_id: ID = None, rid: ID = N return result.poas except Exception as e: - self.on_exception(e=e, traceback_str=traceback.format_exc()) \ No newline at end of file + self.on_exception(e=e, traceback_str=traceback.format_exc()) diff --git a/fabric_cf/actor/core/manage/local/local_broker.py b/fabric_cf/actor/core/manage/local/local_broker.py index 212d61c3..d8f0d581 100644 --- a/fabric_cf/actor/core/manage/local/local_broker.py +++ b/fabric_cf/actor/core/manage/local/local_broker.py @@ -94,13 +94,13 @@ def get_brokers(self, *, broker: ID = None, id_token: str = None) -> List[ProxyA return None - def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, - graph_format: GraphFormat) -> BrokerQueryModelAvro: + def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, graph_format: GraphFormat, + start: datetime = None, end: datetime = None) -> BrokerQueryModelAvro: self.clear_last() try: result = self.manager.get_broker_query_model(broker=broker, caller=self.auth, level=level, id_token=id_token, ignore_broker_check=True, - graph_format=graph_format) + graph_format=graph_format, start=start, end=end) self.last_status = result.status if result.status.get_code() == 0: diff --git a/fabric_cf/actor/core/manage/local/local_controller.py b/fabric_cf/actor/core/manage/local/local_controller.py index 13889a79..4baf8b50 100644 --- a/fabric_cf/actor/core/manage/local/local_controller.py +++ b/fabric_cf/actor/core/manage/local/local_controller.py @@ -93,16 +93,31 @@ def get_brokers(self, *, broker: ID = None, id_token: str = None) -> List[ProxyA except Exception as e: self.on_exception(e=e, traceback_str=traceback.format_exc()) - def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, - graph_format: GraphFormat) -> BrokerQueryModelAvro: + def get_broker_query_model(self, *, broker: ID, id_token: str, level: int, graph_format: GraphFormat, + start: datetime = None, end: datetime = None, + includes: str = None, excludes: str = None) -> BrokerQueryModelAvro: self.clear_last() try: result = self.manager.get_broker_query_model(broker=broker, caller=self.auth, id_token=id_token, - level=level, graph_format=graph_format) + level=level, graph_format=graph_format, + start=start, end=end, excludes=excludes, includes=includes) self.last_status = result.status if result.status.get_code() == 0: return result.model + except Exception as e: + print(e) + self.on_exception(e=e, traceback_str=traceback.format_exc()) + + def build_broker_query_model(self, level_0_broker_query_model: str, level: int, + graph_format: GraphFormat = GraphFormat.GRAPHML, + start: datetime = None, end: datetime = None, + includes: str = None, excludes: str = None) -> str: + self.clear_last() + try: + return self.manager.build_broker_query_model(level_0_broker_query_model=level_0_broker_query_model, + level=level, graph_format=graph_format, start=start, + end=end, includes=includes, excludes=excludes) except Exception as e: self.on_exception(e=e, traceback_str=traceback.format_exc()) @@ -228,4 +243,4 @@ def poa(self, *, poa: PoaAvro) -> bool: except Exception as e: self.on_exception(e=e, traceback_str=traceback.format_exc()) - return False \ No newline at end of file + return False diff --git a/fabric_cf/actor/core/manage/management_utils.py b/fabric_cf/actor/core/manage/management_utils.py index 
bc58a7fa..2b2e8aad 100644 --- a/fabric_cf/actor/core/manage/management_utils.py +++ b/fabric_cf/actor/core/manage/management_utils.py @@ -34,7 +34,6 @@ from fabric_cf.actor.core.common.constants import Constants from fabric_cf.actor.core.apis.abc_mgmt_container import ABCMgmtContainer from fabric_cf.actor.core.manage.local.local_container import LocalContainer -from fabric_cf.actor.core.proxies.kafka.translate import Translate from fabric_cf.actor.core.util.id import ID from fabric_cf.actor.core.util.rpc_exception import RPCException, RPCError from fabric_cf.actor.core.apis.abc_client_reservation import ABCClientReservation diff --git a/fabric_cf/actor/core/plugins/db/actor_database.py b/fabric_cf/actor/core/plugins/db/actor_database.py index 6285dc7e..934eb264 100644 --- a/fabric_cf/actor/core/plugins/db/actor_database.py +++ b/fabric_cf/actor/core/plugins/db/actor_database.py @@ -201,7 +201,8 @@ def remove_slice(self, *, slice_id: ID): def get_slices(self, *, slice_id: ID = None, slice_name: str = None, project_id: str = None, email: str = None, states: list[int] = None, oidc_sub: str = None, slc_type: List[SliceTypes] = None, - limit: int = None, offset: int = None, lease_end: datetime = None) -> List[ABCSlice] or None: + limit: int = None, offset: int = None, lease_end: datetime = None, + search: str = None, exact_match: bool = False) -> List[ABCSlice] or None: result = [] try: try: @@ -212,7 +213,7 @@ def get_slices(self, *, slice_id: ID = None, slice_name: str = None, project_id: sid = str(slice_id) if slice_id is not None else None slices = self.db.get_slices(slice_id=sid, slice_name=slice_name, project_id=project_id, email=email, states=states, oidc_sub=oidc_sub, slc_type=slice_type, limit=limit, - offset=offset, lease_end=lease_end) + offset=offset, lease_end=lease_end, search=search, exact_match=exact_match) finally: if self.lock.locked(): self.lock.release() @@ -229,6 +230,46 @@ def get_slices(self, *, slice_id: ID = None, slice_name: str = None, project_id: self.lock.release() return result + def increment_metrics(self, *, project_id: str, oidc_sub: str, slice_count: int = 1) -> bool: + try: + self.lock.acquire() + self.db.increment_metrics(project_id=project_id, user_id=oidc_sub, slice_count=slice_count) + return True + except Exception as e: + self.logger.error(e) + self.logger.error(traceback.format_exc()) + finally: + if self.lock.locked(): + self.lock.release() + return False + + def get_metrics(self, *, project_id: str, oidc_sub: str, excluded_projects: List[str] = None) -> list: + try: + return self.db.get_metrics(project_id=project_id, user_id=oidc_sub, excluded_projects=excluded_projects) + except Exception as e: + self.logger.error(e) + self.logger.error(traceback.format_exc()) + finally: + if self.lock.locked(): + self.lock.release() + + def get_slice_count(self, *, project_id: str = None, email: str = None, states: list[int] = None, + oidc_sub: str = None, slc_type: List[SliceTypes] = None, + excluded_projects: List[str] = None) -> int: + try: + slice_type = [SliceTypes.ClientSlice.value] + if slc_type is not None: + slice_type = [x.value for x in slc_type] + return self.db.get_slice_count(project_id=project_id, email=email, states=states, oidc_sub=oidc_sub, + slc_type=slice_type, excluded_projects=excluded_projects) + except Exception as e: + self.logger.error(e) + self.logger.error(traceback.format_exc()) + finally: + if self.lock.locked(): + self.lock.release() + return -1 + def add_reservation(self, *, reservation: ABCReservationMixin): try: #self.lock.acquire() 
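get_slices above gains a free-text `search` term and an `exact_match` flag that are handed straight through to the underlying database query. An illustrative in-memory version of that behaviour; the real matching is done in SQL by the database layer, and matching on slice name or owner email is an assumption made only for this sketch:

```python
from typing import Dict, List


def search_slices(slices: List[Dict], search: str = None,
                  exact_match: bool = False) -> List[Dict]:
    """Filter slices by a search term over name/email, exactly or as a substring."""
    if not search:
        return slices
    if exact_match:
        return [s for s in slices if search in (s.get("name"), s.get("email"))]
    needle = search.lower()
    return [s for s in slices
            if needle in (s.get("name") or "").lower()
            or needle in (s.get("email") or "").lower()]


slices = [{"name": "ml-demo", "email": "alice@example.com"},
          {"name": "demo-2", "email": "bob@example.com"}]
print(search_slices(slices, search="demo"))                    # both slices
print(search_slices(slices, search="demo", exact_match=True))  # no slices
```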
@@ -244,23 +285,64 @@ def add_reservation(self, *, reservation: ABCReservationMixin): site = None rsv_type = None components = None - if reservation.get_resources() is not None and reservation.get_resources().get_sliver() is not None: + host = None + ip_subnet = None + sliver = None + from fabric_cf.actor.core.kernel.reservation_client import ReservationClient + if isinstance(reservation, ReservationClient) and reservation.get_leased_resources() and \ + reservation.get_leased_resources().get_sliver(): + sliver = reservation.get_leased_resources().get_sliver() + if not sliver and reservation.get_resources() and reservation.get_resources().get_sliver(): sliver = reservation.get_resources().get_sliver() - site = sliver.get_site() + + if sliver: rsv_type = sliver.get_type().name from fim.slivers.network_service import NetworkServiceSliver + from fim.slivers.network_node import NodeSliver + if isinstance(sliver, NetworkServiceSliver) and sliver.interface_info: + site = sliver.get_site() + if sliver.get_gateway(): + ip_subnet = sliver.get_gateway().subnet + components = [] for interface in sliver.interface_info.interfaces.values(): graph_id_node_id_component_id, bqm_if_name = interface.get_node_map() - if ":" in graph_id_node_id_component_id: - split_string = graph_id_node_id_component_id.split(":") + if ":" in graph_id_node_id_component_id or "#" in graph_id_node_id_component_id: + if "#" in graph_id_node_id_component_id: + split_string = graph_id_node_id_component_id.split("#") + else: + split_string = graph_id_node_id_component_id.split(":") node_id = split_string[1] if len(split_string) > 1 else None comp_id = split_string[2] if len(split_string) > 2 else None bdf = ":".join(split_string[3:]) if len(split_string) > 3 else None if node_id and comp_id and bdf: components.append((node_id, comp_id, bdf)) + elif isinstance(sliver, NodeSliver): + site = sliver.get_site() + if sliver.get_labels() and sliver.get_labels().instance_parent: + host = sliver.get_labels().instance_parent + if sliver.get_label_allocations() and sliver.get_label_allocations().instance_parent: + host = sliver.get_label_allocations().instance_parent + if sliver.get_management_ip(): + ip_subnet = str(sliver.get_management_ip()) + + node_id = reservation.get_graph_node_id() + if node_id and sliver.attached_components_info: + components = [] + for c in sliver.attached_components_info.devices.values(): + if c.get_node_map(): + bqm_id, comp_id = c.get_node_map() + if c.labels and c.labels.bdf: + bdf = c.labels.bdf + if isinstance(c.labels.bdf, str): + bdf = [c.labels.bdf] + for x in bdf: + components.append((node_id, comp_id, x)) + + term = reservation.get_term() + self.db.add_reservation(slc_guid=str(reservation.get_slice_id()), rsv_resid=str(reservation.get_reservation_id()), rsv_category=reservation.get_category().value, @@ -270,7 +352,10 @@ def add_reservation(self, *, reservation: ABCReservationMixin): properties=properties, rsv_graph_node_id=reservation.get_graph_node_id(), oidc_claim_sub=oidc_claim_sub, email=email, site=site, rsv_type=rsv_type, - components=components) + components=components, + lease_start=term.get_start_time() if term else None, + lease_end=term.get_end_time() if term else None, + host=host, ip_subnet=ip_subnet) self.logger.debug( "Reservation {} added to slice {}".format(reservation.get_reservation_id(), reservation.get_slice())) finally: @@ -290,23 +375,62 @@ def update_reservation(self, *, reservation: ABCReservationMixin): site = None rsv_type = None components = None - if reservation.get_resources() 
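The component bookkeeping above now accepts interface node maps keyed with either ":" or "#" separators (the "#" form is introduced later in this diff for facility and switch interfaces), recovering the worker node id, component id, and PCI BDF from the key. A small, self-contained version of that parsing logic:

```python
from typing import Optional, Tuple


def parse_node_map(value: str) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Split '<graph>#<node>#<comp>#<bdf>' or '<graph>:<node>:<comp>:<bdf>'
    into (node_id, component_id, bdf); missing pieces come back as None."""
    if "#" in value:
        parts = value.split("#")
    else:
        parts = value.split(":")
    node_id = parts[1] if len(parts) > 1 else None
    comp_id = parts[2] if len(parts) > 2 else None
    bdf = ":".join(parts[3:]) if len(parts) > 3 else None
    return node_id, comp_id, bdf


# PCI BDFs contain ':' themselves, which is why the tail is re-joined with ':'.
print(parse_node_map("graph-1:node-7:nic-3:0000:81:00.1"))
# -> ('node-7', 'nic-3', '0000:81:00.1')
```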
is not None and reservation.get_resources().get_sliver() is not None: + ip_subnet = None + host = None + sliver = None + from fabric_cf.actor.core.kernel.reservation_client import ReservationClient + if isinstance(reservation, ReservationClient) and reservation.get_leased_resources() and \ + reservation.get_leased_resources().get_sliver(): + sliver = reservation.get_leased_resources().get_sliver() + if not sliver and reservation.get_resources() and reservation.get_resources().get_sliver(): sliver = reservation.get_resources().get_sliver() - site = sliver.get_site() + + if sliver: rsv_type = sliver.get_type().name from fim.slivers.network_service import NetworkServiceSliver + from fim.slivers.network_node import NodeSliver if isinstance(sliver, NetworkServiceSliver) and sliver.interface_info: + site = sliver.get_site() + + if sliver.get_gateway(): + ip_subnet = sliver.get_gateway().subnet + components = [] for interface in sliver.interface_info.interfaces.values(): graph_id_node_id_component_id, bqm_if_name = interface.get_node_map() - if ":" in graph_id_node_id_component_id: - split_string = graph_id_node_id_component_id.split(":") + if ":" in graph_id_node_id_component_id or "#" in graph_id_node_id_component_id: + if "#" in graph_id_node_id_component_id: + split_string = graph_id_node_id_component_id.split("#") + else: + split_string = graph_id_node_id_component_id.split(":") node_id = split_string[1] if len(split_string) > 1 else None comp_id = split_string[2] if len(split_string) > 2 else None bdf = ":".join(split_string[3:]) if len(split_string) > 3 else None if node_id and comp_id and bdf: components.append((node_id, comp_id, bdf)) - + elif isinstance(sliver, NodeSliver): + site = sliver.get_site() + + if sliver.get_labels() and sliver.get_labels().instance_parent: + host = sliver.get_labels().instance_parent + if sliver.get_label_allocations() and sliver.get_label_allocations().instance_parent: + host = sliver.get_label_allocations().instance_parent + if sliver.get_management_ip(): + ip_subnet = str(sliver.get_management_ip()) + node_id = reservation.get_graph_node_id() + if node_id and sliver.attached_components_info: + components = [] + for c in sliver.attached_components_info.devices.values(): + if c.get_node_map(): + bqm_id, comp_id = c.get_node_map() + if c.labels and c.labels.bdf: + bdf = c.labels.bdf + if isinstance(c.labels.bdf, str): + bdf = [c.labels.bdf] + for x in bdf: + components.append((node_id, comp_id, x)) + + term = reservation.get_term() begin = time.time() properties = pickle.dumps(reservation) diff = int(time.time() - begin) @@ -321,7 +445,10 @@ def update_reservation(self, *, reservation: ABCReservationMixin): rsv_joining=reservation.get_join_state().value, properties=properties, rsv_graph_node_id=reservation.get_graph_node_id(), - site=site, rsv_type=rsv_type, components=components) + site=site, rsv_type=rsv_type, components=components, + lease_start=term.get_start_time() if term else None, + lease_end=term.get_end_time() if term else None, + ip_subnet=ip_subnet, host=host) diff = int(time.time() - begin) if diff > 0: self.logger.info(f"DB TIME: {diff}") @@ -458,10 +585,11 @@ def get_authority_reservations(self) -> List[ABCReservationMixin]: return result def get_components(self, *, node_id: str, states: list[int], rsv_type: list[str], component: str = None, - bdf: str = None) -> Dict[str, List[str]]: + bdf: str = None, start: datetime = None, end: datetime = None, + excludes: List[str] = None) -> Dict[str, List[str]]: try: return 
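For node slivers, the reservation rows above now also record the worker host, the management IP, and one component row per PCI address; a component's `bdf` label may arrive as a single string or as a list, so it is normalized to a list first. A compact sketch of that extraction over plain dictionaries standing in for the FIM sliver objects:

```python
from typing import Dict, List, Tuple, Union


def component_rows(node_id: str,
                   devices: Dict[str, Dict[str, Union[str, List[str]]]]
                   ) -> List[Tuple[str, str, str]]:
    """Produce (node_id, component_id, bdf) rows, one per PCI address."""
    rows = []
    for device in devices.values():
        comp_id = device["comp_id"]
        bdf = device["bdf"]
        if isinstance(bdf, str):   # normalize: a single BDF becomes a one-element list
            bdf = [bdf]
        for address in bdf:
            rows.append((node_id, comp_id, address))
    return rows


devices = {"nic1": {"comp_id": "shared-nic-1", "bdf": ["0000:81:00.1", "0000:81:00.2"]},
           "gpu1": {"comp_id": "gpu-rtx6000-2", "bdf": "0000:25:00.0"}}
print(component_rows("node-7", devices))
```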
self.db.get_components(node_id=node_id, states=states, component=component, bdf=bdf, - rsv_type=rsv_type) + rsv_type=rsv_type, start=start, end=end, excludes=excludes) except Exception as e: self.logger.error(e) finally: @@ -469,17 +597,17 @@ def get_components(self, *, node_id: str, states: list[int], rsv_type: list[str] self.lock.release() def get_reservations(self, *, slice_id: ID = None, graph_node_id: str = None, project_id: str = None, - email: str = None, oidc_sub: str = None, rid: ID = None, - states: list[int] = None, site: str = None, - rsv_type: list[str] = None) -> List[ABCReservationMixin]: + email: str = None, oidc_sub: str = None, rid: ID = None, states: list[int] = None, + site: str = None, rsv_type: list[str] = None, start: datetime = None, + end: datetime = None, ip_subnet: str = None, host: str = None) -> List[ABCReservationMixin]: result = [] try: #self.lock.acquire() sid = str(slice_id) if slice_id is not None else None res_id = str(rid) if rid is not None else None - res_dict_list = self.db.get_reservations(slice_id=sid, graph_node_id=graph_node_id, + res_dict_list = self.db.get_reservations(slice_id=sid, graph_node_id=graph_node_id, host=host, ip_subnet=ip_subnet, project_id=project_id, email=email, oidc_sub=oidc_sub, rid=res_id, - states=states, site=site, rsv_type=rsv_type) + states=states, site=site, rsv_type=rsv_type, start=start, end=end) if self.lock.locked(): self.lock.release() result = self._load_reservations_from_db(res_dict_list=res_dict_list) diff --git a/fabric_cf/actor/core/policy/authority_calendar_policy.py b/fabric_cf/actor/core/policy/authority_calendar_policy.py index daa48174..a17c7c9e 100644 --- a/fabric_cf/actor/core/policy/authority_calendar_policy.py +++ b/fabric_cf/actor/core/policy/authority_calendar_policy.py @@ -39,7 +39,7 @@ from fabric_cf.actor.core.common.constants import Constants from fabric_cf.actor.core.core.authority_policy import AuthorityPolicy from fabric_cf.actor.core.common.exceptions import AuthorityException -from fabric_cf.actor.core.kernel.reservation_states import ReservationStates +from fabric_cf.actor.core.kernel.reservation_states import ReservationStates, ReservationPendingStates from fabric_cf.actor.core.kernel.resource_set import ResourceSet from fabric_cf.actor.core.plugins.handlers.config_token import ConfigToken from fabric_cf.actor.core.apis.abc_resource_control import ABCResourceControl @@ -402,23 +402,28 @@ def map(self, *, reservation: ABCAuthorityReservation, node_id_to_reservations: @param node_id_to_reservations: node_id_to_reservations @throws Exception in case of error """ - assigned = self.assign_reservation(reservation=reservation, node_id_to_reservations=node_id_to_reservations) - if assigned is not None: - approved = reservation.get_requested_term() - reservation.set_approved(term=approved, approved_resources=assigned) - reservation.set_bid_pending(value=False) - node_id = assigned.get_sliver().get_node_map()[1] - - if node_id_to_reservations.get(node_id, None) is None: - node_id_to_reservations[node_id] = ReservationSet() - node_id_to_reservations[node_id].add(reservation=reservation) - else: - if not reservation.is_terminal(): - self.logger.debug(f"Deferring reservation {reservation} for the next cycle: " - f"{self.actor.get_current_cycle() + 1}") - self.reschedule(reservation=reservation) + try: + assigned = self.assign_reservation(reservation=reservation, node_id_to_reservations=node_id_to_reservations) + if assigned is not None: + approved = reservation.get_requested_term() + 
reservation.set_approved(term=approved, approved_resources=assigned) + reservation.set_bid_pending(value=False) + node_id = assigned.get_sliver().get_node_map()[1] + + if node_id_to_reservations.get(node_id, None) is None: + node_id_to_reservations[node_id] = ReservationSet() + node_id_to_reservations[node_id].add(reservation=reservation) + else: + if not reservation.is_terminal(): + self.logger.debug(f"Deferring reservation {reservation} for the next cycle: " + f"{self.actor.get_current_cycle() + 1}") + self.reschedule(reservation=reservation) - return node_id_to_reservations + return node_id_to_reservations + except Exception as e: + self.logger.error(f"Could not assign {e}") + reservation.fail(message=str(e)) + return node_id_to_reservations def assign_reservation(self, *, reservation: ABCAuthorityReservation, node_id_to_reservations: dict): """ @@ -472,7 +477,7 @@ def assign_reservation(self, *, reservation: ABCAuthorityReservation, node_id_to except Exception as e: self.logger.error(traceback.format_exc()) self.logger.error(f"Could not assign {e}") - return None + raise e def configuration_complete(self, *, action: str, token: ConfigToken, out_properties: dict): super().configuration_complete(action=action, token=token, out_properties=out_properties) @@ -610,6 +615,13 @@ def get_existing_reservations(self, node_id: str, node_id_to_reservations: dict) existing_reservations = self.actor.get_plugin().get_database().get_reservations(graph_node_id=node_id, states=states) + if existing_reservations: + closing_reservations = [] + for r in existing_reservations: + if r.get_pending_state() == ReservationPendingStates.Closing: + closing_reservations.append(r) + for c in closing_reservations: + existing_reservations.remove(c) reservations_allocated_in_cycle = node_id_to_reservations.get(node_id, None) diff --git a/fabric_cf/actor/core/policy/broker_simpler_units_policy.py b/fabric_cf/actor/core/policy/broker_simpler_units_policy.py index 7601000b..bac382bb 100644 --- a/fabric_cf/actor/core/policy/broker_simpler_units_policy.py +++ b/fabric_cf/actor/core/policy/broker_simpler_units_policy.py @@ -26,9 +26,10 @@ from __future__ import annotations import enum +import random import threading import traceback -from datetime import datetime +from datetime import datetime, timezone from enum import Enum from typing import TYPE_CHECKING, Tuple, List, Any, Dict @@ -41,6 +42,7 @@ from fim.slivers.interface_info import InterfaceSliver, InterfaceType from fim.slivers.network_node import NodeSliver, NodeType from fim.slivers.network_service import NetworkServiceSliver, ServiceType, NSLayer +from fim.slivers.path_info import Path from fabric_cf.actor.boot.configuration import ActorConfig from fabric_cf.actor.core.apis.abc_broker_reservation import ABCBrokerReservation @@ -50,7 +52,7 @@ from fabric_cf.actor.core.container.maintenance import Maintenance from fabric_cf.actor.core.delegation.resource_ticket import ResourceTicketFactory from fabric_cf.actor.core.common.exceptions import BrokerException, ExceptionErrorCode -from fabric_cf.actor.core.kernel.reservation_states import ReservationStates +from fabric_cf.actor.core.kernel.reservation_states import ReservationStates, ReservationOperation from fabric_cf.actor.core.policy.broker_calendar_policy import BrokerCalendarPolicy from fabric_cf.actor.core.policy.fifo_queue import FIFOQueue from fabric_cf.actor.core.policy.network_node_inventory import NetworkNodeInventory @@ -67,6 +69,7 @@ from fabric_cf.actor.fim.plugins.broker.aggregate_bqm_plugin import 
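get_existing_reservations above now drops reservations whose pending state is Closing before they are handed to the allocation logic, presumably so that resources already being released are not counted against a node. The diff collects them in a second list and removes them afterwards, which avoids mutating the list while iterating over it; the same effect in a self-contained sketch:

```python
from enum import Enum, auto
from typing import List


class ReservationPendingStates(Enum):
    None_ = auto()
    Closing = auto()


class Res:
    def __init__(self, rid: str, pending: ReservationPendingStates):
        self.rid = rid
        self.pending = pending

    def get_pending_state(self) -> ReservationPendingStates:
        return self.pending


def drop_closing(existing: List[Res]) -> List[Res]:
    closing = [r for r in existing
               if r.get_pending_state() == ReservationPendingStates.Closing]
    for c in closing:          # remove after iterating, never while iterating
        existing.remove(c)
    return existing


reservations = [Res("r1", ReservationPendingStates.None_),
                Res("r2", ReservationPendingStates.Closing)]
print([r.rid for r in drop_closing(reservations)])   # -> ['r1']
```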
AggregatedBQMPlugin from fabric_cf.actor.core.util.resource_type import ResourceType from fabric_cf.actor.core.policy.inventory_for_type import InventoryForType +from fim.slivers.interface_info import InterfaceSliver if TYPE_CHECKING: from fabric_cf.actor.core.apis.abc_broker_mixin import ABCBrokerMixin @@ -76,6 +79,7 @@ class BrokerAllocationAlgorithm(Enum): FirstFit = enum.auto() BestFit = enum.auto() WorstFit = enum.auto() + Random = enum.auto() def __repr__(self): return self.name @@ -222,10 +226,10 @@ def register_inventory(self, *, resource_type: ResourceType, inventory: Inventor def bind(self, *, reservation: ABCBrokerReservation) -> bool: term = reservation.get_requested_term() - self.logger.info("SlottedAgent bind arrived at cycle {} requested term {}".format( - self.actor.get_current_cycle(), term)) + self.logger.info(f"SlottedAgent bind arrived at cycle {self.actor.get_current_cycle()} requested term {term}") bid_cycle = self.get_allocation(reservation=reservation) + self.logger.info(f"SlottedAgent bind assigned cycle: {bid_cycle} requested term {term}") self.calendar.add_request(reservation=reservation, cycle=bid_cycle) @@ -278,6 +282,12 @@ def get_allocation(self, *, reservation: ABCBrokerReservation) -> int: if intervals <= 0: intervals = 1 + # Hack for Advanced Scheduling; force the advanced slivers to be scheduled in next cycle + now = datetime.now(timezone.utc) + diff = (reservation.get_requested_term().get_new_start_time() - now).total_seconds() + if diff > 120: + intervals = 2 + start = self.last_allocation + (intervals * self.CALL_INTERVAL) + self.ADVANCE_TIME return start @@ -468,7 +478,8 @@ def ticket(self, *, reservation: ABCBrokerReservation, node_id_to_reservations: self.logger.debug(f"Inventory type: {type(inv)}") term = Term(start=start, end=end) return self.ticket_inventory(reservation=reservation, inv=inv, term=term, - node_id_to_reservations=node_id_to_reservations) + node_id_to_reservations=node_id_to_reservations, + operation=ReservationOperation.Create) else: reservation.fail(message=Constants.NO_POOL) else: @@ -488,6 +499,8 @@ def __candidate_nodes(self, *, sliver: NodeSliver) -> List[str]: node_props = {ABCPropertyGraphConstants.PROP_SITE: sliver.site, ABCPropertyGraphConstants.PROP_TYPE: str(NodeType.Server)} + if sliver.get_type() == NodeType.Switch: + node_props[ABCPropertyGraphConstants.PROP_TYPE] = str(NodeType.Switch) storage_components = [] # remove storage components before the check @@ -502,6 +515,16 @@ def __candidate_nodes(self, *, sliver: NodeSliver) -> List[str]: label=ABCPropertyGraphConstants.CLASS_NetworkNode, props=node_props, comps=sliver.attached_components_info) + + # Skip nodes without any delegations which would be data-switch in this case + if sliver.get_type() == NodeType.Switch: + exclude = [] + for n in result: + if "p4" not in n: + exclude.append(n) + for e in exclude: + result.remove(e) + # re-add storage components if len(storage_components) > 0: for c in storage_components: @@ -536,7 +559,8 @@ def __prune_nodes_in_maintenance(self, node_id_list: List[str], site: str, reser return node_id_list def __find_first_fit(self, node_id_list: List[str], node_id_to_reservations: dict, inv: NetworkNodeInventory, - reservation: ABCBrokerReservation) -> Tuple[str, BaseSliver, Any]: + reservation: ABCBrokerReservation, term: Term, sliver: NodeSliver, + operation: ReservationOperation = ReservationOperation.Create) -> Tuple[str, BaseSliver, Any]: """ Find First Available Node which can serve the reservation @param node_id_list: Candidate 
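The allocation-cycle change above is explicitly labelled a hack: if the requested term starts more than two minutes in the future, the bid is pushed out by one extra interval so advance reservations land on the next cycle. A minimal sketch of that cycle arithmetic, with the policy constants replaced by stand-in values:

```python
from datetime import datetime, timedelta, timezone

CALL_INTERVAL = 1     # stand-in for the policy's CALL_INTERVAL (cycles)
ADVANCE_TIME = 3      # stand-in for the policy's ADVANCE_TIME (cycles)


def allocation_cycle(last_allocation: int, requested_start: datetime,
                     intervals: int = 1) -> int:
    """Pick the cycle at which a request will be considered for allocation."""
    now = datetime.now(timezone.utc)
    if (requested_start - now).total_seconds() > 120:
        # Advance reservation: defer to the next cycle.
        intervals = 2
    return last_allocation + (intervals * CALL_INTERVAL) + ADVANCE_TIME


future_start = datetime.now(timezone.utc) + timedelta(hours=6)
print(allocation_cycle(last_allocation=100, requested_start=future_start))  # -> 105
```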
Nodes @@ -546,35 +570,38 @@ def __find_first_fit(self, node_id_list: List[str], node_id_to_reservations: dic @return tuple containing delegation id, sliver, error message if any """ delegation_id = None - sliver = None error_msg = None self.logger.debug(f"Possible candidates to serve {reservation} candidates# {node_id_list}") - requested_sliver = reservation.get_requested_resources().get_sliver() - is_create = requested_sliver.get_node_map() is None for node_id in node_id_list: try: self.logger.debug(f"Attempting to allocate {reservation} via graph_node# {node_id}") graph_node = self.get_network_node_from_graph(node_id=node_id) - if requested_sliver.labels is not None and requested_sliver.labels.instance_parent is not None: - self.logger.info(f"Sliver {requested_sliver} is requested on worker: " - f"{requested_sliver.labels.instance_parent}") - if graph_node.get_name() != requested_sliver.labels.instance_parent: + if sliver.labels is not None and sliver.labels.instance_parent is not None: + self.logger.info(f"Sliver {sliver} is requested on worker: " + f"{sliver.labels.instance_parent}") + if graph_node.get_name() != sliver.labels.instance_parent: self.logger.info(f"Skipping candidate node: {graph_node}") continue existing_reservations = self.get_existing_reservations(node_id=node_id, - node_id_to_reservations=node_id_to_reservations) + node_id_to_reservations=node_id_to_reservations, + start=term.get_start_time(), + end=term.get_end_time()) - existing_components = self.get_existing_components(node_id=node_id) + include_ns = False if operation == ReservationOperation.Extend else True + existing_components = self.get_existing_components(node_id=node_id, start=term.get_start_time(), + end=term.get_end_time(), + excludes=[str(reservation.get_reservation_id())], + include_ns=include_ns) delegation_id, sliver = inv.allocate(rid=reservation.get_reservation_id(), - requested_sliver=requested_sliver, + requested_sliver=sliver, graph_id=self.combined_broker_model_graph_id, graph_node=graph_node, existing_reservations=existing_reservations, existing_components=existing_components, - is_create=is_create) + operation=operation) if delegation_id is not None and sliver is not None: break @@ -585,14 +612,16 @@ def __find_first_fit(self, node_id_list: List[str], node_id_to_reservations: dic else: raise e - if delegation_id is None and requested_sliver.labels is not None and requested_sliver.labels.instance_parent is not None: - error_msg = f"Insufficient Resources: {requested_sliver.labels.instance_parent} " \ - f"cannot serve the requested sliver" + if delegation_id is None and sliver.labels is not None and \ + sliver.labels.instance_parent is not None: + error_msg = f"Insufficient Resources: {sliver.labels.instance_parent} " \ + f"cannot serve the requested sliver - {error_msg}" return delegation_id, sliver, error_msg def __allocate_nodes(self, *, reservation: ABCBrokerReservation, inv: NetworkNodeInventory, sliver: NodeSliver, - node_id_to_reservations: dict) -> Tuple[str or None, BaseSliver, Any]: + node_id_to_reservations: dict, term: Term, + operation: ReservationOperation = ReservationOperation.Create) -> Tuple[str or None, BaseSliver, Any]: """ Allocate Network Node Slivers @param reservation Reservation @@ -603,6 +632,8 @@ def __allocate_nodes(self, *, reservation: ABCBrokerReservation, inv: NetworkNod """ delegation_id = None node_id_list = self.__candidate_nodes(sliver=sliver) + if self.get_algorithm_type(site=sliver.site) == BrokerAllocationAlgorithm.Random: + random.shuffle(node_id_list) if 
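First-fit above walks the candidate hosts in order (shuffled first when the Random algorithm is selected), skips hosts that do not match an explicitly requested worker, and stops at the first host whose inventory can satisfy the sliver for the requested term. A stripped-down sketch of that loop, with `can_allocate` standing in for the inventory and existing-reservation checks:

```python
import random
from typing import Callable, List, Optional


def first_fit(candidates: List[str],
              can_allocate: Callable[[str], bool],
              requested_worker: Optional[str] = None,
              shuffle: bool = False) -> Optional[str]:
    """Return the first candidate host that can serve the request, or None."""
    if shuffle:                       # BrokerAllocationAlgorithm.Random
        candidates = list(candidates)
        random.shuffle(candidates)
    for host in candidates:
        if requested_worker and host != requested_worker:
            continue                  # sliver pinned to a specific worker
        if can_allocate(host):
            return host
    return None


print(first_fit(["w1", "w2", "w3"], can_allocate=lambda h: h != "w1"))  # -> 'w2'
```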
len(node_id_list) == 0 and sliver.site not in self.combined_broker_model.get_sites(): error_msg = f'Unknown site {sliver.site} requested for {reservation}' @@ -615,204 +646,323 @@ def __allocate_nodes(self, *, reservation: ABCBrokerReservation, inv: NetworkNod # no candidate nodes found if len(node_id_list) == 0: - error_msg = f'Insufficient resources: No candidates nodes found to serve {reservation}' + error_msg = f'Insufficient resources: No hosts available to provision the {reservation}' self.logger.error(error_msg) return delegation_id, sliver, error_msg - if self.get_algorithm_type().lower() == BrokerAllocationAlgorithm.FirstFit.name.lower(): - return self.__find_first_fit(node_id_list=node_id_list, - node_id_to_reservations=node_id_to_reservations, - inv=inv, reservation=reservation) + return self.__find_first_fit(node_id_list=node_id_list, + node_id_to_reservations=node_id_to_reservations, + inv=inv, reservation=reservation, term=term, sliver=sliver, + operation=operation) - else: - raise BrokerException(error_code=ExceptionErrorCode.NOT_SUPPORTED, - msg=f"Broker currently only supports First Fit") + def __can_extend_interface_sliver(self, rid: ID, inv: NetworkServiceInventory, + ifs: InterfaceSliver, sliver: NetworkServiceSliver, + node_id_to_reservations: dict, term: Term): + """ + Checks if VLAN attached to an interface are assigned to any advanced reservations in this case + @param rid + @param inv + @param ifs + @param sliver + @param node_id_to_reservations + @param term + + @raises BrokerException in case VLAN is already assigned to any future sliver + """ + ns_node_id, ns_bqm_node_id = sliver.get_node_map() + node_id, bqm_node_id = ifs.get_node_map() + bqm_cp = self.get_interface_sliver_from_graph(node_id=bqm_node_id) + self.logger.debug(f"BQM IFS: {bqm_cp}") + owner_switch, owner_mpls, owner_ns = self.get_owners(node_id=bqm_node_id, ns_type=sliver.get_type()) + self.logger.debug(f"Owner SWITCH: {owner_switch}") + self.logger.debug(f"Owner MPLS: {owner_mpls}") + self.logger.debug(f"Owner NS: {owner_ns}") + + # Handle IPV6Ext services + ns_bqm_node_id = ns_bqm_node_id.node_id.replace('ipv6ext-ns', + 'ipv6-ns') if 'ipv6ext-ns' in ns_bqm_node_id else ns_bqm_node_id + + existing_reservations = self.get_existing_reservations( + node_id=ns_bqm_node_id, + node_id_to_reservations=node_id_to_reservations, + start=term.get_start_time(), + end=term.get_end_time(), + ) + + inv.allocate_ifs( + rid=rid, + requested_ns=sliver, + requested_ifs=ifs, + owner_ns=owner_ns, + bqm_ifs=bqm_cp, + existing_reservations=existing_reservations, + operation=ReservationOperation.Extend + ) def __allocate_services(self, *, rid: ID, inv: NetworkServiceInventory, sliver: NetworkServiceSliver, - node_id_to_reservations: dict) -> Tuple[str, BaseSliver, Any]: + node_id_to_reservations: dict, term: Term, + operation: ReservationOperation = ReservationOperation.Create) -> Tuple[str, BaseSliver, Any]: """ Allocate Network Service Slivers @param rid Reservation Id @param inv Inventory @param sliver Requested sliver @param node_id_to_reservations + @param operation @return tuple containing delegation id, sliver, error message if any """ - self.logger.debug(f"Processing Network Service sliver: {sliver}") delegation_id = None error_msg = None - owner_ns = None - owner_ns_id = None - bqm_component = None - is_vnic = False - owner_mpls_ns = None - owner_switch = None - - peered_ns_interfaces = [] - - # For each Interface Sliver; - for ifs in sliver.interface_info.interfaces.values(): - node_map_id = 
self.combined_broker_model_graph_id - - # Fetch Network Node Id and BQM Component Id - node_id, bqm_component_id = ifs.get_node_map() - - # Skipping the already allocated interface on a modify - #if node_id == self.combined_broker_model_graph_id: - if self.combined_broker_model_graph_id in node_id: - continue - - if node_id == str(NodeType.Facility): - bqm_component = self.get_facility_sliver(node_name=bqm_component_id) - # Peered Interfaces are handled at the end - elif node_id == str(Constants.PEERED): - peered_ns_interfaces.append(ifs) - continue - else: - # For VM interfaces - bqm_component = self.get_component_sliver(node_id=bqm_component_id) - node_map_id = f"{node_map_id}:{node_id}:{bqm_component_id}:{ifs.get_labels().bdf}" - - if bqm_component is None: - raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES) - - # Get BQM Connection Point in Site Delegation (c) - site_cp = FimHelper.get_site_interface_sliver(component=bqm_component, - local_name=ifs.get_labels().local_name, - region=ifs.get_labels().region, - device_name=ifs.get_labels().device_name) - self.logger.debug(f"Interface Sliver [Site Delegation] (C): {site_cp}") - - # Get BQM Peer Connection Point in Site Delegation (a) - net_cp = self.get_peer_interface_sliver(site_ifs_id=site_cp.node_id, - interface_type=InterfaceType.TrunkPort) - - if net_cp is None: - error_msg = "Peer Connection Point not found from Network AM" - raise BrokerException(msg=error_msg) - - self.logger.debug(f"Peer Interface Sliver [Network Delegation] (A): {net_cp}") - - # need to find the owner switch of the network service in CBM and take it's name or labels.local_name - owner_switch, owner_mpls_ns, owner_ns = self.get_owners(node_id=net_cp.node_id, - ns_type=sliver.get_type()) - - # Hack for IPV6Ext services - owner_ns_id = owner_ns.node_id - if 'ipv6ext-ns' in owner_ns_id: - owner_ns_id = owner_ns_id.replace('ipv6ext-ns', 'ipv6-ns') - - bqm_cp = net_cp - if bqm_component.get_type() == NodeType.Facility or \ - (sliver.get_type() == ServiceType.L2Bridge and - bqm_component.get_model() == Constants.OPENSTACK_VNIC_MODEL): - bqm_cp = site_cp - - if bqm_component.get_type() == ComponentType.SharedNIC: - if bqm_component.get_model() == Constants.OPENSTACK_VNIC_MODEL: - is_vnic = True - - # VLAN is already set by the Orchestrator using the information from the Node Sliver Parent Reservation - if ifs.get_labels().vlan is None and not is_vnic: - message = "Shared NIC VLAN cannot be None" - self.logger.error(message) - raise BrokerException(error_code=ExceptionErrorCode.FAILURE, - msg=f"{message}") - else: - existing_reservations = self.get_existing_reservations(node_id=owner_ns_id, - node_id_to_reservations=node_id_to_reservations) - # Set vlan - source: (c) - only for dedicated NICs - ifs = inv.allocate_ifs(requested_ns=sliver, requested_ifs=ifs, owner_ns=owner_ns, - bqm_ifs=bqm_cp, existing_reservations=existing_reservations) - - local_name = net_cp.get_name() - device_name = owner_switch.get_name() + try: + self.logger.debug(f"Processing Network Service sliver: {sliver}") + owner_ns = None + owner_ns_id = None + bqm_node = None + is_vnic = False + owner_mpls_ns = None + owner_switch = None + + peered_ns_interfaces = [] + ero_source_end_info = [] + + # For each Interface Sliver; + for ifs in sliver.interface_info.interfaces.values(): + node_map_id = self.combined_broker_model_graph_id + + # Fetch Network Node Id and BQM Component Id + node_id, bqm_node_id = ifs.get_node_map() + + # Skipping the already allocated interface on a modify + 
if self.combined_broker_model_graph_id in node_id: + + if operation == ReservationOperation.Extend: + self.__can_extend_interface_sliver(rid=rid, inv=inv, ifs=ifs, sliver=sliver, + node_id_to_reservations=node_id_to_reservations, term=term) + continue - if device_name == Constants.AL2S: + if node_id == str(NodeType.Facility): + bqm_node = self.get_facility_sliver(node_name=bqm_node_id) + # Peered Interfaces are handled at the end + elif node_id == str(Constants.PEERED): + peered_ns_interfaces.append(ifs) + continue + elif node_id == str(NodeType.Switch): + bqm_node = self.get_network_node_from_graph(node_id=bqm_node_id) + node_map_id = f"{node_map_id}#{bqm_node.get_name()}#{bqm_node_id}#{ifs.get_labels().local_name}" + else: + # For VM interfaces + bqm_node = self.get_component_sliver(node_id=bqm_node_id) + node_map_id = f"{node_map_id}:{node_id}:{bqm_node_id}:{ifs.get_labels().bdf}" + + if bqm_node is None: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES) + + # Get BQM Connection Point in Site Delegation (c) + site_cp = FimHelper.get_site_interface_sliver(component=bqm_node, + local_name=ifs.get_labels().local_name, + region=ifs.get_labels().region, + device_name=ifs.get_labels().device_name) + self.logger.debug(f"Interface Sliver [Site Delegation] (C): {site_cp}") + + # Get BQM Peer Connection Point in Site Delegation (a) + net_cp = self.get_peer_interface_sliver(site_ifs_id=site_cp.node_id, + interface_type=InterfaceType.TrunkPort) + + if net_cp is None: + error_msg = "Peer Connection Point not found from Network AM" + raise BrokerException(msg=error_msg) + + self.logger.debug(f"Peer Interface Sliver [Network Delegation] (A): {net_cp}") + + # need to find the owner switch of the network service in CBM and take it's name or labels.local_name + owner_switch, owner_mpls_ns, owner_ns = self.get_owners(node_id=net_cp.node_id, + ns_type=sliver.get_type()) + + # Hack for IPV6Ext services + owner_ns_id = owner_ns.node_id + if 'ipv6ext-ns' in owner_ns_id: + owner_ns_id = owner_ns_id.replace('ipv6ext-ns', 'ipv6-ns') + + bqm_cp = net_cp + if bqm_node.get_type() == NodeType.Facility or \ + (sliver.get_type() == ServiceType.L2Bridge and + bqm_node.get_model() == Constants.OPENSTACK_VNIC_MODEL): + bqm_cp = site_cp + + if bqm_node.get_type() == ComponentType.SharedNIC: + if bqm_node.get_model() == Constants.OPENSTACK_VNIC_MODEL: + is_vnic = True + + # VLAN is already set by the Orchestrator using the information from the Node Sliver Parent Reservation + if ifs.get_labels().vlan is None and not is_vnic: + message = "Shared NIC VLAN cannot be None" + self.logger.error(message) + raise BrokerException(error_code=ExceptionErrorCode.FAILURE, + msg=f"{message}") + else: + existing_reservations = self.get_existing_reservations(node_id=owner_ns_id, + node_id_to_reservations=node_id_to_reservations, + start=term.get_start_time(), + end=term.get_end_time()) + # Set vlan - source: (c) - only for dedicated NICs + ifs = inv.allocate_ifs(rid=rid, requested_ns=sliver, requested_ifs=ifs, owner_ns=owner_ns, + bqm_ifs=bqm_cp, existing_reservations=existing_reservations) + + local_name = net_cp.get_name() + device_name = owner_switch.get_name() + + if device_name == Constants.AL2S: + delegation_id, delegated_label = InventoryForType.get_delegations(lab_cap_delegations= + net_cp.get_label_delegations()) + device_name = delegated_label.device_name + local_name = delegated_label.local_name + + # local_name source: (a) + ifs_labels = ifs.get_labels() + ifs_labels = Labels.update(ifs_labels, 
local_name=local_name) + + # NSO device name source: (a) - need to find the owner switch of the network service in CBM + # and take its name or labels.local_name + # Set the NSO device-name + ifs_labels = Labels.update(ifs_labels, device_name=device_name) + adm_ids = owner_switch.get_structural_info().adm_graph_ids + site_adm_ids = bqm_node.get_structural_info().adm_graph_ids + + self.logger.debug(f"Owner Network Service: {owner_ns}") + self.logger.debug(f"Owner Switch: {owner_switch}") + if owner_switch.network_service_info is not None: + self.logger.debug(f"Owner Switch NS: {owner_switch.network_service_info.network_services.values()}") + + net_adm_ids = site_adm_ids + if bqm_node.get_type() != NodeType.Facility and not is_vnic: + net_adm_ids = [x for x in adm_ids if not x in site_adm_ids or site_adm_ids.remove(x)] + # For sites like EDC which share switch with other sites like NCSA, + # the net_adm_ids also includes delegation id from the other side, + # this results in this list having more than one entry and no way for + # the code to know which delegation is from Network AM + # Using a hack here to pick the delegation id from one of the + # layer 3 network services in the owner switch + if len(net_adm_ids) > 1: + for x in owner_switch.network_service_info.network_services.values(): + if x.get_layer() == NSLayer.L2: + continue + net_adm_ids = x.get_structural_info().adm_graph_ids + break + else: + if bqm_cp.labels is not None and bqm_cp.labels.ipv4_subnet is not None: + ifs_labels = Labels.update(ifs_labels, ipv4_subnet=bqm_cp.labels.ipv4_subnet) + if bqm_cp.labels is not None and bqm_cp.labels.ipv6_subnet is not None: + ifs_labels = Labels.update(ifs_labels, ipv6_subnet=bqm_cp.labels.ipv6_subnet) + if len(net_adm_ids) != 1: + error_msg = f"More than 1 or 0 Network Delegations found! 
net_adm_ids: {net_adm_ids}" + self.logger.error(error_msg) + raise BrokerException(msg=error_msg) + + if bqm_node.get_type() == NodeType.Facility: + node_map_id = f"{node_map_id}#{bqm_node.get_name()}#{bqm_cp.node_id}#{ifs_labels.vlan}" + + # Update the Interface Sliver Node Map to map to (a) + ifs.set_node_map(node_map=(node_map_id, bqm_cp.node_id)) + #ifs.set_node_map(node_map=(self.combined_broker_model_graph_id, bqm_cp.node_id)) + + delegation_id = net_adm_ids[0] + + ifs.labels = ifs_labels + ifs.label_allocations = Labels.update(lab=ifs_labels) + + self.logger.info(f"Allocated Interface Sliver: {ifs} delegation: {delegation_id}") + + owner_v4_service = self.get_ns_from_switch(switch=owner_switch, ns_type=ServiceType.FABNetv4) + if owner_v4_service and owner_v4_service.get_labels(): + ero_source_end_info.append((owner_switch.node_id, owner_v4_service.get_labels().ipv4)) + + if not owner_ns: + bqm_graph_id, bqm_node_id = sliver.get_node_map() + owner_ns, owner_switch = self.get_network_service_from_graph(node_id=bqm_node_id, + parent=True) + # Hack for IPV6Ext services + owner_ns_id = owner_ns.node_id + if 'ipv6ext-ns' in owner_ns_id: + owner_ns_id = owner_ns_id.replace('ipv6ext-ns', 'ipv6-ns') + + owner_mpls_ns = None + if owner_switch: + for ns in owner_switch.network_service_info.network_services.values(): + if ServiceType.MPLS == ns.get_type(): + owner_mpls_ns = ns + break delegation_id, delegated_label = InventoryForType.get_delegations(lab_cap_delegations= - net_cp.get_label_delegations()) - device_name = delegated_label.device_name - local_name = delegated_label.local_name - - # local_name source: (a) - ifs_labels = ifs.get_labels() - ifs_labels = Labels.update(ifs_labels, local_name=local_name) - - # NSO device name source: (a) - need to find the owner switch of the network service in CBM - # and take its name or labels.local_name - # Set the NSO device-name - ifs_labels = Labels.update(ifs_labels, device_name=device_name) - adm_ids = owner_switch.get_structural_info().adm_graph_ids - site_adm_ids = bqm_component.get_structural_info().adm_graph_ids - - self.logger.debug(f"Owner Network Service: {owner_ns}") - self.logger.debug(f"Owner Switch: {owner_switch}") - if owner_switch.network_service_info is not None: - self.logger.debug(f"Owner Switch NS: {owner_switch.network_service_info.network_services.values()}") - - net_adm_ids = site_adm_ids - if bqm_component.get_type() != NodeType.Facility and not is_vnic: - net_adm_ids = [x for x in adm_ids if not x in site_adm_ids or site_adm_ids.remove(x)] - # For sites like EDC which share switch with other sites like NCSA, - # the net_adm_ids also includes delegation id from the other side, - # this results in this list having more than one entry and no way for - # the code to know which delegation is from Network AM - # Using a hack here to pick the delegation id from one of the - # layer 3 network services in the owner switch - if len(net_adm_ids) > 1: - for x in owner_switch.network_service_info.network_services.values(): - if x.get_layer() == NSLayer.L2: - continue - net_adm_ids = x.get_structural_info().adm_graph_ids - break - else: - if bqm_cp.labels is not None and bqm_cp.labels.ipv4_subnet is not None: - ifs_labels = Labels.update(ifs_labels, ipv4_subnet=bqm_cp.labels.ipv4_subnet) - if bqm_cp.labels is not None and bqm_cp.labels.ipv6_subnet is not None: - ifs_labels = Labels.update(ifs_labels, ipv6_subnet=bqm_cp.labels.ipv6_subnet) - if len(net_adm_ids) != 1: - error_msg = f"More than 1 or 0 Network Delegations found! 
net_adm_ids: {net_adm_ids}" - self.logger.error(error_msg) - raise BrokerException(msg=error_msg) - - # Update the Interface Sliver Node Map to map to (a) - ifs.set_node_map(node_map=(node_map_id, bqm_cp.node_id)) - #ifs.set_node_map(node_map=(self.combined_broker_model_graph_id, bqm_cp.node_id)) - - delegation_id = net_adm_ids[0] - - ifs.labels = ifs_labels - ifs.label_allocations = Labels.update(lab=ifs_labels) - - self.logger.info(f"Allocated Interface Sliver: {ifs} delegation: {delegation_id}") - - # Update the Network Service Sliver Node Map to map to parent of (a) - sliver.set_node_map(node_map=(self.combined_broker_model_graph_id, owner_ns_id)) - - # Set the Subnet and gateway from the Owner Switch (a) - existing_reservations = self.get_existing_reservations(node_id=owner_ns_id, - node_id_to_reservations=node_id_to_reservations) - - # Allocate VLAN for the Network Service - if is_vnic: - site_adm_ids = bqm_component.get_structural_info().adm_graph_ids - delegation_id = site_adm_ids[0] - inv.allocate_vnic(rid=rid, requested_ns=sliver, owner_ns=owner_ns, - existing_reservations=existing_reservations) - else: - sliver = inv.allocate(rid=rid, requested_ns=sliver, owner_ns=owner_ns, + owner_ns.get_label_delegations()) + + # Set the Subnet and gateway from the Owner Switch (a) + existing_reservations = self.get_existing_reservations(node_id=owner_ns_id, + node_id_to_reservations=node_id_to_reservations, + start=term.get_start_time(), end=term.get_end_time()) + + # Allocate VLAN for the Network Service + if is_vnic: + site_adm_ids = bqm_node.get_structural_info().adm_graph_ids + delegation_id = site_adm_ids[0] + inv.allocate_vnic(rid=rid, requested_ns=sliver, owner_ns=owner_ns, existing_reservations=existing_reservations) - - self.__allocate_peered_interfaces(peered_interfaces=peered_ns_interfaces, owner_switch=owner_switch, - owner_mpls=owner_mpls_ns, inv=inv, sliver=sliver, owner_ns=owner_ns, - node_id_to_reservations=node_id_to_reservations) - + else: + sliver = inv.allocate(rid=rid, requested_ns=sliver, owner_ns=owner_ns, + existing_reservations=existing_reservations) + + # Update the Network Service Sliver Node Map to map to parent of (a) + sliver.set_node_map(node_map=(self.combined_broker_model_graph_id, owner_ns_id)) + + self.__allocate_peered_interfaces(rid=rid, peered_interfaces=peered_ns_interfaces, owner_switch=owner_switch, + owner_mpls=owner_mpls_ns, inv=inv, sliver=sliver, owner_ns=owner_ns, + node_id_to_reservations=node_id_to_reservations, term=term) + + if sliver.ero and len(sliver.ero.get()) and len(ero_source_end_info) == 2: + self.logger.info(f"Requested ERO: {sliver.ero}") + ero_hops = [] + new_path = [ero_source_end_info[0][1]] + type, path = sliver.ero.get() + for hop in path.get()[0]: + # User passes the site names; Broker maps the sites names to the respective switch IP + hop_switch = self.get_switch_sliver(site=hop) + self.logger.debug(f"Switch information for {hop}: {hop_switch}") + if not hop_switch: + self.logger.error(f"Requested hop: {hop} in the ERO does not exist") + raise BrokerException(error_code=ExceptionErrorCode.INVALID_ARGUMENT, + msg=f"Requested hop: {hop} in the ERO does not exist ") + + hop_v4_service = self.get_ns_from_switch(switch=hop_switch, ns_type=ServiceType.FABNetv4) + if hop_v4_service and hop_v4_service.get_labels() and hop_v4_service.get_labels().ipv4: + self.logger.debug(f"Fabnetv4 information for {hop}: {hop_v4_service}") + ero_hops.append(f"{hop_switch.node_id}-ns") + new_path.append(hop_v4_service.get_labels().ipv4) + + 
new_path.append(ero_source_end_info[1][1]) + + if len(new_path): + if not self.validate_requested_ero_path(source_node=ero_source_end_info[0][0], + end_node=ero_source_end_info[1][0], + hops=ero_hops): + raise BrokerException(error_code=ExceptionErrorCode.INVALID_ARGUMENT, + msg=f"Requested ERO path: {sliver.ero} is invalid!") + ero_path = Path() + ero_path.set_symmetric(new_path) + sliver.ero.set(ero_path) + self.logger.info(f"Allocated ERO: {sliver.ero}") + + except BrokerException as e: + delegation_id = None + if e.error_code == ExceptionErrorCode.INSUFFICIENT_RESOURCES: + self.logger.error(f"Exception occurred: {e}") + error_msg = e.msg + else: + raise e + self.logger.debug(f"Allocate Services returning: {delegation_id} {sliver} {error_msg}") return delegation_id, sliver, error_msg - def __allocate_peered_interfaces(self, *, peered_interfaces: List[InterfaceSliver], owner_switch: NodeSliver, + def __allocate_peered_interfaces(self, *, rid: ID, peered_interfaces: List[InterfaceSliver], owner_switch: NodeSliver, inv: NetworkServiceInventory, sliver: NetworkServiceSliver, owner_mpls: NetworkServiceSliver, owner_ns: NetworkServiceSliver, - node_id_to_reservations: dict): + node_id_to_reservations: dict, term: Term): if not len(peered_interfaces): return for pfs in peered_interfaces: @@ -859,9 +1009,11 @@ def __allocate_peered_interfaces(self, *, peered_interfaces: List[InterfaceSlive msg=f"Unable to find BQM interface for {pfs.get_name()}") existing_reservations = self.get_existing_reservations(node_id=owner_ns.node_id, - node_id_to_reservations=node_id_to_reservations) + node_id_to_reservations=node_id_to_reservations, + start=term.get_start_time(), + end=term.get_end_time()) - pfs = inv.allocate_peered_ifs(owner_switch=owner_switch, requested_ifs=pfs, + pfs = inv.allocate_peered_ifs(rid=rid, owner_switch=owner_switch, requested_ifs=pfs, bqm_interface=bqm_interface, existing_reservations=existing_reservations) @@ -875,10 +1027,14 @@ def __allocate_peered_interfaces(self, *, peered_interfaces: List[InterfaceSlive sliver.set_node_map(node_map=(self.combined_broker_model_graph_id, owner_ns.node_id)) def ticket_inventory(self, *, reservation: ABCBrokerReservation, inv: InventoryForType, term: Term, - node_id_to_reservations: dict) -> Tuple[bool, dict, Any]: + node_id_to_reservations: dict, + operation: ReservationOperation = ReservationOperation.Create) -> Tuple[bool, dict, Any]: error_msg = None try: - rset = reservation.get_requested_resources() + if operation == ReservationOperation.Extend: + rset = reservation.get_resources() + else: + rset = reservation.get_requested_resources() needed = rset.get_units() # for network node slivers @@ -889,18 +1045,19 @@ def ticket_inventory(self, *, reservation: ABCBrokerReservation, inv: InventoryF # intended link (and possibly interfaces connected to it) res_sliver = rset.get_sliver() - delegation_id = None - sliver = None if isinstance(res_sliver, NodeSliver): delegation_id, sliver, error_msg = self.__allocate_nodes(reservation=reservation, inv=inv, sliver=res_sliver, - node_id_to_reservations=node_id_to_reservations) + node_id_to_reservations=node_id_to_reservations, + term=term, + operation=operation) elif isinstance(res_sliver, NetworkServiceSliver): delegation_id, sliver, error_msg = self.__allocate_services(rid=reservation.get_reservation_id(), inv=inv, sliver=res_sliver, - node_id_to_reservations=node_id_to_reservations) + node_id_to_reservations=node_id_to_reservations, + term=term, operation=operation) else: self.logger.error(f'Reservation 
{reservation} sliver type is neither Node, nor NetworkServiceSliver') raise BrokerException(msg=f"Reservation sliver type is neither Node " @@ -916,11 +1073,13 @@ def ticket_inventory(self, *, reservation: ABCBrokerReservation, inv: InventoryF if node_id_to_reservations.get(node_id, None) is None: node_id_to_reservations[node_id] = ReservationSet() node_id_to_reservations[node_id].add(reservation=reservation) + self.logger.debug(f"Ticket Inventory returning: True {error_msg}") return True, node_id_to_reservations, error_msg except Exception as e: self.logger.error(traceback.format_exc()) self.logger.error(e) reservation.fail(message=str(e)) + self.logger.debug(f"Ticket Inventory returning: False {error_msg}") return False, node_id_to_reservations, error_msg def __is_modify_on_openstack_vnic(self, *, sliver: BaseSliver) -> bool: @@ -948,20 +1107,23 @@ def extend_private(self, *, reservation: ABCBrokerReservation, inv: InventoryFor sliver = current_resources.get_sliver() diff = sliver.diff(other_sliver=requested_resources.get_sliver()) - #if diff is not None and (diff.added is None or - # (len(diff.added.components) == 0 and len(diff.added.interfaces) == 0)): + operation = ReservationOperation.Extend if diff is not None: sliver = requested_resources.get_sliver() + operation = ReservationOperation.Modify - if diff is None or diff.added is None or \ - (len(diff.added.components) == 0 and len(diff.added.interfaces) == 0) or \ - self.__is_modify_on_openstack_vnic(sliver=sliver): + #if diff is None or diff.added is None or \ + # (len(diff.added.components) == 0 and len(diff.added.interfaces) == 0) or \ + # self.__is_modify_on_openstack_vnic(sliver=sliver): + + if self.__is_modify_on_openstack_vnic(sliver=sliver): self.issue_ticket(reservation=reservation, units=needed, rtype=requested_resources.get_type(), term=term, source=reservation.get_source(), sliver=sliver) else: status, node_id_to_reservations, error_msg = self.ticket_inventory(reservation=reservation, inv=inv, term=term, - node_id_to_reservations=node_id_to_reservations) + node_id_to_reservations=node_id_to_reservations, + operation=operation) if not status and not reservation.is_failed(): fail_message = f"Insufficient resources for specified start time, Failing reservation: " \ f"{reservation.get_reservation_id()}" @@ -1061,9 +1223,20 @@ def query(self, *, p: dict) -> dict: else: bqm_format = GraphFormat.GRAPHML + start = p.get(Constants.START, None) + if start: + start = datetime.strptime(start, Constants.LEASE_TIME_FORMAT) + end = p.get(Constants.END, None) + if end: + end = datetime.strptime(end, Constants.LEASE_TIME_FORMAT) + + excludes = p.get(Constants.EXCLUDES, None) + includes = p.get(Constants.INCLUDES, None) + try: if self.query_cbm is not None: - graph = self.query_cbm.get_bqm(query_level=query_level) + graph = self.query_cbm.get_bqm(query_level=query_level, start=start, end=end, includes=includes, + excludes=excludes) graph_string = None if graph is not None: graph_string = graph.serialize_graph(format=bqm_format) @@ -1224,28 +1397,56 @@ def get_peer_node(self, *, site: str, node_type: str, node_name: str) -> NodeSli else: return self.get_switch_sliver(site=site) - def get_switch_sliver(self, *, site: str) -> NodeSliver: + @staticmethod + def get_ns_from_switch(switch: NodeSliver, ns_type: ServiceType) -> NetworkServiceSliver: + """ + Extract specific type of service from a switch + :param switch: switch + :param ns_type: type of service requested + :return Network Service + """ + if switch and switch.network_service_info: + 
for service in switch.network_service_info.network_services.values(): + if service.get_type() == ns_type: + return service + + def validate_requested_ero_path(self, source_node: str, end_node: str, hops: List[str]) -> bool: + try: + self.lock.acquire() + if self.combined_broker_model: + path = self.combined_broker_model.get_nodes_on_path_with_hops(node_a=source_node, + node_z=end_node, hops=hops, cut_off=200) + self.logger.debug(f"Network path from source:{source_node} to end: {end_node} " + f"with hops: {hops} is path: {path}") + if len(path) and path[0] == source_node and path[-1] == end_node: + return True + finally: + self.lock.release() + return False + + def get_switch_sliver(self, *, site: str, stitch: bool = True) -> NodeSliver: """ Get Component Sliver from BQM @param site: Node Site Name + @param stitch: Flag indicating if the StitchNode is being looked up @return Facility Sliver """ try: self.lock.acquire() - if self.combined_broker_model is None: - return None - node_props = {ABCPropertyGraphConstants.PROP_SITE: site, - ABCPropertyGraphConstants.PROP_TYPE: str(NodeType.Switch)} - candidates = self.combined_broker_model.get_matching_nodes_with_components( - label=ABCPropertyGraphConstants.CLASS_NetworkNode, - props=node_props) - - if candidates is not None: - for c in candidates: - ns_sliver = self.combined_broker_model.build_deep_node_sliver(node_id=c) - return ns_sliver - - return None + if self.combined_broker_model: + node_props = {ABCPropertyGraphConstants.PROP_SITE: site, + ABCPropertyGraphConstants.PROP_TYPE: str(NodeType.Switch)} + #ABCPropertyGraphConstants.PROP_STITCH_NODE: str(stitch).lower()} + candidates = self.combined_broker_model.get_matching_nodes_with_components( + label=ABCPropertyGraphConstants.CLASS_NetworkNode, + props=node_props) + + if candidates is not None: + for c in candidates: + if stitch and "p4" in c: + continue + ns_sliver = self.combined_broker_model.build_deep_node_sliver(node_id=c) + return ns_sliver finally: self.lock.release() @@ -1290,6 +1491,20 @@ def get_owners(self, *, node_id: str, ns_type: ServiceType) -> Tuple[NodeSliver, finally: self.lock.release() + def get_interface_sliver_from_graph(self, *, node_id: str) -> InterfaceSliver or None: + """ + Get InterfaceSliver from CBM + :param node_id: + :return: + """ + try: + self.lock.acquire() + if self.combined_broker_model is None: + return None + return self.combined_broker_model.build_deep_interface_sliver(node_id=node_id) + finally: + self.lock.release() + def get_network_node_from_graph(self, *, node_id: str) -> NodeSliver or None: """ Get Node from CBM @@ -1327,11 +1542,14 @@ def get_network_service_from_graph(self, *, node_id: str, finally: self.lock.release() - def get_existing_reservations(self, node_id: str, node_id_to_reservations: dict) -> List[ABCReservationMixin]: + def get_existing_reservations(self, node_id: str, node_id_to_reservations: dict, + start: datetime = None, end: datetime = None) -> List[ABCReservationMixin]: """ Get existing reservations which are served by CBM node identified by node_id :param node_id: :param node_id_to_reservations: + :param start + :param end :return: list of reservations """ states = [ReservationStates.Active.value, @@ -1341,7 +1559,9 @@ def get_existing_reservations(self, node_id: str, node_id_to_reservations: dict) # Only get Active or Ticketing reservations existing_reservations = self.actor.get_plugin().get_database().get_reservations(graph_node_id=node_id, - states=states) + states=states, + start=start, + end=end) 
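get_existing_reservations now forwards the requested term's start and end to the database so that only reservations whose lease overlaps that window are counted against the CBM node. A minimal sketch of the overlap test this presumably reduces to is shown below; overlaps is an illustrative helper, not the actual query, and it treats a missing bound as unbounded so that omitting start/end keeps the previous behaviour.

from datetime import datetime
from typing import Optional

def overlaps(lease_start: Optional[datetime], lease_end: Optional[datetime],
             start: Optional[datetime], end: Optional[datetime]) -> bool:
    """True when a reservation's lease window intersects the requested [start, end] term."""
    if start is not None and lease_end is not None and lease_end < start:
        return False    # reservation ends before the requested term begins
    if end is not None and lease_start is not None and lease_start > end:
        return False    # reservation starts after the requested term ends
    return True

Filtering by term matters for renew/extend and for advance reservations: VLANs, subnets and components held by reservations that expire before the new term starts should not be treated as in use.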
reservations_allocated_in_cycle = node_id_to_reservations.get(node_id, None) @@ -1360,10 +1580,17 @@ def get_existing_reservations(self, node_id: str, node_id_to_reservations: dict) return existing_reservations - def get_existing_components(self, node_id: str) -> Dict[str, List[str]]: + def get_existing_components(self, node_id: str, start: datetime = None, end: datetime = None, + excludes: List[str] = None, include_ns: bool = True, + include_node: bool = True) -> Dict[str, List[str]]: """ Get existing components attached to Active/Ticketed Network Service Slivers :param node_id: + :param start: + :param end: + :param excludes: + :param include_node: + :param include_ns: :return: list of components """ states = [ReservationStates.Active.value, @@ -1373,11 +1600,17 @@ def get_existing_components(self, node_id: str) -> Dict[str, List[str]]: ReservationStates.CloseFail.value] res_type = [] - for x in ServiceType: - res_type.append(str(x)) + if include_ns: + for x in ServiceType: + res_type.append(str(x)) + + if include_node: + for x in NodeType: + res_type.append(str(x)) # Only get Active or Ticketing reservations - return self.actor.get_plugin().get_database().get_components(node_id=node_id, rsv_type=res_type, states=states) + return self.actor.get_plugin().get_database().get_components(node_id=node_id, rsv_type=res_type, states=states, + start=start, end=end, excludes=excludes) def set_logger(self, logger): """ @@ -1442,12 +1675,17 @@ def unmerge_adm(self, *, graph_id: str): self.combined_broker_model.rollback(graph_id=snapshot_graph_id) raise e - def get_algorithm_type(self) -> str: + def get_algorithm_type(self, site: str) -> BrokerAllocationAlgorithm: if self.properties is not None: - algo_str = self.properties.get(Constants.ALGORITHM, None) - if algo_str is not None: - return algo_str - return BrokerAllocationAlgorithm.FirstFit.name + algorithms = self.properties.get(Constants.ALGORITHM, None) + random_algo = algorithms.get(str(BrokerAllocationAlgorithm.Random)) + if random_algo and random_algo.get('enabled') and random_algo.get('sites') and \ + site in random_algo.get('sites'): + return BrokerAllocationAlgorithm.Random + first_fit_algo = algorithms.get(BrokerAllocationAlgorithm.Random.name) + if first_fit_algo and first_fit_algo.get('enabled'): + return BrokerAllocationAlgorithm.FirstFit + return BrokerAllocationAlgorithm.FirstFit if __name__ == '__main__': diff --git a/fabric_cf/actor/core/policy/controller_simple_policy.py b/fabric_cf/actor/core/policy/controller_simple_policy.py index 5a78e31c..77af98f2 100644 --- a/fabric_cf/actor/core/policy/controller_simple_policy.py +++ b/fabric_cf/actor/core/policy/controller_simple_policy.py @@ -143,14 +143,6 @@ def process_demand(self, *, cycle: int) -> ReservationSet: if demand is None: return ReservationSet() - ''' - for reservation in demand.values(): - kernel_slice = reservation.get_slice() - for slice_reservation in kernel_slice.get_reservations().values(): - self.logger.debug(f"Reservation {slice_reservation.get_reservation_id()} is in state: " - f"{slice_reservation.get_state().name} type: {type(reservation)}") - ''' - broker = self.actor.get_default_broker() for reservation in demand.values(): if reservation.get_broker() is None: diff --git a/fabric_cf/actor/core/policy/inventory.py b/fabric_cf/actor/core/policy/inventory.py index 7ed0f285..f5f9621d 100644 --- a/fabric_cf/actor/core/policy/inventory.py +++ b/fabric_cf/actor/core/policy/inventory.py @@ -24,6 +24,7 @@ # # Author: Komal Thareja (kthare10@renci.org) from __future__ 
import annotations + from typing import TYPE_CHECKING from fabric_cf.actor.core.common.constants import Constants diff --git a/fabric_cf/actor/core/policy/network_node_control.py b/fabric_cf/actor/core/policy/network_node_control.py index 478bd531..a2e2af8e 100644 --- a/fabric_cf/actor/core/policy/network_node_control.py +++ b/fabric_cf/actor/core/policy/network_node_control.py @@ -219,7 +219,6 @@ def assign(self, *, reservation: ABCAuthorityReservation, delegation_name: str, properties=reservation.get_slice().get_config_properties()) gained = UnitSet(plugin=self.authority.get_plugin(), units={unit.reservation_id: unit}) else: - # FIX ME: handle modify self.logger.info(f"Extend Lease for now, no modify supported res# {reservation}") current_sliver = current.get_sliver() diff = current_sliver.diff(other_sliver=requested) @@ -235,7 +234,7 @@ def assign(self, *, reservation: ABCAuthorityReservation, delegation_name: str, actor_id=self.authority.get_guid(), sliver=requested, rtype=resource_type, properties=reservation.get_slice().get_config_properties()) modified = UnitSet(plugin=self.authority.get_plugin(), units={unit.reservation_id: unit}) - elif len(diff.removed.components) > 0: + elif len(diff.removed.components) > 0 or len(diff.modified.components): unit = Unit(rid=reservation.get_reservation_id(), slice_id=reservation.get_slice_id(), actor_id=self.authority.get_guid(), sliver=requested, rtype=resource_type, properties=reservation.get_slice().get_config_properties()) diff --git a/fabric_cf/actor/core/policy/network_node_inventory.py b/fabric_cf/actor/core/policy/network_node_inventory.py index 5c4eb8f3..83b2483c 100644 --- a/fabric_cf/actor/core/policy/network_node_inventory.py +++ b/fabric_cf/actor/core/policy/network_node_inventory.py @@ -31,12 +31,13 @@ from fim.slivers.delegations import Delegations from fim.slivers.instance_catalog import InstanceCatalog from fim.slivers.interface_info import InterfaceSliver -from fim.slivers.network_node import NodeSliver +from fim.slivers.network_node import NodeSliver, NodeType from fim.slivers.network_service import NSLayer from fabric_cf.actor.core.apis.abc_reservation_mixin import ABCReservationMixin from fabric_cf.actor.core.common.constants import Constants from fabric_cf.actor.core.common.exceptions import BrokerException, ExceptionErrorCode +from fabric_cf.actor.core.kernel.reservation_states import ReservationOperation from fabric_cf.actor.core.policy.inventory_for_type import InventoryForType from fabric_cf.actor.core.util.id import ID @@ -230,13 +231,13 @@ def __update_smart_nic_labels_and_capacities(self, *, available_component: Compo def __check_component_labels_and_capacities(self, *, available_component: ComponentSliver, graph_id: str, requested_component: ComponentSliver, - is_create: bool = False) -> ComponentSliver: + operation: ReservationOperation = ReservationOperation.Create) -> ComponentSliver: """ Check if available component capacities, labels to match requested component :param available_component: available component :param graph_id: BQM graph id :param requested_component: requested component - :param is_create: is_create + :param operation: operation :return: requested component annotated with properties in case of success, None otherwise """ if requested_component.get_model() is not None and \ @@ -271,7 +272,7 @@ def __check_component_labels_and_capacities(self, *, available_component: Compon node_map = tuple([graph_id, available_component.node_id]) requested_component.set_node_map(node_map=node_map) - if 
requested_component.labels is None or is_create: + if requested_component.labels is None or operation == ReservationOperation.Create: requested_component.labels = Labels.update(lab=requested_component.get_label_allocations()) return requested_component @@ -342,7 +343,8 @@ def __exclude_allocated_component(self, *, graph_node: NodeSliver, available_com graph_node.attached_components_info.remove_device(name=available_component.get_name()) def __exclude_components_for_existing_reservations(self, *, rid: ID, graph_node: NodeSliver, - existing_reservations: List[ABCReservationMixin]) -> NodeSliver: + existing_reservations: List[ABCReservationMixin], + operation: ReservationOperation = ReservationOperation.Create) -> NodeSliver: """ Remove already assigned components to existing reservations from the candidate node @param rid reservation ID @@ -352,7 +354,8 @@ def __exclude_components_for_existing_reservations(self, *, rid: ID, graph_node: """ for reservation in existing_reservations: # Requested reservation should be skipped only when new i.e. not ticketed - if rid == reservation.get_reservation_id() and not reservation.is_ticketed(): + if rid == reservation.get_reservation_id() and \ + (operation == ReservationOperation.Extend or not reservation.is_ticketed()): continue # For Active or Ticketed or Ticketing reservations; reduce the counts from available allocated_sliver = None @@ -391,7 +394,7 @@ def __exclude_components_for_existing_reservations(self, *, rid: ID, graph_node: def __check_components(self, *, rid: ID, requested_components: AttachedComponentsInfo, graph_id: str, graph_node: NodeSliver, existing_reservations: List[ABCReservationMixin], existing_components: Dict[str, List[str]], - is_create: bool = False) -> AttachedComponentsInfo: + operation: ReservationOperation = ReservationOperation.Create) -> AttachedComponentsInfo: """ Check if the requested capacities can be satisfied with the available capacities :param rid: reservation id of the reservation being served @@ -399,12 +402,15 @@ def __check_components(self, *, rid: ID, requested_components: AttachedComponent :param graph_id: BQM graph id :param graph_node: BQM graph node identified to serve the reservation :param existing_reservations: Existing Reservations served by the same BQM node - :param is_create: Flag indicating if this is create or modify + :param operation: Flag indicating if this is create or modify :return: Components updated with the corresponding BQM node ids :raises: BrokerException in case the request cannot be satisfied """ + self.logger.debug(f"Available on {graph_node.node_id} components: {graph_node.attached_components_info.devices.keys()}") + self.__exclude_components_for_existing_reservations(rid=rid, graph_node=graph_node, - existing_reservations=existing_reservations) + existing_reservations=existing_reservations, + operation=operation) self.logger.debug(f"Excluding components connected to Network Services: {existing_components}") @@ -429,15 +435,42 @@ def __check_components(self, *, rid: ID, requested_components: AttachedComponent comps_to_remove.append(av) for c in comps_to_remove: + self.logger.debug(f"Excluding component: {c.get_name()}") + print(f"Excluding component: {c.get_name()}") graph_node.attached_components_info.remove_device(name=c.get_name()) self.logger.debug(f"requested_components: {requested_components.devices.values()} for reservation# {rid}") for name, requested_component in requested_components.devices.items(): - if not is_create and requested_component.get_node_map() is not None: 
- self.logger.debug(f"==========Ignoring Allocated component: {requested_component} for modify") - # TODO exclude already allocated component to the same reservation + if operation == ReservationOperation.Modify and requested_component.get_node_map() is not None: + self.logger.debug(f"Modify: Ignoring Allocated component: {requested_component}") + continue + + if operation == ReservationOperation.Extend and requested_component.get_node_map() is not None: + bqm_id, node_id = requested_component.get_node_map() + + if requested_component.get_type() == ComponentType.SharedNIC: + allocated_bdfs = existing_components.get(node_id) + if allocated_bdfs and requested_component.labels and requested_component.labels.bdf: + bdfs = requested_component.labels.bdf + if isinstance(requested_component.labels.bdf, str): + bdfs = [requested_component.labels.bdf] + + self.logger.debug(f"Allocated BDFs: {allocated_bdfs}") + for x in bdfs: + if x in allocated_bdfs: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: Component of type: {requested_component.get_model()} with PCI Address: {x}" + f"already in use by another reservation for node: {graph_node.node_id}") + else: + if node_id in existing_components.keys(): + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: Component of type: {requested_component.get_model()} " + f"already in use by another reservation for node: {graph_node.node_id}") + + self.logger.debug(f"Renew: Component {requested_component} still available") continue - self.logger.debug(f"==========Allocating component: {requested_component}") + + self.logger.debug(f"Create: Allocating component: {requested_component}") resource_type = requested_component.get_type() resource_model = requested_component.get_model() if resource_type == ComponentType.Storage: @@ -446,7 +479,8 @@ def __check_components(self, *, rid: ID, requested_components: AttachedComponent requested_component.label_allocations = Labels.update(lab=requested_component.get_labels()) continue available_components = graph_node.attached_components_info.get_devices_by_type(resource_type=resource_type) - self.logger.debug(f"available_components after excluding allocated components: {available_components}") + self.logger.debug(f"Available components of type: {resource_type} after excluding " + f"allocated components: {available_components}") if available_components is None or len(available_components) == 0: raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, @@ -458,7 +492,7 @@ def __check_components(self, *, rid: ID, requested_components: AttachedComponent requested_component = self.__check_component_labels_and_capacities( available_component=component, graph_id=graph_id, requested_component=requested_component, - is_create=is_create) + operation=operation) if requested_component.get_node_map() is not None: self.logger.info(f"Assigning {component.node_id} to component# " @@ -476,9 +510,60 @@ def __check_components(self, *, rid: ID, requested_components: AttachedComponent return requested_components + def __allocate_p4_switch(self, *, rid: ID, requested_sliver: NodeSliver, graph_id: str, graph_node: NodeSliver, + existing_reservations: List[ABCReservationMixin], existing_components: Dict[str, List[str]], + operation: ReservationOperation = ReservationOperation.Create) -> Tuple[str, BaseSliver]: + """ + Allocate an extending or ticketing reservation for a P4 switch + + :param rid: reservation id of the reservation to be 
allocated + :param requested_sliver: requested sliver + :param graph_id: BQM graph id + :param graph_node: BQM graph node identified to serve the reservation + :param existing_components: Existing Components + :param existing_reservations: Existing Reservations served by the same BQM node + :param operation: Indicates if this is create or modify + + :return: Tuple of Delegation Id and the Requested Sliver annotated with BQM Node Id and other properties + :raises: BrokerException in case the request cannot be satisfied + """ + delegation_id = None + + if operation == ReservationOperation.Create: + # In case of modify, directly get delegation_id + if len(graph_node.get_capacity_delegations().get_delegation_ids()) > 0: + delegation_id = next(iter(graph_node.get_capacity_delegations().get_delegation_ids())) + + # Handle allocation to account for leaked Network Services + for n in existing_components.keys(): + if n in graph_node.node_id: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Node of type: {graph_node.get_type()} not available on site: " + f"{graph_node.get_site()}, already in use by another reservation") + + # For create, we need to allocate the P4 + requested_capacities = requested_sliver.get_capacities() + + # Check if Capacities can be satisfied + delegation_id = self.__check_capacities(rid=rid, + requested_capacities=requested_capacities, + delegated_capacities=graph_node.get_capacity_delegations(), + existing_reservations=existing_reservations) + requested_sliver.capacity_allocations = Capacities() + requested_sliver.capacity_allocations = Capacities.update(lab=requested_capacities) + requested_sliver.label_allocations = Labels(local_name=graph_node.get_name()) + + requested_sliver.set_node_map(node_map=(graph_id, graph_node.node_id)) + requested_sliver.management_ip = graph_node.management_ip + + self.logger.info(f"Reservation# {rid} is being served by delegation# {delegation_id} " + f"node# [{graph_id}/{graph_node.node_id}]") + + return delegation_id, requested_sliver + def allocate(self, *, rid: ID, requested_sliver: BaseSliver, graph_id: str, graph_node: BaseSliver, existing_reservations: List[ABCReservationMixin], existing_components: Dict[str, List[str]], - is_create: bool = False) -> Tuple[str, BaseSliver]: + operation: ReservationOperation = ReservationOperation.Create) -> Tuple[str, BaseSliver]: """ Allocate an extending or ticketing reservation :param rid: reservation id of the reservation to be allocated @@ -487,41 +572,51 @@ def allocate(self, *, rid: ID, requested_sliver: BaseSliver, graph_id: str, grap :param graph_node: BQM graph node identified to serve the reservation :param existing_components: Existing Components :param existing_reservations: Existing Reservations served by the same BQM node - :param is_create: Indicates if this is create or modify + :param operation: Indicates if this is create or modify :return: Tuple of Delegation Id and the Requested Sliver annotated with BQM Node Id and other properties :raises: BrokerException in case the request cannot be satisfied """ if graph_node.get_capacity_delegations() is None or rid is None: - raise BrokerException(error_code=Constants.INVALID_ARGUMENT, + raise BrokerException(error_code=ExceptionErrorCode.INVALID_ARGUMENT, msg=f"capacity_delegations is missing or reservation is None") if not isinstance(requested_sliver, NodeSliver): - raise BrokerException(error_code=Constants.INVALID_ARGUMENT, + raise BrokerException(error_code=ExceptionErrorCode.INVALID_ARGUMENT, 
msg=f"resource type: {requested_sliver.get_type()}") if not isinstance(graph_node, NodeSliver): - raise BrokerException(error_code=Constants.INVALID_ARGUMENT, + raise BrokerException(error_code=ExceptionErrorCode.INVALID_ARGUMENT, msg=f"resource type: {graph_node.get_type()}") + if requested_sliver.get_type() not in [NodeType.VM, NodeType.Switch]: + raise BrokerException(error_code=ExceptionErrorCode.INVALID_ARGUMENT, + msg=f"Unsupported resource type: {graph_node.get_type()}") + + if requested_sliver.get_type() == NodeType.Switch: + return self.__allocate_p4_switch(rid=rid, requested_sliver=requested_sliver, graph_id=graph_id, + graph_node=graph_node, existing_reservations=existing_reservations, + existing_components=existing_components, operation=operation) + delegation_id = None requested_capacities = None # For create, we need to allocate the VM - if is_create: + if operation == ReservationOperation.Create: # Always use requested capacities to be mapped from flavor i.e. capacity hints requested_capacity_hints = requested_sliver.get_capacity_hints() catalog = InstanceCatalog() requested_capacities = catalog.get_instance_capacities(instance_type=requested_capacity_hints.instance_type) - - # Check if Capacities can be satisfied - delegation_id = self.__check_capacities(rid=rid, - requested_capacities=requested_capacities, - delegated_capacities=graph_node.get_capacity_delegations(), - existing_reservations=existing_reservations) else: + requested_capacities = requested_sliver.get_capacity_allocations() # In case of modify, directly get delegation_id if len(graph_node.get_capacity_delegations().get_delegation_ids()) > 0: delegation_id = next(iter(graph_node.get_capacity_delegations().get_delegation_ids())) + # Check if Capacities can be satisfied + delegation_id = self.__check_capacities(rid=rid, + requested_capacities=requested_capacities, + delegated_capacities=graph_node.get_capacity_delegations(), + existing_reservations=existing_reservations) + # Check if Components can be allocated if requested_sliver.attached_components_info is not None: requested_sliver.attached_components_info = self.__check_components( @@ -531,10 +626,10 @@ def allocate(self, *, rid: ID, requested_sliver: BaseSliver, graph_id: str, grap graph_node=graph_node, existing_reservations=existing_reservations, existing_components=existing_components, - is_create=is_create) + operation=operation) # Do this only for create - if is_create: + if operation == ReservationOperation.Create: requested_sliver.capacity_allocations = Capacities() requested_sliver.capacity_allocations = Capacities.update(lab=requested_capacities) requested_sliver.label_allocations = Labels(instance_parent=graph_node.get_name()) @@ -547,4 +642,4 @@ def allocate(self, *, rid: ID, requested_sliver: BaseSliver, graph_id: str, grap return delegation_id, requested_sliver def free(self, *, count: int, request: dict = None, resource: dict = None) -> dict: - return + pass diff --git a/fabric_cf/actor/core/policy/network_service_inventory.py b/fabric_cf/actor/core/policy/network_service_inventory.py index 4d3abc27..d5456b09 100644 --- a/fabric_cf/actor/core/policy/network_service_inventory.py +++ b/fabric_cf/actor/core/policy/network_service_inventory.py @@ -27,7 +27,7 @@ import random import traceback from ipaddress import IPv6Network, IPv4Network -from typing import List, Tuple +from typing import List, Tuple, Union from fim.slivers.capacities_labels import Labels from fim.slivers.gateway import Gateway @@ -38,6 +38,7 @@ from 
fabric_cf.actor.core.apis.abc_reservation_mixin import ABCReservationMixin from fabric_cf.actor.core.common.constants import Constants from fabric_cf.actor.core.common.exceptions import BrokerException, ExceptionErrorCode +from fabric_cf.actor.core.kernel.reservation_states import ReservationOperation from fabric_cf.actor.core.policy.inventory_for_type import InventoryForType from fabric_cf.actor.core.util.id import ID @@ -63,13 +64,16 @@ def __extract_vlan_range(*, labels: Labels) -> List[int] or None: vlan_range = [int(labels.vlan)] return vlan_range - def __exclude_allocated_vlans(self, *, available_vlan_range: List[int], bqm_ifs: InterfaceSliver, + def __exclude_allocated_vlans(self, *, rid: ID, available_vlan_range: List[int], bqm_ifs: InterfaceSliver, existing_reservations: List[ABCReservationMixin]) -> List[int]: # Exclude the already allocated VLANs and subnets if existing_reservations is None: return available_vlan_range for reservation in existing_reservations: + if rid == reservation.get_reservation_id(): + continue + # For Active or Ticketed or Ticketing reservations; reduce the counts from available allocated_sliver = None if reservation.is_ticketing() and reservation.get_approved_resources() is not None: @@ -110,9 +114,10 @@ def __exclude_allocated_vlans(self, *, available_vlan_range: List[int], bqm_ifs: msg=f"No VLANs available!") return available_vlan_range - def allocate_ifs(self, *, requested_ns: NetworkServiceSliver, requested_ifs: InterfaceSliver, + def allocate_ifs(self, *, rid: ID, requested_ns: NetworkServiceSliver, requested_ifs: InterfaceSliver, owner_ns: NetworkServiceSliver, bqm_ifs: InterfaceSliver, - existing_reservations: List[ABCReservationMixin]) -> InterfaceSliver: + existing_reservations: List[ABCReservationMixin], + operation: ReservationOperation = ReservationOperation.Create) -> InterfaceSliver: """ Allocate Interface Sliver - For L2 services, validate the VLAN tag specified is within the allowed range @@ -120,20 +125,21 @@ def allocate_ifs(self, *, requested_ns: NetworkServiceSliver, requested_ifs: Int - grab the VLAN from BQM Site specific NetworkService - exclude the VLAN already assigned to other Interface Sliver on the same port - allocate the first available VLAN to the Interface Sliver + :param rid: Reservation ID :param requested_ns: Requested NetworkService :param requested_ifs: Requested Interface Sliver :param owner_ns: BQM NetworkService identified to serve the InterfaceSliver :param bqm_ifs: BQM InterfaceSliver identified to serve the InterfaceSliver :param existing_reservations: Existing Reservations which also are served by the owner switch + :param operation: Extend/Create/Modify Operation :return Interface Sliver updated with the allocated VLAN tag for FABNetv4 and FABNetv6 services :raises Exception if vlan tag range is not in the valid range for L2 services - Return the sliver updated with the VLAN """ - if requested_ns.get_layer() == NSLayer.L2: - requested_vlan = None - if requested_ifs.labels is not None and requested_ifs.labels.vlan is not None: - requested_vlan = int(requested_ifs.labels.vlan) + requested_vlan = None + if requested_ifs.labels and requested_ifs.labels.vlan: + requested_vlan = int(requested_ifs.labels.vlan) + if requested_ns.get_layer() == NSLayer.L2: # Validate the requested VLAN is in range specified on MPLS Network Service in BQM # Only do this for Non FacilityPorts if bqm_ifs.get_type() != InterfaceType.FacilityPort: @@ -141,73 +147,92 @@ def allocate_ifs(self, *, requested_ns: NetworkServiceSliver, 
requested_ifs: Int return requested_ifs if owner_ns.get_label_delegations() is None: - if 1 > requested_vlan > 4095: - raise BrokerException(error_code=ExceptionErrorCode.FAILURE, - msg=f"Vlan for L2 service {requested_vlan} " - f"is outside the allowed range 1-4095") - else: - return requested_ifs + if not (1 <= requested_vlan <= 4095): + raise BrokerException( + error_code=ExceptionErrorCode.FAILURE, + msg=f"Vlan for L2 service {requested_vlan} is outside the allowed range 1-4095" + ) + return requested_ifs delegation_id, delegated_label = self.get_delegations(lab_cap_delegations=owner_ns.get_label_delegations()) vlan_range = self.__extract_vlan_range(labels=delegated_label) - if vlan_range is not None and requested_vlan not in vlan_range: - raise BrokerException(error_code=ExceptionErrorCode.FAILURE, - msg=f"Vlan for L2 service {requested_vlan} is outside the available range " - f"{vlan_range}") + if vlan_range and requested_vlan not in vlan_range: + raise BrokerException( + error_code=ExceptionErrorCode.FAILURE, + msg=f"Vlan for L2 service {requested_vlan} is outside the available range {vlan_range}" + ) - # Validate the VLANs vlan_range = self.__extract_vlan_range(labels=bqm_ifs.labels) - if vlan_range is not None: - vlan_range = self.__exclude_allocated_vlans(available_vlan_range=vlan_range, bqm_ifs=bqm_ifs, + if vlan_range: + vlan_range = self.__exclude_allocated_vlans(rid=rid, available_vlan_range=vlan_range, + bqm_ifs=bqm_ifs, existing_reservations=existing_reservations) + if operation == ReservationOperation.Extend: + if requested_vlan and requested_vlan not in vlan_range: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: VLAN {requested_vlan} for Interface : " + f"{requested_ifs.get_name()/bqm_ifs.node_id} already in " + f"use by another reservation") + return requested_ifs + if requested_vlan is None: - #requested_ifs.labels.vlan = str(random.choice(vlan_range)) - requested_ifs.labels.vlan = str(vlan_range[0]) + requested_ifs.labels.vlan = str(random.choice(vlan_range)) return requested_ifs if requested_vlan not in vlan_range: - raise BrokerException(error_code=ExceptionErrorCode.FAILURE, - msg=f"Vlan for L2 service {requested_vlan} is outside the available range " - f"{vlan_range}") - + raise BrokerException( + error_code=ExceptionErrorCode.FAILURE, + msg=f"Vlan for L2 service {requested_vlan} is outside the available range {vlan_range}" + ) else: - # Grab Label Delegations - delegation_id, delegated_label = self.get_delegations( - lab_cap_delegations=owner_ns.get_label_delegations()) - - # Get the VLAN range if bqm_ifs.get_type() != InterfaceType.FacilityPort: + delegation_id, delegated_label = self.get_delegations( + lab_cap_delegations=owner_ns.get_label_delegations()) vlan_range = self.__extract_vlan_range(labels=delegated_label) else: vlan_range = self.__extract_vlan_range(labels=bqm_ifs.labels) - if vlan_range is not None: - vlan_range = self.__exclude_allocated_vlans(available_vlan_range=vlan_range, bqm_ifs=bqm_ifs, + if vlan_range: + vlan_range = self.__exclude_allocated_vlans(rid=rid, available_vlan_range=vlan_range, + bqm_ifs=bqm_ifs, existing_reservations=existing_reservations) + + if operation == ReservationOperation.Extend: + if requested_vlan and requested_vlan not in vlan_range: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: VLAN {requested_vlan} for Interface : " + f"{requested_ifs.get_name()}/{bqm_ifs.node_id} already in " + f"use by another reservation") + 
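On the extend path the broker no longer allocates a new VLAN; it only verifies that the VLAN already bound to the interface has not been handed to a different reservation, while the create path now picks a random free tag instead of always the first one. A compact standalone sketch of that decision is below, with RuntimeError standing in for BrokerException and all names illustrative rather than taken from the patch.

import random
from typing import List, Optional

def pick_or_validate_vlan(available: List[int], taken_by_others: List[int],
                          current_vlan: Optional[int], extend: bool) -> int:
    """Exclude VLANs held by other reservations, then either confirm the previously
    assigned VLAN (extend/renew) or choose a free one (create)."""
    free = [v for v in available if v not in taken_by_others]
    if not free:
        raise RuntimeError("No VLANs available!")
    if extend:
        if current_vlan is not None and current_vlan not in free:
            raise RuntimeError(f"Renew failed: VLAN {current_vlan} already in use by another reservation")
        return current_vlan if current_vlan is not None else random.choice(free)
    if current_vlan is not None:
        if current_vlan not in free:
            raise RuntimeError(f"VLAN {current_vlan} is outside the available range {free}")
        return current_vlan
    return random.choice(free)

assert pick_or_validate_vlan([100, 101, 102], taken_by_others=[101], current_vlan=100, extend=True) == 100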
return requested_ifs + if bqm_ifs.get_type() != InterfaceType.FacilityPort: - # Allocate the first available VLAN - #requested_ifs.labels.vlan = str(random.choice(vlan_range)) - requested_ifs.labels.vlan = str(vlan_range[0]) + requested_ifs.labels.vlan = str(random.choice(vlan_range)) requested_ifs.label_allocations = Labels(vlan=requested_ifs.labels.vlan) else: - if requested_ifs.labels is None: + if not requested_ifs.labels: return requested_ifs if requested_ifs.labels.vlan is None: - #requested_ifs.labels.vlan = str(random.choice(vlan_range)) - requested_ifs.labels.vlan = str(vlan_range[0]) + requested_ifs.labels.vlan = str(random.choice(vlan_range)) if int(requested_ifs.labels.vlan) not in vlan_range: - raise BrokerException(error_code=ExceptionErrorCode.FAILURE, - msg=f"Vlan for L3 service {requested_ifs.labels.vlan} " - f"is outside the available range " - f"{vlan_range}") + raise BrokerException( + error_code=ExceptionErrorCode.FAILURE, + msg=f"Vlan for L3 service {requested_ifs.labels.vlan} is outside the " + f"available range {vlan_range}" + ) return requested_ifs def __allocate_ip_address_to_ifs(self, *, requested_ns: NetworkServiceSliver) -> NetworkServiceSliver: + """ + Allocate IP addresses to the interfaces of the requested network service sliver. + + :param requested_ns: The requested network service sliver. + :return: The updated network service sliver with allocated IP addresses. + """ if requested_ns.gateway is None: return requested_ns @@ -301,8 +326,7 @@ def allocate(self, *, rid: ID, requested_ns: NetworkServiceSliver, owner_ns: Net :param requested_ns: Requested NetworkService :param owner_ns: BQM Network Service identified to serve the NetworkService :param existing_reservations: Existing Reservations which also are served by the owner switch - :return NetworkService updated with the allocated subnet for FABNetv4 and FABNetv6 services - Return the sliver updated with the subnet + :return: NetworkService updated with the allocated subnet for FABNetv4 and FABNetv6 services """ try: if requested_ns.get_type() not in Constants.L3_SERVICES: @@ -312,12 +336,11 @@ def allocate(self, *, rid: ID, requested_ns: NetworkServiceSliver, owner_ns: Net delegation_id, delegated_label = self.get_delegations(lab_cap_delegations=owner_ns.get_label_delegations()) # HACK to use FabNetv6 for FabNetv6Ext as both have the same range - # Needs to be removed if FabNetv6/FabNetv6Ext are configured with different ranges requested_ns_type = requested_ns.get_type() if requested_ns_type == ServiceType.FABNetv6Ext: requested_ns_type = ServiceType.FABNetv6 - # Hack End + # Handle L3VPN type specifically if requested_ns_type == ServiceType.L3VPN: if requested_ns.labels is not None: requested_ns.labels = Labels.update(requested_ns.labels, asn=delegated_label.asn) @@ -325,108 +348,195 @@ def allocate(self, *, rid: ID, requested_ns: NetworkServiceSliver, owner_ns: Net requested_ns.labels = Labels(asn=delegated_label.asn) return requested_ns - subnet_list = None + ip_network, subnet_list = self._generate_subnet_list(owner_ns=owner_ns, delegated_label=delegated_label) - # Get Subnet - if owner_ns.get_type() in Constants.L3_FABNETv6_SERVICES: - ip_network = IPv6Network(delegated_label.ipv6_subnet) - subnet_list = list(ip_network.subnets(new_prefix=64)) - # Exclude the 1st subnet as it is reserved for control plane - subnet_list.pop(0) + # Exclude the already allocated subnets + subnet_list = self._exclude_allocated_subnets(subnet_list=subnet_list, requested_ns_type=requested_ns_type, + rid=rid, 
existing_reservations=existing_reservations) + + # Extend Case + if requested_ns.get_node_map(): + self._can_extend(subnet_list=subnet_list, requested_ns=requested_ns) + return requested_ns - elif owner_ns.get_type() == ServiceType.FABNetv4: - ip_network = IPv4Network(delegated_label.ipv4_subnet) + gateway_labels = self._assign_gateway_labels(ip_network=ip_network, subnet_list=subnet_list, + requested_ns_type=requested_ns.get_type()) + + self.logger.debug(f"Gateway Labels: {gateway_labels}") + + requested_ns.gateway = Gateway(lab=gateway_labels) + + # Allocate the IP Addresses for the requested NS + requested_ns = self.__allocate_ip_address_to_ifs(requested_ns=requested_ns) + except BrokerException as e: + raise e + except Exception as e: + self.logger.error(f"Error in allocate_gateway_for_ns: {e}") + self.logger.error(traceback.format_exc()) + raise BrokerException(msg=f"Allocation failure for Requested Network Service: {e}") + + return requested_ns + + def _generate_subnet_list(self, *, owner_ns: NetworkServiceSliver, + delegated_label: Labels) -> Tuple[Union[IPv4Network, IPv6Network], List]: + """ + Generate the list of subnets based on the owner network service type. + + :param owner_ns: The NetworkServiceSliver representing the owner network service. + :param delegated_label: The Labels object containing the delegated subnet information. + :return: A tuple containing the IP network and the list of generated subnets. + """ + subnet_list = None + ip_network = None + if owner_ns.get_type() in Constants.L3_FABNETv6_SERVICES: + ip_network = IPv6Network(delegated_label.ipv6_subnet) + subnet_list = list(ip_network.subnets(new_prefix=64)) + # Exclude the 1st subnet as it is reserved for control plane + subnet_list.pop(0) + # Exclude the last subnet for FABRIC STAR Bastion Host Allocation + subnet_list.pop(-1) + + elif owner_ns.get_type() in [ServiceType.FABNetv4, ServiceType.FABNetv4Ext]: + ip_network = IPv4Network(delegated_label.ipv4_subnet) + if owner_ns.get_type() == ServiceType.FABNetv4: subnet_list = list(ip_network.subnets(new_prefix=24)) - # Exclude the 1st subnet as it is reserved for control plane subnet_list.pop(0) elif owner_ns.get_type() == ServiceType.FABNetv4Ext: - ip_network = IPv4Network(delegated_label.ipv4_subnet) subnet_list = list(ip_network.hosts()) - # Exclude the already allocated subnets - for reservation in existing_reservations: - if rid == reservation.get_reservation_id(): - continue - # For Active or Ticketed or Ticketing reservations; reduce the counts from available - allocated_sliver = None - if reservation.is_ticketing() and reservation.get_approved_resources() is not None: - allocated_sliver = reservation.get_approved_resources().get_sliver() + self.logger.debug(f"Available Subnets: {subnet_list}") - if (reservation.is_active() or reservation.is_ticketed()) and \ - reservation.get_resources() is not None: - allocated_sliver = reservation.get_resources().get_sliver() + return ip_network, subnet_list - self.logger.debug(f"Existing res# {reservation.get_reservation_id()} " - f"allocated: {allocated_sliver}") + def _exclude_allocated_subnets(self, *, subnet_list: List, requested_ns_type: str, rid: ID, + existing_reservations: List[ABCReservationMixin]) -> List: + """ + Exclude the subnets that are already allocated. - if allocated_sliver is None: - continue + :param subnet_list: A list of available subnets to be allocated. + :param requested_ns_type: The type of the requested network service. + :param rid: The reservation ID of the current request. 
+ :param existing_reservations: A list of existing reservations that may contain allocated subnets. + :return: A list of subnets excluding those that have already been allocated. + """ + for reservation in existing_reservations: + if rid == reservation.get_reservation_id(): + continue - # HACK to use FabNetv6 for FabNetv6Ext as both have the same range - # Needs to be removed if FabNetv6/FabNetv6Ext are configured with different ranges - allocated_sliver_type = allocated_sliver.get_type() - if allocated_sliver_type == ServiceType.FABNetv6Ext: - allocated_sliver_type = ServiceType.FABNetv6 - # HACK End + allocated_sliver = self._get_allocated_sliver(reservation) + if allocated_sliver is None: + continue - if allocated_sliver_type != requested_ns_type: - continue + # HACK to use FabNetv6 for FabNetv6Ext as both have the same range + # Needs to be removed if FabNetv6/FabNetv6Ext are configured with different ranges + allocated_sliver_type = allocated_sliver.get_type() + if allocated_sliver_type == ServiceType.FABNetv6Ext: + allocated_sliver_type = ServiceType.FABNetv6 + # HACK End + + if allocated_sliver_type != requested_ns_type: + continue - if allocated_sliver.get_type() == ServiceType.FABNetv4: - subnet_to_remove = IPv4Network(allocated_sliver.get_gateway().lab.ipv4_subnet) + if allocated_sliver.get_type() == ServiceType.FABNetv4: + subnet_to_remove = IPv4Network(allocated_sliver.get_gateway().subnet) + self.logger.debug( + f"Excluding already allocated IP4Subnet: " + f"{allocated_sliver.get_gateway().subnet}" + f" to res# {reservation.get_reservation_id()}") + if subnet_to_remove in subnet_list: subnet_list.remove(subnet_to_remove) - self.logger.debug( - f"Excluding already allocated IP4Subnet: " - f"{allocated_sliver.get_gateway().lab.ipv4_subnet}" - f" to res# {reservation.get_reservation_id()}") - - elif allocated_sliver.get_type() == ServiceType.FABNetv4Ext: - if allocated_sliver.labels is not None and allocated_sliver.labels.ipv4 is not None: - for x in allocated_sliver.labels.ipv4: - subnet_to_remove = ipaddress.IPv4Address(x) + + elif allocated_sliver.get_type() == ServiceType.FABNetv4Ext: + if allocated_sliver.labels is not None and allocated_sliver.labels.ipv4 is not None: + for x in allocated_sliver.labels.ipv4: + subnet_to_remove = ipaddress.IPv4Address(x) + self.logger.debug( + f"Excluding already allocated IP4: " + f"{x}" + f" to res# {reservation.get_reservation_id()}") + if subnet_to_remove in subnet_list: subnet_list.remove(subnet_to_remove) - self.logger.debug( - f"Excluding already allocated IPv4: {x}" - f" to res# {reservation.get_reservation_id()}") - elif allocated_sliver.get_type() in Constants.L3_FABNETv6_SERVICES: - subnet_to_remove = IPv6Network(allocated_sliver.get_gateway().lab.ipv6_subnet) + elif allocated_sliver.get_type() in Constants.L3_FABNETv6_SERVICES: + subnet_to_remove = IPv6Network(allocated_sliver.get_gateway().subnet) + self.logger.debug( + f"Excluding already allocated IP6Subnet: " + f"{allocated_sliver.get_gateway().subnet}" + f" to res# {reservation.get_reservation_id()}") + + if subnet_to_remove in subnet_list: subnet_list.remove(subnet_to_remove) - self.logger.debug( - f"Excluding already allocated IPv6Subnet: " - f"{allocated_sliver.get_gateway().lab.ipv6_subnet}" - f" to res# {reservation.get_reservation_id()}") - gateway_labels = Labels() - if requested_ns.get_type() == ServiceType.FABNetv4: - gateway_labels.ipv4_subnet = subnet_list[0].with_prefixlen - gateway_labels.ipv4 = str(list(subnet_list[0].hosts())[0]) + 
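The exclusion above is what keeps two slices from being handed the same FABNet subnet: the delegated prefix is carved into per-slice subnets, the control-plane subnet is dropped, and anything already held by another reservation is removed. A self-contained sketch of the FABNetv4 case follows; the prefixes are hypothetical and the helper is illustrative, not the patched _generate_subnet_list / _exclude_allocated_subnets pair.

from ipaddress import IPv4Network
from typing import List

def available_fabnetv4_subnets(delegated_prefix: str, allocated: List[str]) -> List[IPv4Network]:
    """Carve the delegated FABNetv4 prefix into /24 tenant subnets, drop the first /24
    (reserved for the control plane) and remove subnets held by other reservations."""
    subnets = list(IPv4Network(delegated_prefix).subnets(new_prefix=24))
    subnets.pop(0)                                   # control-plane subnet
    taken = {IPv4Network(s) for s in allocated}
    return [s for s in subnets if s not in taken]

# Hypothetical prefixes:
free = available_fabnetv4_subnets("10.128.0.0/16", allocated=["10.128.2.0/24"])
assert IPv4Network("10.128.1.0/24") in free and IPv4Network("10.128.2.0/24") not in free

The renew path added later in this patch (_can_extend) performs the complementary check for FABNetv4/FABNetv6: the subnet already recorded on the slice's gateway must still be present in this filtered list, otherwise the extend fails with an insufficient-resources error.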
self.logger.debug(f"Excluding already allocated subnet for reservation {reservation.get_reservation_id()}") - elif requested_ns.get_type() == ServiceType.FABNetv4Ext: - gateway_labels.ipv4_subnet = ip_network.with_prefixlen - gateway_labels.ipv4 = str(subnet_list[0]) + return subnet_list - elif requested_ns.get_type() in Constants.L3_FABNETv6_SERVICES: - gateway_labels.ipv6_subnet = subnet_list[0].with_prefixlen - gateway_labels.ipv6 = str(next(subnet_list[0].hosts())) + def _get_allocated_sliver(self, reservation: ABCReservationMixin) -> NetworkServiceSliver: + """ + Retrieve the allocated sliver from the reservation. - self.logger.debug(f"Gateway Labels: {gateway_labels}") + :param reservation: An instance of ABCReservationMixin representing the reservation to retrieve the sliver from. + :return: The allocated NetworkServiceSliver if available, otherwise None. + """ + if reservation.is_ticketing() and reservation.get_approved_resources() is not None: + return reservation.get_approved_resources().get_sliver() + if (reservation.is_active() or reservation.is_ticketed()) and reservation.get_resources() is not None: + return reservation.get_resources().get_sliver() - requested_ns.gateway = Gateway(lab=gateway_labels) + self.logger.error("Could not find the allocated Sliver - should not reach here!") - # Allocate the IP Addresses for the requested NS - requested_ns = self.__allocate_ip_address_to_ifs(requested_ns=requested_ns) - except Exception as e: - self.logger.error(f"Error in allocate_gateway_for_ns: {e}") - self.logger.error(traceback.format_exc()) - raise BrokerException(msg=f"Allocation failure for Requested Network Service: {e}") - return requested_ns + def _assign_gateway_labels(self, *, ip_network: Union[IPv4Network, IPv6Network], subnet_list: List, + requested_ns_type: str) -> Labels: + """ + Assign gateway labels based on the requested network service type. + + :param ip_network: The IP network from which subnets are derived, either IPv4Network or IPv6Network. + :param subnet_list: A list of subnets derived from the ip_network. + :param requested_ns_type: The type of the requested network service. + :return: Gateway labels populated with the appropriate subnet and IP address. 
+ """ + gateway_labels = Labels() + if requested_ns_type == ServiceType.FABNetv4: + gateway_labels.ipv4_subnet = subnet_list[0].with_prefixlen + gateway_labels.ipv4 = str(list(subnet_list[0].hosts())[0]) + + elif requested_ns_type == ServiceType.FABNetv4Ext: + gateway_labels.ipv4_subnet = ip_network.with_prefixlen + gateway_labels.ipv4 = str(subnet_list[0]) + + elif requested_ns_type in Constants.L3_FABNETv6_SERVICES: + gateway_labels.ipv6_subnet = subnet_list[0].with_prefixlen + gateway_labels.ipv6 = str(next(subnet_list[0].hosts())) + + self.logger.debug(f"Allocated Gateway Labels for Network Service: {gateway_labels}") + + return gateway_labels + + def _can_extend(self, *, subnet_list: List, requested_ns: NetworkServiceSliver): + if requested_ns.get_type() == ServiceType.FABNetv4: + allocated_subnet = ipaddress.IPv4Network(requested_ns.gateway.subnet) + if allocated_subnet not in subnet_list: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: Subnet {requested_ns.gateway.subnet} for Network Service : " + f"{requested_ns.get_type()} already in use by another reservation") + elif requested_ns.get_type() == ServiceType.FABNetv4Ext: + if requested_ns.gateway.gateway in subnet_list: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: Subnet {requested_ns.gateway.subnet} for Network Service : " + f"{requested_ns.get_type()} already in use by another reservation") + + elif requested_ns.get_type() in Constants.L3_FABNETv6_SERVICES: + allocated_subnet = ipaddress.IPv6Network(requested_ns.gateway.subnet) + if allocated_subnet not in subnet_list: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: Subnet {requested_ns.gateway.subnet} for Network Service : " + f"{requested_ns.get_type()} already in use by another reservation") def free(self, *, count: int, request: dict = None, resource: dict = None) -> dict: pass - def allocate_peered_ifs(self, *, owner_switch: NodeSliver, + def allocate_peered_ifs(self, *, rid: ID, owner_switch: NodeSliver, requested_ifs: InterfaceSliver, bqm_interface: InterfaceSliver, existing_reservations: List[ABCReservationMixin]) -> InterfaceSliver: """ @@ -449,11 +559,18 @@ def allocate_peered_ifs(self, *, owner_switch: NodeSliver, if bqm_interface.labels.vlan_range is not None: vlan_range = self.__extract_vlan_range(labels=bqm_interface.labels) - available_vlans = self.__exclude_allocated_vlans(available_vlan_range=vlan_range, bqm_ifs=bqm_interface, + available_vlans = self.__exclude_allocated_vlans(rid=rid, available_vlan_range=vlan_range, bqm_ifs=bqm_interface, existing_reservations=existing_reservations) - #vlan = str(random.choice(available_vlans)) - vlan = str(available_vlans[0]) + # Extend case + if requested_ifs.get_node_map() and requested_ifs.labels and requested_ifs.labels.vlan: + if int(requested_ifs.labels.vlan) in available_vlans: + raise BrokerException(error_code=ExceptionErrorCode.INSUFFICIENT_RESOURCES, + msg=f"Renew failed: VLAN {requested_ifs.labels.vlan} for Interface : " + f"{requested_ifs.get_name()} already in use by another reservation") + + vlan = str(random.choice(available_vlans)) + #vlan = str(available_vlans[0]) ifs_labels = Labels.update(ifs_labels, vlan=vlan) requested_ifs.labels = ifs_labels diff --git a/fabric_cf/actor/core/util/utils.py b/fabric_cf/actor/core/util/utils.py index 55ac2619..90da469b 100644 --- a/fabric_cf/actor/core/util/utils.py +++ b/fabric_cf/actor/core/util/utils.py @@ -68,6 
+68,9 @@ def node_sliver_to_str(*, sliver: NodeSliver): if ns.interface_info is not None and ns.interface_info.interfaces is not None: for i in ns.interface_info.interfaces.values(): result += f"\nIFS: {i}" + if i.interface_info and i.interface_info.interfaces: + for ch_ifc in i.interface_info.interfaces.values(): + result += f"\nSub IFS: {ch_ifc}" return result diff --git a/fabric_cf/actor/db/__init__.py b/fabric_cf/actor/db/__init__.py index b2d022c7..11f85f9b 100644 --- a/fabric_cf/actor/db/__init__.py +++ b/fabric_cf/actor/db/__init__.py @@ -24,8 +24,9 @@ # # Author: Komal Thareja (kthare10@renci.org) -from sqlalchemy import JSON, ForeignKey, LargeBinary, TIMESTAMP, Index -from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import JSON, ForeignKey, LargeBinary, Index, TIMESTAMP +from sqlalchemy.dialects.postgresql import JSONB +from sqlalchemy.orm import declarative_base from sqlalchemy import Column, String, Integer, Sequence from sqlalchemy.orm import relationship @@ -92,6 +93,17 @@ class Miscellaneous(Base): properties = Column(JSON) +class Metrics(Base): + """ + Represents Metrics Database Table + """ + __tablename__ = 'Metrics' + m_id = Column(Integer, Sequence('m_id', start=1, increment=1), autoincrement=True, primary_key=True) + user_id = Column(String, nullable=False, index=True) + project_id = Column(String, nullable=False, index=True) + slice_count = Column(Integer, nullable=False) + + class Proxies(Base): """ Represents Proxies Database Table @@ -113,6 +125,8 @@ class Reservations(Base): rsv_slc_id = Column(Integer, ForeignKey(FOREIGN_KEY_SLICE_ID), index=True) rsv_resid = Column(String, nullable=False, index=True) oidc_claim_sub = Column(String, nullable=True, index=True) + host = Column(String, nullable=True, index=True) + ip_subnet = Column(String, nullable=True, index=True) email = Column(String, nullable=True, index=True) project_id = Column(String, nullable=True, index=True) site = Column(String, nullable=True, index=True) @@ -121,8 +135,8 @@ class Reservations(Base): rsv_category = Column(Integer, nullable=False) rsv_pending = Column(Integer, nullable=False) rsv_joining = Column(Integer, nullable=False) - lease_start = Column(TIMESTAMP, nullable=True) - lease_end = Column(TIMESTAMP, nullable=True) + lease_start = Column(TIMESTAMP(timezone=True), nullable=True) + lease_end = Column(TIMESTAMP(timezone=True), nullable=True) properties = Column(LargeBinary) components = relationship('Components', back_populates='reservation') @@ -131,6 +145,8 @@ class Reservations(Base): Index('idx_resid_state', rsv_resid, rsv_state) Index('idx_slcid_state', rsv_slc_id, rsv_state) Index('idx_graph_id_res_id', rsv_graph_node_id, rsv_resid) + Index('idx_host', host) + Index('idx_ip_subnet', ip_subnet) class Slices(Base): @@ -148,8 +164,8 @@ class Slices(Base): slc_state = Column(Integer, nullable=False, index=True) slc_type = Column(Integer, nullable=False, index=True) slc_resource_type = Column(String) - lease_start = Column(TIMESTAMP, nullable=True) - lease_end = Column(TIMESTAMP, nullable=True) + lease_start = Column(TIMESTAMP(timezone=True), nullable=True) + lease_end = Column(TIMESTAMP(timezone=True), nullable=True) properties = Column(LargeBinary) Index('idx_slc_guid_name', slc_guid, slc_name) @@ -207,7 +223,7 @@ class Poas(Base): sliver_id = Column(String, nullable=True, index=True) state = Column(Integer, nullable=False, index=True) slice_id = Column(String, nullable=True, index=True) - last_update_time = Column(TIMESTAMP, nullable=True) + last_update_time = 
Column(TIMESTAMP(timezone=True), nullable=True)
     properties = Column(LargeBinary)
     Index('idx_poa_guid_email', poa_guid, email)
 
diff --git a/fabric_cf/actor/db/psql_database.py b/fabric_cf/actor/db/psql_database.py
index 5d7074af..fc4cfb0b 100644
--- a/fabric_cf/actor/db/psql_database.py
+++ b/fabric_cf/actor/db/psql_database.py
@@ -30,13 +30,13 @@
 from datetime import datetime, timezone
 from typing import List, Tuple, Dict
 
-from sqlalchemy import create_engine, desc
+from sqlalchemy import create_engine, desc, func, and_, or_
 from sqlalchemy.orm import scoped_session, sessionmaker, joinedload
 
 from fabric_cf.actor.core.common.constants import Constants
 from fabric_cf.actor.core.common.exceptions import DatabaseException
 from fabric_cf.actor.db import Base, Clients, ConfigMappings, Proxies, Units, Reservations, Slices, ManagerObjects, \
-    Miscellaneous, Actors, Delegations, Sites, Poas, Components
+    Miscellaneous, Actors, Delegations, Sites, Poas, Components, Metrics
 
 
 @contextmanager
@@ -99,6 +99,7 @@ def reset_db(self):
             session.query(Proxies).delete()
             session.query(Units).delete()
             session.query(Delegations).delete()
+            session.query(Components).delete()
             session.query(Reservations).delete()
             session.query(Slices).delete()
             session.query(ManagerObjects).delete()
@@ -106,6 +107,7 @@ def reset_db(self):
             session.query(Actors).delete()
             session.query(Sites).delete()
             session.query(Poas).delete()
+            session.query(Metrics).delete()
             session.commit()
         except Exception as e:
             session.rollback()
@@ -538,9 +540,44 @@ def create_slices_filter(*, slice_id: str = None, slice_name: str = None, projec
             filter_dict['oidc_claim_sub'] = oidc_sub
         return filter_dict
 
+    def get_slice_count(self, *, project_id: str = None, email: str = None, states: List[int] = None,
+                        oidc_sub: str = None, slc_type: List[int] = None, excluded_projects: List[str]) -> int:
+        """
+        Get slice count for an actor
+        @param project_id project id
+        @param email email
+        @param states states
+        @param oidc_sub oidc claim sub
+        @param slc_type slice type
+        @param excluded_projects excluded_projects
+        @return number of slices matching the filter criteria
+        """
+        session = self.get_session()
+        try:
+            filter_dict = self.create_slices_filter(project_id=project_id, email=email, oidc_sub=oidc_sub)
+
+            rows = session.query(Slices).filter_by(**filter_dict)
+
+            rows = rows.order_by(desc(Slices.lease_end))
+
+            if states is not None:
+                rows = rows.filter(Slices.slc_state.in_(states))
+
+            if slc_type is not None:
+                rows = rows.filter(Slices.slc_type.in_(slc_type))
+
+            if excluded_projects is not None:
+                rows = rows.filter(Slices.project_id.notin_(excluded_projects))
+
+            return rows.count()
+        except Exception as e:
+            self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
+            raise e
+
     def get_slices(self, *, slice_id: str = None, slice_name: str = None, project_id: str = None, email: str = None,
                    states: list[int] = None, oidc_sub: str = None, slc_type: list[int] = None, limit: int = None,
-                   offset: int = None, lease_end: datetime = None) -> List[dict]:
+                   offset: int = None, lease_end: datetime = None, search: str = None,
+                   exact_match: bool = False) -> List[dict]:
         """
         Get slices for an actor
         @param slice_id actor id
@@ -553,6 +590,8 @@ def get_slices(self, *, slice_id: str = None, slice_name: str = None, project_id
         @param limit limit
         @param offset offset
         @param lease_end lease_end
+        @param search: search term applied
+        @param exact_match: Exact Match for Search term
         @return list of slices
         """
         result = []
@@ -563,6 +602,16 @@ def get_slices(self, *, slice_id: str = None, slice_name: str = None, 
project_id rows = session.query(Slices).filter_by(**filter_dict) + if search: + if exact_match: + search_term = func.lower(search) + rows = rows.filter(((func.lower(Slices.email) == search_term) | + (func.lower(Slices.oidc_claim_sub) == search_term))) + else: + rows = rows.filter( + ((Slices.email.ilike("%" + search + "%")) | + (Slices.oidc_claim_sub.ilike("%" + search + "%")))) + if lease_end is not None: rows = rows.filter(Slices.lease_end < lease_end) @@ -607,7 +656,7 @@ def add_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int, r rsv_pending: int, rsv_joining: int, properties, lease_start: datetime = None, lease_end: datetime = None, rsv_graph_node_id: str = None, oidc_claim_sub: str = None, email: str = None, project_id: str = None, site: str = None, rsv_type: str = None, - components: List[Tuple[str, str, str]] = None): + components: List[Tuple[str, str, str]] = None, host: str = None, ip_subnet: str = None): """ Add a reservation @param slc_guid slice guid @@ -626,6 +675,8 @@ def add_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int, r @param site site @param rsv_type reservation type @param components list of components + @param host host + @param ip_subnet ip_subnet """ session = self.get_session() try: @@ -634,7 +685,7 @@ def add_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int, r rsv_state=rsv_state, rsv_pending=rsv_pending, rsv_joining=rsv_joining, lease_start=lease_start, lease_end=lease_end, properties=properties, oidc_claim_sub=oidc_claim_sub, email=email, - project_id=project_id, site=site, rsv_type=rsv_type) + project_id=project_id, site=site, rsv_type=rsv_type, host=host, ip_subnet=ip_subnet) if rsv_graph_node_id is not None: rsv_obj.rsv_graph_node_id = rsv_graph_node_id @@ -655,7 +706,8 @@ def add_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int, r def update_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int, rsv_state: int, rsv_pending: int, rsv_joining: int, properties, lease_start: datetime = None, lease_end: datetime = None, rsv_graph_node_id: str = None, site: str = None, - rsv_type: str = None, components: List[Tuple[str, str, str]] = None): + rsv_type: str = None, components: List[Tuple[str, str, str]] = None, + host: str = None, ip_subnet: str = None): """ Update a reservation @param slc_guid slice guid @@ -671,6 +723,8 @@ def update_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int @param site site @param rsv_type reservation type @param components list of components + @param ip_subnet ip subnet + @param host host """ session = self.get_session() try: @@ -683,6 +737,10 @@ def update_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int rsv_obj.properties = properties rsv_obj.lease_end = lease_end rsv_obj.lease_start = lease_start + if host: + rsv_obj.host = host + if ip_subnet: + rsv_obj.ip_subnet = ip_subnet if site is not None: rsv_obj.site = site if rsv_graph_node_id is not None: @@ -748,7 +806,8 @@ def remove_reservation(self, *, rsv_resid: str): raise e def create_reservation_filter(self, *, slice_id: str = None, graph_node_id: str = None, project_id: str = None, - email: str = None, oidc_sub: str = None, rid: str = None, site: str = None) -> dict: + email: str = None, oidc_sub: str = None, rid: str = None, site: str = None, + ip_subnet: str = None, host: str = None) -> dict: filter_dict = {} if slice_id is not None: @@ -766,11 +825,18 @@ def create_reservation_filter(self, *, slice_id: str = None, graph_node_id: str 
filter_dict['rsv_resid'] = rid if site is not None: filter_dict['site'] = site + if ip_subnet: + filter_dict['ip_subnet'] = ip_subnet + if host: + filter_dict['host'] = host + return filter_dict def get_reservations(self, *, slice_id: str = None, graph_node_id: str = None, project_id: str = None, email: str = None, oidc_sub: str = None, rid: str = None, states: list[int] = None, - category: list[int] = None, site: str = None, rsv_type: list[str] = None) -> List[dict]: + category: list[int] = None, site: str = None, rsv_type: list[str] = None, + start: datetime = None, end: datetime = None, ip_subnet: str = None, + host: str = None) -> List[dict]: """ Get Reservations for an actor @param slice_id slice id @@ -783,6 +849,10 @@ def get_reservations(self, *, slice_id: str = None, graph_node_id: str = None, p @param category reservation category @param site site name @param rsv_type rsv_type + @param start search for slivers with lease_end_time after start + @param end search for slivers with lease_end_time before end + @param ip_subnet ip subnet + @param host host @return list of reservations """ @@ -791,7 +861,7 @@ def get_reservations(self, *, slice_id: str = None, graph_node_id: str = None, p try: filter_dict = self.create_reservation_filter(slice_id=slice_id, graph_node_id=graph_node_id, project_id=project_id, email=email, oidc_sub=oidc_sub, - rid=rid, site=site) + rid=rid, site=site, ip_subnet=ip_subnet, host=host) rows = session.query(Reservations).filter_by(**filter_dict) if rsv_type is not None: @@ -803,6 +873,28 @@ def get_reservations(self, *, slice_id: str = None, graph_node_id: str = None, p if category is not None: rows = rows.filter(Reservations.rsv_category.in_(category)) + # Ensure start and end are datetime objects + if start and isinstance(start, str): + start = datetime.fromisoformat(start) + if end and isinstance(end, str): + end = datetime.fromisoformat(end) + + # Construct filter condition for lease_end within the given time range + if start is not None or end is not None: + lease_end_filter = True # Initialize with True to avoid NoneType comparison + if start is not None and end is not None: + lease_end_filter = or_( + and_(start <= Reservations.lease_end, Reservations.lease_end <= end), + and_(start <= Reservations.lease_start, Reservations.lease_start <= end), + and_(Reservations.lease_start <= start, Reservations.lease_end >= end) + ) + elif start is not None: + lease_end_filter = start <= Reservations.lease_end + elif end is not None: + lease_end_filter = Reservations.lease_end <= end + + rows = rows.filter(lease_end_filter) + for row in rows.all(): result.append(self.generate_dict_from_row(row=row)) except Exception as e: @@ -811,25 +903,58 @@ def get_reservations(self, *, slice_id: str = None, graph_node_id: str = None, p return result def get_components(self, *, node_id: str, states: list[int], rsv_type: list[str], component: str = None, - bdf: str = None) -> Dict[str, List[str]]: + bdf: str = None, start: datetime = None, end: datetime = None, + excludes: List[str] = None) -> Dict[str, List[str]]: + """ + Returns components matching the search criteria + @param node_id: Worker Node ID to which components belong + @param states: list of states used to find reservations + @param rsv_type: type of reservations + @param component: component name + @param bdf: Component's PCI address + @param start: start time + @param end: end time + @param excludes: list of the reservations ids to exclude + + NOTE# For P4 switches; node_id=node+renc-p4-sw component=ip+192.168.11.8 
bdf=p1 + + @return Dictionary with component name as the key and value as list of associated PCI addresses in use. + """ result = {} session = self.get_session() try: + lease_end_filter = True # Initialize with True to avoid NoneType comparison + # Construct filter condition for lease_end within the given time range + if start is not None or end is not None: + if start is not None and end is not None: + lease_end_filter = or_( + and_(start <= Reservations.lease_end, Reservations.lease_end <= end), + and_(start <= Reservations.lease_start, Reservations.lease_start <= end), + and_(Reservations.lease_start <= start, Reservations.lease_end >= end) + ) + elif start is not None: + lease_end_filter = start <= Reservations.lease_end + elif end is not None: + lease_end_filter = Reservations.lease_end <= end + # Query to retrieve Components based on specific Reservation types and states rows = ( session.query(Components) .join(Reservations, Components.reservation_id == Reservations.rsv_id) .filter(Reservations.rsv_type.in_(rsv_type)) .filter(Reservations.rsv_state.in_(states)) + .filter(lease_end_filter) .filter(Components.node_id == node_id) .options(joinedload(Components.reservation)) - # Use joinedload to efficiently load the associated Reservation ) + # Add excludes filter if excludes list is not None and not empty + if excludes: + rows = rows.filter(Reservations.rsv_resid.notin_(excludes)) + # Query Component records for reservations in the specified state and owner with the target string if component is not None and bdf is not None: - rows = rows.filter(Components.component == component, - Components.bdf == bdf) + rows = rows.filter(Components.component == component, Components.bdf == bdf) elif component is not None: rows = rows.filter(Components.component == component) elif bdf is not None: @@ -1537,6 +1662,59 @@ def get_poas(self, *, poa_guid: str = None, project_id: str = None, email: str = raise e return result + def increment_metrics(self, *, project_id: str, user_id: str, slice_count: int = 1): + """ + Add or Update Metrics + @param project_id: project_id + @param user_id: user_id + @param slice_count: slice_count + """ + session = self.get_session() + try: + metric_obj = session.query(Metrics).filter_by(project_id=project_id, user_id=user_id).one_or_none() + if not metric_obj: + metric_obj = Metrics(project_id=project_id, user_id=user_id, slice_count=slice_count) + session.add(metric_obj) + else: + metric_obj.slice_count += slice_count + session.commit() + except Exception as e: + session.rollback() + self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) + raise e + + def get_metrics(self, *, project_id: str = None, user_id: str = None, excluded_projects: List[str] = None) -> list: + """ + Get Metric count + @param project_id: project_id + @param user_id: user_id + @param excluded_projects: excluded_projects + @return list of metrics + """ + result = [] + session = self.get_session() + try: + filter_criteria = True + # Construct filter condition + if project_id and user_id: + filter_criteria = and_(Metrics.project_id == project_id, Metrics.user_id == user_id) + elif project_id is not None: + filter_criteria = and_(Metrics.project_id == project_id) + elif user_id is not None: + filter_criteria = and_(Metrics.user_id == user_id) + + if excluded_projects: + filter_criteria = and_(Metrics.project_id.notin_(excluded_projects)) + + rows = session.query(Metrics).filter(filter_criteria).all() + + for r in rows: + result.append(self.generate_dict_from_row(row=r)) + return result + except 
Exception as e: + self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) + raise e + def test(): logger = logging.getLogger('PsqlDatabase') @@ -1722,3 +1900,8 @@ def test3(): test2() #test() #test3() + + logger = logging.getLogger('PsqlDatabase') + db = PsqlDatabase(user='fabric', password='fabric', database='orchestrator', db_host='127.0.0.1:5432', + logger=logger) + comps = db.get_components(node_id="HX7LQ53") \ No newline at end of file diff --git a/fabric_cf/actor/fim/fim_helper.py b/fabric_cf/actor/fim/fim_helper.py index 51db98e5..d29e9550 100644 --- a/fabric_cf/actor/fim/fim_helper.py +++ b/fabric_cf/actor/fim/fim_helper.py @@ -23,11 +23,21 @@ # # # Author: Komal Thareja (kthare10@renci.org) +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from fabric_cf.actor.core.apis.abc_database import ABCDatabase + import logging import random -import traceback +from datetime import datetime from typing import Tuple, List, Union + +from fabric_cf.actor.fim.plugins.broker.aggregate_bqm_plugin import AggregatedBQMPlugin from fim.graph.abc_property_graph import ABCPropertyGraph, ABCGraphImporter from fim.graph.neo4j_property_graph import Neo4jGraphImporter, Neo4jPropertyGraph from fim.graph.networkx_property_graph import NetworkXGraphImporter @@ -35,6 +45,7 @@ from fim.graph.resources.abc_cbm import ABCCBMPropertyGraph from fim.graph.resources.neo4j_arm import Neo4jARMGraph from fim.graph.resources.neo4j_cbm import Neo4jCBMGraph, Neo4jCBMFactory +from fim.graph.resources.networkx_abqm import NetworkXABQMFactory from fim.graph.slices.abc_asm import ABCASMPropertyGraph from fim.graph.slices.neo4j_asm import Neo4jASMFactory from fim.graph.slices.networkx_asm import NetworkxASM, NetworkXASMFactory @@ -45,8 +56,10 @@ from fim.slivers.interface_info import InterfaceSliver, InterfaceType from fim.slivers.network_node import NodeSliver from fim.slivers.network_service import NetworkServiceSliver, ServiceType -from fim.user import ExperimentTopology, Labels, NodeType, Component, ReservationInfo +from fim.user import ExperimentTopology, NodeType, Component, ReservationInfo, Node, GraphFormat, Labels +from fim.user.composite_node import CompositeNode from fim.user.interface import Interface +from fim.user.topology import AdvertizedTopology from fabric_cf.actor.core.common.constants import Constants from fabric_cf.actor.core.kernel.reservation_states import ReservationStates @@ -123,7 +136,7 @@ class FimHelper: """ Provides methods to load Graph Models and perform various operations on them """ - __neo4j_graph_importer = None + _neo4j_graph_importer = None @staticmethod def get_neo4j_importer(neo4j_config: dict = None) -> ABCGraphImporter: @@ -132,17 +145,17 @@ def get_neo4j_importer(neo4j_config: dict = None) -> ABCGraphImporter: :return: Neo4jGraphImporter """ logger = None - if neo4j_config is None: - from fabric_cf.actor.core.container.globals import GlobalsSingleton - neo4j_config = GlobalsSingleton.get().get_config().get_global_config().get_neo4j_config() - logger = GlobalsSingleton.get().get_logger() - - if FimHelper.__neo4j_graph_importer is None: - FimHelper.__neo4j_graph_importer = Neo4jGraphImporter(url=neo4j_config["url"], user=neo4j_config["user"], - pswd=neo4j_config["pass"], - import_host_dir=neo4j_config["import_host_dir"], - import_dir=neo4j_config["import_dir"], logger=logger) - return FimHelper.__neo4j_graph_importer + if FimHelper._neo4j_graph_importer is None: + if neo4j_config is None: + from 
fabric_cf.actor.core.container.globals import GlobalsSingleton + neo4j_config = GlobalsSingleton.get().get_config().get_global_config().get_neo4j_config() + logger = GlobalsSingleton.get().get_logger() + + FimHelper._neo4j_graph_importer = Neo4jGraphImporter(url=neo4j_config["url"], user=neo4j_config["user"], + pswd=neo4j_config["pass"], + import_host_dir=neo4j_config["import_host_dir"], + import_dir=neo4j_config["import_dir"], logger=logger) + return FimHelper._neo4j_graph_importer @staticmethod def get_networkx_importer(logger: logging.Logger = None) -> ABCGraphImporter: @@ -423,17 +436,29 @@ def get_interface_sliver_mapping(ifs_node_id: str, slice_graph: ABCASMPropertyGr raise Exception(f"More than one Peer Interface Sliver found for IFS: {ifs_node_id}!") peer_ifs = next(iter(peer_interfaces)) - peer_ns_node_name, peer_ns_id = slice_graph.get_parent(node_id=peer_ifs.node_id, - rel=ABCPropertyGraph.REL_CONNECTS, - parent=ABCPropertyGraph.CLASS_NetworkService) + if peer_ifs.get_type() == InterfaceType.SubInterface: + parent_cp_node_name, parent_cp_node_id = slice_graph.get_parent(node_id=peer_ifs.node_id, + rel=ABCPropertyGraph.REL_CONNECTS, + parent=ABCPropertyGraph.CLASS_ConnectionPoint) + peer_ns_node_name, peer_ns_id = slice_graph.get_parent(node_id=parent_cp_node_id, + rel=ABCPropertyGraph.REL_CONNECTS, + parent=ABCPropertyGraph.CLASS_NetworkService) + else: + peer_ns_node_name, peer_ns_id = slice_graph.get_parent(node_id=peer_ifs.node_id, + rel=ABCPropertyGraph.REL_CONNECTS, + parent=ABCPropertyGraph.CLASS_NetworkService) component_name = None facility = False peer_site = None - if peer_ifs.get_type() in [InterfaceType.DedicatedPort, InterfaceType.SharedPort]: + if peer_ifs.get_type() in [InterfaceType.DedicatedPort, InterfaceType.SharedPort, InterfaceType.SubInterface]: component_name, component_id = slice_graph.get_parent(node_id=peer_ns_id, rel=ABCPropertyGraph.REL_HAS, parent=ABCPropertyGraph.CLASS_Component) + # Possibly P4 switch; parent will be a switch + if not component_name: + component_id = peer_ns_id + component_name = str(NodeType.Switch) node_name, node_id = slice_graph.get_parent(node_id=component_id, rel=ABCPropertyGraph.REL_HAS, parent=ABCPropertyGraph.CLASS_NetworkNode) @@ -450,6 +475,7 @@ def get_interface_sliver_mapping(ifs_node_id: str, slice_graph: ABCASMPropertyGr # Peer Network Service is FABRIC L3VPN connected to a FABRIC Site # Determine the site to which AL2S Peered Interface is connected to + for ifs in peer_ns.interface_info.interfaces.values(): # Skip the peered interface if ifs.node_id == peer_ifs.node_id: @@ -528,6 +554,9 @@ def get_site_interface_sliver(*, component: ComponentSliver or NodeSliver, local """ result = None for ns in component.network_service_info.network_services.values(): + if not ns.interface_info: + continue + # Filter on region if region is not None: result = list(filter(lambda x: (region in x.labels.region), ns.interface_info.interfaces.values())) @@ -580,23 +609,85 @@ def get_owners(*, bqm: ABCCBMPropertyGraph, node_id: str, return switch, mpls_ns, requested_ns @staticmethod - def get_parent_node(*, graph_model: ABCPropertyGraph, component: Component = None, interface: Interface = None, + def get_parent_node(*, graph_model: ABCPropertyGraph, node: Union[Component, Interface], sliver: bool = True) -> Tuple[Union[NodeSliver, NetworkServiceSliver, None], str]: - node = None - if component is not None: - node_name, node_id = graph_model.get_parent(node_id=component.node_id, rel=ABCPropertyGraph.REL_HAS, - 
parent=ABCPropertyGraph.CLASS_NetworkNode) - if sliver: - node = graph_model.build_deep_node_sliver(node_id=node_id) - elif interface is not None: - node_name, node_id = graph_model.get_parent(node_id=interface.node_id, rel=ABCPropertyGraph.REL_CONNECTS, - parent=ABCPropertyGraph.CLASS_NetworkService) - if sliver: - node = graph_model.build_deep_ns_sliver(node_id=node_id) - else: + """ + Retrieve the parent node of a given component or interface in the graph model. + + This method determines the parent node of a specified component or interface within the provided + property graph model. It can return either a node sliver or a network service sliver based on the + type of the input node and the `sliver` flag. + + :param graph_model: The property graph model used to find parent nodes. + :type graph_model: ABCPropertyGraph + + :param node: The component or interface for which to find the parent node. + :type node: Union[Component, Interface] + + :param sliver: Flag indicating whether to build and return a sliver object for the parent node. + Defaults to True. + :type sliver: bool + + :return: A tuple containing the parent node sliver (or network service sliver) and the parent node ID. + If no parent node is found, returns (None, None). + :rtype: Tuple[Union[NodeSliver, NetworkServiceSliver, None], str] + + :raises Exception: If the `node` argument is None or is neither a Component nor an Interface. + + Example: + >>> parent_node, parent_node_id = get_parent_node(graph_model=my_graph_model, node=my_component) + >>> print(parent_node, parent_node_id) + """ + if node is None: raise Exception("Invalid Arguments - component/interface both are None") - return node, node_id + parent_node = None + parent_node_id = None + + if isinstance(node, Component): + node_name, parent_node_id = graph_model.get_parent( + node_id=node.node_id, + rel=ABCPropertyGraph.REL_HAS, + parent=ABCPropertyGraph.CLASS_NetworkNode + ) + if sliver: + parent_node = graph_model.build_deep_node_sliver(node_id=parent_node_id) + elif isinstance(node, Interface): + if node.type == InterfaceType.SubInterface: + # Get the OVS Network Service attached to Sub Interface + sub_cp_nbs = graph_model.get_first_and_second_neighbor( + node_id=node.node_id, + rel1=ABCPropertyGraph.REL_CONNECTS, + node1_label=ABCPropertyGraph.CLASS_ConnectionPoint, + rel2=ABCPropertyGraph.REL_CONNECTS, + node2_label=ABCPropertyGraph.CLASS_NetworkService + ) + if len(sub_cp_nbs) == 0: + raise Exception(f"Parent (NS-OVS) for Sub Interface: {node.name} cannot be found!") + + # Get the component and node associated with Sub Interface + sub_node = graph_model.get_first_and_second_neighbor( + node_id=sub_cp_nbs[0][1], + rel1=ABCPropertyGraph.REL_HAS, + node1_label=ABCPropertyGraph.CLASS_Component, + rel2=ABCPropertyGraph.REL_HAS, + node2_label=ABCPropertyGraph.CLASS_NetworkNode + ) + if len(sub_node) == 0: + raise Exception(f"Parent for Sub Interface: {node.name} cannot be found!") + parent_node_id = sub_node[0][1] + if sliver: + parent_node = graph_model.build_deep_node_sliver(node_id=parent_node_id) + else: + node_name, parent_node_id = graph_model.get_parent( + node_id=node.node_id, + rel=ABCPropertyGraph.REL_CONNECTS, + parent=ABCPropertyGraph.CLASS_NetworkService + ) + if sliver: + parent_node = graph_model.build_deep_ns_sliver(node_id=parent_node_id) + + return parent_node, parent_node_id @staticmethod def prune_graph(*, graph_id: str) -> ExperimentTopology: @@ -611,3 +702,84 @@ def prune_graph(*, graph_id: str) -> ExperimentTopology: 
slice_topology.prune(reservation_state=ReservationStates.CloseFail.name) return slice_topology + + @staticmethod + def get_workers(site: CompositeNode) -> dict: + node_id_list = site.topo.graph_model.get_first_neighbor( + node_id=site.node_id, + rel=ABCPropertyGraph.REL_HAS, + node_label=ABCPropertyGraph.CLASS_NetworkNode, + ) + workers = dict() + for nid in node_id_list: + _, node_props = site.topo.graph_model.get_node_properties(node_id=nid) + n = Node( + name=node_props[ABCPropertyGraph.PROP_NAME], + node_id=nid, + topo=site.topo, + ) + if n.type != NodeType.Facility: + workers[n.name] = n + return workers + + @staticmethod + def build_broker_query_model(db: ABCDatabase, level_0_broker_query_model: str, level: int, + graph_format: GraphFormat = GraphFormat.GRAPHML, + start: datetime = None, end: datetime = None, + includes: str = None, excludes: str = None) -> str: + if level == 2: + sites_to_include = [s.strip().upper() for s in includes.split(",")] if includes else [] + sites_to_exclude = [s.strip().upper() for s in excludes.split(",")] if excludes else [] + + if level_0_broker_query_model and len(level_0_broker_query_model) > 0: + topology = AdvertizedTopology() + + nx_pgraph = topology.graph_model.importer.import_graph_from_string(graph_string=level_0_broker_query_model) + topology.graph_model = NetworkXABQMFactory.create(nx_pgraph) + + sites_to_remove = [] + + for site_name, site in topology.sites.items(): + if len(sites_to_include) and site_name not in sites_to_include: + sites_to_remove.append(site_name) + continue + + if len(sites_to_exclude) and site_name in sites_to_exclude: + sites_to_remove.append(site_name) + continue + + site_cap_alloc = Capacities() + + for child_name, child in site.children.items(): + allocated_caps, allocated_comp_caps = AggregatedBQMPlugin.occupied_node_capacity(db=db, + node_id=child.node_id, + start=start, + end=end) + site_cap_alloc += allocated_caps + child.set_property(pname="capacity_allocations", pval=allocated_caps) + + # merge allocated component capacities + for kt, v in allocated_comp_caps.items(): + for km, vcap in v.items(): + name = f"{kt}-{km}" + if child.components.get(name) is not None: + capacity_allocations = Capacities() + if child.components[name].capacity_allocations: + capacity_allocations = child.components[name].capacity_allocations + capacity_allocations += vcap + child.components[name].set_property(pname="capacity_allocations", + pval=capacity_allocations) + + for s in sites_to_remove: + topology.remove_node(s) + + for f_name, facility in topology.facilities.items(): + for if_name, interface in facility.interfaces.items(): + allocated_vlans = AggregatedBQMPlugin.occupied_vlans(db=db, node_id=f_name, + component_name=interface.node_id, + start=start, end=end) + if allocated_vlans and len(allocated_vlans): + label_allocations = Labels(vlan=allocated_vlans) + interface.set_property(pname="label_allocations", pval=label_allocations) + + return topology.serialize(fmt=graph_format) diff --git a/fabric_cf/actor/fim/plugins/broker/aggregate_bqm_plugin.py b/fabric_cf/actor/fim/plugins/broker/aggregate_bqm_plugin.py index 7f731a52..c265336c 100644 --- a/fabric_cf/actor/fim/plugins/broker/aggregate_bqm_plugin.py +++ b/fabric_cf/actor/fim/plugins/broker/aggregate_bqm_plugin.py @@ -23,8 +23,11 @@ # # # Author: Ilya Baldin (ibaldin@renci.org) +from __future__ import annotations -from typing import Tuple, List, Dict +import json +from datetime import datetime +from typing import Tuple, Dict, TYPE_CHECKING, List from collections import 
defaultdict
 import uuid
 
@@ -34,7 +37,7 @@
 from fim.graph.resources.abc_bqm import ABCBQMPropertyGraph
 from fim.graph.networkx_property_graph import NetworkXGraphImporter
 from fim.graph.resources.networkx_abqm import NetworkXAggregateBQM
-from fim.slivers.capacities_labels import Capacities, Flags
+from fim.slivers.capacities_labels import Capacities, Flags, Labels
 from fim.slivers.delegations import DelegationFormat
 from fim.slivers.maintenance_mode import MaintenanceInfo, MaintenanceEntry, MaintenanceState
 from fim.slivers.network_node import CompositeNodeSliver, NodeType, NodeSliver
@@ -44,6 +47,9 @@
 from fabric_cf.actor.core.kernel.reservation_states import ReservationStates
 
+if TYPE_CHECKING:
+    from fabric_cf.actor.core.apis.abc_database import ABCDatabase
+
 
 class AggregatedBQMPlugin:
     """
@@ -81,8 +87,40 @@ def __site_maintenance_info(self, *, site_name: str):
         result.finalize()
         return result
 
-    def __occupied_node_capacity(self, *, node_id: str) -> Tuple[Capacities,
-                                                                 Dict[ComponentType, Dict[str, Capacities]]]:
+    @staticmethod
+    def occupied_vlans(db: ABCDatabase, node_id: str, component_name: str, start: datetime = None,
+                       end: datetime = None) -> List[str]:
+        """
+        Get the VLANs already in use by components attached to Active/Ticketed Network Service Slivers
+        :param db: database handle
+        :param node_id: node id
+        :param component_name: component name
+        :param start: start time
+        :param end: end time
+        :return: list of allocated VLANs
+        """
+        assert node_id is not None
+        states = [ReservationStates.Active.value,
+                  ReservationStates.ActiveTicketed.value,
+                  ReservationStates.Ticketed.value,
+                  ReservationStates.Nascent.value]
+
+        result = []
+        res_type = []
+        for x in ServiceType:
+            res_type.append(str(x))
+
+        # Only get Active or Ticketing reservations
+        comps = db.get_components(node_id=node_id, component=component_name, rsv_type=res_type, states=states,
+                                  start=start, end=end)
+        if comps is not None:
+            if comps.get(component_name):
+                result = comps.get(component_name)
+        return result
+
+    @staticmethod
+    def occupied_node_capacity(*, db: ABCDatabase, node_id: str, start: datetime,
+                               end: datetime) -> Tuple[Capacities, Dict[ComponentType, Dict[str, Capacities]]]:
         """
         Figure out the total capacity occupied in the network node
         and return a tuple of capacities occupied in this node and a dict of component capacities that are
         occupied
@@ -95,9 +133,7 @@ def __occupied_node_capacity(self, *, node_id: str) -> Tuple[Capacities,
                   ReservationStates.Nascent.value]
 
         # get existing reservations for this node
-        existing_reservations = self.actor.get_plugin().get_database().get_reservations(graph_node_id=node_id,
-                                                                                        states=states)
-
+        existing_reservations = db.get_reservations(graph_node_id=node_id, states=states, start=start, end=end)
         # node capacities
         occupied_capacities = Capacities()
         occupied_component_capacities = defaultdict(dict)
@@ -114,7 +150,7 @@ def __occupied_node_capacity(self, *, node_id: str) -> Tuple[Capacities,
                 allocated_sliver = reservation.get_resources().get_sliver()
 
                 if allocated_sliver is not None:
-                    occupied_capacities = occupied_capacities + allocated_sliver.get_capacities()
+                    occupied_capacities = occupied_capacities + allocated_sliver.get_capacity_allocations()
 
                     if allocated_sliver.attached_components_info is not None:
                         for allocated_component in allocated_sliver.attached_components_info.devices.values():
@@ -142,25 +178,50 @@ def plug_produce_bqm(self, *, cbm: ABCCBMPropertyGraph, **kwargs) -> ABCBQMPrope
         if kwargs.get('query_level', None) is None or kwargs['query_level'] > 2:
             return cbm.clone_graph(new_graph_id=str(uuid.uuid4()))
 
+        includes = 
kwargs.get('includes', None) + excludes = kwargs.get('excludes', None) + + sites_to_include = [s.strip().upper() for s in includes.split(",")] if includes else [] + sites_to_exclude = [s.strip().upper() for s in excludes.split(",")] if excludes else [] + + start = kwargs.get('start', None) + end = kwargs.get('end', None) + if not self.DEBUG_FLAG: + db = self.actor.get_plugin().get_database() + else: + db = None + # do a one-pass aggregation of servers, their components and interfaces # and some flags (e.g. PTP availability) # this includes facilities nnodes = cbm.get_all_nodes_by_class(label=ABCPropertyGraph.CLASS_NetworkNode) slivers_by_site = defaultdict(list) + p4s_by_site = defaultdict(list) for n in nnodes: # build deep slivers for each advertised server, aggregate by site node_sliver = cbm.build_deep_node_sliver(node_id=n) slivers_by_site[node_sliver.site].append(node_sliver) + if node_sliver.get_type() == NodeType.Switch and "p4" in node_sliver.get_name(): + p4s_by_site[node_sliver.site].append(node_sliver) # create a new blank Aggregated BQM NetworkX graph - abqm = NetworkXAggregateBQM(graph_id=str(uuid.uuid4()), - importer=NetworkXGraphImporter(logger=self.logger), - logger=self.logger) + if kwargs['query_level'] == 0: + abqm = NetworkXAggregateBQM(graph_id=cbm.graph_id, + importer=NetworkXGraphImporter(logger=self.logger), + logger=self.logger) + else: + abqm = NetworkXAggregateBQM(graph_id=str(uuid.uuid4()), + importer=NetworkXGraphImporter(logger=self.logger), + logger=self.logger) site_to_composite_node_id = dict() site_to_ns_node_id = dict() facilities_by_site = defaultdict(list) for s, ls in slivers_by_site.items(): + if len(sites_to_include) and s not in sites_to_include: + continue + if len(sites_to_exclude) and s in sites_to_exclude: + continue # add up capacities and delegated capacities, skip labels for now # count up components and figure out links between site @@ -196,24 +257,30 @@ def plug_produce_bqm(self, *, cbm: ABCCBMPropertyGraph, **kwargs) -> ABCBQMPrope worker_sliver.resource_type = NodeType.Server worker_sliver.set_site(s) worker_sliver.node_id = str(uuid.uuid4()) - if self.DEBUG_FLAG: + if self.DEBUG_FLAG or kwargs['query_level'] == 0: # for debugging and running in a test environment + # also for level 0; only return capacity information allocated_comp_caps = dict() + worker_sliver.node_id = sliver.node_id else: # query database for everything taken on this node - allocated_caps, allocated_comp_caps = self.__occupied_node_capacity(node_id=sliver.node_id) + allocated_caps, allocated_comp_caps = self.occupied_node_capacity(db=db, node_id=sliver.node_id, + start=start, end=end) site_sliver.capacity_allocations = site_sliver.capacity_allocations + allocated_caps worker_sliver.capacity_allocations = allocated_caps # get the location if available if loc is None: loc = sliver.get_location() + worker_sliver.set_location(loc) # look at flags flags = sliver.get_flags() if flags and not ptp and flags.ptp: ptp = True + site_sliver.set_flags(Flags(ptp=ptp)) + # calculate available node capacities based on delegations if sliver.get_capacity_delegations() is not None: # CBM only has one delegation if it has one @@ -223,8 +290,12 @@ def plug_produce_bqm(self, *, cbm: ABCCBMPropertyGraph, **kwargs) -> ABCBQMPrope site_sliver.capacities = site_sliver.capacities + \ delegation.get_details() worker_sliver.capacities = delegation.get_details() + # This for the case when BQM is generated from Orchestrator + else: + site_sliver.capacities += sliver.get_capacities() + 
worker_sliver.capacities = sliver.get_capacities()
 
-            # collect available components in lists by type and model for the site (for later aggregation)
+            # collect available components in lists by type and model for the site (for later aggregation)
             if sliver.attached_components_info is None:
                 continue
             worker_sliver.attached_components_info = AttachedComponentsInfo()
@@ -272,8 +343,32 @@ def plug_produce_bqm(self, *, cbm: ABCCBMPropertyGraph, **kwargs) -> ABCBQMPrope
             abqm.add_node(node_id=site_sliver.node_id, label=ABCPropertyGraph.CLASS_CompositeNode,
                           props=site_props)
 
+            p4s = p4s_by_site.get(site_sliver.site)
+            if p4s:
+                for p4 in p4s:
+                    p4_sliver = NodeSliver()
+                    p4_sliver.node_id = str(uuid.uuid4())
+                    p4_sliver.set_type(resource_type=NodeType.Switch)
+                    p4_sliver.set_name(resource_name=p4.get_name())
+                    p4_sliver.set_site(site=p4.get_site())
+                    p4_sliver.capacities = Capacities()
+                    p4_sliver.capacity_allocations = Capacities()
+                    p4_sliver.capacities += p4.get_capacities()
+                    if not self.DEBUG_FLAG:
+                        # query database for everything taken on this node
+                        allocated_caps, allocated_comp_caps = self.occupied_node_capacity(db=db, node_id=p4.node_id,
+                                                                                          start=start, end=end)
+                        if allocated_caps:
+                            p4_sliver.capacity_allocations = p4_sliver.capacity_allocations + allocated_caps
+                    p4_props = abqm.node_sliver_to_graph_properties_dict(p4_sliver)
+                    node_id = p4.node_id
+                    if kwargs['query_level'] != 0:
+                        node_id = p4_sliver.node_id
+                    abqm.add_node(node_id=node_id, label=ABCPropertyGraph.CLASS_NetworkNode, props=p4_props)
+                    abqm.add_link(node_a=site_sliver.node_id, rel=ABCPropertyGraph.REL_HAS, node_b=node_id)
+
             # Add per worker metrics for query level 2
-            if kwargs['query_level'] == 2:
+            if kwargs['query_level'] == 2 or kwargs['query_level'] == 0:
                 for w in workers:
                     # Add workers
                     abqm.add_node(node_id=w.node_id, label=ABCPropertyGraph.CLASS_NetworkNode,
@@ -331,6 +426,11 @@ def plug_produce_bqm(self, *, cbm: ABCCBMPropertyGraph, **kwargs) -> ABCBQMPrope
             sink_site = l[4]
             source_cp = l[5]
             sink_cp = l[6]
+            # Exclude the sites requested to be filtered
+            if includes and (source_site not in includes or sink_site not in includes):
+                continue
+            if excludes and (source_site in excludes or sink_site in excludes):
+                continue
             _, cbm_source_cp_props = cbm.get_node_properties(node_id=source_cp)
             _, cbm_sink_cp_props = cbm.get_node_properties(node_id=sink_cp)
             _, cbm_link_props = cbm.get_node_properties(node_id=link)
@@ -380,93 +480,117 @@ def plug_produce_bqm(self, *, cbm: ABCCBMPropertyGraph, **kwargs) -> ABCBQMPrope
         for s, lf in facilities_by_site.items():
             # multiple facilities per site possible
             for fac_sliver in lf:
-                fac_nbs = cbm.get_first_and_second_neighbor(node_id=fac_sliver.node_id,
-                                                            rel1=ABCPropertyGraph.REL_HAS,
-                                                            node1_label=ABCPropertyGraph.CLASS_NetworkService,
-                                                            rel2=ABCPropertyGraph.REL_CONNECTS,
-                                                            node2_label=ABCPropertyGraph.CLASS_ConnectionPoint)
-                try:
-                    fac_ns_node_id = fac_nbs[0][0]
-                    fac_cp_node_id = fac_nbs[0][1]
-                except KeyError:
+                ns_list = cbm.get_first_neighbor(node_id=fac_sliver.node_id,
+                                                 rel=ABCPropertyGraph.REL_HAS,
+                                                 node_label=ABCPropertyGraph.CLASS_NetworkService)
+
+                if not ns_list or not len(ns_list):
                     if self.logger:
-                        self.logger.warning(f'Unable to trace facility ConnectionPoint for '
+                        self.logger.warning(f'Unable to trace facility NetworkService for '
                                             f'facility {fac_sliver.resource_name}, continuing')
                     else:
-                        print(f'Unable to trace facility ConnectionPoint for '
+                        print(f'Unable to trace facility NetworkService for '
                               f'facility {fac_sliver.resource_name}, continuing')
                     continue
- _, fac_props = cbm.get_node_properties(node_id=fac_sliver.node_id) - _, fac_ns_props = cbm.get_node_properties(node_id=fac_ns_node_id) - _, fac_cp_props = cbm.get_node_properties(node_id=fac_cp_node_id) - # filter down only the needed properties then recreate the structure of facility in ABQM + _, fac_props = cbm.get_node_properties(node_id=fac_sliver.node_id) new_fac_props = {ABCPropertyGraph.PROP_NAME: fac_props[ABCPropertyGraph.PROP_NAME], ABCPropertyGraph.PROP_TYPE: fac_props[ABCPropertyGraph.PROP_TYPE], ABCPropertyGraph.PROP_SITE: s } abqm.add_node(node_id=fac_sliver.node_id, label=ABCPropertyGraph.CLASS_NetworkNode, props=new_fac_props) + + fac_ns_node_id = ns_list[0] + _, fac_ns_props = cbm.get_node_properties(node_id=fac_ns_node_id) + + # filter down only the needed properties then recreate the structure of facility in ABQM new_ns_props = {ABCPropertyGraph.PROP_NAME: fac_ns_props[ABCPropertyGraph.PROP_NAME], ABCPropertyGraph.PROP_TYPE: fac_ns_props[ABCPropertyGraph.PROP_TYPE] } + abqm.add_node(node_id=fac_ns_node_id, label=ABCPropertyGraph.CLASS_NetworkService, props=new_ns_props) - new_cp_props = {ABCPropertyGraph.PROP_NAME: fac_cp_props[ABCPropertyGraph.PROP_NAME], - ABCPropertyGraph.PROP_TYPE: fac_cp_props[ABCPropertyGraph.PROP_TYPE], - ABCPropertyGraph.PROP_LABELS: fac_cp_props.get(ABCPropertyGraph.PROP_LABELS), - ABCPropertyGraph.PROP_CAPACITIES: fac_cp_props.get(ABCPropertyGraph.PROP_CAPACITIES) - } - new_cp_props = {k: v for (k, v) in new_cp_props.items() if v} - abqm.add_node(node_id=fac_cp_node_id, label=ABCPropertyGraph.CLASS_ConnectionPoint, - props=new_cp_props) + abqm.add_link(node_a=fac_sliver.node_id, rel=ABCPropertyGraph.REL_HAS, node_b=fac_ns_node_id) - abqm.add_link(node_a=fac_ns_node_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_cp_node_id) - - # trace the link to a switch port/ConnectionPoint and replicate them for simplicity - fac_cp_nbs = cbm.get_first_and_second_neighbor(node_id=fac_cp_node_id, - rel1=ABCPropertyGraph.REL_CONNECTS, - node1_label=ABCPropertyGraph.CLASS_Link, - rel2=ABCPropertyGraph.REL_CONNECTS, - node2_label=ABCPropertyGraph.CLASS_ConnectionPoint) - if len(fac_cp_nbs) == 0 or len(fac_cp_nbs) > 1: + + fac_ns_cp_list = cbm.get_all_ns_or_link_connection_points(link_id=ns_list[0]) + if not fac_ns_cp_list: if self.logger: - self.logger.warning(f'Unable to trace switch port from Facility port ' - f'for facility {fac_sliver.resource_name} {fac_cp_nbs}') + self.logger.warning(f'Unable to trace facility ConnectionPoint for ' + f'facility {fac_sliver.resource_name}, continuing') else: - print(f'Unable to trace switch port from Facility port ' - f'for facility {fac_sliver.resource_name} {fac_cp_nbs}') + print(f'Unable to trace facility ConnectionPoint for ' + f'facility {fac_sliver.resource_name}, continuing') continue - fac_link_id = fac_cp_nbs[0][0] - fac_sp_id = fac_cp_nbs[0][1] - - _, fac_link_props = cbm.get_node_properties(node_id=fac_link_id) - # selectively replicate link properties - new_link_props = {ABCPropertyGraph.PROP_NAME: fac_link_props[ABCPropertyGraph.PROP_NAME], - ABCPropertyGraph.PROP_TYPE: fac_link_props[ABCPropertyGraph.PROP_TYPE], - ABCPropertyGraph.PROP_LAYER: fac_link_props[ABCPropertyGraph.PROP_LAYER] - } - abqm.add_node(node_id=fac_link_id, label=ABCPropertyGraph.CLASS_Link, - props=new_link_props) - try: - abqm.get_node_properties(node_id=fac_sp_id) - except PropertyGraphQueryException: - # if the node doesn't exist we need to create it (it could have been created in the first pass) - _, fac_sp_props = 
cbm.get_node_properties(node_id=fac_sp_id) - new_sp_props = {ABCPropertyGraph.PROP_NAME: fac_sp_props[ABCPropertyGraph.PROP_NAME], - ABCPropertyGraph.PROP_TYPE: fac_sp_props[ABCPropertyGraph.PROP_TYPE], - ABCPropertyGraph.PROP_CAPACITIES: fac_sp_props.get( - ABCPropertyGraph.PROP_CAPACITIES), - ABCPropertyGraph.PROP_LABELS: fac_sp_props.get(ABCPropertyGraph.PROP_LABELS) + for fac_cp_node_id in fac_ns_cp_list: + _, fac_cp_props = cbm.get_node_properties(node_id=fac_cp_node_id) + + new_cp_props = {ABCPropertyGraph.PROP_NAME: fac_cp_props[ABCPropertyGraph.PROP_NAME], + ABCPropertyGraph.PROP_TYPE: fac_cp_props[ABCPropertyGraph.PROP_TYPE], + ABCPropertyGraph.PROP_LABELS: fac_cp_props.get(ABCPropertyGraph.PROP_LABELS), + ABCPropertyGraph.PROP_CAPACITIES: fac_cp_props.get(ABCPropertyGraph.PROP_CAPACITIES) } - new_sp_props = {k: v for (k, v) in new_sp_props.items() if v} - abqm.add_node(node_id=fac_sp_id, label=ABCPropertyGraph.CLASS_ConnectionPoint, - props=new_sp_props) - - # link these together - abqm.add_link(node_a=fac_cp_node_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_link_id) - abqm.add_link(node_a=fac_link_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_sp_id) - abqm.add_link(node_a=fac_sp_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=site_to_ns_node_id[s]) + + if not self.DEBUG_FLAG and kwargs['query_level'] != 0: + allocated_vlans = self.occupied_vlans(db=db, node_id=fac_sliver.resource_name, + component_name=fac_cp_node_id, start=start, end=end) + + if allocated_vlans and len(allocated_vlans): + labels = Labels(vlan=allocated_vlans) + new_cp_props[ABCPropertyGraph.PROP_LABEL_ALLOCATIONS] = labels.to_json() + + new_cp_props = {k: v for (k, v) in new_cp_props.items() if v} + + abqm.add_node(node_id=fac_cp_node_id, label=ABCPropertyGraph.CLASS_ConnectionPoint, + props=new_cp_props) + abqm.add_link(node_a=fac_ns_node_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_cp_node_id) + + # trace the link to a switch port/ConnectionPoint and replicate them for simplicity + fac_cp_nbs = cbm.get_first_and_second_neighbor(node_id=fac_cp_node_id, + rel1=ABCPropertyGraph.REL_CONNECTS, + node1_label=ABCPropertyGraph.CLASS_Link, + rel2=ABCPropertyGraph.REL_CONNECTS, + node2_label=ABCPropertyGraph.CLASS_ConnectionPoint) + if len(fac_cp_nbs) == 0 or len(fac_cp_nbs) > 1: + if self.logger: + self.logger.warning(f'Unable to trace switch port from Facility port ' + f'for facility {fac_sliver.resource_name} {fac_cp_nbs}') + else: + print(f'Unable to trace switch port from Facility port ' + f'for facility {fac_sliver.resource_name} {fac_cp_nbs}') + continue + + fac_link_id = fac_cp_nbs[0][0] + fac_sp_id = fac_cp_nbs[0][1] + + _, fac_link_props = cbm.get_node_properties(node_id=fac_link_id) + # selectively replicate link properties + new_link_props = {ABCPropertyGraph.PROP_NAME: fac_link_props[ABCPropertyGraph.PROP_NAME], + ABCPropertyGraph.PROP_TYPE: fac_link_props[ABCPropertyGraph.PROP_TYPE], + ABCPropertyGraph.PROP_LAYER: fac_link_props[ABCPropertyGraph.PROP_LAYER] + } + abqm.add_node(node_id=fac_link_id, label=ABCPropertyGraph.CLASS_Link, + props=new_link_props) + try: + new_sp_props = abqm.get_node_properties(node_id=fac_sp_id) + except PropertyGraphQueryException: + # if the node doesn't exist we need to create it (it could have been created in the first pass) + _, fac_sp_props = cbm.get_node_properties(node_id=fac_sp_id) + new_sp_props = {ABCPropertyGraph.PROP_NAME: fac_sp_props[ABCPropertyGraph.PROP_NAME], + ABCPropertyGraph.PROP_TYPE: fac_sp_props[ABCPropertyGraph.PROP_TYPE], + 
ABCPropertyGraph.PROP_CAPACITIES: fac_sp_props.get( + ABCPropertyGraph.PROP_CAPACITIES), + ABCPropertyGraph.PROP_LABELS: fac_sp_props.get(ABCPropertyGraph.PROP_LABELS) + } + new_sp_props = {k: v for (k, v) in new_sp_props.items() if v} + abqm.add_node(node_id=fac_sp_id, label=ABCPropertyGraph.CLASS_ConnectionPoint, + props=new_sp_props) + + # link these together + abqm.add_link(node_a=fac_cp_node_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_link_id) + abqm.add_link(node_a=fac_link_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=fac_sp_id) + abqm.add_link(node_a=fac_sp_id, rel=ABCPropertyGraph.REL_CONNECTS, node_b=site_to_ns_node_id[s]) return abqm diff --git a/fabric_cf/actor/handlers/no_op_handler.py b/fabric_cf/actor/handlers/no_op_handler.py index 9c49cdb0..c61bcc13 100644 --- a/fabric_cf/actor/handlers/no_op_handler.py +++ b/fabric_cf/actor/handlers/no_op_handler.py @@ -28,7 +28,7 @@ from typing import Tuple from fim.slivers.attached_components import ComponentType -from fim.slivers.network_node import NodeSliver +from fim.slivers.network_node import NodeSliver, NodeType from fim.slivers.network_service import NetworkServiceSliver, ServiceType from fabric_cf.actor.core.common.constants import Constants @@ -125,7 +125,8 @@ def create(self, unit: ConfigToken) -> Tuple[dict, ConfigToken]: time.sleep(10) if isinstance(sliver, NodeSliver): - self.__process_node_sliver(sliver=sliver) + if sliver.get_type() == NodeType.VM: + self.__process_node_sliver(sliver=sliver) elif isinstance(sliver, NetworkServiceSliver): self.__process_ns_sliver(sliver=sliver) diff --git a/fabric_cf/actor/test/schema/message.avsc b/fabric_cf/actor/test/schema/message.avsc index d08941b9..a6e96b09 100644 --- a/fabric_cf/actor/test/schema/message.avsc +++ b/fabric_cf/actor/test/schema/message.avsc @@ -638,6 +638,11 @@ "type": ["null", "string"], "default": null }, + { + "name": "site", + "type": ["null", "string"], + "default": null + }, { "name": "sequence", "type": ["null", "int"], @@ -817,6 +822,117 @@ "default": null } ] + }, + { + "namespace": "fabric.cf.model", + "name": "PoaRequest", + "type": "record", + "fields": [{ + "name": "name", + "type": "string" + }, + { + "name": "callback_topic", + "type": "string" + }, + { + "name": "message_id", + "type": "string" + }, + { + "name": "poa_id", + "type": "string" + }, + { + "name": "project_id", + "type": "string" + }, + { + "name": "operation", + "type": "string" + }, + { + "name": "rid", + "type": "string" + }, + { + "name": "slice_id", + "type": "string" + }, + { + "name": "sequence", + "type": "int" + }, + { + "name": "auth", + "type": "fabric.cf.model.AuthRecord" + }, + { + "name": "id_token", + "type": ["null", "string"], + "default": null + }, + { + "name": "vcpu_cpu_map", + "type": ["null", "bytes"], + "default": null + }, + { + "name": "node_set", + "type": ["null", "bytes"], + "default": null + }, + { + "name": "keys", + "type": ["null", "bytes"], + "default": null + } + ] + }, + { + "namespace": "fabric.cf.model", + "type": "record", + "name": "PoaInInfoRecord", + "fields": [ + { + "name": "operation", + "type": "string" + }, + { + "name": "poa_id", + "type": "string" + }, + { + "name": "state", + "type": "string" + }, + { + "name": "rid", + "type": "string" + }, + { + "name": "slice_id", + "type": "string" + }, + { + "name": "project_id", + "type": "string" + }, + { + "name": "error", + "type": ["null", "string"], + "default": null + }, + { + "name": "auth", + "type": "fabric.cf.model.AuthRecord" + }, + { + "name": "info", + "type": ["null", "bytes"], + 
"default": null + } + ] }, { "namespace": "fabric.cf.model", @@ -874,7 +990,6 @@ }], "default": null }, - { "name": "proxies", "type": ["null", { @@ -888,6 +1003,14 @@ "type": ["null", "fabric.cf.model.BrokerQueryModelRecord"], "default": null }, + { + "name": "poas", + "type": ["null", { + "type":"array", + "items" : "fabric.cf.model.PoaInInfoRecord" + }], + "default": null + }, { "name": "delegations", "type": ["null", { @@ -1073,6 +1196,16 @@ "name": "site", "type": ["null", "string"], "default": null + }, + { + "name": "start", + "type": ["null", "string"], + "default": null + }, + { + "name": "end", + "type": ["null", "string"], + "default": null } ] }, diff --git a/fabric_cf/actor/test/test_abqm.py b/fabric_cf/actor/test/test_abqm.py index 56a65aeb..bc9f7e4e 100644 --- a/fabric_cf/actor/test/test_abqm.py +++ b/fabric_cf/actor/test/test_abqm.py @@ -4,6 +4,8 @@ from fim.graph.neo4j_property_graph import Neo4jGraphImporter, Neo4jPropertyGraph from fim.graph.resources.neo4j_arm import Neo4jARMGraph from fim.graph.resources.neo4j_cbm import Neo4jCBMFactory, Neo4jCBMGraph +from fim.user import NodeType + from fabric_cf.actor.fim.plugins.broker.aggregate_bqm_plugin import AggregatedBQMPlugin """ @@ -28,9 +30,10 @@ class ABQM_Test(unittest.TestCase): def test_abqm(self): self.n4j_imp.delete_all_graphs() # these are produced by substrate tests - site_ads = ['../../../neo4j/Network-ad.graphml', '../../../neo4j/LBNL-ad.graphml', - '../../../neo4j/RENCI-ad.graphml', - '../../../neo4j/UKY-ad.graphml'] + site_ads = ['../../../neo4j/Network-dev.graphml', + '../../../neo4j/LBNL.graphml', + '../../../neo4j/RENC.graphml', + '../../../neo4j/UKY.graphml'] cbm = Neo4jCBMGraph(importer=self.n4j_imp) @@ -106,6 +109,38 @@ def test_abqm(self): with open('cbm.graphml', 'w') as f: f.write(cbm_string) + plain_cbm = self.n4j_imp.import_graph_from_string_direct(graph_string=abqm_level2_string) + temp = Neo4jCBMFactory.create(Neo4jPropertyGraph(graph_id=plain_cbm.graph_id, + importer=self.n4j_imp)) + + site_node_ids = {} + for s in temp.get_all_nodes_by_class_and_type(label=ABCPropertyGraph.CLASS_CompositeNode, + ntype=str(NodeType.Server)): + labels, props = temp.get_node_properties(node_id=s) + site_node_ids[props.get('Site')] = s + + ns_node_ids = {} + for s in temp.get_all_network_service_nodes(): + labels, props = temp.get_node_properties(node_id=s) + ns_node_ids[props.get('Name')] = s + + path = temp.get_nodes_on_path_with_hops(node_a=site_node_ids['UKY'], node_z=site_node_ids['LBNL'], + hops=[ns_node_ids['RENC_ns']]) + + assert(len(path) != 0) + from fim.user.topology import AdvertizedTopology + substrate = AdvertizedTopology() + substrate.load(graph_string=abqm_level2_string) + + uky_node_id = substrate.sites.get("UKY").node_id + lbnl_node_id = substrate.sites.get("LBNL").node_id + renc_ns_node_id = substrate.network_services.get("RENC_ns").node_id + + path = substrate.graph_model.get_nodes_on_path_with_hops(node_a=uky_node_id, node_z=lbnl_node_id, + hops=[renc_ns_node_id]) + + assert(len(path) != 0) + self.n4j_imp.delete_all_graphs() def test_cbm(self): diff --git a/fabric_cf/authority/config.site.am.yaml b/fabric_cf/authority/config.site.am.yaml index d42a9ad6..1d754062 100644 --- a/fabric_cf/authority/config.site.am.yaml +++ b/fabric_cf/authority/config.site.am.yaml @@ -143,9 +143,18 @@ actor: class: VMHandler properties: config.properties.file: /etc/fabric/actor/config/vm_handler_config.yml + - resource: + type: Switch + label: Switch AM + description: Switch AM + handler: + module: 
fabric_am.handlers.switch_handler + class: SwitchHandler + properties: + config.properties.file: /etc/fabric/actor/config/switch_handler_config.yml controls: - control: - type: VM, Container, Baremetal + type: VM, Container, Baremetal, Switch module: fabric_cf.actor.core.policy.network_node_control class: NetworkNodeControl peers: diff --git a/fabric_cf/authority/docker-compose.yml b/fabric_cf/authority/docker-compose.yml index 3cecd2dc..eae01309 100644 --- a/fabric_cf/authority/docker-compose.yml +++ b/fabric_cf/authority/docker-compose.yml @@ -52,7 +52,7 @@ services: network: host context: ../../../ dockerfile: Dockerfile-auth - image: authority:1.6.2 + image: authority:1.7.0 container_name: site1-am restart: always depends_on: @@ -65,6 +65,7 @@ services: - ./arm.graphml:/etc/fabric/actor/config/neo4j/arm.graphml - ./logs/:/var/log/actor - ./vm_handler_config.yml:/etc/fabric/actor/config/vm_handler_config.yml + - ./switch_handler_config.yml:/etc/fabric/actor/config/switch_handler_config.yml - ../../../../AMHandlers/fabric_am/playbooks:/etc/fabric/actor/playbooks - ../../../../AMHandlers/fabric_am/playbooks/inventory:/etc/fabric/actor/playbooks/inventory - ~/.ssh:/root/.ssh diff --git a/fabric_cf/authority/switch_handler_config.yml b/fabric_cf/authority/switch_handler_config.yml new file mode 100644 index 00000000..c5a3fbe4 --- /dev/null +++ b/fabric_cf/authority/switch_handler_config.yml @@ -0,0 +1,30 @@ +# MIT License +# +# Copyright (c) 2020 FABRIC Testbed +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+# +# +# Author: Komal Thareja (kthare10@renci.org) +playbooks: + location: /etc/fabric/actor/playbooks + inventory_location: /etc/fabric/actor/playbooks/inventory + Switch: head_switch_provisioning.yml + cleanup: + ALL: head_switch_provisioning.yml \ No newline at end of file diff --git a/fabric_cf/authority/test/test.yaml b/fabric_cf/authority/test/test.yaml index 125ffea1..41a638d6 100644 --- a/fabric_cf/authority/test/test.yaml +++ b/fabric_cf/authority/test/test.yaml @@ -137,9 +137,16 @@ actor: handler: module: fabric_cf.actor.handlers.no_op_handler class: NoOpHandler + - resource: + type: Switch + label: P4 Switch AM + description: P4 Switch AM + handler: + module: fabric_cf.actor.handlers.no_op_handler + class: NoOpHandler controls: - control: - type: VM, Container, Baremetal + type: VM, Container, Baremetal, Switch module: fabric_cf.actor.core.policy.network_node_control class: NetworkNodeControl diff --git a/fabric_cf/authority/vm_handler_config.yml b/fabric_cf/authority/vm_handler_config.yml index 04e1c7ba..85bcaa67 100644 --- a/fabric_cf/authority/vm_handler_config.yml +++ b/fabric_cf/authority/vm_handler_config.yml @@ -34,23 +34,26 @@ runtime: #max_flavor: fabric.c4.m8.d10 images: default_centos8_stream: centos - default_centos9_stream: cloud-user - default_centos_7: centos - default_centos_8: centos - default_debian_10: debian + default_centos9_stream: cloud-user default_debian_11: debian - default_fedora_35: fedora - default_fedora_36: fedora - default_fedora_37: fedora + default_debian_12: debian + default_fedora_39: fedora + default_fedora_40: fedora + default_freebsd_13_zfs: freebsd + default_freebsd_14_zfs: freebsd + default_kali: kali + default_openbsd_7: openbsd default_rocky_8: rocky default_rocky_9: rocky - default_ubuntu_18: ubuntu default_ubuntu_20: ubuntu - default_ubuntu_21: ubuntu default_ubuntu_22: ubuntu + default_ubuntu_24: ubuntu docker_rocky_8: rocky + docker_rocky_9: rocky docker_ubuntu_20: ubuntu docker_ubuntu_22: ubuntu + attestable_bmv2_v1_ubuntu_20: ubuntu + attestable_bmv2_v2_ubuntu_20: ubuntu playbooks: location: /etc/fabric/actor/playbooks inventory_location: /etc/fabric/actor/playbooks/inventory diff --git a/fabric_cf/broker/config.broker.yaml b/fabric_cf/broker/config.broker.yaml index 03fa612f..0a8e9f2d 100644 --- a/fabric_cf/broker/config.broker.yaml +++ b/fabric_cf/broker/config.broker.yaml @@ -125,7 +125,7 @@ neo4j: bqm: kafka-topic: broker-resource-usage # in seconds (default set to 2 hours) - publish-interval: 7200 + publish-interval: kafka-sasl-producer-username: kafka-sasl-producer-password: @@ -139,10 +139,17 @@ actor: module: fabric_cf.actor.core.policy.broker_simpler_units_policy class: BrokerSimplerUnitsPolicy properties: - algorithm: FirstFit + algorithm: + FirstFit: # Default policy for all sites + enabled: true + Random: # Random policy for specific sites + enabled: true + sites: # Specify the sites where Random policy should be used + - EDUKY + # Add more sites as needed controls: - control: - type: VM, Container, Baremetal + type: VM, Container, Baremetal, Switch class: NetworkNodeInventory module: fabric_cf.actor.core.policy.network_node_inventory - control: diff --git a/fabric_cf/broker/docker-compose.yml b/fabric_cf/broker/docker-compose.yml index dc8d1151..7df2cb2a 100644 --- a/fabric_cf/broker/docker-compose.yml +++ b/fabric_cf/broker/docker-compose.yml @@ -54,7 +54,7 @@ services: build: context: ../../../ dockerfile: Dockerfile-broker - image: broker:1.6.2 + image: broker:1.7.0 container_name: broker restart: always networks: 
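The nested algorithm mapping added to config.broker.yaml above replaces the earlier single-string setting (algorithm: FirstFit): FirstFit stays the site-agnostic default, while Random is applied only to the listed sites (EDUKY in the example). Below is a minimal sketch of how such a mapping could be resolved to a per-site policy name; the select_algorithm helper and the plain-dict handling are illustrative assumptions, not the actual BrokerSimplerUnitsPolicy implementation.

# Illustrative sketch only (not fabric_cf code): resolve the per-site 'algorithm'
# mapping from the broker policy properties to a policy name for a given site.
def select_algorithm(properties: dict, site: str) -> str:
    algorithm = properties.get("algorithm", "FirstFit")
    # Backward compatible with the old single-string form, e.g. algorithm: FirstFit
    if isinstance(algorithm, str):
        return algorithm
    chosen = "FirstFit"
    for name, cfg in algorithm.items():
        if not cfg or not cfg.get("enabled", False):
            continue
        sites = cfg.get("sites")
        if sites is None:
            chosen = name      # site-agnostic default (FirstFit above)
        elif site in sites:
            return name        # site-specific override (Random for EDUKY above)
    return chosen

# Example: select_algorithm(props, "EDUKY") -> "Random"; select_algorithm(props, "RENC") -> "FirstFit"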
diff --git a/fabric_cf/broker/test/broker.py b/fabric_cf/broker/test/broker.py index 46e4b5fc..bf28498d 100644 --- a/fabric_cf/broker/test/broker.py +++ b/fabric_cf/broker/test/broker.py @@ -23,6 +23,7 @@ # # # Author: Komal Thareja (kthare10@renci.org) +import os import time import traceback @@ -33,6 +34,8 @@ from fabric_cf.actor.core.container.globals import Globals, GlobalsSingleton from fabric_cf.broker.core.broker_kernel import BrokerKernelSingleton +DEBUG_MODE = False + def main(): """ @@ -51,6 +54,42 @@ def main(): prometheus_port = int(runtime_config.get(Constants.PROPERTY_CONF_PROMETHEUS_REST_PORT, None)) prometheus_client.start_http_server(prometheus_port) + actor = GlobalsSingleton.get().get_container().get_actor() + policy = actor.get_policy() + + if DEBUG_MODE: + site_ads = ['../../../neo4j/Network-dev.graphml', + '../../../neo4j/LBNL.graphml', + '../../../neo4j/RENC.graphml', + '../../../neo4j/UKY.graphml', + '../../../neo4j/AL2S.graphml'] + + adm_ids = dict() + for ad in site_ads: + from fabric_cf.actor.fim.fim_helper import FimHelper + n4j_imp = FimHelper.get_neo4j_importer() + plain_neo4j = n4j_imp.import_graph_from_file_direct(graph_file=ad) + print(f"Validating ARM graph {ad}") + plain_neo4j.validate_graph() + + from fim.graph.resources.neo4j_arm import Neo4jARMGraph + from fim.graph.neo4j_property_graph import Neo4jPropertyGraph + site_arm = Neo4jARMGraph(graph=Neo4jPropertyGraph(graph_id=plain_neo4j.graph_id, + importer=n4j_imp)) + # generate a dict of ADMs from site graph ARM + site_adms = site_arm.generate_adms() + print('ADMS' + str(site_adms.keys())) + + # desired ADM is under 'primary' + site_adm = site_adms['primary'] + policy.combined_broker_model.merge_adm(adm=site_adm) + + print('Deleting ADM and ARM graphs') + for adm in site_adms.values(): + adm_ids[ad] = adm.graph_id + adm.delete_graph() + site_arm.delete_graph() + while True: time.sleep(0.0001) BrokerKernelSingleton.get().do_periodic() diff --git a/fabric_cf/broker/test/test.yaml b/fabric_cf/broker/test/test.yaml index 15d9f56f..de894fa3 100644 --- a/fabric_cf/broker/test/test.yaml +++ b/fabric_cf/broker/test/test.yaml @@ -121,7 +121,7 @@ neo4j: bqm: kafka-topic: broker-resource-usage # in seconds (default set to 2 hours) - publish-interval: 7200 + publish-interval: kafka-sasl-producer-username: kafka-sasl-producer-password: @@ -135,10 +135,16 @@ actor: module: fabric_cf.actor.core.policy.broker_simpler_units_policy class: BrokerSimplerUnitsPolicy properties: - algorithm: FirstFit + algorithm: + FirstFit: # Default policy for all sites + enabled: true + Random: # Random policy for specific sites + enabled: true + sites: # Specify the sites where Random policy should be used + - EDUKY controls: - control: - type: VM, Container, Baremetal + type: VM, Container, Baremetal, Switch class: NetworkNodeInventory module: fabric_cf.actor.core.policy.network_node_inventory - control: diff --git a/fabric_cf/orchestrator/config.orchestrator.yaml b/fabric_cf/orchestrator/config.orchestrator.yaml index 62a435f4..60e2ec43 100644 --- a/fabric_cf/orchestrator/config.orchestrator.yaml +++ b/fabric_cf/orchestrator/config.orchestrator.yaml @@ -49,6 +49,7 @@ runtime: rpc.request.timeout.seconds: 1200 maint.project.id: 990d8a8b-7e50-4d13-a3be-0f133ffa8653 infrastructure.project.id: 4604cab7-41ff-4c1a-a935-0ca6f20cceeb + total_slice_count_seed: 0 message.max.bytes: 1048588 rpc.retries: 5 commit.batch.size: 1 @@ -107,6 +108,7 @@ container: bqm: # in seconds (default set to 300 seconds) refresh-interval: 300 + local: True time: # 
This section controls settings, which are generally useful diff --git a/fabric_cf/orchestrator/core/bqm_wrapper.py b/fabric_cf/orchestrator/core/bqm_wrapper.py index ebd188ed..8ff87d66 100644 --- a/fabric_cf/orchestrator/core/bqm_wrapper.py +++ b/fabric_cf/orchestrator/core/bqm_wrapper.py @@ -25,6 +25,7 @@ # Author: Komal Thareja (kthare10@renci.org) from datetime import datetime, timezone +from fabric_cf.actor.fim.fim_helper import FimHelper from fim.user import GraphFormat @@ -39,6 +40,7 @@ def __init__(self): self.refresh_interval_in_seconds = 60 self.refresh_in_progress = False self.level = 1 + self.graph_id = None def set_refresh_interval(self, *, refresh_interval: int): """ diff --git a/fabric_cf/orchestrator/core/orchestrator_handler.py b/fabric_cf/orchestrator/core/orchestrator_handler.py index 421e6196..39b2c99b 100644 --- a/fabric_cf/orchestrator/core/orchestrator_handler.py +++ b/fabric_cf/orchestrator/core/orchestrator_handler.py @@ -27,7 +27,7 @@ import traceback from datetime import datetime, timedelta, timezone from http.client import NOT_FOUND, BAD_REQUEST, UNAUTHORIZED -from typing import List, Tuple +from typing import List, Tuple, Union from fabric_mb.message_bus.messages.auth_avro import AuthAvro from fabric_mb.message_bus.messages.poa_avro import PoaAvro @@ -64,7 +64,11 @@ def __init__(self): self.logger = self.globals.get_logger() self.jwks_url = self.globals.get_config().get_oauth_config().get(Constants.PROPERTY_CONF_O_AUTH_JWKS_URL, None) self.pdp_config = self.globals.get_config().get_global_config().get_pdp_config() - self.infrastructure_project_id = self.globals.get_config().get_runtime_config().get(Constants.INFRASTRUCTURE_PROJECT_ID, None) + self.config = self.globals.get_config() + self.infrastructure_project_id = self.config.get_runtime_config().get(Constants.INFRASTRUCTURE_PROJECT_ID, None) + self.total_slice_count_seed = self.config.get_runtime_config().get(Constants.TOTAL_SLICE_COUNT_SEED, 0) + self.local_bqm = self.globals.get_config().get_global_config().get_bqm_config().get( + Constants.LOCAL_BQM, False) def get_logger(self): """ @@ -114,7 +118,8 @@ def get_broker(self, *, controller: ABCMgmtControllerMixin) -> ID: def discover_broker_query_model(self, *, controller: ABCMgmtControllerMixin, token: str = None, level: int = 10, graph_format: GraphFormat = GraphFormat.GRAPHML, - force_refresh: bool = False) -> str or None: + force_refresh: bool = False, start: datetime = None, + end: datetime = None, includes: str = None, excludes: str = None) -> str or None: """ Discover all the available resources by querying Broker :param controller Management Controller Object @@ -122,39 +127,68 @@ def discover_broker_query_model(self, *, controller: ABCMgmtControllerMixin, tok :param level: level of details :param graph_format: Graph format :param force_refresh: Force fetching a fresh model from Broker + :param start: start time + :param end: end time + :param includes: comma separated lists of sites to include + :param excludes: comma separated lists of sites to exclude :return str or None """ broker_query_model = None - saved_bqm = self.controller_state.get_saved_bqm(graph_format=graph_format, level=level) - if saved_bqm is not None: - if not force_refresh and not saved_bqm.can_refresh() and not saved_bqm.refresh_in_progress: - broker_query_model = saved_bqm.get_bqm() - else: - saved_bqm.start_refresh() + # Always get Fresh copy for advanced resource requests + if not start and not end and not includes and not excludes and \ + (level <= 1 or graph_format == 
GraphFormat.JSON_NODELINK): + saved_bqm = self.controller_state.get_saved_bqm(graph_format=graph_format, level=level) + if saved_bqm is not None: + if not force_refresh and not saved_bqm.can_refresh() and not saved_bqm.refresh_in_progress: + broker_query_model = saved_bqm.get_bqm() + else: + saved_bqm.start_refresh() if broker_query_model is None: - broker = self.get_broker(controller=controller) - if broker is None: - raise OrchestratorException("Unable to determine broker proxy for this controller. " - "Please check Orchestrator container configuration and logs.") - - model = controller.get_broker_query_model(broker=broker, id_token=token, level=level, - graph_format=graph_format) - if model is None or model.get_model() is None or model.get_model() == '': - raise OrchestratorException(http_error_code=NOT_FOUND, message=f"Resource(s) not found for " - f"level: {level} format: {graph_format}!") - broker_query_model = model.get_model() - - self.controller_state.save_bqm(bqm=broker_query_model, graph_format=graph_format, level=level) + if self.local_bqm and level == 2 and not force_refresh: + saved_bqm = self.controller_state.get_saved_bqm(graph_format=GraphFormat.GRAPHML, level=0) + if saved_bqm and saved_bqm.get_bqm() and len(saved_bqm.get_bqm()): + broker_query_model = controller.build_broker_query_model(level_0_broker_query_model=saved_bqm.get_bqm(), + level=level, graph_format=graph_format, + start=start, end=end, includes=includes, + excludes=excludes) + # Request the model from Broker as a fallback + if not broker_query_model: + broker = self.get_broker(controller=controller) + if broker is None: + raise OrchestratorException("Unable to determine broker proxy for this controller. " + "Please check Orchestrator container configuration and logs.") + + model = controller.get_broker_query_model(broker=broker, id_token=token, level=level, + graph_format=graph_format, start=start, end=end, + includes=includes, excludes=excludes) + if model is None or model.get_model() is None or model.get_model() == '': + raise OrchestratorException(http_error_code=NOT_FOUND, message=f"Resource(s) not found for " + f"level: {level} format: {graph_format}!") + + broker_query_model = model.get_model() + + # Do not update cache for advance requests + if not start and not end and not includes and not excludes and \ + (level <= 1 or graph_format == GraphFormat.JSON_NODELINK): + self.controller_state.save_bqm(bqm=broker_query_model, graph_format=graph_format, level=level) return broker_query_model - def list_resources(self, *, token: str, level: int, force_refresh: bool = False) -> dict: + def list_resources(self, *, level: int, force_refresh: bool = False, start: datetime = None, + end: datetime, includes: str = None, excludes: str = None, graph_format_str: str = None, + token: str = None, authorize: bool = True) -> str: """ List Resources :param token Fabric Identity Token :param level: level of details (default set to 1) :param force_refresh: force fetching bqm from broker and override the cached model + :param start: start time + :param end: end time + :param includes: comma separated lists of sites to include + :param excludes: comma separated lists of sites to exclude + :param graph_format_str: Graph format + :param authorize: Authorize the request; Not authorized for Portal requests :raises Raises an exception in case of failure :returns Broker Query Model on success """ @@ -162,48 +196,30 @@ def list_resources(self, *, token: str, level: int, force_refresh: bool = False) controller = 
self.controller_state.get_management_actor() self.logger.debug(f"list_resources invoked controller:{controller}") - self.__authorize_request(id_token=token, action_id=ActionId.query) - - broker_query_model = self.discover_broker_query_model(controller=controller, token=token, level=level, - force_refresh=force_refresh) - - return ResponseBuilder.get_broker_query_model_summary(bqm=broker_query_model) - - except Exception as e: - self.logger.error(traceback.format_exc()) - self.logger.error(f"Exception occurred processing list_resources e: {e}") - raise e - - def portal_list_resources(self, *, graph_format_str: str) -> dict: - """ - List Resources - :param graph_format_str: Graph format - :raises Raises an exception in case of failure - :returns Broker Query Model on success - """ - try: - controller = self.controller_state.get_management_actor() - self.logger.debug(f"portal_list_resources invoked controller:{controller}") + graph_format = self.__translate_graph_format(graph_format=graph_format_str) if graph_format_str else GraphFormat.GRAPHML - broker_query_model = None - graph_format = self.__translate_graph_format(graph_format=graph_format_str) - broker_query_model = self.discover_broker_query_model(controller=controller, level=1, + if authorize: + self.__authorize_request(id_token=token, action_id=ActionId.query) + broker_query_model = self.discover_broker_query_model(controller=controller, token=token, level=level, + force_refresh=force_refresh, start=start, + end=end, includes=includes, excludes=excludes, graph_format=graph_format) - return ResponseBuilder.get_broker_query_model_summary(bqm=broker_query_model) + return broker_query_model except Exception as e: self.logger.error(traceback.format_exc()) - self.logger.error(f"Exception occurred processing portal_list_resources e: {e}") + self.logger.error(f"Exception occurred processing list_resources e: {e}") raise e def create_slice(self, *, token: str, slice_name: str, slice_graph: str, ssh_key: str, - lease_end_time: str) -> List[dict]: + lease_start_time: datetime = None, lease_end_time: datetime = None) -> List[dict]: """ Create a slice :param token Fabric Identity Token :param slice_name Slice Name :param slice_graph Slice Graph Model :param ssh_key: User ssh key + :param lease_start_time: Lease Start Time (UTC) :param lease_end_time: Lease End Time (UTC) :raises Raises an exception in case of failure :returns List of reservations created for the Slice on success @@ -219,8 +235,9 @@ def create_slice(self, *, token: str, slice_name: str, slice_graph: str, ssh_key fabric_token = AccessChecker.validate_and_decode_token(token=token) project, tags, project_name = fabric_token.first_project allow_long_lived = True if Constants.SLICE_NO_LIMIT_LIFETIME in tags else False - end_time = self.__validate_lease_end_time(lease_end_time=lease_end_time, allow_long_lived=allow_long_lived, - project_id=project) + start_time, end_time = self.__compute_lease_end_time(lease_end_time=lease_end_time, + allow_long_lived=allow_long_lived, + project_id=project, lease_start_time=lease_start_time) controller = self.controller_state.get_management_actor() self.logger.debug(f"create_slice invoked for Controller: {controller}") @@ -238,7 +255,7 @@ def create_slice(self, *, token: str, slice_name: str, slice_graph: str, ssh_key # Authorize the slice create_ts = time.time() self.__authorize_request(id_token=token, action_id=ActionId.create, resource=topology, - lease_end_time=end_time) + lease_end_time=end_time) self.logger.info(f"PDP authorize: TIME= {time.time() 
- create_ts:.0f}") # Check if an Active slice exists already with the same name for the user @@ -270,6 +287,7 @@ def create_slice(self, *, token: str, slice_name: str, slice_graph: str, ssh_key Constants.TAGS: tags, Constants.CLAIMS_EMAIL: fabric_token.email, Constants.TOKEN_HASH: fabric_token.token_hash}) + slice_obj.set_lease_start(lease_start=start_time) slice_obj.set_lease_end(lease_end=end_time) auth = AuthAvro() auth.name = self.controller_state.get_management_actor().get_name() @@ -319,6 +337,7 @@ def create_slice(self, *, token: str, slice_name: str, slice_graph: str, ssh_key EventLoggerSingleton.get().log_slice_event(slice_object=slice_obj, action=ActionId.create, topology=topology) + controller.increment_metrics(project_id=project, oidc_sub=fabric_token.uuid) return ResponseBuilder.get_reservation_summary(res_list=computed_reservations) except Exception as e: if slice_id is not None and controller is not None and asm_graph is not None: @@ -379,7 +398,7 @@ def get_slivers(self, *, token: str, slice_id: str, sliver_id: str = None, as_se raise e def get_slices(self, *, token: str, states: List[str], name: str, limit: int, offset: int, - as_self: bool = True) -> List[dict]: + as_self: bool = True, search: str = None, exact_match: bool = False) -> List[dict]: """ Get User Slices :param token Fabric Identity Token @@ -388,6 +407,8 @@ def get_slices(self, *, token: str, states: List[str], name: str, limit: int, of :param limit Number of slices to return :param offset Offset :param as_self flag; True - return calling user's slices otherwise, return all slices in the project + :param search: search term applied + :param exact_match: Exact Match for Search term :raises Raises an exception in case of failure :returns List of Slices on success """ @@ -484,20 +505,28 @@ def modify_slice(self, *, token: str, slice_id: str, slice_graph: str) -> List[d FimHelper.delete_graph(graph_id=slice_obj.get_graph_id()) + # Slice has sliver modifications - add/remove/update for slivers requiring AM updates + modify_state = slice_object.has_sliver_updates_at_authority() + slice_obj.graph_id = asm_graph.get_graph_id() config_props = slice_obj.get_config_properties() config_props[Constants.PROJECT_ID] = project config_props[Constants.TAGS] = ','.join(tags) config_props[Constants.TOKEN_HASH] = fabric_token.token_hash slice_obj.set_config_properties(value=config_props) + slice_obj.state = SliceState.Modifying.value - if not controller.update_slice(slice_obj=slice_obj, modify_state=True): + if not controller.update_slice(slice_obj=slice_obj, modify_state=modify_state): self.logger.error(f"Failed to update slice: {slice_id} error: {controller.get_last_error()}") - # Enqueue the slice on the demand thread - # Demand thread is responsible for demanding the reservations - # Helps improve the create response time - self.controller_state.get_defer_thread().queue_slice(controller_slice=slice_object) + if modify_state: + # Enqueue the slice on the demand thread + # Demand thread is responsible for demanding the reservations + # Helps improve the create response time + self.controller_state.get_defer_thread().queue_slice(controller_slice=slice_object) + # Sliver has meta data update + else: + self.logger.debug("Slice only has UserData updates") EventLoggerSingleton.get().log_slice_event(slice_object=slice_obj, action=ActionId.modify, topology=topology) @@ -536,7 +565,9 @@ def delete_slices(self, *, token: str, slice_id: str = None): SliceState.StableError.value, SliceState.StableOK.value, SliceState.ModifyOK.value, - 
SliceState.ModifyError.value] + SliceState.ModifyError.value, + SliceState.AllocatedError.value, + SliceState.AllocatedOK.value] slice_list = controller.get_slices(slice_id=slice_guid, user_id=fabric_token.uuid, project=project, states=states) @@ -553,7 +584,8 @@ def delete_slices(self, *, token: str, slice_id: str = None): self.logger.debug(f"Slice# {slice_object.get_slice_id()} already closed") continue - if not SliceState.is_stable(state=slice_state) and not SliceState.is_modified(state=slice_state): + if not SliceState.is_stable(state=slice_state) and not SliceState.is_modified(state=slice_state) and \ + not SliceState.is_allocated(state=slice_state): self.logger.info(f"Unable to delete Slice# {slice_object.get_slice_id()} that is not yet stable, " f"try again later") failed_to_delete_slice_ids.append(slice_object.get_slice_id()) @@ -594,16 +626,22 @@ def modify_accept(self, *, token: str, slice_id: str) -> dict: slice_obj = next(iter(slice_list)) slice_state = SliceState(slice_obj.get_state()) - if not SliceState.is_modified(state=slice_state): - self.logger.info(f"Unable to accept modify Slice# {slice_guid} that was not modified") - raise OrchestratorException(f"Unable to accept modify Slice# {slice_guid} that was not modified") + # Do not throw error if modify accept is received for a stable slice + # Just return the success with slice topology + #if not SliceState.is_modified(state=slice_state): + # self.logger.info(f"Unable to accept modify Slice# {slice_guid} that was not modified") + # raise OrchestratorException(f"Unable to accept modify Slice# {slice_guid} that was not modified") if slice_obj.get_graph_id() is None: raise OrchestratorException(f"Slice# {slice_obj} does not have graph id") - slice_topology = FimHelper.prune_graph(graph_id=slice_obj.get_graph_id()) + if not SliceState.is_modified(state=slice_state): + slice_topology = FimHelper.get_graph(graph_id=slice_obj.get_graph_id()) + # Prune the slice topology only if slice was modified + else: + slice_topology = FimHelper.prune_graph(graph_id=slice_obj.get_graph_id()) - controller.accept_update_slice(slice_id=ID(uid=slice_id)) + controller.accept_update_slice(slice_id=ID(uid=slice_id)) slice_model_str = slice_topology.serialize() return ResponseBuilder.get_slice_summary(slice_list=slice_list, slice_model=slice_model_str)[0] @@ -664,7 +702,7 @@ def get_slice_graph(self, *, token: str, slice_id: str, graph_format_str: str, a self.logger.error(f"Exception occurred processing get_slice_graph e: {e}") raise e - def renew_slice(self, *, token: str, slice_id: str, new_lease_end_time: str): + def renew_slice(self, *, token: str, slice_id: str, new_lease_end_time: datetime): """ Renew a slice :param token Fabric Identity Token @@ -674,6 +712,7 @@ def renew_slice(self, *, token: str, slice_id: str, new_lease_end_time: str): :return: """ failed_to_extend_rid_list = [] + extend_rid_list = [] try: controller = self.controller_state.get_management_actor() self.logger.debug(f"renew_slice invoked for Controller: {controller}") @@ -702,9 +741,9 @@ def renew_slice(self, *, token: str, slice_id: str, new_lease_end_time: str): fabric_token = AccessChecker.validate_and_decode_token(token=token) project, tags, project_name = fabric_token.first_project allow_long_lived = True if Constants.SLICE_NO_LIMIT_LIFETIME in tags else False - new_end_time = self.__validate_lease_end_time(lease_end_time=new_lease_end_time, - allow_long_lived=allow_long_lived, - project_id=project) + start_time, new_end_time = 
self.__compute_lease_end_time(lease_end_time=new_lease_end_time, + allow_long_lived=allow_long_lived, + project_id=project) reservations = controller.get_reservations(slice_id=slice_id) if reservations is None or len(reservations) < 1: @@ -728,6 +767,9 @@ def renew_slice(self, *, token: str, slice_id: str, new_lease_end_time: str): if new_end_time < current_end_time: raise OrchestratorException(f"Attempted new term end time is shorter than current slice end time") + if new_end_time == current_end_time: + continue + self.logger.debug(f"Extending reservation with reservation# {r.get_reservation_id()}") result = controller.extend_reservation(reservation=ID(uid=r.get_reservation_id()), new_end_time=new_end_time, @@ -735,57 +777,85 @@ def renew_slice(self, *, token: str, slice_id: str, new_lease_end_time: str): if not result: self.logger.error(f"Error: {controller.get_last_error()}") failed_to_extend_rid_list.append(r.get_reservation_id()) + else: + extend_rid_list.append(r.get_reservation_id()) + ''' if len(failed_to_extend_rid_list) == 0: slice_object.set_lease_end(lease_end=new_end_time) if not controller.update_slice(slice_obj=slice_object): self.logger.error(f"Failed to update lease end time: {new_end_time} in Slice: {slice_object}") self.logger.error(controller.get_last_error()) + ''' if len(failed_to_extend_rid_list) > 0: raise OrchestratorException(f"Failed to extend reservation# {failed_to_extend_rid_list}") + if len(extend_rid_list): + slice_object.state = SliceState.Configuring.value + if not controller.update_slice(slice_obj=slice_object, modify_state=True): + self.logger.error(f"Failed to update slice: {slice_id} error: {controller.get_last_error()}") + EventLoggerSingleton.get().log_slice_event(slice_object=slice_object, action=ActionId.renew) except Exception as e: self.logger.error(traceback.format_exc()) self.logger.error(f"Exception occurred processing renew e: {e}") raise e - def __validate_lease_end_time(self, lease_end_time: str, allow_long_lived: bool = False, - project_id: str = None) -> datetime: + @staticmethod + def validate_lease_time(lease_time: str) -> Union[datetime, None]: + """ + Validate Lease Time + :param lease_time: Lease Time + :return Lease Time + :raises Exception if new lease time is in past + """ + if lease_time is None: + return lease_time + try: + new_time = datetime.strptime(lease_time, Constants.LEASE_TIME_FORMAT) + except Exception as e: + raise OrchestratorException(f"Lease Time is not in format {Constants.LEASE_TIME_FORMAT}", + http_error_code=BAD_REQUEST) + + now = datetime.now(timezone.utc) + if new_time <= now: + raise OrchestratorException(f"New lease time {new_time} is in the past! 
", + http_error_code=BAD_REQUEST) + + return new_time + + def __compute_lease_end_time(self, lease_end_time: datetime, allow_long_lived: bool = False, + project_id: str = None, lease_start_time: datetime = None) -> Tuple[datetime, datetime]: """ Validate Lease End Time :param lease_end_time: New End Time :param allow_long_lived: Allow long lived tokens :param project_id: Project Id + :param lease_start_time: New Start Time :return End Time :raises Exception if new end time is in past """ + base_time = datetime.now(timezone.utc) + if lease_start_time and lease_start_time > base_time: + base_time = lease_start_time if lease_end_time is None: - new_end_time = datetime.now(timezone.utc) + timedelta(hours=Constants.DEFAULT_LEASE_IN_HOURS) - return new_end_time - try: - new_end_time = datetime.strptime(lease_end_time, Constants.LEASE_TIME_FORMAT) - except Exception as e: - raise OrchestratorException(f"Lease End Time is not in format {Constants.LEASE_TIME_FORMAT}", - http_error_code=BAD_REQUEST) + new_end_time = base_time + timedelta(hours=Constants.DEFAULT_LEASE_IN_HOURS) + return base_time, new_end_time - now = datetime.now(timezone.utc) - if new_end_time <= now: - raise OrchestratorException(f"New term end time {new_end_time} is in the past! ", - http_error_code=BAD_REQUEST) + new_end_time = lease_end_time if allow_long_lived: default_long_lived_duration = Constants.LONG_LIVED_SLICE_TIME_WEEKS else: default_long_lived_duration = Constants.DEFAULT_MAX_DURATION - if project_id not in self.infrastructure_project_id and (new_end_time - now) > default_long_lived_duration: + if project_id not in self.infrastructure_project_id and (new_end_time - base_time) > default_long_lived_duration: self.logger.info(f"New term end time {new_end_time} exceeds system default " f"{default_long_lived_duration}, setting to system default: ") - new_end_time = now + default_long_lived_duration + new_end_time = base_time + default_long_lived_duration - return new_end_time + return base_time, new_end_time @staticmethod def __translate_graph_format(*, graph_format: str) -> GraphFormat: @@ -830,7 +900,7 @@ def poa(self, *, token: str, sliver_id: str, poa: PoaAvro) -> Tuple[str, str]: rid = ID(uid=sliver_id) if sliver_id is not None else None - fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.modify) + fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.POA) user_id = fabric_token.uuid project, tags, project_name = fabric_token.first_project @@ -904,3 +974,44 @@ def get_poas(self, *, token: str, sliver_id: str = None, poa_id: str = None, sta self.logger.error(traceback.format_exc()) self.logger.error(f"Exception occurred processing poa e: {e}") raise e + + def get_metrics_overview(self, *, token: str = None, excluded_projects: List[str] = None): + """ + Get metrics overview + """ + try: + controller = self.controller_state.get_management_actor() + self.logger.debug(f"get_metrics_overview invoked for Controller: {controller}") + + project = None + user_id = None + # Filter based on project_id and user_id when token is provided + if token: + fabric_token = self.__authorize_request(id_token=token, action_id=ActionId.query) + projects = fabric_token.projects + if len(projects) == 1: + project, tags, project_name = fabric_token.first_project + user_id = fabric_token.uuid + + active_states = SliceState.list_values_ex_closing_dead() + active_slice_count = controller.get_slice_count(states=active_states, user_id=user_id, project=project, + excluded_projects=excluded_projects) + 
non_active_metrics = controller.get_metrics(oidc_sub=user_id, project_id=project, + excluded_projects=excluded_projects) + total_slices = 0 + for m in non_active_metrics: + total_slices += m.get("slice_count", 0) + if not user_id and not project: + # Get Seed value from config + total_slices += self.total_slice_count_seed + result = { + "slices": { + "active_cumulative": active_slice_count, + "non_active_cumulative": total_slices + } + } + return result + except Exception as e: + self.logger.error(traceback.format_exc()) + self.logger.error(f"Exception occurred processing get_metrics_overview e: {e}") + raise e diff --git a/fabric_cf/orchestrator/core/orchestrator_kernel.py b/fabric_cf/orchestrator/core/orchestrator_kernel.py index c644ce1a..18f59a7e 100644 --- a/fabric_cf/orchestrator/core/orchestrator_kernel.py +++ b/fabric_cf/orchestrator/core/orchestrator_kernel.py @@ -92,6 +92,7 @@ def save_bqm(self, *, bqm: str, graph_format: GraphFormat, level: int): saved_bqm.set_refresh_interval(refresh_interval=int(refresh_interval)) saved_bqm.save(bqm=bqm, graph_format=graph_format, level=level) self.bqm_cache[key] = saved_bqm + finally: self.lock.release() @@ -156,6 +157,11 @@ def start_threads(self): Start threads :return: """ + if not len(self.bqm_cache): + self.save_bqm(bqm="", graph_format=GraphFormat.GRAPHML, level=0) + saved_bqm = self.get_saved_bqm(graph_format=GraphFormat.GRAPHML, level=0) + saved_bqm.last_query_time = None + from fabric_cf.actor.core.container.globals import GlobalsSingleton GlobalsSingleton.get().get_container().register(tickable=self) diff --git a/fabric_cf/orchestrator/core/orchestrator_slice_wrapper.py b/fabric_cf/orchestrator/core/orchestrator_slice_wrapper.py index 7c1dba20..a8fc6865 100644 --- a/fabric_cf/orchestrator/core/orchestrator_slice_wrapper.py +++ b/fabric_cf/orchestrator/core/orchestrator_slice_wrapper.py @@ -42,7 +42,7 @@ from fim.slivers.network_node import NodeSliver, NodeType from fim.slivers.network_service import NetworkServiceSliver from fim.slivers.topology_diff import WhatsModifiedFlag -from fim.user import ServiceType, ExperimentTopology +from fim.user import ServiceType, ExperimentTopology, InterfaceType from fabric_cf.actor.core.common.constants import ErrorCodes, Constants from fabric_cf.actor.core.kernel.reservation_states import ReservationPendingStates, ReservationStates @@ -191,9 +191,12 @@ def __validate_node_sliver(*, sliver: NodeSliver): @param sliver Node Sliver @raises exception for invalid slivers """ - if sliver.get_capacities() is None and sliver.get_capacity_hints() is None: + if sliver.get_type() == NodeType.VM and sliver.get_capacities() is None and sliver.get_capacity_hints() is None: raise OrchestratorException(message="Either Capacity or Capacity Hints must be specified!", http_error_code=BAD_REQUEST) + if sliver.get_type() == NodeType.Switch and sliver.get_capacities() is None: + raise OrchestratorException(message="Capacity must be specified!", + http_error_code=BAD_REQUEST) def __build_ns_sliver_reservation(self, *, slice_graph: ABCASMPropertyGraph, node_id: str, node_res_mapping: Dict[str, str]) -> Tuple[LeaseReservationAvro or None, bool]: @@ -336,7 +339,8 @@ def __build_ns_sliver_reservation(self, *, slice_graph: ABCASMPropertyGraph, nod reservation = self.reservation_converter.generate_reservation(sliver=sliver, slice_id=self.slice_obj.get_slice_id(), end_time=self.slice_obj.get_lease_end(), - pred_list=redeem_predecessors) + pred_list=redeem_predecessors, + start_time=self.slice_obj.get_lease_start()) if
sliver.node_id not in node_res_mapping: node_res_mapping[sliver.node_id] = reservation.get_reservation_id() @@ -405,31 +409,33 @@ def __build_node_sliver_reservation(self, *, slice_graph: ABCASMPropertyGraph, # Build Network Node Sliver sliver = slice_graph.build_deep_node_sliver(node_id=node_id) - if sliver.get_type() not in [NodeType.VM]: + if sliver.get_type() not in [NodeType.VM, NodeType.Switch]: return None # Validate Node Sliver self.__validate_node_sliver(sliver=sliver) - # Compute Requested Capacities from Capacity Hints - requested_capacities = sliver.get_capacities() - requested_capacity_hints = sliver.get_capacity_hints() - catalog = InstanceCatalog() - if requested_capacities is None and requested_capacity_hints is not None: - requested_capacities = catalog.get_instance_capacities( - instance_type=requested_capacity_hints.instance_type) - sliver.set_capacities(cap=requested_capacities) - - # Compute Capacity Hints from Requested Capacities - if requested_capacity_hints is None and requested_capacities is not None: - instance_type = catalog.map_capacities_to_instance(cap=requested_capacities) - requested_capacity_hints = CapacityHints(instance_type=instance_type) - sliver.set_capacity_hints(caphint=requested_capacity_hints) + if sliver.get_type() == NodeType.VM: + # Compute Requested Capacities from Capacity Hints + requested_capacities = sliver.get_capacities() + requested_capacity_hints = sliver.get_capacity_hints() + catalog = InstanceCatalog() + if requested_capacities is None and requested_capacity_hints is not None: + requested_capacities = catalog.get_instance_capacities( + instance_type=requested_capacity_hints.instance_type) + sliver.set_capacities(cap=requested_capacities) + + # Compute Capacity Hints from Requested Capacities + if requested_capacity_hints is None and requested_capacities is not None: + instance_type = catalog.map_capacities_to_instance(cap=requested_capacities) + requested_capacity_hints = CapacityHints(instance_type=instance_type) + sliver.set_capacity_hints(caphint=requested_capacity_hints) # Generate reservation for the sliver reservation = self.reservation_converter.generate_reservation(sliver=sliver, slice_id=self.slice_obj.get_slice_id(), - end_time=self.slice_obj.get_lease_end()) + end_time=self.slice_obj.get_lease_end(), + start_time=self.slice_obj.get_lease_start()) return reservation def __build_network_node_reservations(self, *, slice_graph: ABCASMPropertyGraph) \ @@ -513,7 +519,7 @@ def modify(self, *, new_slice_graph: ABCASMPropertyGraph) -> List[LeaseReservati # Add components for x in topology_diff.added.components: - sliver, parent_node_id = FimHelper.get_parent_node(graph_model=new_slice_graph, component=x) + sliver, parent_node_id = FimHelper.get_parent_node(graph_model=new_slice_graph, node=x) rid = sliver.reservation_info.reservation_id # If corresponding sliver also has add operations; it's already in the map # No need to rebuild it @@ -523,7 +529,7 @@ def modify(self, *, new_slice_graph: ABCASMPropertyGraph) -> List[LeaseReservati # Remove components for x in topology_diff.removed.components: # Grab the old sliver - sliver, parent_node_id = FimHelper.get_parent_node(graph_model=existing_topology.graph_model, component=x) + sliver, parent_node_id = FimHelper.get_parent_node(graph_model=existing_topology.graph_model, node=x) rid = sliver.reservation_info.reservation_id # If corresponding sliver also has add operations; it's already in the map # No need to rebuild it @@ -533,37 +539,44 @@ def modify(self, *, new_slice_graph: 
ABCASMPropertyGraph) -> List[LeaseReservati # Added Interfaces for x in topology_diff.added.interfaces: - new_sliver, parent_node_id = FimHelper.get_parent_node(graph_model=new_slice_graph, interface=x) + new_sliver, parent_node_id = FimHelper.get_parent_node(graph_model=new_slice_graph, node=x) rid = new_sliver.reservation_info.reservation_id # If corresponding sliver also has add operations; it's already in the map # No need to rebuild it if rid not in self.computed_modify_reservations: - new_reservation, dep_update_needed = self.__build_ns_sliver_reservation(slice_graph=new_slice_graph, - node_id=parent_node_id, - node_res_mapping=node_res_mapping) - self.computed_modify_reservations[rid] = ModifiedReservation(sliver=new_reservation.get_sliver(), - dependencies=new_reservation.redeem_processors) + if x.type == InterfaceType.SubInterface: + self.computed_modify_reservations[rid] = ModifiedReservation(sliver=new_sliver) + else: + new_reservation, dep_update_needed = self.__build_ns_sliver_reservation(slice_graph=new_slice_graph, + node_id=parent_node_id, + node_res_mapping=node_res_mapping) + self.computed_modify_reservations[rid] = ModifiedReservation(sliver=new_reservation.get_sliver(), + dependencies=new_reservation.redeem_processors) - if dep_update_needed: - ns_peered_reservations.append(new_reservation) - ns_mapping[new_reservation.sliver.node_id] = rid + if dep_update_needed: + ns_peered_reservations.append(new_reservation) + ns_mapping[new_reservation.sliver.node_id] = rid # Removed Interfaces for x in topology_diff.removed.interfaces: - sliver, parent_node_id = FimHelper.get_parent_node(graph_model=existing_topology.graph_model, interface=x) + sliver, parent_node_id = FimHelper.get_parent_node(graph_model=existing_topology.graph_model, node=x) rid = sliver.reservation_info.reservation_id # If corresponding sliver also has add operations; it's already in the map # No need to rebuild it if rid not in self.computed_modify_reservations: - new_reservation, dep_update_needed = self.__build_ns_sliver_reservation(slice_graph=new_slice_graph, - node_id=parent_node_id, - node_res_mapping=node_res_mapping) - self.computed_modify_reservations[rid] = ModifiedReservation(sliver=new_reservation.get_sliver(), - dependencies=new_reservation.redeem_processors) + if x.type == InterfaceType.SubInterface: + new_sliver = new_slice_graph.build_deep_node_sliver(node_id=parent_node_id) + self.computed_modify_reservations[rid] = ModifiedReservation(sliver=new_sliver) + else: + new_reservation, dep_update_needed = self.__build_ns_sliver_reservation(slice_graph=new_slice_graph, + node_id=parent_node_id, + node_res_mapping=node_res_mapping) + self.computed_modify_reservations[rid] = ModifiedReservation(sliver=new_reservation.get_sliver(), + dependencies=new_reservation.redeem_processors) - if dep_update_needed: - ns_peered_reservations.append(new_reservation) - ns_mapping[new_reservation.sliver.node_id] = rid + if dep_update_needed: + ns_peered_reservations.append(new_reservation) + ns_mapping[new_reservation.sliver.node_id] = rid # Remove nodes for x in topology_diff.removed.nodes: @@ -700,3 +713,7 @@ def update_topology(self, *, topology: ExperimentTopology): management_ip=sliver.management_ip, capacity_hints=sliver.capacity_hints, capacities=sliver.capacities) + + def has_sliver_updates_at_authority(self): + return len(self.computed_reservations) and len(self.computed_remove_reservations) or \ + len(self.computed_modify_reservations) or len(self.computed_modify_properties_reservations) diff --git 
a/fabric_cf/orchestrator/core/reservation_converter.py b/fabric_cf/orchestrator/core/reservation_converter.py index e7adbdde..5182c2d6 100644 --- a/fabric_cf/orchestrator/core/reservation_converter.py +++ b/fabric_cf/orchestrator/core/reservation_converter.py @@ -45,13 +45,14 @@ def __init__(self, *, controller: ABCMgmtControllerMixin, broker: ID): self.controller = controller self.broker = broker - def generate_reservation(self, *, sliver: BaseSliver, slice_id: str, end_time: datetime, + def generate_reservation(self, *, sliver: BaseSliver, slice_id: str, end_time: datetime, start_time: datetime = None, pred_list: List[str] = None) -> LeaseReservationAvro: """ Responsible to generate reservation from the sliver :param sliver Network Service or Network Node Sliver :param slice_id Slice Id :param end_time End Time + :param start_time Start Time :param pred_list Predecessor Reservation Id List :returns list of tickets """ @@ -67,6 +68,9 @@ def generate_reservation(self, *, sliver: BaseSliver, slice_id: str, end_time: d ticket.set_units(1) ticket.set_resource_type(str(sliver.get_type())) start = datetime.now(timezone.utc) + if start_time and start_time > start: + start = start_time + end = start + timedelta(hours=Constants.DEFAULT_LEASE_IN_HOURS) if end_time is not None: end = end_time diff --git a/fabric_cf/orchestrator/core/response_builder.py b/fabric_cf/orchestrator/core/response_builder.py index c9cd7365..96c16631 100644 --- a/fabric_cf/orchestrator/core/response_builder.py +++ b/fabric_cf/orchestrator/core/response_builder.py @@ -42,6 +42,8 @@ class ResponseBuilder: PROP_NAME = "name" PROP_STATE = "state" PROP_ERROR = "error" + PROP_OWNER_EMAIL = "owner_email" + PROP_OWNER_USER_ID = "owner_user_id" PROP_PROJECT_ID = "project_id" PROP_PROJECT_NAME = "project_name" PROP_MODEL = "model" @@ -121,6 +123,8 @@ def get_slice_summary(*, slice_list: List[SliceAvro], slice_model: str = None) - ResponseBuilder.PROP_NAME: s.get_slice_name(), ResponseBuilder.PROP_GRAPH_ID: s.get_graph_id(), ResponseBuilder.PROP_STATE: SliceState(s.get_state()).name, + ResponseBuilder.PROP_OWNER_EMAIL: s.get_owner().get_email(), + ResponseBuilder.PROP_OWNER_USER_ID: s.get_owner().get_oidc_sub_claim() } end_time = s.get_lease_end() if end_time is not None: diff --git a/fabric_cf/orchestrator/docker-compose.yml b/fabric_cf/orchestrator/docker-compose.yml index 4e69cb44..904eebbf 100644 --- a/fabric_cf/orchestrator/docker-compose.yml +++ b/fabric_cf/orchestrator/docker-compose.yml @@ -68,7 +68,7 @@ services: build: context: ../../../ dockerfile: Dockerfile-orchestrator - image: orchestrator:1.6.2 + image: orchestrator:1.7.0 container_name: orchestrator restart: always depends_on: diff --git a/fabric_cf/orchestrator/nginx/default.conf b/fabric_cf/orchestrator/nginx/default.conf index 754c21c3..31cd479a 100644 --- a/fabric_cf/orchestrator/nginx/default.conf +++ b/fabric_cf/orchestrator/nginx/default.conf @@ -36,8 +36,8 @@ server { proxy_pass http://orchestrator:8700; proxy_set_header Host $http_host; } - location /metrics { - proxy_pass http://orchestrator:11000; - proxy_set_header Host $http_host; - } + #location /prom/metrics { + # proxy_pass http://orchestrator:11000; + # proxy_set_header Host $http_host; + #} } diff --git a/fabric_cf/orchestrator/openapi.json b/fabric_cf/orchestrator/openapi.json index 13357fdf..5eb1610d 100644 --- a/fabric_cf/orchestrator/openapi.json +++ b/fabric_cf/orchestrator/openapi.json @@ -25,6 +25,10 @@ } ], "tags": [ + { + "name": "metrics", + "description": "Control Framework Metrics" + }, { 
"name": "slices", "description": "Slices in FABRIC" @@ -75,6 +79,93 @@ } } }, + "/metrics/overview": { + "get": { + "tags": [ + "metrics" + ], + "summary": "Control Framework metrics overview", + "description": "Control Framework metrics overview", + "parameters": [ + { + "name": "excluded_projects", + "in": "query", + "description": "List of projects to exclude from the metrics overview", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/metrics" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/status_400_bad_request" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/status_401_unauthorized" + } + } + } + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/status_403_forbidden" + } + } + } + }, + "404": { + "description": "Not Found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/status_404_not_found" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/status_500_internal_server_error" + } + } + } + } + } + } + }, "/resources": { "get": { "tags": [ @@ -106,6 +197,54 @@ "type": "boolean", "default": false } + }, + { + "name": "start_date", + "in": "query", + "description": "starting date to check availability from", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "2023-01-01 16:20:15 +00:00" + } + }, + { + "name": "end_date", + "in": "query", + "description": "end date to check availability until", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "2023-01-01 16:20:15 +00:00" + } + }, + { + "name": "includes", + "in": "query", + "description": "comma separated lists of sites to include", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "RENC,UKY" + } + }, + { + "name": "excludes", + "in": "query", + "description": "comma separated lists of sites to exclude", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "SRI,LBNL" + } } ], "responses": { @@ -201,6 +340,78 @@ "CYTOSCAPE" ] } + }, + { + "name": "level", + "in": "query", + "description": "Level of details", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "integer", + "default": 1 + } + }, + { + "name": "force_refresh", + "in": "query", + "description": "Force to retrieve current available resource information.", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "boolean", + "default": false + } + }, + { + "name": "start_date", + "in": "query", + "description": "starting date to check availability from", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "2023-01-01 16:20:15 +00:00" + } + }, + { + "name": "end_date", + "in": "query", + "description": "end date to check availability until", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": 
"string", + "example": "2023-01-01 16:20:15 +00:00" + } + }, + { + "name": "includes", + "in": "query", + "description": "comma separated lists of sites to include", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "RENC,UKY" + } + }, + { + "name": "excludes", + "in": "query", + "description": "comma separated lists of sites to exclude", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "SRI,LBNL" + } } ], "responses": { @@ -287,6 +498,29 @@ "type": "string" } }, + { + "name": "search", + "in": "query", + "description": "search term applied", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string" + } + }, + { + "name": "exact_match", + "in": "query", + "description": "Exact Match for Search term", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "boolean", + "default": false + } + }, { "name": "as_self", "in": "query", @@ -320,6 +554,8 @@ "Modifying", "ModifyOK", "ModifyError", + "AllocatedOK", + "AllocatedError", "All" ] } @@ -677,6 +913,18 @@ "type": "string" } }, + { + "name": "lease_start_time", + "in": "query", + "description": "Lease End Time for the Slice", + "required": false, + "style": "form", + "explode": true, + "schema": { + "type": "string", + "example": "2023-01-01 16:20:15 +00:00" + } + }, { "name": "lease_end_time", "in": "query", @@ -685,7 +933,8 @@ "style": "form", "explode": true, "schema": { - "type": "string" + "type": "string", + "example": "2023-01-01 16:20:15 +00:00" } } ], @@ -2041,6 +2290,25 @@ } } }, + "metrics": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/status_200_ok_single" + }, + { + "type": "object", + "properties": { + "results": { + "type": "array", + "items": { + "type": "object" + } + } + } + } + ] + }, "slices": { "type": "object", "allOf": [ @@ -2113,6 +2381,12 @@ }, "slice_id": { "type": "string" + }, + "owner_user_id": { + "type": "string" + }, + "owner_email": { + "type": "string" } } }, @@ -2175,6 +2449,12 @@ }, "sliver_id": { "type": "string" + }, + "owner_user_id": { + "type": "string" + }, + "owner_email": { + "type": "string" } } }, @@ -2345,7 +2625,7 @@ "requestBodies": { "Request": { "content": { - "text/plain": { + "application/json": { "schema": { "type": "string" } diff --git a/fabric_cf/orchestrator/swagger_server/controllers/metrics_controller.py b/fabric_cf/orchestrator/swagger_server/controllers/metrics_controller.py new file mode 100644 index 00000000..410ec566 --- /dev/null +++ b/fabric_cf/orchestrator/swagger_server/controllers/metrics_controller.py @@ -0,0 +1,15 @@ +from fabric_cf.orchestrator.swagger_server.models.metrics import Metrics # noqa: E501 +from fabric_cf.orchestrator.swagger_server.response import metrics_controller as rc + + +def metrics_overview_get(excluded_projects=None): # noqa: E501 + """Control Framework metrics overview + + Control Framework metrics overview # noqa: E501 + + :param excluded_projects: List of projects to exclude from the metrics overview + :type excluded_projects: List[str] + + :rtype: Metrics + """ + return rc.metrics_overview_get(excluded_projects=excluded_projects) diff --git a/fabric_cf/orchestrator/swagger_server/controllers/resources_controller.py b/fabric_cf/orchestrator/swagger_server/controllers/resources_controller.py index 33c1b53e..3ea4d46c 100644 --- a/fabric_cf/orchestrator/swagger_server/controllers/resources_controller.py +++ 
b/fabric_cf/orchestrator/swagger_server/controllers/resources_controller.py @@ -2,31 +2,51 @@ from fabric_cf.orchestrator.swagger_server.response import resources_controller as rc -def portalresources_get(graph_format): # noqa: E501 +def portalresources_get(graph_format, level=None, force_refresh=None, start_date=None, end_date=None, includes=None, excludes=None): # noqa: E501 """Retrieve a listing and description of available resources for portal Retrieve a listing and description of available resources for portal # noqa: E501 :param graph_format: graph format :type graph_format: str + :param level: Level of details + :type level: int + :param force_refresh: Force to retrieve current available resource information. + :type force_refresh: bool + :param start_date: starting date to check availability from + :type start_date: str + :param end_date: end date to check availability until + :type end_date: str + :param includes: comma separated lists of sites to include + :type includes: str + :param excludes: comma separated lists of sites to exclude + :type excludes: str :rtype: Resources """ - return rc.portalresources_get(graph_format) + return rc.portalresources_get(graph_format=graph_format, level=level, force_refresh=force_refresh, + start_date=start_date, end_date=end_date, includes=includes, excludes=excludes) -def resources_get(level, force_refresh): # noqa: E501 - """Retrieve a listing and description of available resources. By default, a cached available resource information - is returned. User can force to request the current available resources. +def resources_get(level, force_refresh, start_date=None, end_date=None, includes=None, excludes=None): # noqa: E501 + """Retrieve a listing and description of available resources. By default, a cached available resource information is returned. User can force to request the current available resources. - Retrieve a listing and description of available resources. By default, a cached available resource information is - returned. User can force to request the current available resources. # noqa: E501 + Retrieve a listing and description of available resources. By default, a cached available resource information is returned. User can force to request the current available resources. # noqa: E501 :param level: Level of details :type level: int :param force_refresh: Force to retrieve current available resource information. 
:type force_refresh: bool + :param start_date: starting date to check availability from + :type start_date: str + :param end_date: end date to check availability until + :type end_date: str + :param includes: comma separated lists of sites to include + :type includes: str + :param excludes: comma separated lists of sites to exclude + :type excludes: str :rtype: Resources """ - return rc.resources_get(level, force_refresh) + return rc.resources_get(level=level, force_refresh=force_refresh, start_date=start_date, + end_date=end_date, includes=includes, excludes=excludes) diff --git a/fabric_cf/orchestrator/swagger_server/controllers/slices_controller.py b/fabric_cf/orchestrator/swagger_server/controllers/slices_controller.py index 7b1dd8dc..ec643207 100644 --- a/fabric_cf/orchestrator/swagger_server/controllers/slices_controller.py +++ b/fabric_cf/orchestrator/swagger_server/controllers/slices_controller.py @@ -1,6 +1,5 @@ import connexion -from fabric_cf.orchestrator.swagger_server.models import SlicesPost from fabric_cf.orchestrator.swagger_server.models.slice_details import SliceDetails # noqa: E501 from fabric_cf.orchestrator.swagger_server.models.slices import Slices # noqa: E501 from fabric_cf.orchestrator.swagger_server.models.slices_post import SlicesPost # noqa: E501 @@ -33,10 +32,10 @@ def slices_create_post(body, name, ssh_key, lease_end_time=None): # noqa: E501 post_body = SlicesPost() post_body.graph_model = body.decode("utf-8") post_body.ssh_keys = [ssh_key] - return rc.slices_create_post(post_body, name, lease_end_time) + return rc.slices_create_post(body=post_body, name=name, lease_end_time=lease_end_time) -def slices_creates_post(body, name, lease_end_time=None): # noqa: E501 +def slices_creates_post(body, name, lease_start_time=None, lease_end_time=None): # noqa: E501 """Create slice Request to create slice as described in the request. Request would be a graph ML describing the requested resources. @@ -50,6 +49,8 @@ def slices_creates_post(body, name, lease_end_time=None): # noqa: E501 :type body: dict | bytes :param name: Slice Name :type name: str + :param lease_start_time: Lease Start Time for the Slice + :type lease_start_time: str :param lease_end_time: Lease End Time for the Slice :type lease_end_time: str @@ -57,7 +58,7 @@ def slices_creates_post(body, name, lease_end_time=None): # noqa: E501 """ if connexion.request.is_json: body = SlicesPost.from_dict(connexion.request.get_json()) # noqa: E501 - return rc.slices_create_post(body, name, lease_end_time) + return rc.slices_create_post(body=body, name=name, lease_start_time=lease_start_time, lease_end_time=lease_end_time) def slices_delete_delete(): # noqa: E501 @@ -86,14 +87,17 @@ def slices_delete_slice_id_delete(slice_id): # noqa: E501 return rc.slices_delete_slice_id_delete(slice_id) -def slices_get(name=None, as_self=None, states=None, limit=None, offset=None): # noqa: E501 +def slices_get(name=None, search=None, exact_match=None, as_self=None, states=None, limit=None, offset=None): # noqa: E501 """Retrieve a listing of user slices - Retrieve a listing of user slices. It returns list of all slices belonging to all members in a project when - 'as_self' is False otherwise returns only the all user's slices in a project. # noqa: E501 + Retrieve a listing of user slices. It returns a list of all slices belonging to all members in a project when 'as_self' is False; otherwise, it returns only the user's own slices in the project.
# noqa: E501 :param name: Search for Slices with the name :type name: str + :param search: search term applied + :type search: str + :param exact_match: Exact Match for Search term + :type exact_match: bool :param as_self: GET object as Self :type as_self: bool :param states: Search for Slices in the specified states @@ -105,7 +109,8 @@ def slices_get(name=None, as_self=None, states=None, limit=None, offset=None): :rtype: Slices """ - return rc.slices_get(name, states, limit, offset, as_self=as_self) + return rc.slices_get(name=name, states=states, limit=limit, offset=offset, as_self=as_self, + search=search, exact_match=exact_match) def slices_modify_slice_id_accept_post(slice_id): # noqa: E501 diff --git a/fabric_cf/orchestrator/swagger_server/models/__init__.py b/fabric_cf/orchestrator/swagger_server/models/__init__.py index a2ec9497..a43cb545 100644 --- a/fabric_cf/orchestrator/swagger_server/models/__init__.py +++ b/fabric_cf/orchestrator/swagger_server/models/__init__.py @@ -3,6 +3,7 @@ # flake8: noqa from __future__ import absolute_import # import models into model package +from fabric_cf.orchestrator.swagger_server.models.metrics import Metrics from fabric_cf.orchestrator.swagger_server.models.poa import Poa from fabric_cf.orchestrator.swagger_server.models.poa_data import PoaData from fabric_cf.orchestrator.swagger_server.models.poa_post import PoaPost diff --git a/fabric_cf/orchestrator/swagger_server/models/metrics.py b/fabric_cf/orchestrator/swagger_server/models/metrics.py new file mode 100644 index 00000000..84e52901 --- /dev/null +++ b/fabric_cf/orchestrator/swagger_server/models/metrics.py @@ -0,0 +1,141 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from fabric_cf.orchestrator.swagger_server.models.base_model_ import Model +from fabric_cf.orchestrator.swagger_server.models.status200_ok_single import Status200OkSingle # noqa: F401,E501 +from fabric_cf.orchestrator.swagger_server import util + + +class Metrics(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, size: int=1, status: int=200, type: str=None, results: List[object]=None): # noqa: E501 + """Metrics - a model defined in Swagger + + :param size: The size of this Metrics. # noqa: E501 + :type size: int + :param status: The status of this Metrics. # noqa: E501 + :type status: int + :param type: The type of this Metrics. # noqa: E501 + :type type: str + :param results: The results of this Metrics. # noqa: E501 + :type results: List[object] + """ + self.swagger_types = { + 'size': int, + 'status': int, + 'type': str, + 'results': List[object] + } + + self.attribute_map = { + 'size': 'size', + 'status': 'status', + 'type': 'type', + 'results': 'results' + } + self._size = size + self._status = status + self._type = type + self._results = results + + @classmethod + def from_dict(cls, dikt) -> 'Metrics': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The metrics of this Metrics. # noqa: E501 + :rtype: Metrics + """ + return util.deserialize_model(dikt, cls) + + @property + def size(self) -> int: + """Gets the size of this Metrics. + + + :return: The size of this Metrics. + :rtype: int + """ + return self._size + + @size.setter + def size(self, size: int): + """Sets the size of this Metrics. + + + :param size: The size of this Metrics. 
+ :type size: int + """ + + self._size = size + + @property + def status(self) -> int: + """Gets the status of this Metrics. + + + :return: The status of this Metrics. + :rtype: int + """ + return self._status + + @status.setter + def status(self, status: int): + """Sets the status of this Metrics. + + + :param status: The status of this Metrics. + :type status: int + """ + + self._status = status + + @property + def type(self) -> str: + """Gets the type of this Metrics. + + + :return: The type of this Metrics. + :rtype: str + """ + return self._type + + @type.setter + def type(self, type: str): + """Sets the type of this Metrics. + + + :param type: The type of this Metrics. + :type type: str + """ + + self._type = type + + @property + def results(self) -> List[object]: + """Gets the results of this Metrics. + + + :return: The results of this Metrics. + :rtype: List[object] + """ + return self._results + + @results.setter + def results(self, results: List[object]): + """Sets the results of this Metrics. + + + :param results: The results of this Metrics. + :type results: List[object] + """ + + self._results = results diff --git a/fabric_cf/orchestrator/swagger_server/models/slice.py b/fabric_cf/orchestrator/swagger_server/models/slice.py index aaea0b63..baeb3479 100644 --- a/fabric_cf/orchestrator/swagger_server/models/slice.py +++ b/fabric_cf/orchestrator/swagger_server/models/slice.py @@ -14,7 +14,7 @@ class Slice(Model): Do not edit the class manually. """ - def __init__(self, model: str=None, lease_start_time: str=None, lease_end_time: str=None, state: str=None, project_id: str=None, project_name: str=None, graph_id: str=None, name: str=None, slice_id: str=None): # noqa: E501 + def __init__(self, model: str=None, lease_start_time: str=None, lease_end_time: str=None, state: str=None, project_id: str=None, project_name: str=None, graph_id: str=None, name: str=None, slice_id: str=None, owner_user_id: str=None, owner_email: str=None): # noqa: E501 """Slice - a model defined in Swagger :param model: The model of this Slice. # noqa: E501 @@ -35,6 +35,10 @@ def __init__(self, model: str=None, lease_start_time: str=None, lease_end_time: :type name: str :param slice_id: The slice_id of this Slice. # noqa: E501 :type slice_id: str + :param owner_user_id: The owner_user_id of this Slice. # noqa: E501 + :type owner_user_id: str + :param owner_email: The owner_email of this Slice. 
# noqa: E501 + :type owner_email: str """ self.swagger_types = { 'model': str, @@ -45,7 +49,9 @@ def __init__(self, model: str=None, lease_start_time: str=None, lease_end_time: 'project_name': str, 'graph_id': str, 'name': str, - 'slice_id': str + 'slice_id': str, + 'owner_user_id': str, + 'owner_email': str } self.attribute_map = { @@ -57,7 +63,9 @@ def __init__(self, model: str=None, lease_start_time: str=None, lease_end_time: 'project_name': 'project_name', 'graph_id': 'graph_id', 'name': 'name', - 'slice_id': 'slice_id' + 'slice_id': 'slice_id', + 'owner_user_id': 'owner_user_id', + 'owner_email': 'owner_email' } self._model = model self._lease_start_time = lease_start_time @@ -68,6 +76,8 @@ def __init__(self, model: str=None, lease_start_time: str=None, lease_end_time: self._graph_id = graph_id self._name = name self._slice_id = slice_id + self._owner_user_id = owner_user_id + self._owner_email = owner_email @classmethod def from_dict(cls, dikt) -> 'Slice': @@ -274,3 +284,45 @@ def slice_id(self, slice_id: str): raise ValueError("Invalid value for `slice_id`, must not be `None`") # noqa: E501 self._slice_id = slice_id + + @property + def owner_user_id(self) -> str: + """Gets the owner_user_id of this Slice. + + + :return: The owner_user_id of this Slice. + :rtype: str + """ + return self._owner_user_id + + @owner_user_id.setter + def owner_user_id(self, owner_user_id: str): + """Sets the owner_user_id of this Slice. + + + :param owner_user_id: The owner_user_id of this Slice. + :type owner_user_id: str + """ + + self._owner_user_id = owner_user_id + + @property + def owner_email(self) -> str: + """Gets the owner_email of this Slice. + + + :return: The owner_email of this Slice. + :rtype: str + """ + return self._owner_email + + @owner_email.setter + def owner_email(self, owner_email: str): + """Sets the owner_email of this Slice. + + + :param owner_email: The owner_email of this Slice. + :type owner_email: str + """ + + self._owner_email = owner_email diff --git a/fabric_cf/orchestrator/swagger_server/models/sliver.py b/fabric_cf/orchestrator/swagger_server/models/sliver.py index d2dbd079..de11dce9 100644 --- a/fabric_cf/orchestrator/swagger_server/models/sliver.py +++ b/fabric_cf/orchestrator/swagger_server/models/sliver.py @@ -14,7 +14,7 @@ class Sliver(Model): Do not edit the class manually. """ - def __init__(self, notice: str=None, sliver_type: str=None, sliver: object=None, lease_start_time: str=None, lease_end_time: str=None, state: str=None, pending_state: str=None, join_state: str=None, graph_node_id: str=None, slice_id: str=None, sliver_id: str=None): # noqa: E501 + def __init__(self, notice: str=None, sliver_type: str=None, sliver: object=None, lease_start_time: str=None, lease_end_time: str=None, state: str=None, pending_state: str=None, join_state: str=None, graph_node_id: str=None, slice_id: str=None, sliver_id: str=None, owner_user_id: str=None, owner_email: str=None): # noqa: E501 """Sliver - a model defined in Swagger :param notice: The notice of this Sliver. # noqa: E501 @@ -39,6 +39,10 @@ def __init__(self, notice: str=None, sliver_type: str=None, sliver: object=None, :type slice_id: str :param sliver_id: The sliver_id of this Sliver. # noqa: E501 :type sliver_id: str + :param owner_user_id: The owner_user_id of this Sliver. # noqa: E501 + :type owner_user_id: str + :param owner_email: The owner_email of this Sliver. 
# noqa: E501 + :type owner_email: str """ self.swagger_types = { 'notice': str, @@ -51,7 +55,9 @@ def __init__(self, notice: str=None, sliver_type: str=None, sliver: object=None, 'join_state': str, 'graph_node_id': str, 'slice_id': str, - 'sliver_id': str + 'sliver_id': str, + 'owner_user_id': str, + 'owner_email': str } self.attribute_map = { @@ -65,7 +71,9 @@ def __init__(self, notice: str=None, sliver_type: str=None, sliver: object=None, 'join_state': 'join_state', 'graph_node_id': 'graph_node_id', 'slice_id': 'slice_id', - 'sliver_id': 'sliver_id' + 'sliver_id': 'sliver_id', + 'owner_user_id': 'owner_user_id', + 'owner_email': 'owner_email' } self._notice = notice self._sliver_type = sliver_type @@ -78,6 +86,8 @@ def __init__(self, notice: str=None, sliver_type: str=None, sliver: object=None, self._graph_node_id = graph_node_id self._slice_id = slice_id self._sliver_id = sliver_id + self._owner_user_id = owner_user_id + self._owner_email = owner_email @classmethod def from_dict(cls, dikt) -> 'Sliver': @@ -326,3 +336,45 @@ def sliver_id(self, sliver_id: str): raise ValueError("Invalid value for `sliver_id`, must not be `None`") # noqa: E501 self._sliver_id = sliver_id + + @property + def owner_user_id(self) -> str: + """Gets the owner_user_id of this Sliver. + + + :return: The owner_user_id of this Sliver. + :rtype: str + """ + return self._owner_user_id + + @owner_user_id.setter + def owner_user_id(self, owner_user_id: str): + """Sets the owner_user_id of this Sliver. + + + :param owner_user_id: The owner_user_id of this Sliver. + :type owner_user_id: str + """ + + self._owner_user_id = owner_user_id + + @property + def owner_email(self) -> str: + """Gets the owner_email of this Sliver. + + + :return: The owner_email of this Sliver. + :rtype: str + """ + return self._owner_email + + @owner_email.setter + def owner_email(self, owner_email: str): + """Sets the owner_email of this Sliver. + + + :param owner_email: The owner_email of this Sliver. 
+ :type owner_email: str + """ + + self._owner_email = owner_email diff --git a/fabric_cf/orchestrator/swagger_server/response/constants.py b/fabric_cf/orchestrator/swagger_server/response/constants.py index 32d33c99..62d8c88a 100644 --- a/fabric_cf/orchestrator/swagger_server/response/constants.py +++ b/fabric_cf/orchestrator/swagger_server/response/constants.py @@ -50,3 +50,5 @@ POAS_GET_POA_ID_PATH = '/poas/{poa_id}' VERSIONS_PATH = '/version' + +METRICS_GET_PATH = '/metrics/overview' diff --git a/fabric_cf/orchestrator/swagger_server/response/cors_response.py b/fabric_cf/orchestrator/swagger_server/response/cors_response.py index edf309e6..8efc4ed8 100644 --- a/fabric_cf/orchestrator/swagger_server/response/cors_response.py +++ b/fabric_cf/orchestrator/swagger_server/response/cors_response.py @@ -8,7 +8,7 @@ from fabric_cf.orchestrator.swagger_server.models import Resources, Slices, Slivers, Version, Status200OkNoContent, \ SliceDetails, Status200OkNoContentData, Status400BadRequestErrors, Status400BadRequest, Status401UnauthorizedErrors, \ Status401Unauthorized, Status403ForbiddenErrors, Status403Forbidden, Status404NotFoundErrors, Status404NotFound, \ - Status500InternalServerErrorErrors, Status500InternalServerError + Status500InternalServerErrorErrors, Status500InternalServerError, Metrics _INDENT = int(os.getenv('OC_API_JSON_RESPONSE_INDENT', '4')) @@ -51,7 +51,7 @@ def cors_response(req: request, status_code: int = 200, body: object = None, x_e def cors_200(response_body: Union[Resources, Slices, SliceDetails, Slivers, Version, - Status200OkNoContent] = None) -> cors_response: + Status200OkNoContent, Metrics] = None) -> cors_response: """ Return 200 - OK """ diff --git a/fabric_cf/orchestrator/swagger_server/response/metrics_controller.py b/fabric_cf/orchestrator/swagger_server/response/metrics_controller.py new file mode 100644 index 00000000..7f9f74a1 --- /dev/null +++ b/fabric_cf/orchestrator/swagger_server/response/metrics_controller.py @@ -0,0 +1,58 @@ +from typing import List + +from fabric_cf.orchestrator.swagger_server.response.utils import get_token, cors_error_response, cors_success_response + +from fabric_cf.orchestrator.swagger_server.response.constants import GET_METHOD, METRICS_GET_PATH + +from fabric_cf.orchestrator.swagger_server import received_counter, success_counter, failure_counter + +from fabric_cf.orchestrator.core.orchestrator_handler import OrchestratorHandler + +from fabric_cf.orchestrator.swagger_server.response.cors_response import cors_200 + +from fabric_cf.orchestrator.swagger_server.models import Metrics + + +def metrics_overview_get(excluded_projects: List[str] = None) -> Metrics: # noqa: E501 + """Control Framework metrics overview + { + "results": [ + { + "last_updated": "2024-04-02 19:50:00.00+00", + "slices": { + "active_cumulative": 164, + "non_active_cumulative": 0 + } + } + ], + "size": 1, + "status": 200, + "type": "metrics.overview" + } + + :rtype: Metrics + """ + handler = OrchestratorHandler() + logger = handler.get_logger() + received_counter.labels(GET_METHOD, METRICS_GET_PATH).inc() + try: + token = get_token() + metrics = handler.get_metrics_overview(token=token, excluded_projects=excluded_projects) + response = Metrics() + if metrics: + if isinstance(metrics, list): + response.results = metrics + else: + response.results = [metrics] + else: + response.results = [] + + response.size = len(response.results) + response.status = 200 + response.type = 'metrics.overview' + success_counter.labels(GET_METHOD, METRICS_GET_PATH).inc() + 
return cors_success_response(response_body=response) + except Exception as e: + logger.exception(e) + failure_counter.labels(GET_METHOD, METRICS_GET_PATH).inc() + return cors_error_response(error=e) diff --git a/fabric_cf/orchestrator/swagger_server/response/resources_controller.py b/fabric_cf/orchestrator/swagger_server/response/resources_controller.py index 16ce2e9c..d2749c16 100644 --- a/fabric_cf/orchestrator/swagger_server/response/resources_controller.py +++ b/fabric_cf/orchestrator/swagger_server/response/resources_controller.py @@ -33,13 +33,26 @@ from fabric_cf.orchestrator.swagger_server.response.utils import get_token, cors_error_response, cors_success_response -def portalresources_get(graph_format) -> Resources: # noqa: E501 +def portalresources_get(graph_format: str, level: int = 1, force_refresh: bool = False, start_date: str = None, + end_date: str = None, includes: str = None, excludes: str = None) -> Resources: # noqa: E501 """Retrieve a listing and description of available resources for portal Retrieve a listing and description of available resources for portal # noqa: E501 :param graph_format: graph format :type graph_format: str + :param level: Level of details + :type level: int + :param force_refresh: Force to retrieve current available resource information. + :type force_refresh: bool + :param start_date: starting date to check availability from + :type start_date: str + :param end_date: end date to check availability until + :type end_date: str + :param includes: comma separated lists of sites to include + :type includes: str + :param excludes: comma separated lists of sites to exclude + :type excludes: str :rtype: Resources """ @@ -47,9 +60,12 @@ def portalresources_get(graph_format) -> Resources: # noqa: E501 logger = handler.get_logger() received_counter.labels(GET_METHOD, PORTAL_RESOURCES_PATH).inc() try: - bqm_dict = handler.portal_list_resources(graph_format_str=graph_format) + start = handler.validate_lease_time(lease_time=start_date) + end = handler.validate_lease_time(lease_time=end_date) + model = handler.list_resources(graph_format_str=graph_format, level=level, force_refresh=force_refresh, + start=start, end=end, includes=includes, excludes=excludes, authorize=False) response = Resources() - response.data = [Resource().from_dict(bqm_dict)] + response.data = [Resource(model)] response.size = 1 response.type = "resources" success_counter.labels(GET_METHOD, PORTAL_RESOURCES_PATH).inc() @@ -64,7 +80,8 @@ def portalresources_get(graph_format) -> Resources: # noqa: E501 return cors_error_response(error=e) -def resources_get(level, force_refresh) -> Resources: # noqa: E501 +def resources_get(level: int = 1, force_refresh: bool = False, start_date: str = None, + end_date: str = None, includes: str = None, excludes: str = None) -> Resources: # noqa: E501 """Retrieve a listing and description of available resources Retrieve a listing and description of available resources # noqa: E501 @@ -73,6 +90,14 @@ def resources_get(level, force_refresh) -> Resources: # noqa: E501 :type level: int :param force_refresh: Force to retrieve current available resource information. 
:type force_refresh: bool + :param start_date: starting date to check availability from + :type start_date: str + :param end_date: end date to check availability until + :type end_date: str + :param includes: comma separated lists of sites to include + :type includes: str + :param excludes: comma separated lists of sites to exclude + :type excludes: str :rtype: Resources """ @@ -81,9 +106,12 @@ def resources_get(level, force_refresh) -> Resources: # noqa: E501 received_counter.labels(GET_METHOD, RESOURCES_PATH).inc() try: token = get_token() - bqm_dict = handler.list_resources(token=token, level=level, force_refresh=force_refresh) + start = handler.validate_lease_time(lease_time=start_date) + end = handler.validate_lease_time(lease_time=end_date) + model = handler.list_resources(token=token, level=level, force_refresh=force_refresh, + start=start, end=end, includes=includes, excludes=excludes) response = Resources() - response.data = [Resource().from_dict(bqm_dict)] + response.data = [Resource(model)] response.size = 1 response.type = "resources" success_counter.labels(GET_METHOD, RESOURCES_PATH).inc() diff --git a/fabric_cf/orchestrator/swagger_server/response/slices_controller.py b/fabric_cf/orchestrator/swagger_server/response/slices_controller.py index 17d8d0ee..4abd9104 100644 --- a/fabric_cf/orchestrator/swagger_server/response/slices_controller.py +++ b/fabric_cf/orchestrator/swagger_server/response/slices_controller.py @@ -23,6 +23,7 @@ # # # Author: Komal Thareja (kthare10@renci.org) +from typing import List from fabric_cf.orchestrator.core.exceptions import OrchestratorException from fabric_cf.orchestrator.core.orchestrator_handler import OrchestratorHandler @@ -38,7 +39,8 @@ from fabric_cf.orchestrator.swagger_server.response.utils import get_token, cors_error_response, cors_success_response -def slices_create_post(body: SlicesPost, name, lease_end_time) -> Slivers: # noqa: E501 +def slices_create_post(body: SlicesPost, name: str, lease_start_time: str = None, + lease_end_time: str = None) -> Slivers: # noqa: E501 """Create slice Request to create slice as described in the request. Request would be a graph ML describing the requested resources. @@ -48,11 +50,13 @@ def slices_create_post(body: SlicesPost, name, lease_end_time) -> Slivers: # no resources asynchronously on the appropriate sites either now or in the future as requested. Experimenter can invoke get slice API to get the latest state of the requested resources. 
# noqa: E501 - :param body: + :param body: Create new Slice - :type body: SlicesPost + :type body: dict | bytes :param name: Slice Name :type name: str - :param lease_end_time: New Lease End Time for the Slice + :param lease_start_time: Lease Start Time for the Slice + :type lease_start_time: str + :param lease_end_time: Lease End Time for the Slice :type lease_end_time: str :rtype: Slivers @@ -64,8 +68,11 @@ def slices_create_post(body: SlicesPost, name, lease_end_time) -> Slivers: # no try: token = get_token() ssh_key = ','.join(body.ssh_keys) + start = handler.validate_lease_time(lease_time=lease_start_time) + end = handler.validate_lease_time(lease_time=lease_end_time) slivers_dict = handler.create_slice(token=token, slice_name=name, slice_graph=body.graph_model, - lease_end_time=lease_end_time, ssh_key=ssh_key) + lease_start_time=start, lease_end_time=end, + ssh_key=ssh_key) response = Slivers() response.data = [] for s in slivers_dict: @@ -158,21 +165,27 @@ def slices_delete_slice_id_delete(slice_id) -> Status200OkNoContent: # noqa: E5 return cors_error_response(error=e) -def slices_get(name=None, states=None, limit=None, offset=None, as_self=True) -> Slices: # noqa: E501 +def slices_get(name: str = None, search: str = None, exact_match: bool = False, + as_self: bool = True, states: List[str] = None, limit: int = 5, offset: int = 0): # noqa: E501 """Retrieve a listing of user slices - Retrieve a listing of user slices # noqa: E501 + Retrieve a listing of user slices. It returns a list of all slices belonging to all members in a project when + 'as_self' is False; otherwise, it returns only the user's own slices in the project. # noqa: E501 :param name: Search for Slices with the name :type name: str + :param search: search term applied + :type search: str + :param exact_match: Exact Match for Search term + :type exact_match: bool + :param as_self: GET object as Self + :type as_self: bool :param states: Search for Slices in the specified states :type states: List[str] :param limit: maximum number of results to return per page (1 or more) :type limit: int :param offset: number of items to skip before starting to collect the result set :type offset: int - :param as_self: GET object as Self - :type as_self: bool :rtype: Slices """ @@ -182,7 +195,7 @@ def slices_get(name=None, states=None, limit=None, offset=None, as_self=True) -> try: token = get_token() slices_dict = handler.get_slices(token=token, states=states, name=name, limit=limit, offset=offset, - as_self=as_self) + as_self=as_self, search=search, exact_match=exact_match) response = Slices() response.data = [] response.type = 'slices' @@ -299,7 +312,8 @@ def slices_renew_slice_id_post(slice_id, lease_end_time) -> Status200OkNoContent try: token = get_token() - handler.renew_slice(token=token, slice_id=slice_id, new_lease_end_time=lease_end_time) + end = handler.validate_lease_time(lease_time=lease_end_time) + handler.renew_slice(token=token, slice_id=slice_id, new_lease_end_time=end) success_counter.labels(POST_METHOD, SLICES_RENEW_PATH).inc() slice_info = Status200OkNoContentData() diff --git a/fabric_cf/orchestrator/swagger_server/swagger/swagger.yaml b/fabric_cf/orchestrator/swagger_server/swagger/swagger.yaml index 89bfbccd..687ef854 100644 --- a/fabric_cf/orchestrator/swagger_server/swagger/swagger.yaml +++ b/fabric_cf/orchestrator/swagger_server/swagger/swagger.yaml @@ -16,6 +16,8 @@ servers: - url: https://virtserver.swaggerhub.com/kthare10/orchestrator/1.0.1 description: SwaggerHub API Auto Mocking tags: +- name: metrics + description:
Control Framework Metrics - name: slices description: Slices in FABRIC - name: slivers @@ -46,6 +48,62 @@ paths: schema: $ref: '#/components/schemas/status_500_internal_server_error' x-openapi-router-controller: fabric_cf.orchestrator.swagger_server.controllers.version_controller + /metrics/overview: + get: + tags: + - metrics + summary: Control Framework metrics overview + description: Control Framework metrics overview + operationId: metrics_overview_get + parameters: + - name: excluded_projects + in: query + description: List of projects to exclude from the metrics overview + required: false + style: form + explode: true + schema: + type: array + items: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/metrics' + "400": + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/status_400_bad_request' + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/status_401_unauthorized' + "403": + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/status_403_forbidden' + "404": + description: Not Found + content: + application/json: + schema: + $ref: '#/components/schemas/status_404_not_found' + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/status_500_internal_server_error' + x-openapi-router-controller: fabric_cf.orchestrator.swagger_server.controllers.metrics_controller /resources: get: tags: @@ -76,6 +134,42 @@ paths: schema: type: boolean default: false + - name: start_date + in: query + description: starting date to check availability from + required: false + style: form + explode: true + schema: + type: string + example: 2023-01-01 16:20:15 +00:00 + - name: end_date + in: query + description: end date to check availability until + required: false + style: form + explode: true + schema: + type: string + example: 2023-01-01 16:20:15 +00:00 + - name: includes + in: query + description: comma separated lists of sites to include + required: false + style: form + explode: true + schema: + type: string + example: "RENC,UKY" + - name: excludes + in: query + description: comma separated lists of sites to exclude + required: false + style: form + explode: true + schema: + type: string + example: "SRI,LBNL" responses: "200": description: OK @@ -137,6 +231,60 @@ paths: - JSON_NODELINK - CYTOSCAPE default: GRAPHML + - name: level + in: query + description: Level of details + required: false + style: form + explode: true + schema: + type: integer + default: 1 + - name: force_refresh + in: query + description: Force to retrieve current available resource information. 
+ required: false + style: form + explode: true + schema: + type: boolean + default: false + - name: start_date + in: query + description: starting date to check availability from + required: false + style: form + explode: true + schema: + type: string + example: 2023-01-01 16:20:15 +00:00 + - name: end_date + in: query + description: end date to check availability until + required: false + style: form + explode: true + schema: + type: string + example: 2023-01-01 16:20:15 +00:00 + - name: includes + in: query + description: comma separated lists of sites to include + required: false + style: form + explode: true + schema: + type: string + example: "RENC,UKY" + - name: excludes + in: query + description: comma separated lists of sites to exclude + required: false + style: form + explode: true + schema: + type: string + example: "SRI,LBNL" responses: "200": description: OK @@ -194,6 +342,23 @@ paths: schema: minLength: 3 type: string + - name: search + in: query + description: search term applied + required: false + style: form + explode: true + schema: + type: string + - name: exact_match + in: query + description: Exact Match for Search term + required: false + style: form + explode: true + schema: + type: boolean + default: false - name: as_self in: query description: GET object as Self @@ -223,6 +388,8 @@ paths: - Modifying - ModifyOK - ModifyError + - AllocatedOK + - AllocatedError - All - name: limit in: query @@ -474,6 +641,15 @@ paths: schema: minLength: 3 type: string + - name: lease_start_time + in: query + description: Lease Start Time for the Slice + required: false + style: form + explode: true + schema: + type: string + example: 2023-01-01 16:20:15 +00:00 - name: lease_end_time in: query description: Lease End Time for the Slice @@ -482,6 +658,7 @@ explode: true schema: type: string + example: 2023-01-01 16:20:15 +00:00 requestBody: $ref: '#/components/requestBodies/payload_slices_create' responses: @@ -1388,6 +1565,16 @@ components: default: Internal Server Error details: type: string + metrics: + type: object + allOf: + - $ref: '#/components/schemas/status_200_ok_single' + - type: object + properties: + results: + type: array + items: + type: object slices: type: object allOf: @@ -1433,6 +1620,10 @@ type: string slice_id: type: string + owner_user_id: + type: string + owner_email: + type: string slivers: type: object allOf: @@ -1472,6 +1663,10 @@ type: string sliver_id: type: string + owner_user_id: + type: string + owner_email: + type: string version: type: object allOf: @@ -1575,7 +1770,7 @@ components: requestBodies: Request: content: - text/plain: + application/json: schema: type: string required: true diff --git a/fabric_cf/orchestrator/swagger_server/test/test_metrics_controller.py b/fabric_cf/orchestrator/swagger_server/test/test_metrics_controller.py new file mode 100644 index 00000000..80552d13 --- /dev/null +++ b/fabric_cf/orchestrator/swagger_server/test/test_metrics_controller.py @@ -0,0 +1,36 @@ +# coding: utf-8 + +from __future__ import absolute_import + +from flask import json +from six import BytesIO + +from fabric_cf.orchestrator.swagger_server.models.metrics import Metrics # noqa: E501 +from fabric_cf.orchestrator.swagger_server.models.status400_bad_request import Status400BadRequest # noqa: E501 +from fabric_cf.orchestrator.swagger_server.models.status401_unauthorized import Status401Unauthorized # noqa: E501 +from fabric_cf.orchestrator.swagger_server.models.status403_forbidden import Status403Forbidden # noqa: E501 +from
fabric_cf.orchestrator.swagger_server.models.status404_not_found import Status404NotFound # noqa: E501 +from fabric_cf.orchestrator.swagger_server.models.status500_internal_server_error import Status500InternalServerError # noqa: E501 +from fabric_cf.orchestrator.swagger_server.test import BaseTestCase + + +class TestMetricsController(BaseTestCase): + """MetricsController integration test stubs""" + + def test_metrics_overview_get(self): + """Test case for metrics_overview_get + + Control Framework metrics overview + """ + query_string = [('excluded_projects', 'excluded_projects_example')] + response = self.client.open( + '//metrics/overview', + method='GET', + query_string=query_string) + self.assert200(response, + 'Response body is : ' + response.data.decode('utf-8')) + + +if __name__ == '__main__': + import unittest + unittest.main() diff --git a/fabric_cf/orchestrator/swagger_server/test/test_resources_controller.py b/fabric_cf/orchestrator/swagger_server/test/test_resources_controller.py index 9e33f8e1..6285d36b 100644 --- a/fabric_cf/orchestrator/swagger_server/test/test_resources_controller.py +++ b/fabric_cf/orchestrator/swagger_server/test/test_resources_controller.py @@ -22,7 +22,13 @@ def test_portalresources_get(self): Retrieve a listing and description of available resources for portal """ - query_string = [('graph_format', 'GRAPHML')] + query_string = [('graph_format', 'GRAPHML'), + ('level', 1), + ('force_refresh', false), + ('start_date', 'start_date_example'), + ('end_date', 'end_date_example'), + ('includes', 'includes_example'), + ('excludes', 'excludes_example')] response = self.client.open( '//portalresources', method='GET', @@ -36,7 +42,11 @@ def test_resources_get(self): Retrieve a listing and description of available resources. By default, a cached available resource information is returned. User can force to request the current available resources. 
""" query_string = [('level', 1), - ('force_refresh', false)] + ('force_refresh', false), + ('start_date', 'start_date_example'), + ('end_date', 'end_date_example'), + ('includes', 'includes_example'), + ('excludes', 'excludes_example')] response = self.client.open( '//resources', method='GET', diff --git a/fabric_cf/orchestrator/swagger_server/test/test_slices_controller.py b/fabric_cf/orchestrator/swagger_server/test/test_slices_controller.py index 80febabc..88edcbc9 100644 --- a/fabric_cf/orchestrator/swagger_server/test/test_slices_controller.py +++ b/fabric_cf/orchestrator/swagger_server/test/test_slices_controller.py @@ -34,7 +34,7 @@ def test_slices_create_post(self): '//slices/create', method='POST', data=json.dumps(body), - content_type='text/plain', + content_type='application/json', query_string=query_string) self.assert200(response, 'Response body is : ' + response.data.decode('utf-8')) @@ -46,6 +46,7 @@ def test_slices_creates_post(self): """ body = SlicesPost() query_string = [('name', 'name_example'), + ('lease_start_time', 'lease_start_time_example'), ('lease_end_time', 'lease_end_time_example')] response = self.client.open( '//slices/creates', @@ -84,6 +85,8 @@ def test_slices_get(self): Retrieve a listing of user slices """ query_string = [('name', 'name_example'), + ('search', 'search_example'), + ('exact_match', false), ('as_self', true), ('states', 'states_example'), ('limit', 200), diff --git a/fabric_cf/orchestrator/test/test.yaml b/fabric_cf/orchestrator/test/test.yaml index d6f56cae..f8641d73 100644 --- a/fabric_cf/orchestrator/test/test.yaml +++ b/fabric_cf/orchestrator/test/test.yaml @@ -48,6 +48,7 @@ runtime: commit.batch.size: 1 enable.auto.commit: False consumer.poll.timeout: 250 + infrastructure.project.id: 4604cab7-41ff-4c1a-a935-0ca6f20cceeb logging: ## The directory in which actor should create log files. 
@@ -99,6 +100,7 @@ container: bqm: # in seconds (default set to 300 seconds) refresh-interval: 300 + local: True time: # This section controls settings, which are generally useful diff --git a/psql.upgrade b/psql.upgrade index 34c3f7b2..05603177 100644 --- a/psql.upgrade +++ b/psql.upgrade @@ -21,3 +21,67 @@ CREATE TABLE IF NOT EXISTS "Components" ( ); ALTER TABLE "Reservations" ADD COLUMN IF NOT EXISTS components VARCHAR; + + +-- Add new columns with the TIMESTAMP WITH TIME ZONE data type +ALTER TABLE "Reservations" ADD COLUMN lease_start_with_tz TIMESTAMPTZ; +ALTER TABLE "Reservations" ADD COLUMN lease_end_with_tz TIMESTAMPTZ; + +-- Update the new columns with data from the existing columns +UPDATE "Reservations" SET lease_start_with_tz = lease_start::TIMESTAMPTZ; +UPDATE "Reservations" SET lease_end_with_tz = lease_end::TIMESTAMPTZ; + +-- Drop the existing columns +ALTER TABLE "Reservations" DROP COLUMN lease_start; +ALTER TABLE "Reservations" DROP COLUMN lease_end; + +-- Rename the new columns to the original column names +ALTER TABLE "Reservations" RENAME COLUMN lease_start_with_tz TO lease_start; +ALTER TABLE "Reservations" RENAME COLUMN lease_end_with_tz TO lease_end; + + +ALTER TABLE "Slices" ADD COLUMN IF NOT EXISTS components VARCHAR; + + +-- Add new columns with the TIMESTAMP WITH TIME ZONE data type +ALTER TABLE "Slices" ADD COLUMN lease_start_with_tz TIMESTAMPTZ; +ALTER TABLE "Slices" ADD COLUMN lease_end_with_tz TIMESTAMPTZ; + +-- Update the new columns with data from the existing columns +UPDATE "Slices" SET lease_start_with_tz = lease_start::TIMESTAMPTZ; +UPDATE "Slices" SET lease_end_with_tz = lease_end::TIMESTAMPTZ; + +-- Drop the existing columns +ALTER TABLE "Slices" DROP COLUMN lease_start; +ALTER TABLE "Slices" DROP COLUMN lease_end; + +-- Rename the new columns to the original column names +ALTER TABLE "Slices" RENAME COLUMN lease_start_with_tz TO lease_start; +ALTER TABLE "Slices" RENAME COLUMN lease_end_with_tz TO lease_end; + + + +-- Add new columns with the TIMESTAMP WITH TIME ZONE data type +ALTER TABLE "Poas" ADD COLUMN last_update_time_with_tz TIMESTAMPTZ; + +-- Update the new columns with data from the existing columns +UPDATE "Poas" SET last_update_time_with_tz = last_update_time::TIMESTAMPTZ; + +-- Drop the existing columns +ALTER TABLE "Poas" DROP COLUMN last_update_time; + +-- Rename the new columns to the original column names +ALTER TABLE "Poas" RENAME COLUMN last_update_time_with_tz TO last_update_time; + +CREATE TABLE IF NOT EXISTS "Metrics" ( + m_id INTEGER NOT NULL DEFAULT nextval('m_id') PRIMARY KEY, + user_id VARCHAR NOT NULL, + project_id VARCHAR NOT NULL, + slice_count INTEGER NOT NULL +); + +ALTER TABLE "Reservations" ADD COLUMN host VARCHAR(255) NULL; +ALTER TABLE "Reservations" ADD COLUMN ip_subnet VARCHAR(255) NULL; + +CREATE INDEX idx_host ON "Reservations"(host); +CREATE INDEX idx_ip_subnet ON "Reservations"(ip_subnet); \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 0e8dfc14..0603574a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,10 +26,10 @@ dependencies = [ "connexion==2.14.2", "swagger-ui-bundle==0.0.9", "PyYAML", - "fabric_fss_utils==1.5.0", - "fabric-message-bus==1.6.2", - "fabric-fim==1.6.1", - "fabric-credmgr-client==1.6.0", + "fabric_fss_utils==1.5.1", + "fabric-message-bus==1.7.0", + "fabric-fim==1.7.0", + "fabric-credmgr-client==1.6.1", "ansible" ]
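Example usage of the API surface introduced by this patch, given as a minimal illustrative Python sketch rather than part of the change set: it exercises the new /metrics/overview endpoint, the start_date/end_date/includes/excludes filters on /resources, and the lease_start_time parameter on /slices/creates. The orchestrator URL, bearer token, project UUID, and the SlicesPost body field names are assumptions made for illustration; timestamps follow the "2023-01-01 16:20:15 +00:00" format used in the swagger examples above.

import requests  # third-party HTTP client, assumed to be available

ORCHESTRATOR_URL = "https://orchestrator.example.net"  # placeholder deployment URL
HEADERS = {"Authorization": "Bearer <token>"}          # placeholder credential

# Control Framework metrics overview, optionally excluding a project (placeholder UUID).
metrics = requests.get(f"{ORCHESTRATOR_URL}/metrics/overview",
                       params={"excluded_projects": "<project-uuid>"},
                       headers=HEADERS)

# Resource availability over a time window, restricted to a subset of sites.
resources = requests.get(f"{ORCHESTRATOR_URL}/resources",
                         params={"level": 1,
                                 "force_refresh": False,
                                 "start_date": "2023-01-01 16:20:15 +00:00",
                                 "end_date": "2023-01-02 16:20:15 +00:00",
                                 "includes": "RENC,UKY",
                                 "excludes": "SRI,LBNL"},
                         headers=HEADERS)

# Create a slice with both a lease start and a lease end time (advance scheduling).
# Body field names assume the SlicesPost model used by /slices/creates (graph_model, ssh_keys).
slivers = requests.post(f"{ORCHESTRATOR_URL}/slices/creates",
                        params={"name": "my-slice",
                                "lease_start_time": "2023-01-01 16:20:15 +00:00",
                                "lease_end_time": "2023-01-02 16:20:15 +00:00"},
                        json={"graph_model": "<graphml goes here>",
                              "ssh_keys": ["ssh-rsa AAAA..."]},
                        headers=HEADERS)

for response in (metrics, resources, slivers):
    print(response.status_code, response.reason)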