diff --git a/apply_sfc_patch.sh b/apply_sfc_patch.sh new file mode 100644 index 00000000..47b8ea72 --- /dev/null +++ b/apply_sfc_patch.sh @@ -0,0 +1,128 @@ +#!/bin/sh + +VERSION="3.0.1" + +PATCH_PATH=$1 +PATCH_PORT=$2 + +Usage () { + echo -e "\n\nUsage: apply_sfc_patch.sh \n" + echo -e "\nThe patch SHOULD belong to version $VERSION and should be " + echo -e "applied ONLY on top of sfc rpm with version $VERSION.\n" +} + +# diff -u networking_sfc.rpm/db/sfc_db.py networking_sfc/db/sfc_db.py > sfc_db-3.0.1.patch +# diff -u networking_sfc.rpm/db/flowclassifier_db.py networking_sfc/db/flowclassifier_db.py >> sfc_db-3.0.1.patch +# diff -Naur -x "*.py?" networking_sfc.rpm/services/sfc/agent/extensions/oc/ networking_sfc/services/sfc/agent/extensions/oc/ > sfc_agent-3.0.1.patch +# diff -Nau networking_sfc.rpm/services/sfc/drivers/oc/ networking_sfc/services/sfc/drivers/oc/ > sfc_plugin-3.0.1.patch +# diff -Nau networking_sfc.rpm/services/flowclassifier/drivers/oc/ networking_sfc/services/flowclassifier/drivers/oc/ >> sfc_plugin-3.0.1.patch + +function backup_files_to_be_modifed(){ + echo "Backing up files that are to be modified while patching with $1." 
+ if [[ $1 == "sfc_db-$VERSION.patch" ]]; then + cp $PYTHON_MODULE_PATH/networking_sfc/db/sfc_db.py $PYTHON_MODULE_PATH/networking_sfc/db/sfc_db.py.bk + cp $PYTHON_MODULE_PATH/networking_sfc/db/flowclassifier_db.py $PYTHON_MODULE_PATH/networking_sfc/db/flowclassifier_db.py.bk + elif [[ $1 == "sfc_agent-$VERSION.patch" ]]; then + mkdir -p $PYTHON_MODULE_PATH/networking_sfc/services/sfc/agent/extensions/oc + cp $PYTHON_MODULE_PATH/networking_sfc/services/sfc/agent/extensions/oc/sfc_driver.py $PYTHON_MODULE_PATH/networking_sfc/services/sfc/agent/extensions/oc/sfc_driver.py.bk 2>/dev/null + touch $PYTHON_MODULE_PATH/networking_sfc/services/sfc/agent/extensions/oc/__init__.py + elif [[ $1 == "sfc_plugin-$VERSION.patch" ]]; then + mkdir -p $PYTHON_MODULE_PATH/networking_sfc/services/sfc/drivers/oc + mkdir -p $PYTHON_MODULE_PATH/networking_sfc/services/flowclassifier/drivers/oc + cp $PYTHON_MODULE_PATH/networking_sfc/services/sfc/drivers/oc/driver.py $PYTHON_MODULE_PATH/networking_sfc/services/sfc/drivers/oc/driver.py.bk 2>/dev/null + cp $PYTHON_MODULE_PATH/networking_sfc/services/flowclassifier/drivers/oc/driver.py $PYTHON_MODULE_PATH/networking_sfc/services/flowclassifier/drivers/oc/driver.py.bk 2>/dev/null + touch $PYTHON_MODULE_PATH/networking_sfc/services/sfc/drivers/oc/__init__.py + touch $PYTHON_MODULE_PATH/networking_sfc/services/flowclassifier/drivers/oc/__init__.py + else + : + fi +} + +function apply_patches(){ + cd $PYTHON_MODULE_PATH + for patch in ${PATCH_ARRAY[@]} + do + if [[ $patch != *"$VERSION"* ]]; then + echo -e "\nThe patches must be of version $VERSION." + exit 1 + fi + test_patch=`patch -R -p0 --dry-run --silent > /dev/null < $patch` + patch_status=`echo $?` + if [ $patch_status == 0 ]; then + echo "$patch is already applied so skipping." + else + backup_files_to_be_modifed $patch + echo "Applying patch $patch ..." 
+ patch -p0 < $patch + sleep 2 + fi + done +} + +function check_for_sfc(){ + sfc_rpm="$(rpm -qa | grep networking-sfc)" + if [[ -z $sfc_rpm ]]; then + echo -e "\nSFC RPM is missing in this node. This script works ONLY if a valid SFC rpm is installed. Aborting..." + exit 1 + fi + + if [[ $sfc_rpm != *"$VERSION"* ]]; then + echo -e "\nThis script works ONLY on SFC RPM installed with version $VERSION." + exit 1 + fi +} + +function copy_sfc_patches(){ + cp $PATCH_PATH/sfc-patches/sfc_plugin-$VERSION.patch $PYTHON_MODULE_PATH/. + cp $PATCH_PATH/sfc-patches/sfc_agent-$VERSION.patch $PYTHON_MODULE_PATH/. + cp $PATCH_PATH/sfc-patches/sfc_db-$VERSION.patch $PYTHON_MODULE_PATH/. +} + +function configure_sfc(){ + NEUTRON_CONF="/etc/neutron/neutron.conf" + OVS_INI="/etc/neutron/plugins/ml2/openvswitch_agent.ini" + EGG_FILE="/usr/lib/python2.7/site-packages/networking_sfc-*egg*/entry_points.txt" + crudini --set --verbose $NEUTRON_CONF sfc drivers oc + crudini --set --verbose $NEUTRON_CONF flowclassifier drivers oc + if [[ `crudini --get $NEUTRON_CONF DEFAULT service_plugins` != *"trunk"* ]]; then + crudini --set --verbose $NEUTRON_CONF DEFAULT service_plugins `crudini --get $NEUTRON_CONF DEFAULT service_plugins`,trunk + fi + crudini --set --verbose $OVS_INI ovs phy_patch_ofport $PATCH_PORT + crudini --set --verbose $EGG_FILE networking_sfc.flowclassifier.drivers oc networking_sfc.services.flowclassifier.drivers.oc.driver:OCFlowClassifierDriver + crudini --set --verbose $EGG_FILE networking_sfc.sfc.agent_drivers ovs networking_sfc.services.sfc.agent.extensions.oc.sfc_driver:SfcOCAgentDriver + crudini --set --verbose $EGG_FILE networking_sfc.sfc.drivers oc networking_sfc.services.sfc.drivers.oc.driver:OCSfcDriver + + systemctl restart neutron-server.service + systemctl restart neutron-openvswitch-agent.service +} + +function cleanup(){ + rm -f $PYTHON_MODULE_PATH/sfc_agent-$VERSION.patch + rm -f $PYTHON_MODULE_PATH/sfc_plugin-$VERSION.patch + rm -f 
$PYTHON_MODULE_PATH/sfc_db-$VERSION.patch +} + +function install_patch(){ + check_for_sfc + copy_sfc_patches + apply_patches + cleanup + configure_sfc +} + +if [[ -z $PATCH_PATH ]]; then + Usage + exit 1 +fi +if [[ -z $PATCH_PORT ]]; then + Usage + echo -e "Please give the patch port of br-int that interfaces the switch " + echo -e "connected to the extreme switch. Choose the correct one from the following:\n" + ovs-vsctl list-ports br-int + echo -e "\nIt maybe something like int-br-vlan or int-extreme-br or the like" + exit 1 +fi + +PYTHON_MODULE_PATH="/usr/lib/python2.7/site-packages" +PATCH_ARRAY=("sfc_agent-$VERSION.patch" "sfc_plugin-$VERSION.patch" "sfc_db-$VERSION.patch") +install_patch diff --git a/demo.sh b/demo.sh new file mode 100644 index 00000000..7fd40431 --- /dev/null +++ b/demo.sh @@ -0,0 +1,152 @@ +#!/bin/sh + +########################## FILL THIS INFO ##################################### +# Note: Fill with hostnames. Blank, if compute does not exist. +computeA=node-204.localdomain +computeB=compute-5 +computeC=compute-210 + +# Note: Fill with SF and client glance image names. +sf_image=sf +client_image=client +############################################################################### + + + +if [[ -z $computeA && -z $computeB && -z $computeC ]]; then + echo "Fill the compute node details." + exit +fi + + +sf_id=`glance image-list | grep " $sf_image " | awk '{print $2}'` +client_id=`glance image-list | grep " $client_image " | awk '{print $2}'` +if [[ -z $sf_id || -z $client_id ]]; then + echo "Fill in valid glance image names for both SF and Client VMs." + exit +fi + +operation=$1 +if [ "$operation" != "create" ] && [ "$operation" != "delete" ]; then + echo "This script expects a case-sensitive argument --> create/delete" + exit +fi + +if [ ! -z $computeC ] ; then + c2_compute=$computeC + c1_compute=$computeA + sf_compute=$computeB +elif [ ! 
-z $computeB ] ; then + sf_compute=$computeB + c2_compute=$computeA + c1_compute=$computeA +else + sf_compute=$computeA + c2_compute=$computeA + c1_compute=$computeA +fi + +if [ "$operation" == "create" ]; then + echo -e "\nCreating network Inspected_net" + openstack network create inspected_net + openstack subnet create --subnet-range 11.0.0.0/24 --network inspected_net inspected_subnet + sleep 50 + echo -e "\nCreating network Inspection_net" + openstack network create inspection_net + openstack subnet create --subnet-range 12.0.0.0/24 --network inspection_net inspection_subnet + sleep 50 + #openstack router create sfc-router + + #openstack router add subnet sfc-router inspected_subnet + #openstack router add subnet sfc-router inspection_subnet + + openstack port create --network inspected_net c1 + openstack port create --network inspected_net c2 + openstack port create --network inspected_net c3 + openstack port create --network inspection_net p1 + openstack port create --network inspection_net p2 + + p1_id=`openstack port list -f value | grep " p1 " | awk '{print $1}'` + p2_id=`openstack port list -f value | grep " p2 " | awk '{print $1}'` + p1_mac=`openstack port list -f value | grep " p1 " | awk '{print $3}'` + p2_mac=`openstack port list -f value | grep " p2 " | awk '{print $3}'` + + c1_id=`openstack port list -f value | grep " c1 " | awk '{print $1}'` + c2_id=`openstack port list -f value | grep " c2 " | awk '{print $1}'` + c3_id=`openstack port list -f value | grep " c3 " | awk '{print $1}'` + + echo -e "\nCreating Neutron Trunk port" + openstack network trunk create --parent-port $p1_id trunk1 + + echo -e "\nLaunching VNF on Node $sf_compute..." + nova boot --image $sf_image --flavor 3 --nic port-id=$p1_id\ + --nic port-id=$p2_id --availability-zone nova:$sf_compute VNF + + sleep 5 + + echo -e "\nLaunching Client1 on Node $c1_compute..." 
+ nova boot --image $client_image --flavor 3 --nic port-id=$c1_id --availability-zone nova:$c1_compute Client1 + echo -e "\nLaunching Client2 on Node $sf_compute..." + nova boot --image $client_image --flavor 3 --nic port-id=$c3_id --availability-zone nova:$sf_compute Client2 + echo -e "\nLaunching Server on Node $c2_compute..." + nova boot --image $client_image --flavor 3 --nic port-id=$c2_id --availability-zone nova:$c2_compute Server + + sleep 5 + + vnf_id=`openstack server list | grep VNF | awk '{print $2}'` + + echo -e "\nCreating a shadow port s1" + openstack port create --network inspected_net --mac-address $p1_mac s1 + s1_id=`openstack port list -f value | grep " s1 " | awk '{print $1}'` + seg_id=`openstack network show inspected_net | grep segmentation_id | awk '{print $4}'` + + echo -e "\nCreating a shadow port s2" + openstack port create --network inspected_net --device $p2_id --mac-address $p2_mac s2 + + openstack network trunk set --subport port=$s1_id,segmentation-type=vlan,segmentation-id=$seg_id trunk1 + + openstack port set --device $p1_id s1 + + sleep 3 + + echo -e "\nCreating SFC Port Pair pp1." + openstack port pair create --ingress s1 --egress s2 pp1 + echo -e "\nCreating SFC Port Pair Group ppg1." + openstack port pair group create --port-pair pp1 ppg1 + echo -e "\nCreating SFC Flow Classifier fc1." + openstack flow classifier create --protocol TCP --logical-source-port c1 --logical-destination-port c2 fc1 + echo -e "\nCreating SFC Flow Classifier fc2." + openstack flow classifier create --protocol TCP --logical-source-port c3 --logical-destination-port c2 fc2 + + while true; do + read -p "Create or delete the port chain? 
(create/delete/exit) " op + case $op in + create ) echo -e "\nCreating SFC Port Chain pc1" && openstack port chain create --port-pair-group ppg1 --flow-classifier fc1 --flow-classifier fc2 pc1;; + delete ) echo -e "\nDeleting SFC Port Chain pc1" && openstack port chain delete pc1;; + exit ) exit;; + * ) echo "Please choose either of (create/delete/exit).";; + esac + done + +else + openstack port chain delete pc1 + openstack flow classifier delete fc1 + openstack flow classifier delete fc2 + openstack port pair group delete ppg1 + openstack port pair delete pp1 + + s1_id=`openstack port list -f value | grep " s1 " | awk '{print $1}'` + openstack network trunk unset --subport $s1_id trunk1 + + openstack server delete Client1 Server VNF Client2 + + openstack network trunk delete trunk1 + + openstack port delete c1 c2 p1 p2 c3 s1 s2 + + #openstack router remove subnet sfc-router inspected_subnet + #openstack router remove subnet sfc-router inspection_subnet + #openstack router delete sfc-router + openstack network delete inspected_net inspection_net +fi + diff --git a/networking_sfc/db/flowclassifier_db.py b/networking_sfc/db/flowclassifier_db.py index 91e4bcce..e3085958 100644 --- a/networking_sfc/db/flowclassifier_db.py +++ b/networking_sfc/db/flowclassifier_db.py @@ -111,8 +111,8 @@ def _check_ip_prefix_valid(cls, ip_prefix, ethertype): @classmethod def _logical_port_conflict(cls, first_logical_port, second_logical_port): - if first_logical_port is None or second_logical_port is None: - return True + '''if first_logical_port is None or second_logical_port is None: + return True''' return first_logical_port == second_logical_port @classmethod diff --git a/networking_sfc/db/sfc_db.py b/networking_sfc/db/sfc_db.py index 860c8dd3..ee628e39 100644 --- a/networking_sfc/db/sfc_db.py +++ b/networking_sfc/db/sfc_db.py @@ -239,7 +239,7 @@ def _validate_flow_classifiers(self, context, fc_ids, pc_id=None): if fc_assoc and fc_assoc['portchain_id'] != pc_id: raise 
ext_fc.FlowClassifierInUse(id=fc.id) - query = self._model_query(context, PortChain) + '''query = self._model_query(context, PortChain) for port_chain_db in query.all(): if port_chain_db['id'] == pc_id: continue @@ -260,7 +260,7 @@ def _validate_flow_classifiers(self, context, fc_ids, pc_id=None): raise ext_sfc.PortChainFlowClassifierInConflict( fc_id=fc['id'], pc_id=port_chain_db['id'], pc_fc_id=pc_fc['id'] - ) + )''' def _setup_chain_group_associations( self, context, port_chain, pg_ids @@ -431,10 +431,10 @@ def _validate_port_pair_ingress_egress(self, ingress, egress): raise ext_sfc.PortPairEgressNoHost( egress=egress['id'] ) - if ingress['device_id'] != egress['device_id']: + '''if ingress['device_id'] != egress['device_id']: raise ext_sfc.PortPairIngressEgressDifferentHost( ingress=ingress['id'], - egress=egress['id']) + egress=egress['id'])''' @log_helpers.log_method_call def create_port_pair(self, context, port_pair): diff --git a/networking_sfc/services/flowclassifier/drivers/oc/__init__.py b/networking_sfc/services/flowclassifier/drivers/oc/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/networking_sfc/services/flowclassifier/drivers/oc/driver.py b/networking_sfc/services/flowclassifier/drivers/oc/driver.py new file mode 100644 index 00000000..eb48cf25 --- /dev/null +++ b/networking_sfc/services/flowclassifier/drivers/oc/driver.py @@ -0,0 +1,38 @@ +# Copyright 2016 Futurewei. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import helpers as log_helpers + +from networking_sfc.services.flowclassifier.common import exceptions as exc +from networking_sfc.services.flowclassifier.drivers.ovs import driver as fc_dvr + + +class OCFlowClassifierDriver(fc_dvr.OVSFlowClassifierDriver): + """FlowClassifier Driver Base Class.""" + + @log_helpers.log_method_call + def create_flow_classifier_precommit(self, context): + """OVS Driver precommit before transaction committed. + + Make sure that either the logical_source_port + or the logical_destination_port is not None. + """ + + flow_classifier = context.current + logical_source_port = flow_classifier['logical_source_port'] + logical_destination_port = flow_classifier['logical_destination_port'] + if (logical_source_port or logical_destination_port) is None: + raise exc.FlowClassifierBadRequest(message=( + 'FlowClassifier %s requires either logical destination port or' + ' logical source port in ovs driver' % flow_classifier['id'])) diff --git a/networking_sfc/services/sfc/agent/extensions/oc/__init__.py b/networking_sfc/services/sfc/agent/extensions/oc/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/networking_sfc/services/sfc/agent/extensions/oc/sfc_driver.py b/networking_sfc/services/sfc/agent/extensions/oc/sfc_driver.py new file mode 100644 index 00000000..c19e46fd --- /dev/null +++ b/networking_sfc/services/sfc/agent/extensions/oc/sfc_driver.py @@ -0,0 +1,446 @@ +# Copyright 2015 Huawei. +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from neutron_lib import constants as n_consts +from oslo_config import cfg +from oslo_log import log as logging + +from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ + as ovs_consts +from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager + +from networking_sfc._i18n import _LE +from networking_sfc.services.sfc.common import ovs_ext_lib +from networking_sfc.services.sfc.agent.extensions.openvswitch import sfc_driver +from networking_sfc.services.sfc.drivers.ovs import constants + +LOG = logging.getLogger(__name__) + +cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' + 'common.config') + +# This table is used to process the traffic across different subnet scenarios. +# Flow 1: pri=1, ip,dl_dst=nexthop_mac,nw_src=nexthop_subnet. actions= +# push_mpls:0x8847,set_mpls_label,set_mpls_ttl,push_vlan,output:(patch port +# or resubmit to table(INGRESS_TABLE) +# Flow 2: pri=0, ip,dl_dst=nexthop_mac, action=push_mpls:0x8847, +# set_mpls_label,set_mpls_ttl,push_vlan,output:(patch port or resubmit to +# table(INGRESS_TABLE) +ACROSS_SUBNET_TABLE = 5 + +# The table has multiple flows that steer traffic for the different chains +# to the ingress port of different service functions hosted on this Compute +# node. 
+INGRESS_TABLE = 10 + +# port chain default flow rule priority +PC_DEF_PRI = 20 +PC_INGRESS_PRI = 30 + +sfc_ovs_opt = [ + cfg.StrOpt('local_hostname', + default='', help='Hostname of the local machine'), + cfg.StrOpt('phy_patch_ofport', + default='', help='Patch port of integration bridge ' + 'to tun/vlan bridge.')] +cfg.CONF.register_opts(sfc_ovs_opt, 'OVS') + + +class SfcOCAgentDriver(sfc_driver.SfcOVSAgentDriver): + """This class will support MPLS frame + + Ethernet + MPLS + IPv4 Packet: + +-------------------------------+---------------+--------------------+ + |Outer Ethernet, ET=0x8847 | MPLS head, | original IP Packet | + +-------------------------------+---------------+--------------------+ + """ + + REQUIRED_PROTOCOLS = [ + ovs_consts.OPENFLOW10, + ovs_consts.OPENFLOW11, + ovs_consts.OPENFLOW12, + ovs_consts.OPENFLOW13, + ] + + def __init__(self): + super(SfcOCAgentDriver, self).__init__() + self.ovs_sfc_dvr = sfc_driver.SfcOVSAgentDriver() + + def consume_api(self, agent_api): + self.agent_api = agent_api + + def initialize(self): + self.br_int = ovs_ext_lib.SfcOVSBridgeExt( + self.agent_api.request_int_br()) + self.br_int.set_protocols(SfcOCAgentDriver.REQUIRED_PROTOCOLS) + + self.local_ip = cfg.CONF.OVS.local_ip + self.phy_patch_ofport = self.br_int.get_port_ofport( + cfg.CONF.OVS.phy_patch_ofport) + self.vlan_manager = vlanmanager.LocalVlanManager() + + self._clear_sfc_flow_on_int_br() + + def update_flow_rules(self, flowrule, flowrule_status): + try: + if flowrule.get('egress'): + self._setup_egress_flow_rules(flowrule) + self._setup_reverse_ingress_flow_rules(flowrule) + if flowrule.get('ingress'): + self._setup_ingress_flow_rules(flowrule) + self._setup_reverse_egress_flow_rules(flowrule) + + flowrule_status_temp = {} + flowrule_status_temp['id'] = flowrule['id'] + flowrule_status_temp['status'] = constants.STATUS_ACTIVE + flowrule_status.append(flowrule_status_temp) + except Exception as e: + flowrule_status_temp = {} + flowrule_status_temp['id'] = 
flowrule['id'] + flowrule_status_temp['status'] = constants.STATUS_ERROR + flowrule_status.append(flowrule_status_temp) + LOG.exception(e) + LOG.error(_LE("update_flow_rules failed")) + + def delete_flow_rule(self, flowrule, flowrule_status): + try: + LOG.debug("delete_flow_rule, flowrule = %s", + flowrule) + + node_type = flowrule['node_type'] + # delete tunnel table flow rule on br-int(egress match) + if flowrule['egress'] is not None: + self._setup_source_based_flows( + flowrule, flowrule['del_fcs'], add_flow=False) + # delete group table, need to check again + group_id = flowrule.get('next_group_id', None) + if group_id and flowrule.get('group_refcnt', None) <= 1: + self.br_int.delete_group(group_id=group_id) + # Note(dpak): Check for repercussions. Loop previously depended on grp_rfcnt + for item in flowrule['next_hops']: + self.br_int.delete_flows( + table=ACROSS_SUBNET_TABLE, + dl_dst=item['mac_address']) + + if flowrule['ingress'] is not None: + self._setup_destination_based_flows(flowrule, + flowrule['del_fcs'], + add_flow=False) + # delete table INGRESS_TABLE ingress match flow rule + # on br-int(ingress match) + vif_port = self.br_int.get_vif_port_by_id(flowrule['ingress']) + if vif_port: + # third, install br-int flow rule on table INGRESS_TABLE + # for ingress traffic + self.br_int.delete_flows( + table=INGRESS_TABLE, + dl_type=0x8847, + dl_dst=vif_port.vif_mac, + mpls_label=flowrule['nsp'] << 8 | (flowrule['nsi'] + 1) + ) + if flowrule.get('reverse_path'): + rev_flowrule = self._reverse_flow_rules(flowrule, node_type) + if (flowrule['ingress'] is not None or ( + node_type == 'sf_node')): + self._setup_source_based_flows( + rev_flowrule, rev_flowrule['del_fcs'], add_flow=False) + if (flowrule['egress'] is not None or ( + node_type == 'sf_node')): + self._setup_destination_based_flows( + rev_flowrule, rev_flowrule['del_fcs'], + add_flow=False) + except Exception as e: + flowrule_status_temp = {} + flowrule_status_temp['id'] = flowrule['id'] + 
flowrule_status_temp['status'] = constants.STATUS_ERROR + flowrule_status.append(flowrule_status_temp) + LOG.exception(e) + LOG.error(_LE("delete_flow_rule failed")) + + def _reverse_flow_rules(self, flowrule, node_type): + rev_flowrule = copy.deepcopy(flowrule) + + def _reverse_fcs(op): + for fc in rev_flowrule[op]: + fc['logical_destination_port'], fc['logical_source_port'] = ( + fc['logical_source_port'], fc['logical_destination_port']) + fc['ldp_mac_address'], fc['lsp_mac_address'] = ( + fc['lsp_mac_address'], fc['ldp_mac_address']) + fc['destination_ip_prefix'], fc['source_ip_prefix'] = ( + fc['source_ip_prefix'], fc['destination_ip_prefix']) + + for op in ['add_fcs', 'del_fcs']: + _reverse_fcs(op) + + if node_type == 'src_node': + rev_flowrule['ingress'], rev_flowrule['egress'] = ( + rev_flowrule['egress'], rev_flowrule['ingress']) + + return rev_flowrule + + def _setup_reverse_ingress_flow_rules(self, flowrule): + if not flowrule['reverse_path']: + return + rev_flowrule = self._reverse_flow_rules(flowrule, + flowrule['node_type']) + self._setup_ingress_flow_rules(rev_flowrule) + + def _setup_reverse_egress_flow_rules(self, flowrule): + if not flowrule['reverse_path']: + return + rev_flowrule = self._reverse_flow_rules(flowrule, + flowrule['node_type']) + self._setup_egress_flow_rules(rev_flowrule) + + def _setup_egress_flow_rules(self, flowrule, match_inport=True): + group_id = flowrule.get('next_group_id', None) + next_hops = flowrule.get('next_hops', None) + global_vlan_tag = flowrule['segment_id'] + + # if the group is not none, install the egress rule for this SF + if ( + group_id and next_hops + ): + # 1st, install br-int flow rule on table ACROSS_SUBNET_TABLE + # and group table + buckets = [] + # A2 Group Creation + for item in next_hops: + bucket = ( + 'bucket=weight=%d, mod_dl_dst:%s,' + 'resubmit(,%d)' % ( + item['weight'], + item['mac_address'], + ACROSS_SUBNET_TABLE + ) + ) + buckets.append(bucket) + # A3 In table 5, send to either patch port + # 
or table 10 for remote and local node respectively. + subnet_actions_list = [] + + priority = 30 + if item['local_endpoint'] == flowrule['host']: + subnet_actions = ( + "mod_vlan_vid:%d, resubmit(,%d)" % (global_vlan_tag, + INGRESS_TABLE)) + else: + # same subnet with next hop + subnet_actions = "output:%s" % self.phy_patch_ofport + subnet_actions_list.append(subnet_actions) + + self.br_int.add_flow( + table=ACROSS_SUBNET_TABLE, + priority=priority, + dl_dst=item['mac_address'], + dl_type=0x0800, + actions="%s" % ','.join(subnet_actions_list)) + + buckets = ','.join(buckets) + group_content = self.br_int.dump_group_for_id(group_id) + if group_content.find('group_id=%d' % group_id) == -1: + self.br_int.add_group(group_id=group_id, + type='select', buckets=buckets) + else: + self.br_int.mod_group(group_id=group_id, + type='select', buckets=buckets) + + self._setup_source_based_flows( + flowrule, + flowrule['add_fcs'], + add_flow=True, + match_inport=True) + + def _update_flows(self, table, priority, + match_info, actions=None, add_flow=True): + if add_flow: + self.br_int.add_flow(table=table, + priority=priority, + actions=actions, + **match_info) + else: + self.br_int.delete_flows(table=table, + priority=priority, + **match_info) + + def _check_if_local_port(self, port_id): + try: + if self.br_int.get_vif_port_by_id(port_id): + return True + except Exception: + pass + return False + + def _setup_source_based_flows( + self, flowrule, flow_classifier_list, + add_flow=True, match_inport=True + ): + inport_match = {} + priority = 50 + global_vlan_tag = flowrule['segment_id'] + local_vlan_tag = self._get_vlan_by_port(flowrule['egress']) + + if match_inport is True: + egress_port = self.br_int.get_vif_port_by_id(flowrule['egress']) + if egress_port: + inport_match = dict(in_port=egress_port.ofport) + + group_id = flowrule.get('next_group_id') + next_hops = flowrule.get('next_hops') + if not (group_id and next_hops): + local_vlan_tag = 
self._get_vlan_by_port(flowrule['ingress']) + # B6. For packets coming out of SF, we resubmit to table 5. + match_info = dict(dl_type=0x0800, **inport_match) + actions = ("resubmit(,%s)" % ACROSS_SUBNET_TABLE) + + self._update_flows(ovs_consts.LOCAL_SWITCHING, 60, + match_info, actions, add_flow) + + ingress_port = self.br_int.get_vif_port_by_id(flowrule['ingress']) + ingress_mac = ingress_port.vif_mac + # B7 In table 5, we decide whether to send it locally or remotely. + for fc in flow_classifier_list: + ldp_port_id = fc['logical_destination_port'] + ldp_mac = fc['ldp_mac_address'] + + if self._check_if_local_port(ldp_port_id): + ldp_port = self.br_int.get_vif_port_by_id(ldp_port_id) + ldp_ofport = ldp_port.ofport + actions = ("strip_vlan, mod_dl_dst:%s, output:%s" % ( + ldp_mac, ldp_ofport)) + else: + actions = ("mod_vlan_vid:%s, mod_dl_src:%s, " + "mod_dl_dst:%s, output:%s" % ( + (local_vlan_tag, ingress_mac, + ldp_mac, self.phy_patch_ofport))) + + match_info = dict(nw_dst=fc['destination_ip_prefix'], + dl_dst=ingress_mac, + dl_type=0x0800, + dl_vlan=global_vlan_tag) + + self._update_flows(ACROSS_SUBNET_TABLE, priority, + match_info, actions, add_flow) + return + + # A1. Flow inserted at LSP egress. Matches on ip, in_port and LDP IP. + # Action is redirect to group. 
+ for fc in flow_classifier_list: + ldp_mac = fc['ldp_mac_address'] + actions = ("mod_vlan_vid:%s, group:%d" % ( + local_vlan_tag, group_id)) + match_info = dict(inport_match, **{'dl_dst': ldp_mac, + 'dl_type': '0x0800'}) + self._update_flows(ovs_consts.LOCAL_SWITCHING, priority, + match_info, actions, add_flow) + + def _get_port_info(self, port_id, info_type): + ''' Returns specific port info + + @param port_id: Neutron port id + @param info_type: Type is List [mac,ofport,vlan] + @return: Tuple (MAC address, openflow port number) + + ''' + + res = () + port = self.br_int.get_vif_port_by_id(port_id) + + port_type_map = { + 'mac': port.vif_mac, + 'ofport': port.ofport, + 'vlan': self._get_vlan_by_port(port_id)} + + for each in info_type: + res += (port_type_map[each],) + + return res + + def _get_vlan_by_port(self, port_id): + try: + net_uuid = self.vlan_manager.get_net_uuid(port_id) + return self.vlan_manager.get(net_uuid).vlan + except (vlanmanager.VifIdNotFound, vlanmanager.MappingNotFound): + return None + + def _setup_destination_based_flows(self, flowrule, + flow_classifier_list, + add_flow=True): + priority = 50 + ingress_mac, ingress_ofport = self._get_port_info( + flowrule['ingress'], ['mac', 'ofport']) + global_vlan_tag = flowrule['segment_id'] + + group_id = flowrule.get('next_group_id') + next_hops = flowrule.get('next_hops') + if not (group_id and next_hops): + # B5. At ingress of SF, if dl_dst belongs to SF and nw_dst + # belongs to Dest VM, output to ingress port of SF. + match_info = dict(dl_type=0x0800, + dl_vlan=global_vlan_tag, + dl_dst=ingress_mac) + actions = ("strip_vlan, output:%s" % (ingress_ofport)) + + self._update_flows(INGRESS_TABLE, 60, + match_info, actions, add_flow) + + # B4. At ingress of SF, if dest mac matches with SF ingress, + # vlan matches, then resubmit to 10. + # This is per ldp because ldps can have different vlan tags. 
+ match_info = dict( + dl_type=0x0800, in_port=self.phy_patch_ofport, + dl_vlan=global_vlan_tag, dl_dst=ingress_mac) + actions = ("resubmit(,%s)" % INGRESS_TABLE) + self._update_flows(ovs_consts.LOCAL_SWITCHING, priority, + match_info, actions, add_flow) + return + + # B9. Match IP packets with vlan and source IP. Actions will be to + # strip vlan and modify src MAC and output to Dest VM port. + for fc in flow_classifier_list: + nw_src = fc['source_ip_prefix'] + + src_port_mac = fc['lsp_mac_address'] + actions = ('strip_vlan, mod_dl_src:%s, output:%s' % ( + src_port_mac, ingress_ofport)) + + match_field = dict( + dl_type=0x0800, + dl_vlan=global_vlan_tag, + dl_dst=ingress_mac, + nw_src=nw_src) + + self._update_flows(10, priority, + match_field, actions, add_flow) + + # B8. At ingress of dest, match ip packet with vlan tag and dest + # MAC address. Action will be to resubmit to 10. + match_field = dict( + dl_type=0x0800, + dl_vlan=global_vlan_tag, + dl_dst=ingress_mac, + nw_src=nw_src) + actions = ("resubmit(,%s)" % 10) + + self._update_flows(ovs_consts.LOCAL_SWITCHING, priority, + match_field, actions, add_flow) + + def _setup_ingress_flow_rules(self, flowrule): + flow_classifier_list = flowrule['add_fcs'] + self._setup_destination_based_flows(flowrule, + flow_classifier_list) diff --git a/networking_sfc/services/sfc/drivers/oc/__init__.py b/networking_sfc/services/sfc/drivers/oc/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/networking_sfc/services/sfc/drivers/oc/driver.py b/networking_sfc/services/sfc/drivers/oc/driver.py new file mode 100644 index 00000000..4fc2a075 --- /dev/null +++ b/networking_sfc/services/sfc/drivers/oc/driver.py @@ -0,0 +1,555 @@ +# Copyright e015 nuturewei. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
class OCSfcDriver(sfc_dvr.OVSSfcDriver):
    """OC SFC driver, specializing the reference OVS SFC driver.

    Reuses the OVS driver's bookkeeping (path nodes, port details, agent
    RPC) but builds chain paths from logical source/destination ports and
    marks a chain as bi-directional ("reverse_path") when its flow
    classifiers carry both a logical source port (LSP) and a logical
    destination port (LDP).
    """

    def initialize(self):
        # No OC-specific setup yet; delegate to the OVS driver.
        super(OCSfcDriver, self).initialize()

    @log_helpers.log_method_call
    def _add_flowclassifier_port_assoc(self, fc_ids, tenant_id,
                                       src_node):
        """Create port-detail records for each classifier's LSP/LDP and
        associate them with the chain's source path node.

        :param fc_ids: flow classifier uuids to process
        :param tenant_id: owner tenant of the port chain
        :param src_node: the SRC path-node dict; its 'portpair_details'
            list is appended to in place
        """
        for fc in self._get_fcs_by_ids(fc_ids):
            need_assoc = True
            src_pd_filter = dst_pd_filter = None

            if fc['logical_source_port']:
                # lookup the source port
                src_pd_filter = dict(
                    egress=fc['logical_source_port'],
                    tenant_id=tenant_id
                )
            if fc['logical_destination_port']:
                # lookup the destination port
                dst_pd_filter = dict(
                    ingress=fc['logical_destination_port'],
                    tenant_id=tenant_id
                )
            # NOTE(review): if a classifier lacks an LSP or LDP the
            # corresponding filter stays None and is passed straight to
            # get_port_detail_by_filter — confirm the superclass tolerates
            # a None filter.
            src_pd = self.get_port_detail_by_filter(src_pd_filter)
            dst_pd = self.get_port_detail_by_filter(dst_pd_filter)

            new_src_pd = new_dst_pd = ''
            if not (src_pd and dst_pd):
                if not src_pd:
                    # Create source port detail
                    new_src_pd = self._create_port_detail(src_pd_filter)
                    LOG.debug('create src port detail: %s', new_src_pd)
                if not dst_pd:
                    # Create destination port detail
                    new_dst_pd = self._create_port_detail(dst_pd_filter)
                    LOG.debug('create dst port detail: %s', new_dst_pd)
            else:
                # Both details already exist: skip association if this
                # source node is already linked to the source detail.
                for path_node in src_pd['path_nodes']:
                    if path_node['pathnode_id'] == src_node['id']:
                        need_assoc = False
            if need_assoc:
                # Create association relationship(s) between the newly
                # created port details and the SRC path node.
                if new_src_pd:
                    assco_args = {
                        'portpair_id': new_src_pd['id'],
                        'pathnode_id': src_node['id'],
                        'weight': 1,
                    }
                    sna = self.create_pathport_assoc(assco_args)
                    LOG.debug('create assoc src port with node: %s', sna)
                    src_node['portpair_details'].append(new_src_pd['id'])
                if new_dst_pd:
                    assco_args = {
                        'portpair_id': new_dst_pd['id'],
                        'pathnode_id': src_node['id'],
                        'weight': 1,
                    }
                    sna = self.create_pathport_assoc(assco_args)
                    # NOTE(review): message says "src" but this is the dst
                    # port association — looks like a copy/paste slip.
                    LOG.debug('create assoc src port with node: %s', sna)
                    src_node['portpair_details'].append(new_dst_pd['id'])

    def _create_src_and_dest_nodes(self, port_chain, next_group_intid,
                                   next_group_members, path_nodes):
        """Create the SRC and DST path nodes for a port chain.

        Appends both nodes to ``path_nodes`` (mutated in place) and
        returns the SRC node dict.
        """
        path_id = port_chain['chain_id']
        port_pair_groups = port_chain['port_pair_groups']
        sf_path_length = len(port_pair_groups)

        # Create a head node object for port chain.
        # nsi starts at 0xff and is decremented per hop along the path.
        src_args = {'tenant_id': port_chain['tenant_id'],
                    'node_type': ovs_const.SRC_NODE,
                    'nsp': path_id,
                    'nsi': 0xff,
                    'portchain_id': port_chain['id'],
                    'status': ovs_const.STATUS_BUILDING,
                    'next_group_id': next_group_intid,
                    'next_hop': jsonutils.dumps(next_group_members),
                    }
        src_node = self.create_path_node(src_args)
        LOG.debug('create src node: %s', src_node)
        path_nodes.append(src_node)

        # Create a destination (tail) node object for the port chain;
        # it has no next hop / next group.
        dst_args = {
            'tenant_id': port_chain['tenant_id'],
            'node_type': ovs_const.DST_NODE,
            'nsp': path_id,
            'nsi': 0xff - sf_path_length - 1,
            'portchain_id': port_chain['id'],
            'status': ovs_const.STATUS_BUILDING,
            'next_group_id': None,
            'next_hop': None
        }
        dst_node = self.create_path_node(dst_args)
        LOG.debug('create dst node: %s', dst_node)
        path_nodes.append(dst_node)

        return src_node

    def _check_if_bi_node(self, src_node):
        ''' We assume that if the flow classifiers associated with
        a port chain contain both LSP and LDP, we treat it as a bi-directional
        chain. LSP and LDP can be part of same or different flow
        classifier(s) '''

        is_lsp = False
        is_ldp = False
        portpair_details = src_node['portpair_details']
        for each in portpair_details:
            port_detail = self.get_port_detail_by_filter(dict(id=each))
            # A detail with an egress port came from an LSP; one without
            # is assumed to have come from an LDP.
            if port_detail['egress']:
                is_lsp = True
            else:
                is_ldp = True
        if is_lsp and is_ldp:
            return True
        return False

    @log_helpers.log_method_call
    def _create_portchain_path(self, context, port_chain):
        """Build all path nodes (SRC, SF*, DST) for ``port_chain``.

        Validates that consecutive port-pair groups share a subnet,
        creates the head/tail nodes, associates classifier ports with the
        SRC node, then creates one SF node per port-pair group with its
        port-pair associations.

        :returns: list of path-node dicts, or None/early-return on error
        """
        path_nodes = []
        # Create an assoc object for chain_id and path_id
        # context = context._plugin_context
        path_id = port_chain['chain_id']

        if not path_id:
            LOG.error(_LE('No path_id available for creating port chain path'))
            return

        port_pair_groups = port_chain['port_pair_groups']
        sf_path_length = len(port_pair_groups)

        # Detect cross-subnet transit
        # Compare subnets for logical source ports
        # and first PPG ingress ports
        '''for fc in self._get_fcs_by_ids(port_chain['flow_classifiers']):
            if fc['logical_source_port']:
                subnet1 = self._get_subnet_by_port(fc['logical_source_port'])
            else:
                subnet1 = self._get_subnet_by_port(fc[
                    'logical_destination_port'])
            cidr1 = subnet1['cidr']
            ppg = context._plugin.get_port_pair_group(context._plugin_context,
                port_pair_groups[0])
            for pp_id1 in ppg['port_pairs']:
                pp1 = context._plugin.get_port_pair(context._plugin_context,
                    pp_id1)
                filter1 = {}
                if pp1.get('ingress', None):
                    filter1 = dict(dict(ingress=pp1['ingress']), **filter1)
                pd1 = self.get_port_detail_by_filter(filter1)
                subnet2 = self._get_subnet_by_port(pd1['ingress'])
                cidr2 = subnet2['cidr']
                if cidr1 != cidr2:
                    LOG.error(_LE('Cross-subnet chain not supported'))
                    raise exc.SfcDriverError()
                    return None'''

        # Compare subnets for PPG egress ports
        # and next PPG ingress ports
        for i in range(sf_path_length - 1):
            ppg = context._plugin.get_port_pair_group(context._plugin_context,
                                                      port_pair_groups[i])
            next_ppg = context._plugin.get_port_pair_group(
                context._plugin_context, port_pair_groups[i + 1])
            for pp_id1 in ppg['port_pairs']:
                pp1 = context._plugin.get_port_pair(context._plugin_context,
                                                    pp_id1)
                filter1 = {}
                if pp1.get('egress', None):
                    filter1 = dict(dict(egress=pp1['egress']), **filter1)
                    pd1 = self.get_port_detail_by_filter(filter1)
                    subnet1 = self._get_subnet_by_port(pd1['egress'])
                    # NOTE(review): cidr3 is only assigned when the port
                    # pair has an egress port; a pair without one would
                    # leave it unbound for the comparison below — confirm
                    # egress is mandatory here.
                    cidr3 = subnet1['cidr']

                for pp_id2 in next_ppg['port_pairs']:
                    pp2 = context._plugin.get_port_pair(
                        context._plugin_context, pp_id2)
                    filter2 = {}
                    if pp2.get('ingress', None):
                        filter2 = dict(dict(ingress=pp2['ingress']), **filter2)
                        pd2 = self.get_port_detail_by_filter(filter2)
                        subnet2 = self._get_subnet_by_port(pd2['ingress'])
                        cidr4 = subnet2['cidr']
                        if cidr3 != cidr4:
                            LOG.error(_LE('Cross-subnet chain not supported'))
                            raise exc.SfcDriverError()
                            return None

        next_group_intid, next_group_members = self._get_portgroup_members(
            context, port_chain['port_pair_groups'][0])

        src_node = self._create_src_and_dest_nodes(
            port_chain, next_group_intid,
            next_group_members, path_nodes)

        self._add_flowclassifier_port_assoc(
            port_chain['flow_classifiers'],
            port_chain['tenant_id'],
            src_node
        )

        # Bi-directional iff the classifiers contributed both LSP and LDP
        # port details to the SRC node.
        is_bi_node = self._check_if_bi_node(src_node)

        if is_bi_node:
            path_nodes[0].update(dict(reverse_path=True))
        else:
            path_nodes[0].update(dict(reverse_path=False))

        for i in range(sf_path_length):
            cur_group_members = next_group_members
            # next_group for next hop
            if i < sf_path_length - 1:
                next_group_intid, next_group_members = (
                    self._get_portgroup_members(
                        context, port_pair_groups[i + 1])
                )
            else:
                next_group_intid = None
                next_group_members = None

            # Create a service-function node for this port-pair group.
            node_args = {
                'tenant_id': port_chain['tenant_id'],
                'node_type': ovs_const.SF_NODE,
                'nsp': path_id,
                'nsi': 0xfe - i,
                'portchain_id': port_chain['id'],
                'status': ovs_const.STATUS_BUILDING,
                'next_group_id': next_group_intid,
                'next_hop': (
                    None if not next_group_members else
                    jsonutils.dumps(next_group_members)
                )
            }
            sf_node = self.create_path_node(node_args)
            LOG.debug('chain path node: %s', sf_node)

            # If Src Node is bi, then SF node shall naturally be bi.
            if is_bi_node:
                sf_node.update(dict(reverse_path=True))
            else:
                sf_node.update(dict(reverse_path=False))
            # Create the association objects that combine the pathnode_id with
            # the ingress of the port_pairs in the current group
            # when port_group does not reach tail
            for member in cur_group_members:
                assco_args = {'portpair_id': member['portpair_id'],
                              'pathnode_id': sf_node['id'],
                              'weight': member['weight'], }
                sfna = self.create_pathport_assoc(assco_args)
                LOG.debug('create assoc port with node: %s', sfna)
                sf_node['portpair_details'].append(member['portpair_id'])
            path_nodes.append(sf_node)

        return path_nodes

    def _delete_path_node_port_flowrule(self, node, port, fc_ids):
        """Ask the agent to delete the flow rules for one (node, port)."""
        # If this port is not bound to a host, no flow rule was ever
        # generated for it, so there is nothing to delete.
        if not port['host_id']:
            return
        flow_rule = self._build_portchain_flowrule_body(
            node,
            port,
            None,
            fc_ids)

        # For bi-directional chains, a SRC-node rule keyed on an ingress
        # port describes the reverse direction; swap src/dst fields.
        if flow_rule['reverse_path']:
            if (flow_rule['node_type'] == ovs_const.SRC_NODE and flow_rule[
                    'ingress']):
                flow_rule = self._reverse_flow_rules(flow_rule)

        # NOTE(review): "ZZZ" looks like leftover debug tracing.
        LOG.info("ZZZ FLOW RULES %r" % flow_rule)
        self.ovs_driver_rpc.ask_agent_to_delete_flow_rules(
            self.admin_context,
            flow_rule)

        self._delete_agent_fdb_entries(flow_rule)

    def _delete_path_node_flowrule(self, node, fc_ids):
        """Delete flow rules for every port detail attached to ``node``."""
        if node['portpair_details'] is None:
            return
        for each in node['portpair_details']:
            port = self.get_port_detail_by_filter(dict(id=each))
            if port:
                # For SF nodes, rules are programmed against the tap
                # device, so substitute the egress with the tap port.
                if node['node_type'] == ovs_const.SF_NODE:
                    _, egress = self._get_ingress_egress_tap_ports(port)
                    port.update({'egress': egress})
                self._delete_path_node_port_flowrule(
                    node, port, fc_ids)

    @log_helpers.log_method_call
    def _delete_portchain_path(self, context, port_chain):
        """Tear down all path nodes and flow rules of ``port_chain``."""
        pds = self.get_path_nodes_by_filter(
            dict(portchain_id=port_chain['id']))
        src_node = None
        # Deletion currently treats every chain as bi-directional (see
        # the disabled per-node check below).
        is_bi_node = True
        if pds:
            for pd in pds:
                ''' We cannot assume that Src node always comes first.
                # If Src Node is bi, then SF node shall naturally be bi. Here,
                # we assume that Src node will be the first in 'pds'.
                if self._check_if_bi_node(pd):
                    is_bi_node = True'''
                if is_bi_node:
                    pd.update(dict(reverse_path=True))
                else:
                    pd.update(dict(reverse_path=False))

                if pd['node_type'] == ovs_const.SRC_NODE:
                    src_node = pd

                self._delete_path_node_flowrule(
                    pd,
                    port_chain['flow_classifiers']
                )
            for pd in pds:
                self.delete_path_node(pd['id'])

        # delete the ports on the traffic classifier
        self._remove_flowclassifier_port_assoc(
            port_chain['flow_classifiers'],
            port_chain['tenant_id'],
            src_node
        )

    def _filter_flow_classifiers(self, flow_rule, fc_ids):
        """Filter flow classifiers relevant to ``flow_rule``.

        Enriches each classifier with the MAC address and fixed IP of its
        logical source/destination port, then keeps those that match the
        rule's node type and ports.

        @return: list of the (enriched) flow classifiers
        """

        fc_return = []
        core_plugin = manager.NeutronManager.get_plugin()

        if not fc_ids:
            return fc_return
        fcs = self._get_fcs_by_ids(fc_ids)
        for fc in fcs:
            # Work on a copy stripped of identity-only fields.
            new_fc = fc.copy()
            new_fc.pop('id')
            new_fc.pop('name')
            new_fc.pop('tenant_id')
            new_fc.pop('description')

            lsp = new_fc.get('logical_source_port')
            ldp = new_fc.get('logical_destination_port')
            if lsp:
                port_detail = core_plugin.get_port(self.admin_context, lsp)
                new_fc['lsp_mac_address'] = port_detail['mac_address']
                # Uses the first fixed IP only — assumes single-IP ports;
                # TODO confirm.
                new_fc['source_ip_prefix'] = port_detail[
                    'fixed_ips'][0]['ip_address']
            if ldp:
                port_detail = core_plugin.get_port(self.admin_context, ldp)
                new_fc['ldp_mac_address'] = port_detail['mac_address']
                new_fc['destination_ip_prefix'] = port_detail[
                    'fixed_ips'][0]['ip_address']

            # SRC nodes keep classifiers anchored on their own egress
            # (LSP) or ingress (LDP) port; SF nodes keep all classifiers.
            if (
                flow_rule['node_type'] in [ovs_const.SRC_NODE] and
                flow_rule['egress'] == fc['logical_source_port']
            ) or (
                flow_rule['node_type'] in [ovs_const.SRC_NODE] and
                flow_rule['ingress'] == fc['logical_destination_port']
            ):
                fc_return.append(new_fc)
            elif flow_rule['node_type'] in [ovs_const.SF_NODE]:
                fc_return.append(new_fc)

        return fc_return

    def _reverse_flow_rules(self, flowrule):
        """Swap source/destination fields of ``flowrule`` in place.

        Used on bi-directional chains to derive the reverse-direction
        rule. Returns the same (mutated) dict.
        """

        def _reverse_fcs(op):
            # NOTE(review): assumes every classifier carries both the
            # lsp_* and ldp_* keys; _filter_flow_classifiers only sets
            # them when the corresponding logical port exists — confirm a
            # one-sided classifier cannot reach this path.
            for fc in flowrule[op]:
                fc['logical_destination_port'], fc['logical_source_port'] = (
                    fc['logical_source_port'], fc['logical_destination_port'])
                fc['ldp_mac_address'], fc['lsp_mac_address'] = (
                    fc['lsp_mac_address'], fc['ldp_mac_address'])
                fc['destination_ip_prefix'], fc['source_ip_prefix'] = (
                    fc['source_ip_prefix'], fc['destination_ip_prefix'])

        for op in ['add_fcs', 'del_fcs']:
            _reverse_fcs(op)

        flowrule['ingress'], flowrule['egress'] = (
            flowrule['egress'], flowrule['ingress'])

        return flowrule

    def _update_path_node_port_flowrules(self, node, port,
                                         add_fc_ids=None, del_fc_ids=None):
        """Ask the agent to update the flow rules for one (node, port)."""
        # If this port is not bound to a host, do not generate a flow rule.
        if not port['host_id']:
            return

        flow_rule = self._build_portchain_flowrule_body(
            node,
            port,
            add_fc_ids,
            del_fc_ids)

        # Mirror of the reversal logic in _delete_path_node_port_flowrule.
        if flow_rule['reverse_path']:
            if (flow_rule['node_type'] == ovs_const.SRC_NODE and flow_rule[
                    'ingress']):
                flow_rule = self._reverse_flow_rules(flow_rule)

        # NOTE(review): "YYY" looks like leftover debug tracing.
        LOG.info("YYY FLOW RULES %r" % flow_rule)
        self.ovs_driver_rpc.ask_agent_to_update_flow_rules(
            self.admin_context,
            flow_rule)

        self._update_agent_fdb_entries(flow_rule)

    def _update_path_node_flowrules(self, node,
                                    add_fc_ids=None, del_fc_ids=None):
        """Update flow rules for every port detail attached to ``node``."""
        if node['portpair_details'] is None:
            return
        for each in node['portpair_details']:
            port = self.get_port_detail_by_filter(dict(id=each))

            if port:
                # For SF nodes, program rules against the tap device.
                if node['node_type'] == ovs_const.SF_NODE:
                    _, egress = self._get_ingress_egress_tap_ports(port)
                    port.update({'egress': egress})
                self._update_path_node_port_flowrules(
                    node, port, add_fc_ids, del_fc_ids)

    @log_helpers.log_method_call
    def create_port_chain(self, context):
        """Driver entry point: build the path and push it to agents."""
        port_chain = context.current
        path_nodes = self._create_portchain_path(context, port_chain)
        # NOTE(review): "XXX" looks like leftover debug tracing.
        LOG.info("XXX PATH NODES %r" % path_nodes)
        self._update_path_nodes(
            path_nodes,
            port_chain['flow_classifiers'],
            None)

    @log_helpers.log_method_call
    def _get_portpair_detail_info(self, portpair_id):
        """Get port detail.

        @param: portpair_id: uuid
        @return: (host_id, local_ip, network_type, segment_id,
        mac_address): tuple; all five are None for unsupported network
        types or unbound ports
        """

        core_plugin = manager.NeutronManager.get_plugin()
        port_detail = core_plugin.get_port(self.admin_context, portpair_id)
        host_id, local_ip, network_type, segment_id, mac_address = (
            (None, ) * 5)

        if port_detail:
            host_id = port_detail['binding:host_id']
            network_id = port_detail['network_id']
            mac_address = port_detail['mac_address']
            network_info = core_plugin.get_network(
                self.admin_context, network_id)
            network_type = network_info['provider:network_type']
            segment_id = network_info['provider:segmentation_id']

        if network_type not in [np_const.TYPE_VXLAN, np_const.TYPE_VLAN]:
            LOG.warning(_LW("Currently only support vxlan and vlan networks"))
            return ((None, ) * 5)
        elif not host_id:
            LOG.warning(_LW("This port has not been binding"))
            return ((None, ) * 5)
        else:
            if network_type == np_const.TYPE_VXLAN:
                # VXLAN: resolve the tunnel endpoint IP for the host.
                driver = core_plugin.type_manager.drivers.get(network_type)
                host_endpoint = driver.obj.get_endpoint_by_host(host_id)
                if host_endpoint:
                    local_ip = host_endpoint['ip_address']
                else:
                    local_ip = None
            else:
                # VLAN: no tunnel endpoint — the host id is used as the
                # local endpoint value. NOTE(review): confirm downstream
                # consumers expect a hostname here rather than an IP.
                local_ip = host_id

        return host_id, local_ip, network_type, segment_id, mac_address

    def _get_ingress_egress_tap_ports(self, port_pair):
        """Resolve the tap devices behind a port pair's shadow ports.

        :returns: (ingress_device_id, egress_device_id) of the shadow
            ports' backing devices
        """
        ingress_shadow_port_id = port_pair.get('ingress')
        egress_shadow_port_id = port_pair.get('egress')

        core_plugin = manager.NeutronManager.get_plugin()
        in_shadow_pd = core_plugin.get_port(self.admin_context,
                                            ingress_shadow_port_id)
        eg_shadow_pd = core_plugin.get_port(self.admin_context,
                                            egress_shadow_port_id)
        return in_shadow_pd['device_id'], eg_shadow_pd['device_id']

    @log_helpers.log_method_call
    def _create_port_detail(self, port_pair):
        """Create and persist a port-detail record for ``port_pair``."""
        # since first node may not assign the ingress port, and last node may
        # not assign the egress port, we use one of the ports as the key to
        # get the SF information.
        port = None
        if port_pair.get('ingress', None):
            port = port_pair['ingress']
        elif port_pair.get('egress', None):
            port = port_pair['egress']

        host_id, local_endpoint, network_type, segment_id, mac_address = (
            self._get_portpair_detail_info(port))

        ingress, egress = port_pair.get('ingress'), port_pair.get('egress')

        port_detail = {
            'ingress': ingress,
            'egress': egress,
            'tenant_id': port_pair['tenant_id'],
            'host_id': host_id,
            'segment_id': segment_id,
            'network_type': network_type,
            'local_endpoint': local_endpoint,
            'mac_address': mac_address
        }
        r = self.create_port_detail(port_detail)
        LOG.debug('create port detail: %s', r)
        return r