"neutron/services",
"neutron/plugins/ml2",
"neutron/plugins/openvswitch",
- "neutron/plugins/linuxbridge"]
+ "neutron/plugins/linuxbridge",
+ "neutron/plugins/cisco"]
-    return any([dir in filename for dir in dirs])
+    return any(d in filename for d in dirs)
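For orientation, this list gates which source trees the log-translation checks scan; a minimal self-contained sketch of the predicate (the function name is illustrative, not the actual check code):

    # Sketch: restrict a style check to files under selected directories.
    def _is_translation_checked(filename):
        dirs = ["neutron/services",
                "neutron/plugins/ml2",
                "neutron/plugins/openvswitch",
                "neutron/plugins/linuxbridge",
                "neutron/plugins/cisco"]
        return any(d in filename for d in dirs)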
from neutron.openstack.common import periodic_task
from neutron.openstack.common import service
from neutron.openstack.common import timeutils
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.plugins.cisco.cfg_agent import device_status
from neutron.plugins.cisco.common import cisco_constants as c_constants
from neutron import service as neutron_service
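The convention this change applies throughout: log records take a marker matching their level (_LE for error, _LI for info, _LW for warning), debug messages are never translated, and user-facing exception text keeps plain _(). A minimal sketch of the pattern (the function is illustrative):

    from neutron.openstack.common.gettextutils import _, _LE, _LI, _LW
    from neutron.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def example(value):
        LOG.debug("raw value: %s", value)               # debug: not translated
        LOG.info(_LI("Processing %s"), value)           # info-level marker
        LOG.warning(_LW("Unexpected value %s"), value)  # warning-level marker
        LOG.error(_LE("Failed to process %s"), value)   # error-level marker
        raise ValueError(_("Bad value: %s") % value)    # user-facing: plain _()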
self.routing_service_helper = importutils.import_object(
svc_helper_class, host, self.conf, self)
except ImportError as e:
- LOG.warn(_("Error in loading routing service helper. Class "
+ LOG.warning(_LW("Error in loading routing service helper. Class "
"specified is %(class)s. Reason:%(reason)s"),
{'class': self.conf.cfg_agent.routing_svc_helper_class,
'reason': e})
self.loop.start(interval=self.conf.cfg_agent.rpc_loop_interval)
def after_start(self):
- LOG.info(_("Cisco cfg agent started"))
+ LOG.info(_LI("Cisco cfg agent started"))
def get_routing_service_helper(self):
return self.routing_service_helper
self.routing_service_helper.process_service(device_ids,
removed_devices_info)
else:
- LOG.warn(_("No routing service helper loaded"))
+ LOG.warning(_LW("No routing service helper loaded"))
LOG.debug("Processing services completed")
def _process_backlogged_hosting_devices(self, context):
if payload['hosting_data'].keys():
self.process_services(removed_devices_info=payload)
except KeyError as e:
- LOG.error(_("Invalid payload format for received RPC message "
+ LOG.error(_LE("Invalid payload format for received RPC message "
"`hosting_devices_removed`. Error is %{error}s. "
"Payload is %(payload)s"),
{'error': e, 'payload': payload})
self.send_agent_report(self.agent_state, context)
res = self.devmgr_rpc.register_for_duty(context)
if res is True:
- LOG.info(_("[Agent registration] Agent successfully "
+ LOG.info(_LI("[Agent registration] Agent successfully "
"registered"))
return
elif res is False:
- LOG.warn(_("[Agent registration] Neutron server said that "
- "device manager was not ready. Retrying in %0.2f "
- "seconds "), REGISTRATION_RETRY_DELAY)
+ LOG.warning(_LW("[Agent registration] Neutron server said "
+ "that device manager was not ready. Retrying "
+ "in %0.2f seconds "), REGISTRATION_RETRY_DELAY)
time.sleep(REGISTRATION_RETRY_DELAY)
elif res is None:
- LOG.error(_("[Agent registration] Neutron server said that no "
- "device manager was found. Cannot "
- "continue. Exiting!"))
+ LOG.error(_LE("[Agent registration] Neutron server said that "
+ "no device manager was found. Cannot continue. "
+ "Exiting!"))
raise SystemExit("Cfg Agent exiting")
- LOG.error(_("[Agent registration] %d unsuccessful registration "
+ LOG.error(_LE("[Agent registration] %d unsuccessful registration "
"attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
raise SystemExit("Cfg Agent exiting")
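These branches belong to the agent's bounded registration loop; a condensed reconstruction (the loop shape is inferred from the snippet, constants mirror it):

    import time

    def register_with_retries(devmgr_rpc, context):
        # register_for_duty() returns True (registered), False (device
        # manager not ready yet) or None (no device manager: fatal).
        for attempt in range(MAX_REGISTRATION_ATTEMPTS):
            res = devmgr_rpc.register_for_duty(context)
            if res:
                return
            if res is None:
                raise SystemExit("Cfg Agent exiting")
            time.sleep(REGISTRATION_RETRY_DELAY)
        raise SystemExit("Cfg Agent exiting")  # attempts exhausted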
LOG.debug("Send agent report successfully completed")
except AttributeError:
# This means the server does not support report_state
- LOG.warn(_("Neutron server does not support state report. "
+ LOG.warning(_LW("Neutron server does not support state report. "
"State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
- LOG.exception(_("Failed sending agent report!"))
+ LOG.exception(_LE("Failed sending agent report!"))
def main(manager='neutron.plugins.cisco.cfg_agent.'
from oslo.config import cfg
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.plugins.cisco.cfg_agent import cfg_exceptions as cfg_exc
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
cisco_csr1kv_snippets as snippets)
self._csr_conn = None
self._intfs_enabled = False
except KeyError as e:
- LOG.error(_("Missing device parameter:%s. Aborting "
+ LOG.error(_LE("Missing device parameter:%s. Aborting "
"CSR1kvRoutingDriver initialization"), e)
raise cfg_exc.CSR1kvInitializationException()
-        elif action is 'delete':
+        elif action == 'delete':
self._remove_static_route(dest, dest_mask, next_hop, vrf_name)
else:
- LOG.error(_('Unknown route command %s'), action)
+ LOG.error(_LE('Unknown route command %s'), action)
def _csr_create_vrf(self, ri):
vrf_name = self._csr_get_vrf_name(ri)
parse = ciscoconfparse.CiscoConfParse(ioscfg)
intfs_raw = parse.find_lines("^interface GigabitEthernet")
intfs = [raw_if.strip().split(' ')[1] for raw_if in intfs_raw]
- LOG.info(_("Interfaces:%s"), intfs)
+ LOG.info(_LI("Interfaces:%s"), intfs)
return intfs
def _get_interface_ip(self, interface_name):
for line in children:
if 'ip address' in line:
ip_address = line.strip().split(' ')[2]
- LOG.info(_("IP Address:%s"), ip_address)
+ LOG.info(_LI("IP Address:%s"), ip_address)
return ip_address
- LOG.warn(_("Cannot find interface: %s"), interface_name)
+ LOG.warning(_LW("Cannot find interface: %s"), interface_name)
return None
def _interface_exists(self, interface):
confstr = snippets.ENABLE_INTF % i
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'ENABLE_INTF'):
- LOG.info(_("Enabled interface %s "), i)
+ LOG.info(_LI("Enabled interface %s "), i)
time.sleep(1)
except Exception:
return False
# raw format ['ip vrf <vrf-name>',....]
vrf_name = line.strip().split(' ')[2]
vrfs.append(vrf_name)
- LOG.info(_("VRFs:%s"), vrfs)
+ LOG.info(_LI("VRFs:%s"), vrfs)
return vrfs
def _get_capabilities(self):
if acls_raw:
if exp_cfg_lines[1] in acls_raw:
return True
- LOG.error(_("Mismatch in ACL configuration for %s"), acl_no)
+ LOG.error(_LE("Mismatch in ACL configuration for %s"), acl_no)
return False
LOG.debug("%s is not present in config", acl_no)
return False
confstr = snippets.CREATE_VRF % vrf_name
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'CREATE_VRF'):
- LOG.info(_("VRF %s successfully created"), vrf_name)
+ LOG.info(_LI("VRF %s successfully created"), vrf_name)
except Exception:
- LOG.exception(_("Failed creating VRF %s"), vrf_name)
+ LOG.exception(_LE("Failed creating VRF %s"), vrf_name)
def _remove_vrf(self, vrf_name):
if vrf_name in self._get_vrfs():
confstr = snippets.REMOVE_VRF % vrf_name
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'REMOVE_VRF'):
- LOG.info(_("VRF %s removed"), vrf_name)
+ LOG.info(_LI("VRF %s removed"), vrf_name)
else:
- LOG.warning(_("VRF %s not present"), vrf_name)
+ LOG.warning(_LW("VRF %s not present"), vrf_name)
def _create_subinterface(self, subinterface, vlan_id, vrf_name, ip, mask):
if vrf_name not in self._get_vrfs():
- LOG.error(_("VRF %s not present"), vrf_name)
+ LOG.error(_LE("VRF %s not present"), vrf_name)
confstr = snippets.CREATE_SUBINTERFACE % (subinterface, vlan_id,
vrf_name, ip, mask)
self._edit_running_config(confstr, 'CREATE_SUBINTERFACE')
def _set_ha_HSRP(self, subinterface, vrf_name, priority, group, ip):
if vrf_name not in self._get_vrfs():
- LOG.error(_("VRF %s not present"), vrf_name)
+ LOG.error(_LE("VRF %s not present"), vrf_name)
confstr = snippets.SET_INTC_HSRP % (subinterface, vrf_name, group,
priority, group, ip)
action = "SET_INTC_HSRP (Group: %s, Priority: % s)" % (group, priority)
xml_str = rpc_obj.xml
if "<ok />" in xml_str:
LOG.debug("RPCReply for %s is OK", snippet_name)
- LOG.info(_("%s successfully executed"), snippet_name)
+ LOG.info(_LI("%s successfully executed"), snippet_name)
return True
# Not Ok, we throw a ConfigurationException
e_type = rpc_obj._root[0][0].text
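The CSR1kv driver pushes IOS config snippets over NETCONF via ncclient; the pattern around these log calls is roughly the following (connection setup elided; the helper name is illustrative, and the real driver raises a ConfigurationException instead of returning False):

    def _push_snippet(conn, confstr, snippet_name):
        # Apply a snippet to the running config; ncclient's RPCReply
        # exposes the raw reply XML via its .xml attribute.
        rpc_obj = conn.edit_config(target='running', config=confstr)
        if "<ok />" in rpc_obj.xml:
            LOG.info(_LI("%s successfully executed"), snippet_name)
            return True
        return False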
# under the License.
from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
return driver
except ImportError:
with excutils.save_and_reraise_exception(reraise=False):
- LOG.exception(_("Error loading cfg agent driver %(driver)s "
+ LOG.exception(_LE("Error loading cfg agent driver %(driver)s "
"for hosting device template "
"%(t_name)s(%(t_id)s)"),
{'driver': driver_class, 't_id': hd_id,
from neutron.agent.linux import utils as linux_utils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
+from neutron.openstack.common.gettextutils import _LI, _LW
+
LOG = logging.getLogger(__name__)
linux_utils.execute(ping_cmd, check_exit_code=True)
return True
except RuntimeError:
- LOG.warn(_("Cannot ping ip address: %s"), ip)
+ LOG.warning(_LW("Cannot ping ip address: %s"), ip)
return False
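For context, the reachability probe wrapping this call is essentially a short ping burst; a sketch (the exact ping options are an assumption):

    def _is_pingable(ip):
        # linux_utils.execute raises RuntimeError on a non-zero exit code.
        ping_cmd = ['ping', '-c', '5', '-W', '1', '-i', '0.2', str(ip)]
        try:
            linux_utils.execute(ping_cmd, check_exit_code=True)
            return True
        except RuntimeError:
            LOG.warning(_LW("Cannot ping ip address: %s"), ip)
            return False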
hd = self.backlog_hosting_devices[hd_id]['hd']
if not timeutils.is_older_than(hd['created_at'],
hd['booting_time']):
- LOG.info(_("Hosting device: %(hd_id)s @ %(ip)s hasn't passed "
- "minimum boot time. Skipping it. "),
+ LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s hasn't "
+ "passed minimum boot time. Skipping it. "),
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
continue
- LOG.info(_("Checking hosting device: %(hd_id)s @ %(ip)s for "
+ LOG.info(_LI("Checking hosting device: %(hd_id)s @ %(ip)s for "
"reachability."), {'hd_id': hd_id,
'ip': hd['management_ip_address']})
if _is_pingable(hd['management_ip_address']):
hd.pop('backlog_insertion_ts', None)
del self.backlog_hosting_devices[hd_id]
response_dict['reachable'].append(hd_id)
- LOG.info(_("Hosting device: %(hd_id)s @ %(ip)s is now "
+ LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s is now "
"reachable. Adding it to response"),
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
else:
- LOG.info(_("Hosting device: %(hd_id)s @ %(ip)s still not "
+ LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s still not "
"reachable "), {'hd_id': hd_id,
'ip': hd['management_ip_address']})
if timeutils.is_older_than(
from neutron import context as n_context
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
-
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
from neutron.plugins.cisco.cfg_agent.device_drivers import driver_mgr
from neutron.plugins.cisco.cfg_agent import device_status
self._drivermgr.remove_driver_for_hosting_device(hd_id)
LOG.debug("Routing service processing successfully completed")
except Exception:
- LOG.exception(_("Failed processing routers"))
+ LOG.exception(_LE("Failed processing routers"))
self.fullsync = True
def collect_state(self, configurations):
return self.plugin_rpc.get_routers(self.context,
hd_ids=device_ids)
except messaging.MessagingException:
- LOG.exception(_("RPC Error in fetching routers from plugin"))
+ LOG.exception(_LE("RPC Error in fetching routers from plugin"))
self.fullsync = True
@staticmethod
cur_router_ids.add(r['id'])
hd = r['hosting_device']
if not self._dev_status.is_hosting_device_reachable(hd):
- LOG.info(_("Router: %(id)s is on an unreachable "
+ LOG.info(_LI("Router: %(id)s is on an unreachable "
"hosting device. "), {'id': r['id']})
continue
if r['id'] not in self.router_info:
ri.router = r
self._process_router(ri)
except KeyError as e:
- LOG.exception(_("Key Error, missing key: %s"), e)
+ LOG.exception(_LE("Key Error, missing key: %s"), e)
self.updated_routers.add(r['id'])
continue
except cfg_exceptions.DriverException as e:
- LOG.exception(_("Driver Exception on router:%(id)s. "
+ LOG.exception(_LE("Driver Exception on router:%(id)s. "
"Error is %(e)s"), {'id': r['id'], 'e': e})
-                self.updated_routers.update(r['id'])
+                self.updated_routers.add(r['id'])
continue
for router in removed_routers:
self._router_removed(router['id'])
except Exception:
- LOG.exception(_("Exception in processing routers on device:%s"),
+ LOG.exception(_LE("Exception in processing routers on device:%s"),
device_id)
self.sync_devices.add(device_id)
"""
ri = self.router_info.get(router_id)
if ri is None:
- LOG.warn(_("Info for router %s was not found. "
+ LOG.warning(_LW("Info for router %s was not found. "
"Skipping router removal"), router_id)
return
ri.router['gw_port'] = None
del self.router_info[router_id]
self.removed_routers.discard(router_id)
except cfg_exceptions.DriverException:
- LOG.warn(_("Router remove for router_id: %s was incomplete. "
+ LOG.warning(_LW("Router remove for router_id: %s was incomplete. "
"Adding the router to removed_routers list"), router_id)
self.removed_routers.add(router_id)
# remove this router from updated_routers if it is there. It might
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
- LOG.error(_("Ignoring multiple IPs on router port %s"), port['id'])
+ LOG.error(_LE("Ignoring multiple IPs on router port %s"),
+ port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.plugins.cisco.common import cisco_constants as c_constants
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.l3 import service_vm_lib
name=cfg.CONF.general.l3_admin_tenant)
cls._l3_tenant_uuid = tenant.id
except k_exceptions.NotFound:
- LOG.error(_('No tenant with a name or ID of %s exists.'),
+ LOG.error(_LE('No tenant with a name or ID of %s exists.'),
cfg.CONF.general.l3_admin_tenant)
except k_exceptions.NoUniqueMatch:
- LOG.error(_('Multiple tenants matches found for %s'),
+            LOG.error(_LE('Multiple tenant matches found for %s'),
cfg.CONF.general.l3_admin_tenant)
return cls._l3_tenant_uuid
if len(net) == 1:
num_subnets = len(net[0]['subnets'])
if num_subnets == 0:
- LOG.error(_('The virtual management network has no '
+ LOG.error(_LE('The virtual management network has no '
'subnet. Please assign one.'))
return
elif num_subnets > 1:
- LOG.info(_('The virtual management network has %d '
+ LOG.info(_LI('The virtual management network has %d '
'subnets. The first one will be used.'),
num_subnets)
cls._mgmt_nw_uuid = net[0].get('id')
elif len(net) > 1:
# Management network must have a unique name.
- LOG.error(_('The virtual management network does not have '
+            LOG.error(_LE('The virtual management network does not have a '
'unique name. Please ensure that it is.'))
else:
# Management network has not been created.
- LOG.error(_('There is no virtual management network. Please '
+ LOG.error(_LE('There is no virtual management network. Please '
'create one.'))
return cls._mgmt_nw_uuid
cls._mgmt_sec_grp_id = res[0].get('id')
elif len(res) > 1:
# the mgmt sec group must be unique.
- LOG.error(_('The security group for the virtual management '
+ LOG.error(_LE('The security group for the virtual management '
'network does not have unique name. Please ensure '
'that it is.'))
else:
# CSR Mgmt security group is not present.
- LOG.error(_('There is no security group for the virtual '
+ LOG.error(_LE('There is no security group for the virtual '
'management network. Please create one.'))
return cls._mgmt_sec_grp_id
cls._hosting_device_driver = importutils.import_object(
cfg.CONF.hosting_devices.csr1kv_device_driver)
except (ImportError, TypeError, n_exc.NeutronException):
- LOG.exception(_('Error loading hosting device driver'))
+ LOG.exception(_LE('Error loading hosting device driver'))
return cls._hosting_device_driver
@classmethod
cls._plugging_driver = importutils.import_object(
cfg.CONF.hosting_devices.csr1kv_plugging_driver)
except (ImportError, TypeError, n_exc.NeutronException):
- LOG.exception(_('Error loading plugging driver'))
+ LOG.exception(_LE('Error loading plugging driver'))
return cls._plugging_driver
def get_hosting_devices_qry(self, context, hosting_device_ids,
return False
if self.is_agent_down(
cfg_agent.heartbeat_timestamp):
- LOG.warn(_('Cisco cfg agent %s is not alive'), cfg_agent.id)
+ LOG.warning(_LW('Cisco cfg agent %s is not alive'),
+ cfg_agent.id)
query = context.session.query(l3_models.HostingDevice)
query = query.filter_by(cfg_agent_id=None)
for hd in query:
if self._svc_vm_mgr.nova_services_up():
self.__class__._nova_running = True
else:
- LOG.info(_('Not all Nova services are up and running. '
+ LOG.info(_LI('Not all Nova services are up and running. '
'Skipping this CSR1kv vm create request.'))
return
plugging_drv = self.get_hosting_device_plugging_driver()
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
return
- LOG.info(_('Created a CSR1kv hosting device VM'))
+ LOG.info(_LI('Created a CSR1kv hosting device VM'))
return hosting_device
def _delete_service_vm_hosting_device(self, context, hosting_device):
self.l3_tenant_id(), self.mgmt_nw_id())
if not self._svc_vm_mgr.delete_service_vm(context,
hosting_device['id']):
- LOG.error(_('Failed to delete hosting device %s service VM. '
+ LOG.error(_LE('Failed to delete hosting device %s service VM. '
'Will un-register it anyway.'),
hosting_device['id'])
plugging_drv.delete_hosting_device_resources(
with context.session.begin(subtransactions=True):
active_cfg_agents = self._get_cfg_agents(context, active=True)
if not active_cfg_agents:
- LOG.warn(_('There are no active Cisco cfg agents'))
+ LOG.warning(_LW('There are no active Cisco cfg agents'))
# No worries, once a Cisco cfg agent is started and
# announces itself any "dangling" hosting devices
# will be scheduled to it.
from neutron.db import models_v2
from neutron.db import portbindings_db as p_binding
from neutron.extensions import providernet as pr_net
+from neutron.openstack.common.gettextutils import _LE, _LI
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
port_db = self._get_router_port_db_on_subnet(
context, router_id, subnet_db)
else:
- msg = "Either subnet_id or port_id must be specified"
+ msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
routers = [self.get_router(context, router_id)]
with context.session.begin(subtransactions=True):
return sync_data
def schedule_router_on_hosting_device(self, context, r_hd_binding):
- LOG.info(_('Attempting to schedule router %s.'),
+ LOG.info(_LI('Attempting to schedule router %s.'),
r_hd_binding['router']['id'])
result = self._create_csr1kv_vm_hosting_device(context.elevated())
if result is None:
router = r_hd_binding['router']
r_hd_binding.hosting_device = result
self.remove_router_from_backlog(router['id'])
- LOG.info(_('Successfully scheduled router %(r_id)s to '
+ LOG.info(_LI('Successfully scheduled router %(r_id)s to '
'hosting device %(d_id)s'),
{'r_id': r_hd_binding['router']['id'],
'd_id': result['id']})
return True
def unschedule_router_from_hosting_device(self, context, r_hd_binding):
- LOG.info(_('Un-schedule router %s.'),
+ LOG.info(_LI('Un-schedule router %s.'),
r_hd_binding['router']['id'])
hosting_device = r_hd_binding['hosting_device']
if r_hd_binding['hosting_device'] is None:
if ((router or {}).get('id') is None or
router['id'] in self._backlogged_routers):
return
- LOG.info(_('Backlogging router %s for renewed scheduling attempt '
+ LOG.info(_LI('Backlogging router %s for renewed scheduling attempt '
'later'), router['id'])
self._backlogged_routers[router['id']] = router
@lockutils.synchronized('routers', 'neutron-')
def remove_router_from_backlog(self, id):
self._backlogged_routers.pop(id, None)
- LOG.info(_('Router %s removed from backlog'), id)
+ LOG.info(_LI('Router %s removed from backlog'), id)
@lockutils.synchronized('routerbacklog', 'neutron-')
def _process_backlogged_routers(self):
return
context = n_context.get_admin_context()
scheduled_routers = []
- LOG.info(_('Processing router (scheduling) backlog'))
+ LOG.info(_LI('Processing router (scheduling) backlog'))
# try to reschedule
for r_id, router in self._backlogged_routers.items():
self._add_type_and_hosting_device_info(context, router)
interval=cfg.CONF.general.backlog_processing_interval)
def _sync_router_backlog(self):
- LOG.info(_('Synchronizing router (scheduling) backlog'))
+ LOG.info(_LI('Synchronizing router (scheduling) backlog'))
context = n_context.get_admin_context()
query = context.session.query(l3_models.RouterHostingDeviceBinding)
query = query.options(joinedload('router'))
return query.one()
except exc.NoResultFound:
# This should not happen
- LOG.error(_('DB inconsistency: No type and hosting info associated'
- ' with router %s'), id)
+ LOG.error(_LE('DB inconsistency: No type and hosting info '
+ 'associated with router %s'), id)
raise RouterBindingInfoError(router_id=id)
except exc.MultipleResultsFound:
# This should not happen either
- LOG.error(_('DB inconsistency: Multiple type and hosting info'
- ' associated with router %s'), id)
+ LOG.error(_LE('DB inconsistency: Multiple type and hosting info '
+ 'associated with router %s'), id)
raise RouterBindingInfoError(router_id=id)
def _get_hosting_device_bindings(self, context, id, load_routers=False,
binding_info = self._get_router_binding_info(context,
router['id'])
except RouterBindingInfoError:
- LOG.error(_('DB inconsistency: No hosting info associated with '
+ LOG.error(_LE('DB inconsistency: No hosting info associated with '
'router %s'), router['id'])
router['hosting_device'] = None
return
alloc = plugging_driver.allocate_hosting_port(
context, router_id, port_db, network_type, hosting_device_id)
if alloc is None:
- LOG.error(_('Failed to allocate hosting port for port %s'),
+ LOG.error(_LE('Failed to allocate hosting port for port %s'),
port_db['id'])
return
with context.session.begin(subtransactions=True):
from neutron.common import exceptions as n_exc
import neutron.db.api as db
from neutron.db import models_v2
+from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
# Sort the range to ensure min, max is in order
seg_min, seg_max = sorted(
int(i) for i in network_profile.segment_range.split('-'))
- LOG.debug(_("seg_min %(seg_min)s, seg_max %(seg_max)s"),
+ LOG.debug("seg_min %(seg_min)s, seg_max %(seg_max)s",
{'seg_min': seg_min, 'seg_max': seg_max})
return seg_min, seg_max
else:
raise n_exc.VlanIdInUse(vlan_id=vlan_id,
physical_network=physical_network)
- LOG.debug(_("Reserving specific vlan %(vlan)s on physical "
- "network %(network)s from pool"),
+ LOG.debug("Reserving specific vlan %(vlan)s on physical network "
+ "%(network)s from pool",
{"vlan": vlan_id, "network": physical_network})
alloc.allocated = True
db_session.add(alloc)
one())
alloc.allocated = False
except exc.NoResultFound:
- LOG.warning(_("vlan_id %(vlan)s on physical network %(network)s "
+ LOG.warning(_LW("vlan_id %(vlan)s on physical network %(network)s "
"not found"),
{"vlan": vlan_id, "network": physical_network})
one())
if alloc.allocated:
raise c_exc.VxlanIDInUse(vxlan_id=vxlan_id)
- LOG.debug(_("Reserving specific vxlan %s from pool"), vxlan_id)
+ LOG.debug("Reserving specific vxlan %s from pool", vxlan_id)
alloc.allocated = True
db_session.add(alloc)
except exc.NoResultFound:
one())
alloc.allocated = False
except exc.NoResultFound:
- LOG.warning(_("vxlan_id %s not found"), vxlan_id)
+ LOG.warning(_LW("vxlan_id %s not found"), vxlan_id)
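The reserve/release helpers here all share one shape: look up the allocation row, flip its allocated flag, and warn if the row is missing; schematically (the model name is an assumption):

    from sqlalchemy.orm import exc

    def release_vxlan(db_session, vxlan_id):
        # Mark the segment as free again; a missing row only warrants a
        # warning since the segment is effectively released either way.
        try:
            alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation).
                     filter_by(vxlan_id=vxlan_id).one())
            alloc.allocated = False
        except exc.NoResultFound:
            LOG.warning(_LW("vxlan_id %s not found"), vxlan_id)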
def set_port_status(port_id, status):
def create_network_profile(db_session, network_profile):
"""Create a network profile."""
- LOG.debug(_("create_network_profile()"))
+ LOG.debug("create_network_profile()")
with db_session.begin(subtransactions=True):
kwargs = {"name": network_profile["name"],
"segment_type": network_profile["segment_type"]}
def delete_network_profile(db_session, id):
"""Delete Network Profile."""
- LOG.debug(_("delete_network_profile()"))
+ LOG.debug("delete_network_profile()")
with db_session.begin(subtransactions=True):
try:
network_profile = get_network_profile(db_session, id)
def update_network_profile(db_session, id, network_profile):
"""Update Network Profile."""
- LOG.debug(_("update_network_profile()"))
+ LOG.debug("update_network_profile()")
with db_session.begin(subtransactions=True):
profile = get_network_profile(db_session, id)
profile.update(network_profile)
def get_network_profile(db_session, id):
"""Get Network Profile."""
- LOG.debug(_("get_network_profile()"))
+ LOG.debug("get_network_profile()")
try:
return db_session.query(
n1kv_models_v2.NetworkProfile).filter_by(id=id).one()
def create_policy_profile(policy_profile):
"""Create Policy Profile."""
- LOG.debug(_("create_policy_profile()"))
+ LOG.debug("create_policy_profile()")
db_session = db.get_session()
with db_session.begin(subtransactions=True):
p_profile = n1kv_models_v2.PolicyProfile(id=policy_profile["id"],
def delete_policy_profile(id):
"""Delete Policy Profile."""
- LOG.debug(_("delete_policy_profile()"))
+ LOG.debug("delete_policy_profile()")
db_session = db.get_session()
with db_session.begin(subtransactions=True):
policy_profile = get_policy_profile(db_session, id)
def update_policy_profile(db_session, id, policy_profile):
"""Update a policy profile."""
- LOG.debug(_("update_policy_profile()"))
+ LOG.debug("update_policy_profile()")
with db_session.begin(subtransactions=True):
_profile = get_policy_profile(db_session, id)
_profile.update(policy_profile)
def get_policy_profile(db_session, id):
"""Get Policy Profile."""
- LOG.debug(_("get_policy_profile()"))
+ LOG.debug("get_policy_profile()")
try:
return db_session.query(
n1kv_models_v2.PolicyProfile).filter_by(id=id).one()
def _profile_binding_exists(db_session, tenant_id, profile_id, profile_type):
"""Check if the profile-tenant binding exists."""
- LOG.debug(_("_profile_binding_exists()"))
+ LOG.debug("_profile_binding_exists()")
db_session = db_session or db.get_session()
return (db_session.query(n1kv_models_v2.ProfileBinding).
filter_by(tenant_id=tenant_id, profile_id=profile_id,
def get_profile_binding(db_session, tenant_id, profile_id):
"""Get Network/Policy Profile - Tenant binding."""
- LOG.debug(_("get_profile_binding()"))
+ LOG.debug("get_profile_binding()")
try:
return (db_session.query(n1kv_models_v2.ProfileBinding).filter_by(
tenant_id=tenant_id, profile_id=profile_id).one())
def delete_profile_binding(db_session, tenant_id, profile_id):
"""Delete Policy Binding."""
- LOG.debug(_("delete_profile_binding()"))
+ LOG.debug("delete_profile_binding()")
db_session = db_session or db.get_session()
try:
binding = get_profile_binding(db_session, tenant_id, profile_id)
with db_session.begin(subtransactions=True):
db_session.delete(binding)
except c_exc.ProfileTenantBindingNotFound:
- LOG.debug(_("Profile-Tenant binding missing for profile ID "
- "%(profile_id)s and tenant ID %(tenant_id)s"),
+ LOG.debug("Profile-Tenant binding missing for profile ID "
+ "%(profile_id)s and tenant ID %(tenant_id)s",
{"profile_id": profile_id, "tenant_id": tenant_id})
return
def get_all_qoss(tenant_id):
"""Lists all the qos to tenant associations."""
- LOG.debug(_("get_all_qoss() called"))
+ LOG.debug("get_all_qoss() called")
session = db.get_session()
return (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).all())
def get_qos(tenant_id, qos_id):
"""Lists the qos given a tenant_id and qos_id."""
- LOG.debug(_("get_qos() called"))
+ LOG.debug("get_qos() called")
session = db.get_session()
try:
return (session.query(network_models_v2.QoS).
def add_qos(tenant_id, qos_name, qos_desc):
"""Adds a qos to tenant association."""
- LOG.debug(_("add_qos() called"))
+ LOG.debug("add_qos() called")
session = db.get_session()
try:
qos = (session.query(network_models_v2.QoS).
from oslo.config import cfg
from neutron import manager
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.l3 import hosting_device_drivers
vm_cfg_data += line
return {'iosxe_config.txt': vm_cfg_data}
except IOError as e:
- LOG.error(_('Failed to create config file: %s. Trying to'
+        LOG.error(_LE('Failed to create config file: %s. Trying to '
'clean up.'), str(e))
self.delete_configdrive_files(context, mgmtport)
raise
from neutron.extensions import providernet as pr_net
from neutron import manager
from neutron.openstack.common import log as logging
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.extensions import n1kv
import neutron.plugins.cisco.l3.plugging_drivers as plug
return profiles[0]['id']
elif len(profiles) > 1:
# Profile must have a unique name.
- LOG.error(_('The %(resource)s %(name)s does not have unique name. '
- 'Please refer to admin guide and create one.'),
+        LOG.error(_LE('The %(resource)s %(name)s does not have a unique '
+                      'name. Please refer to the admin guide and create '
+                      'one.'),
{'resource': resource, 'name': name})
else:
# Profile has not been created.
- LOG.error(_('There is no %(resource)s %(name)s. Please refer to '
+ LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
'admin guide and create one.'),
{'resource': resource, 'name': name})
n1kv_const.T2_PORT_NAME, self.t2_port_profile_id(),
t_p)
except n_exc.NeutronException as e:
- LOG.error(_('Error %s when creating service VM resources. '
+ LOG.error(_LE('Error %s when creating service VM resources. '
'Cleaning up.'), e)
resources = {'ports': t_p, 'networks': t1_n + t2_n,
'subnets': t1_sn + t2_sn}
while mgmt_port is not None or port_ids or subnet_ids or net_ids:
if attempts == DELETION_ATTEMPTS:
- LOG.warning(_('Aborting resource deletion after %d '
+ LOG.warning(_LW('Aborting resource deletion after %d '
'unsuccessful attempts'), DELETION_ATTEMPTS)
return
else:
if attempts > 1:
eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
- LOG.info(_('Resource deletion attempt %d starting'), attempts)
+ LOG.info(_LI('Resource deletion attempt %d starting'),
+ attempts)
# Remove anything created.
if mgmt_port is not None:
ml = set([mgmt_port['id']])
self._core_plugin.delete_network,
n_exc.NetworkNotFound, net_ids)
attempts += 1
- LOG.info(_('Resource deletion succeeded'))
+ LOG.info(_LI('Resource deletion succeeded'))
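The surrounding loop caps resource cleanup at a fixed number of passes with a pause between them; a condensed sketch (constant values and both callables are illustrative stand-ins):

    import eventlet

    DELETION_ATTEMPTS = 5                  # assumed value
    SECONDS_BETWEEN_DELETION_ATTEMPTS = 3  # assumed value

    def cleanup_with_retries(resources_remain, delete_remaining):
        attempts = 1
        while resources_remain():
            if attempts > DELETION_ATTEMPTS:
                LOG.warning(_LW('Aborting resource deletion after %d '
                                'unsuccessful attempts'), DELETION_ATTEMPTS)
                return
            if attempts > 1:
                eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
            LOG.info(_LI('Resource deletion attempt %d starting'), attempts)
            delete_remaining()
            attempts += 1
        LOG.info(_LI('Resource deletion succeeded'))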
def _delete_resources(self, context, name, deleter, exception_type,
resource_ids):
except exception_type:
resource_ids.remove(item_id)
except n_exc.NeutronException as e:
- LOG.error(_('Failed to delete %(resource_name)s %(net_id)s '
+ LOG.error(_LE('Failed to delete %(resource_name)s %(net_id)s '
'for service vm due to %(err)s'),
{'resource_name': name, 'net_id': item_id, 'err': e})
str(port_db.hosting_info.segmentation_id))
else:
trunk_spec = port_db['network_id']
- LOG.info(_('Updating trunk: %(action)s VLAN %(tag)d for network_id '
+ LOG.info(_LI('Updating trunk: %(action)s VLAN %(tag)d for network_id '
'%(id)s'), {'action': action,
'tag': port_db.hosting_info.segmentation_id,
'id': port_db['network_id']})
if res is None:
if attempts >= MAX_HOSTING_PORT_LOOKUP_ATTEMPTS:
# This should not happen ...
- LOG.error(_('Hosting port DB inconsistency for '
+ LOG.error(_LE('Hosting port DB inconsistency for '
'hosting device %s'), hd_id)
return
else:
# The service VM may not have plugged its VIF into the
# Neutron Port yet so we wait and make another lookup.
attempts += 1
- LOG.info(_('Attempt %(attempt)d to find trunk ports for '
+ LOG.info(_LI('Attempt %(attempt)d to find trunk ports for '
'hosting device %(hd_id)s failed. Trying '
'again in %(time)d seconds.'),
{'attempt': attempts, 'hd_id': hd_id,
return other_port['id']
except (exc.NoResultFound, exc.MultipleResultsFound):
# This should not happen ...
- LOG.error(_('Port trunk pair DB inconsistency for port %s'),
+ LOG.error(_LE('Port trunk pair DB inconsistency for port %s'),
port_id)
return
from oslo.config import cfg
from neutron import manager
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_constants
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
- LOG.error(_('Failure determining running Nova services: %s'), e)
+ LOG.error(_LE('Failure determining running Nova services: %s'), e)
return False
return not bool(required.difference(
[service.binary for service in services
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
- LOG.error(_('Failed to get status of service VM instance %(id)s, '
- 'due to %(err)s'), {'id': vm_id, 'err': e})
+ LOG.error(_LE('Failed to get status of service VM instance '
+ '%(id)s, due to %(err)s'), {'id': vm_id, 'err': e})
status = c_constants.SVM_ERROR
return status
image = n_utils.find_resource(self._nclient.images, vm_image)
flavor = n_utils.find_resource(self._nclient.flavors, vm_flavor)
except (nova_exc.CommandError, Exception) as e:
- LOG.error(_('Failure finding needed Nova resource: %s'), e)
+ LOG.error(_LE('Failure finding needed Nova resource: %s'), e)
return
try:
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
- LOG.error(_('Failed to create service VM instance: %s'), e)
+ LOG.error(_LE('Failed to create service VM instance: %s'), e)
return
return {'id': server.id}
nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
nova_exc.ConnectionRefused, nova_exc.ClientException,
Exception) as e:
- LOG.error(_('Failed to delete service VM instance %(id)s, '
+ LOG.error(_LE('Failed to delete service VM instance %(id)s, '
'due to %(err)s'), {'id': vm_id, 'err': e})
return False
from neutron.extensions import providernet as provider
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import excutils
-from neutron.openstack.common.gettextutils import _LE
+from neutron.openstack.common.gettextutils import _LE, _LI
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as const
# Initialize credential store after database initialization
cred.Store.initialize()
- LOG.debug(_("%(module)s.%(name)s init done"),
+ LOG.debug("%(module)s.%(name)s init done",
{'module': __name__,
'name': self.__class__.__name__})
plugin implementation) for completing this operation.
"""
if plugin_key not in self._plugins:
- LOG.info(_("No %s Plugin loaded"), plugin_key)
- LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s "
- "ignored"),
+ LOG.info(_LI("No %s Plugin loaded"), plugin_key)
+ LOG.info(_LI("%(plugin_key)s: %(function_name)s with args "
+ "%(args)s ignored"),
{'plugin_key': plugin_key,
'function_name': function_name,
'args': args})
Perform this operation in the context of the configured device
plugins.
"""
- LOG.debug(_("create_network() called"))
+ LOG.debug("create_network() called")
provider_vlan_id = self._get_provider_vlan_id(network[const.NETWORK])
args = [context, network]
switch_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
cdb.add_provider_network(network_id,
const.NETWORK_TYPE_VLAN,
provider_vlan_id)
- LOG.debug(_("Provider network added to DB: %(network_id)s, "
- "%(vlan_id)s"),
+ LOG.debug("Provider network added to DB: %(network_id)s, "
+ "%(vlan_id)s",
{'network_id': network_id, 'vlan_id': provider_vlan_id})
return switch_output
Perform this operation in the context of the configured device
plugins.
"""
- LOG.debug(_("update_network() called"))
+ LOG.debug("update_network() called")
# We can only support updating of provider attributes if all the
# configured sub-plugins support it. Currently we have no method
self._func_name(),
args)
if cdb.remove_provider_network(id):
- LOG.debug(_("Provider network removed from DB: %s"), id)
+ LOG.debug("Provider network removed from DB: %s", id)
return switch_output
def get_network(self, context, id, fields=None):
Perform this operation in the context of the configured device
plugins.
"""
- LOG.debug(_("create_port() called"))
+ LOG.debug("create_port() called")
args = [context, port]
return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
self._func_name(),
Perform this operation in the context of the configured device
plugins.
"""
- LOG.debug(_("update_port() called"))
+ LOG.debug("update_port() called")
args = [context, id, port]
return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
self._func_name(),
Perform this operation in the context of the configured device
plugins.
"""
- LOG.debug(_("delete_port() called"))
+ LOG.debug("delete_port() called")
port = self.get_port(context, id)
try:
:param network_profile: network profile dict
:param tenant_id: UUID representing the tenant
"""
- LOG.debug(_("Logical network"))
+ LOG.debug("Logical network")
body = {'description': network_profile['name'],
'tenantId': tenant_id}
logical_network_name = (network_profile['id'] +
:param network_profile: network profile dict
:param tenant_id: UUID representing the tenant
"""
- LOG.debug(_("network_segment_pool"))
+ LOG.debug("network_segment_pool")
logical_network_name = (network_profile['id'] +
c_const.LOGICAL_NETWORK_SUFFIX)
body = {'name': network_profile['name'],
headers['Accept'] = self._set_content_type('json')
if body:
body = jsonutils.dumps(body, indent=2)
- LOG.debug(_("req: %s"), body)
+ LOG.debug("req: %s", body)
try:
resp = self.pool.spawn(requests.request,
method,
timeout=self.timeout).wait()
except Exception as e:
raise c_exc.VSMConnectionFailed(reason=e)
- LOG.debug(_("status_code %s"), resp.status_code)
+ LOG.debug("status_code %s", resp.status_code)
if resp.status_code == requests.codes.OK:
if 'application/json' in resp.headers['content-type']:
try:
except ValueError:
return {}
elif 'text/plain' in resp.headers['content-type']:
- LOG.debug(_("VSM: %s"), resp.text)
+ LOG.debug("VSM: %s", resp.text)
else:
raise c_exc.VSMError(reason=resp.text)
from neutron.extensions import providernet
from neutron import manager
from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils as uuidutils
is instantiated for the first time and then continue to poll for
policy profile updates.
"""
- LOG.debug(_('_setup_vsm'))
+ LOG.debug('_setup_vsm')
self.agent_vsm = True
# Poll VSM for create/delete of policy profile.
eventlet.spawn(self._poll_policy_profiles)
from the VSM. Hence we associate the policy profiles with fake
tenant-ids.
"""
- LOG.debug(_('_populate_policy_profiles'))
+ LOG.debug('_populate_policy_profiles')
try:
n1kvclient = n1kv_client.Client()
policy_profiles = n1kvclient.list_port_profiles()
self._remove_all_fake_policy_profiles()
except (cisco_exceptions.VSMError,
cisco_exceptions.VSMConnectionFailed):
- LOG.warning(_('No policy profile populated from VSM'))
+ LOG.warning(_LW('No policy profile populated from VSM'))
def _extend_network_dict_provider(self, context, network):
"""Add extended network parameters."""
that needs to be trunked
:param oper: Operation to be performed
"""
- LOG.debug(_('_populate_member_segments %s'), segment_pairs)
+ LOG.debug('_populate_member_segments %s', segment_pairs)
trunk_list = []
for (segment, dot1qtag) in segment_pairs:
net = self.get_network(context, segment)
else:
pair_list.append((segment1, segment2))
else:
- LOG.debug(_('Invalid UUID supplied in %s'), pair)
+ LOG.debug('Invalid UUID supplied in %s', pair)
msg = _("Invalid UUID supplied")
raise n_exc.InvalidInput(error_message=msg)
return pair_list
raise n_exc.InvalidInput(error_message=msg)
pair_list.append((segment, dot1qtag))
else:
- LOG.debug(_('%s is not a valid uuid'), segment)
+ LOG.debug('%s is not a valid uuid', segment)
msg = _("'%s' is not a valid UUID") % segment
raise n_exc.InvalidInput(error_message=msg)
return pair_list
:param network_profile: network profile dictionary
:param tenant_id: UUID representing the tenant
"""
- LOG.debug(_('_send_create_logical_network'))
+ LOG.debug('_send_create_logical_network')
n1kvclient = n1kv_client.Client()
n1kvclient.create_logical_network(network_profile, tenant_id)
:param context: neutron api request context
:param profile: network profile dictionary
"""
- LOG.debug(_('_send_create_network_profile_request: %s'), profile['id'])
+ LOG.debug('_send_create_network_profile_request: %s', profile['id'])
n1kvclient = n1kv_client.Client()
n1kvclient.create_network_segment_pool(profile, context.tenant_id)
:param profile: network profile dictionary
"""
- LOG.debug(_('_send_update_network_profile_request: %s'), profile['id'])
+ LOG.debug('_send_update_network_profile_request: %s', profile['id'])
n1kvclient = n1kv_client.Client()
n1kvclient.update_network_segment_pool(profile)
:param profile: network profile dictionary
"""
- LOG.debug(_('_send_delete_network_profile_request: %s'),
+ LOG.debug('_send_delete_network_profile_request: %s',
profile['name'])
n1kvclient = n1kv_client.Client()
n1kvclient.delete_network_segment_pool(profile['id'])
:param segment_pairs: List of segments in UUID pairs
that need to be bridged
"""
- LOG.debug(_('_send_create_network_request: %s'), network['id'])
+ LOG.debug('_send_create_network_request: %s', network['id'])
profile = self.get_network_profile(context,
network[n1kv.PROFILE_ID])
n1kvclient = n1kv_client.Client()
:param del_segments: List of segments bindings
that need to be deleted
"""
- LOG.debug(_('_send_update_network_request: %s'), network['id'])
+ LOG.debug('_send_update_network_request: %s', network['id'])
db_session = context.session
profile = n1kv_db_v2.get_network_profile(
db_session, network[n1kv.PROFILE_ID])
body['segmentType'] = profile['sub_type']
body['addSegments'] = network['add_segment_list']
body['delSegments'] = network['del_segment_list']
- LOG.debug(_('add_segments=%s'), body['addSegments'])
- LOG.debug(_('del_segments=%s'), body['delSegments'])
+ LOG.debug('add_segments=%s', body['addSegments'])
+ LOG.debug('del_segments=%s', body['delSegments'])
if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY:
encap_profile = (network['id'] +
c_const.ENCAPSULATION_PROFILE_SUFFIX)
:param context: neutron api request context
:param network: network dictionary
"""
- LOG.debug(_('_send_delete_network_request: %s'), network['id'])
+ LOG.debug('_send_delete_network_request: %s', network['id'])
n1kvclient = n1kv_client.Client()
session = context.session
if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY:
:param context: neutron api request context
:param subnet: subnet dictionary
"""
- LOG.debug(_('_send_create_subnet_request: %s'), subnet['id'])
+ LOG.debug('_send_create_subnet_request: %s', subnet['id'])
n1kvclient = n1kv_client.Client()
n1kvclient.create_ip_pool(subnet)
:param subnet: subnet dictionary
"""
- LOG.debug(_('_send_update_subnet_request: %s'), subnet['name'])
+ LOG.debug('_send_update_subnet_request: %s', subnet['name'])
n1kvclient = n1kv_client.Client()
n1kvclient.update_ip_pool(subnet)
:param context: neutron api request context
:param subnet: subnet dictionary
"""
- LOG.debug(_('_send_delete_subnet_request: %s'), subnet['name'])
+ LOG.debug('_send_delete_subnet_request: %s', subnet['name'])
body = {'ipPool': subnet['id'], 'deleteSubnet': True}
n1kvclient = n1kv_client.Client()
n1kvclient.update_network_segment(subnet['network_id'], body=body)
:param vm_network_name: string representing the name of the VM
network
"""
- LOG.debug(_('_send_create_port_request: %s'), port)
+ LOG.debug('_send_create_port_request: %s', port)
n1kvclient = n1kv_client.Client()
if port_count == 1:
n1kvclient.create_vm_network(port,
:param mac_address: string representing the mac address
:param vm_network_name: VM network name to which the port is bound
"""
- LOG.debug(_('_send_update_port_request: %s'), port_id)
+ LOG.debug('_send_update_port_request: %s', port_id)
body = {'portId': port_id,
'macAddress': mac_address}
n1kvclient = n1kv_client.Client()
:param port: port object which is to be deleted
:param vm_network: VM network object with which the port is associated
"""
- LOG.debug(_('_send_delete_port_request: %s'), port['id'])
+ LOG.debug('_send_delete_port_request: %s', port['id'])
n1kvclient = n1kv_client.Client()
n1kvclient.delete_n1kv_port(vm_network['name'], port['id'])
network['network'])
profile_id = self._process_network_profile(context, network['network'])
segment_pairs = None
- LOG.debug(_('Create network: profile_id=%s'), profile_id)
+ LOG.debug('Create network: profile_id=%s', profile_id)
session = context.session
with session.begin(subtransactions=True):
if not network_type:
(physical_network, network_type, segmentation_id,
multicast_ip) = n1kv_db_v2.alloc_network(session,
profile_id)
- LOG.debug(_('Physical_network %(phy_net)s, '
- 'seg_type %(net_type)s, '
- 'seg_id %(seg_id)s, '
- 'multicast_ip %(multicast_ip)s'),
+ LOG.debug('Physical_network %(phy_net)s, '
+ 'seg_type %(net_type)s, '
+ 'seg_id %(seg_id)s, '
+ 'multicast_ip %(multicast_ip)s',
{'phy_net': physical_network,
'net_type': network_type,
'seg_id': segmentation_id,
segment_pairs = (
self._parse_multi_segments(context, network['network'],
n1kv.SEGMENT_ADD))
- LOG.debug(_('Seg list %s '), segment_pairs)
+ LOG.debug('Seg list %s ', segment_pairs)
elif network_type == c_const.NETWORK_TYPE_TRUNK:
network_profile = self.get_network_profile(context,
profile_id)
physical_network,
network_profile['sub_type']
))
- LOG.debug(_('Seg list %s '), segment_pairs)
+ LOG.debug('Seg list %s ', segment_pairs)
else:
if not segmentation_id:
raise n_exc.TenantNetworksDisabled()
with excutils.save_and_reraise_exception():
self._delete_network_db(context, net['id'])
else:
- LOG.debug(_("Created network: %s"), net['id'])
+ LOG.debug("Created network: %s", net['id'])
return net
def update_network(self, context, id, network):
if binding.network_type != c_const.NETWORK_TYPE_MULTI_SEGMENT:
self._send_update_network_request(context, net, add_segments,
del_segments)
- LOG.debug(_("Updated network: %s"), net['id'])
+ LOG.debug("Updated network: %s", net['id'])
return net
def delete_network(self, context, id):
:param id: UUID representing the network to fetch
:returns: requested network dictionary
"""
- LOG.debug(_("Get network: %s"), id)
+ LOG.debug("Get network: %s", id)
net = super(N1kvNeutronPluginV2, self).get_network(context, id, None)
self._extend_network_dict_provider(context, net)
self._extend_network_dict_profile(context, net)
dictionary. Only these fields will be returned.
:returns: list of network dictionaries.
"""
- LOG.debug(_("Get networks"))
+ LOG.debug("Get networks")
nets = super(N1kvNeutronPluginV2, self).get_networks(context, filters,
None)
for net in nets:
profile_id = self._process_policy_profile(context,
port['port'])
- LOG.debug(_('Create port: profile_id=%s'), profile_id)
+ LOG.debug('Create port: profile_id=%s', profile_id)
session = context.session
with session.begin(subtransactions=True):
pt = super(N1kvNeutronPluginV2, self).create_port(context,
with excutils.save_and_reraise_exception():
self._delete_port_db(context, pt, vm_network)
else:
- LOG.debug(_("Created port: %s"), pt)
+ LOG.debug("Created port: %s", pt)
return pt
def update_port(self, context, id, port):
:param id: UUID representing the port to update
:returns: updated port object
"""
- LOG.debug(_("Update port: %s"), id)
+ LOG.debug("Update port: %s", id)
with context.session.begin(subtransactions=True):
updated_port = super(N1kvNeutronPluginV2,
self).update_port(context, id, port)
dictionary. Only these fields will be returned.
:returns: port dictionary
"""
- LOG.debug(_("Get port: %s"), id)
+ LOG.debug("Get port: %s", id)
port = super(N1kvNeutronPluginV2, self).get_port(context, id, None)
self._extend_port_dict_profile(context, port)
return self._fields(port, fields)
dictionary. Only these fields will be returned.
:returns: list of port dictionaries
"""
- LOG.debug(_("Get ports"))
+ LOG.debug("Get ports")
ports = super(N1kvNeutronPluginV2, self).get_ports(context, filters,
None)
for port in ports:
:param subnet: subnet dictionary
:returns: subnet object
"""
- LOG.debug(_('Create subnet'))
+ LOG.debug('Create subnet')
sub = super(N1kvNeutronPluginV2, self).create_subnet(context, subnet)
try:
self._send_create_subnet_request(context, sub)
super(N1kvNeutronPluginV2,
self).delete_subnet(context, sub['id'])
else:
- LOG.debug(_("Created subnet: %s"), sub['id'])
+ LOG.debug("Created subnet: %s", sub['id'])
if not q_conf.CONF.network_auto_schedule:
# Schedule network to a DHCP agent
net = self.get_network(context, sub['network_id'])
:param id: UUID representing subnet to update
:returns: updated subnet object
"""
- LOG.debug(_('Update subnet'))
+ LOG.debug('Update subnet')
sub = super(N1kvNeutronPluginV2, self).update_subnet(context,
id,
subnet)
:param id: UUID representing subnet to delete
:returns: deleted subnet object
"""
- LOG.debug(_('Delete subnet: %s'), id)
+ LOG.debug('Delete subnet: %s', id)
subnet = self.get_subnet(context, id)
self._send_delete_subnet_request(context, subnet)
return super(N1kvNeutronPluginV2, self).delete_subnet(context, id)
dictionary. Only these fields will be returned.
:returns: subnet object
"""
- LOG.debug(_("Get subnet: %s"), id)
+ LOG.debug("Get subnet: %s", id)
subnet = super(N1kvNeutronPluginV2, self).get_subnet(context, id,
None)
return self._fields(subnet, fields)
dictionary. Only these fields will be returned.
:returns: list of dictionaries of subnets
"""
- LOG.debug(_("Get subnets"))
+ LOG.debug("Get subnets")
subnets = super(N1kvNeutronPluginV2, self).get_subnets(context,
filters,
None)
# Extend the fault map
self._extend_fault_map()
- LOG.debug(_("Plugin initialization complete"))
+ LOG.debug("Plugin initialization complete")
def __getattribute__(self, name):
"""Delegate core API calls to the model class.
"""
def get_all_qoss(self, tenant_id):
"""Get all QoS levels."""
- LOG.debug(_("get_all_qoss() called"))
+ LOG.debug("get_all_qoss() called")
qoslist = cdb.get_all_qoss(tenant_id)
return qoslist
def get_qos_details(self, tenant_id, qos_id):
"""Get QoS Details."""
- LOG.debug(_("get_qos_details() called"))
+ LOG.debug("get_qos_details() called")
return cdb.get_qos(tenant_id, qos_id)
def create_qos(self, tenant_id, qos_name, qos_desc):
"""Create a QoS level."""
- LOG.debug(_("create_qos() called"))
+ LOG.debug("create_qos() called")
qos = cdb.add_qos(tenant_id, qos_name, str(qos_desc))
return qos
def delete_qos(self, tenant_id, qos_id):
"""Delete a QoS level."""
- LOG.debug(_("delete_qos() called"))
+ LOG.debug("delete_qos() called")
return cdb.remove_qos(tenant_id, qos_id)
def rename_qos(self, tenant_id, qos_id, new_name):
"""Rename QoS level."""
- LOG.debug(_("rename_qos() called"))
+ LOG.debug("rename_qos() called")
return cdb.update_qos(tenant_id, qos_id, new_name)
def get_all_credentials(self):
"""Get all credentials."""
- LOG.debug(_("get_all_credentials() called"))
+ LOG.debug("get_all_credentials() called")
credential_list = cdb.get_all_credentials()
return credential_list
def get_credential_details(self, credential_id):
"""Get a particular credential."""
- LOG.debug(_("get_credential_details() called"))
+ LOG.debug("get_credential_details() called")
return cdb.get_credential(credential_id)
def rename_credential(self, credential_id, new_name, new_password):
"""Rename the particular credential resource."""
- LOG.debug(_("rename_credential() called"))
+ LOG.debug("rename_credential() called")
return cdb.update_credential(credential_id, new_name,
new_password=new_password)
from neutron.agent.common import config
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
-from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.cisco.cfg_agent import cfg_agent
from neutron.tests import base
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
-LOG = logging.getLogger(__name__)
-
def prepare_router_data(enable_snat=None, num_internal_ports=1):
router_id = _uuid()
import datetime
import mock
-from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
sys.modules['ncclient'] = mock.MagicMock()
from neutron.tests import base
_uuid = uuidutils.generate_uuid
-LOG = logging.getLogger(__name__)
TYPE_STRING = 'string'
TYPE_DATETIME = 'datetime'
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
-from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.cisco.cfg_agent import cfg_agent
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
HOST = 'myhost'
FAKE_ID = _uuid()
-LOG = logging.getLogger(__name__)
-
def prepare_router_data(enable_snat=None, num_internal_ports=1):
router_id = _uuid()
from neutron import context as n_context
from neutron import manager
from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
self._delete('ports', port['id'])
except Exception as e:
with excutils.save_and_reraise_exception(reraise=False):
- LOG.error('Failed to delete port %(p_id)s for vm instance '
- '%(v_id)s due to %(err)s',
+ LOG.error(_LE('Failed to delete port %(p_id)s for vm '
+ 'instance %(v_id)s due to %(err)s'),
{'p_id': port['id'], 'v_id': vm_id, 'err': e})
raise nova_exc.InternalServerError()
# License for the specific language governing permissions and limitations
# under the License.
-from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.n1kv import n1kv_client
-LOG = logging.getLogger(__name__)
-
_resource_metadata = {'port': ['id', 'macAddress', 'ipAddress', 'subnetId'],
'vmnetwork': ['name', 'networkSegmentId',
'networkSegment', 'portProfile',