def _init_bigips(self):
    """ Connect to all BIG-IPs """
    if self.operational:
        LOG.debug('iControl driver reports connection is operational')
        return
    LOG.debug('initializing communications to BIG-IPs')
    try:
        if not self.conf.debug:
            sudslog = std_logging.getLogger('suds.client')
            sudslog.setLevel(std_logging.FATAL)
            requests_log = std_logging.getLogger(
                "requests.packages.urllib3")
            requests_log.setLevel(std_logging.ERROR)
            requests_log.propagate = False
        else:
            requests_log = std_logging.getLogger(
                "requests.packages.urllib3")
            requests_log.setLevel(std_logging.DEBUG)
            requests_log.propagate = True
        self.__last_connect_attempt = datetime.datetime.now()
        for hostname in self.hostnames:
            # connect to each BIG-IP and set its status
            bigip = self._open_bigip(hostname)
            if bigip.status == 'active':
                # set the status down until we assure initialized
                bigip.status = 'initializing'
                bigip.status_message = 'initializing HA viability'
                LOG.debug('initializing HA viability %s' % hostname)
                device_group_name = None
                if not self.ha_validated:
                    device_group_name = self._validate_ha(bigip)
                    LOG.debug('HA validated from %s with DSG %s' %
                              (hostname, device_group_name))
                    self.ha_validated = True
                if not self.tg_initialized:
                    self._init_traffic_groups(bigip)
                    LOG.debug('learned traffic groups from %s as %s' %
                              (hostname, self.__traffic_groups))
                    self.tg_initialized = True
                LOG.debug('initializing bigip %s' % hostname)
                self._init_bigip(bigip, hostname, device_group_name)
                LOG.debug('initializing agent configurations %s'
                          % hostname)
                self._init_agent_config(bigip)
                # Assure basic BIG-IP HA is operational
                LOG.debug('validating HA state for %s' % hostname)
                bigip.status = 'validating_HA'
                bigip.status_message = 'validating the current HA state'
                if self._validate_ha(bigip):
                    LOG.debug('setting status to active for %s' % hostname)
                    bigip.status = 'active'
                    bigip.status_message = 'BIG-IP ready for provisioning'
                    # self._post_init()
                else:
                    LOG.debug('setting status to error for %s' % hostname)
                    bigip.status = 'error'
                    bigip.status_message = 'BIG-IP is not operational'
                    self._set_agent_status(False)
            else:
                LOG.error('error opening BIG-IP %s - %s:%s'
                          % (hostname, bigip.status, bigip.status_message))
                self._set_agent_status(False)
        # self.connected = True
    except Exception as exc:
        LOG.error('Invalid agent configuration: %s' % exc.message)
        raise
    self._set_agent_status(force_resync=True)
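
The first thing the try block above does is tame two chatty dependencies. The same pattern works standalone; a minimal sketch, assuming only that std_logging is the stdlib logging module imported under an alias (as the usage above implies) so it does not collide with the OpenStack LOG object:

import logging as std_logging

# Outside of debug mode, silence suds completely and keep urllib3
# to errors only, without letting records bubble up to the root logger.
std_logging.getLogger('suds.client').setLevel(std_logging.FATAL)
requests_log = std_logging.getLogger('requests.packages.urllib3')
requests_log.setLevel(std_logging.ERROR)
requests_log.propagate = False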

def generate_capacity_score(self, capacity_policy=None):
    """ Generate the capacity score of connected devices """
    if capacity_policy:
        highest_metric = 0.0
        highest_metric_name = None
        my_methods = dir(self)
        for metric in capacity_policy:
            func_name = 'get_' + metric
            if func_name in my_methods:
                max_capacity = int(capacity_policy[metric])
                metric_func = getattr(self, func_name)
                global_stats = []
                metric_value = 0
                for host in self.__bigips:
                    hostbigip = self.__bigips[host]
                    global_stats = hostbigip.stat.get_global_statistics()
                    value = int(
                        metric_func(bigip=hostbigip,
                                    global_statistics=global_stats)
                    )
                    LOG.debug(_('calling capacity %s on %s returned: %s'
                                % (func_name,
                                   hostbigip.icontrol.hostname,
                                   value)))
                    if value > metric_value:
                        metric_value = value
                metric_capacity = float(metric_value) / float(max_capacity)
                if metric_capacity > highest_metric:
                    highest_metric = metric_capacity
                    highest_metric_name = metric
            else:
                LOG.warn(_('capacity policy has method '
                           '%s which is not implemented in this driver'
                           % metric))
        LOG.debug('capacity score: %s based on %s'
                  % (highest_metric, highest_metric_name))
        return highest_metric
    return 0
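
A worked example of the scoring rule above, with a hypothetical capacity policy and hand-picked device readings (the real driver pulls these from each device's global statistics through get_<metric>() methods). The score is the worst utilization ratio seen across all metrics and devices:

capacity_policy = {'connections': 10000}             # hypothetical metric -> max capacity
device_readings = {'bigip1': 2500, 'bigip2': 9000}   # assumed get_connections() values

metric_value = max(device_readings.values())         # keep the largest reading: 9000
metric_capacity = float(metric_value) / float(capacity_policy['connections'])
print(metric_capacity)                               # 0.9 becomes the capacity score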

def flush_cache(self):
    """Remove cached objects so they can be created if necessary"""
    for bigip in self.get_all_bigips():
        bigip.assured_networks = []
        bigip.assured_tenant_snat_subnets = {}
        bigip.assured_gateway_subnets = []

def remove_orphans(self, all_pools):
    """ Remove out-of-date configuration on big-ips """
    existing_tenants = []
    existing_pools = []
    for pool in all_pools:
        existing_tenants.append(pool['tenant_id'])
        existing_pools.append(pool['pool_id'])
    for bigip in self.get_all_bigips():
        bigip.pool.purge_orphaned_pools(existing_pools)
    for bigip in self.get_all_bigips():
        bigip.system.purge_orphaned_folders_contents(existing_tenants)
    sudslog = std_logging.getLogger('suds.client')
    sudslog.setLevel(std_logging.FATAL)
    for bigip in self.get_all_bigips():
        bigip.system.force_root_folder()
    sudslog.setLevel(std_logging.ERROR)
    for bigip in self.get_all_bigips():
        bigip.system.purge_orphaned_folders(existing_tenants)

def fdb_add(self, fdb):
    """ Add (L2toL3) forwarding database entries """
    self.remove_ips_from_fdb_update(fdb)
    for bigip in self.get_all_bigips():
        self.bigip_l2_manager.add_bigip_fdb(bigip, fdb)

def fdb_remove(self, fdb):
    """ Remove (L2toL3) forwarding database entries """
    self.remove_ips_from_fdb_update(fdb)
    for bigip in self.get_all_bigips():
        self.bigip_l2_manager.remove_bigip_fdb(bigip, fdb)

def fdb_update(self, fdb):
    """ Update (L2toL3) forwarding database entries """
    self.remove_ips_from_fdb_update(fdb)
    for bigip in self.get_all_bigips():
        self.bigip_l2_manager.update_bigip_fdb(bigip, fdb)

def _service_exists(self, service):
    """ Returns whether the bigip has a pool for the service """
    if not service['pool']:
        return False
    folder_name = bigip_interfaces.OBJ_PREFIX + service['pool']['tenant_id']
    if self.lbaas_builder_bigiq_iapp:
        builder = self.lbaas_builder_bigiq_iapp
        readiness = builder.check_tenant_bigiq_readiness(service)
        use_bigiq = readiness['found_bigips']
    else:
        use_bigiq = False
    if use_bigiq:
        return self.lbaas_builder_bigiq_iapp.exists(service)
    else:
        bigips = self.get_config_bigips()
        for bigip in bigips:
            if not bigip.system.folder_exists(folder_name):
                LOG.error("Folder %s does not exist on bigip: %s" %
                          (folder_name, bigip.hostname))
                return False
            pl = bigip.pool.exists(
                name=service['pool']['id'],
                folder=service['pool']['tenant_id'],
                config_mode=self.conf.icontrol_config_mode)
            if not pl:
                return False
            vs = bigip.virtual_server.get_virtual_servers_by_pool_name(
                pool_name=bigip_interfaces.OBJ_PREFIX + service['pool']['id'],
                folder=service['pool']['tenant_id']
            )
            if not vs:
                return False
            for member in service['members']:
                mb = bigip.pool.member_exists(
                    name=service['pool']['id'],
                    ip_address=member['address'],
                    port=member['protocol_port'],
                    folder=service['pool']['tenant_id']
                )
                if not mb:
                    return False
        return True
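
For reference, the shape of the service dictionary that _service_exists (and the handlers below) reads can be pieced together from the lookups above; a hypothetical minimal instance, with made-up IDs:

service = {
    'pool': {
        'id': 'a1b2c3d4-pool-uuid',          # hypothetical
        'tenant_id': 'e5f6a7b8-tenant-uuid',  # hypothetical
    },
    'members': [
        {'address': '10.0.0.10', 'protocol_port': 80},
    ],
}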

def _common_service_handler(self, service):
    """ Assure that the service is configured on bigip(s) """
    start_time = time()
    if not service['pool']:
        LOG.error("_common_service_handler: Service pool is None")
        return
    # Determine whether the tenant's BIG-IPs are managed by BIG-IQ
    # (if enabled); otherwise fall back to direct iControl against
    # the BIG-IP(s).
    if self.lbaas_builder_bigiq_iapp:
        builder = self.lbaas_builder_bigiq_iapp
        readiness = builder.check_tenant_bigiq_readiness(service)
        use_bigiq = readiness['found_bigips']
    else:
        use_bigiq = False
    if not use_bigiq:
        self.tenant_manager.assure_tenant_created(service)
        LOG.debug(" _assure_tenant_created took %.5f secs" %
                  (time() - start_time))
    traffic_group = self._service_to_traffic_group(service)
    if not use_bigiq and self.network_builder:
        start_time = time()
        self.network_builder.prep_service_networking(
            service, traffic_group)
        if time() - start_time > .001:
            LOG.debug(" _prep_service_networking "
                      "took %.5f secs" % (time() - start_time))
    all_subnet_hints = {}
    if use_bigiq:
        self.lbaas_builder_bigiq_iapp.assure_service(
            service, traffic_group, all_subnet_hints)
    else:
        for bigip in self.get_config_bigips():
            # check_for_delete_subnets:
            #   keep track of which subnets we should check to delete
            #   for a deleted vip or member
            # do_not_delete_subnets:
            #   If we add an IP to a subnet we must not delete the subnet
            all_subnet_hints[bigip.device_name] = \
                {'check_for_delete_subnets': {},
                 'do_not_delete_subnets': []}
        if self.conf.icontrol_config_mode == 'iapp':
            self.lbaas_builder_bigip_iapp.assure_service(
                service, traffic_group, all_subnet_hints)
        else:
            self.lbaas_builder_bigip_objects.assure_service(
                service, traffic_group, all_subnet_hints)
    if not use_bigiq and self.network_builder:
        start_time = time()
        try:
            self.network_builder.post_service_networking(
                service, all_subnet_hints)
        except NeutronException as exc:
            LOG.error("post_service_networking exception: %s"
                      % str(exc.msg))
        except Exception as exc:
            LOG.error("post_service_networking exception: %s"
                      % str(exc.message))
        LOG.debug(" _post_service_networking took %.5f secs" %
                  (time() - start_time))
    if not use_bigiq:
        start_time = time()
        self.tenant_manager.assure_tenant_cleanup(
            service, all_subnet_hints)
        LOG.debug(" _assure_tenant_cleanup took %.5f secs" %
                  (time() - start_time))
    self.update_service_status(service)
    start_time = time()
    self.sync_if_clustered()
    LOG.debug(" final sync took %.5f secs" % (time() - start_time))

def update_service_status(self, service):
    """ Update status of objects in OpenStack """
    # plugin_rpc may not be set when unit testing
    if not self.plugin_rpc:
        return
    self._update_members_status(service['members'])
    self._update_pool_status(service['pool'])
    self._update_pool_monitors_status(service)
    self._update_vip_status(service['vip'])

def _update_pool_monitors_status(self, service):
    """ Update pool monitor status in OpenStack """
    monitors_destroyed = []
    monitors_updated = []
    pool = service['pool']
    LOG.debug("update_pool_monitors_status: service: %s" % service)
    health_monitors_status = {}
    for monitor in pool['health_monitors_status']:
        health_monitors_status[monitor['monitor_id']] = \
            monitor['status']
    LOG.debug("update_pool_monitors_status: health_monitor_status: %s"
              % health_monitors_status)
    for monitor in service['health_monitors']:
        if monitor['id'] in health_monitors_status:
            if health_monitors_status[monitor['id']] == \
                    plugin_const.PENDING_DELETE:
                monitors_destroyed.append(
                    {'health_monitor_id': monitor['id'],
                     'pool_id': pool['id']})
            elif health_monitors_status[monitor['id']] == \
                    plugin_const.PENDING_UPDATE or \
                    health_monitors_status[monitor['id']] == \
                    plugin_const.PENDING_CREATE:
                monitors_updated.append(
                    {'pool_id': pool['id'],
                     'health_monitor_id': monitor['id'],
                     'status': plugin_const.ACTIVE,
                     'status_description': 'monitor active'})
    LOG.debug("Monitors to destroy: %s" % monitors_destroyed)
    for monitor_destroyed in monitors_destroyed:
        LOG.debug("Monitor destroying: %s" % monitor_destroyed)
        self.plugin_rpc.health_monitor_destroyed(
            **monitor_destroyed)
    for monitor_updated in monitors_updated:
        try:
            self.plugin_rpc.update_health_monitor_status(
                **monitor_updated)
        except Exception as exc:
            if 'PENDING_DELETE' in str(exc):
                LOG.debug("Attempted to update monitor being deleted!")
            else:
                LOG.debug(str(exc))
                raise
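
A hedged sketch of the two inputs this method correlates, with hypothetical IDs (the field names are the ones read above): pool['health_monitors_status'] carries per-monitor provisioning status, while service['health_monitors'] carries the monitor definitions.

pool = {
    'id': 'pool-uuid',  # hypothetical
    'health_monitors_status': [
        {'monitor_id': 'mon-1', 'status': 'PENDING_CREATE'},  # -> reported ACTIVE
        {'monitor_id': 'mon-2', 'status': 'PENDING_DELETE'},  # -> reported destroyed
    ],
}
service_health_monitors = [{'id': 'mon-1'}, {'id': 'mon-2'}]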

def tenant_to_traffic_group(self, tenant_id):
    """ Hash tenant id to index of traffic group """
    hexhash = hashlib.md5(tenant_id).hexdigest()
    tg_index = int(hexhash, 16) % len(self.__traffic_groups)
    return self.__traffic_groups[tg_index]

def _init_traffic_groups(self, bigip):
    """ Count vips and gws on traffic groups """
    self.__traffic_groups = bigip.cluster.get_traffic_groups()
    if 'traffic-group-local-only' in self.__traffic_groups:
        self.__traffic_groups.remove('traffic-group-local-only')
    self.__traffic_groups.sort()
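
A standalone sketch of the hashing in tenant_to_traffic_group above, assuming three learned traffic groups (traffic-group-local-only is always stripped first). Hashing the tenant id makes the assignment deterministic and roughly uniform across tenants:

import hashlib

traffic_groups = ['traffic-group-1', 'traffic-group-2', 'traffic-group-3']

def assign_traffic_group(tenant_id):
    # The Python 2 era driver hashes the raw str; encode explicitly on Python 3.
    hexhash = hashlib.md5(tenant_id.encode('utf-8')).hexdigest()
    return traffic_groups[int(hexhash, 16) % len(traffic_groups)]

print(assign_traffic_group('tenant-a'))  # same tenant, same group, every time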

def sync_if_clustered(self):
    """ sync device group if not in replication mode """
    if self.conf.f5_ha_type == 'standalone' or \
            self.conf.f5_sync_mode == 'replication' or \
            len(self.get_all_bigips()) < 2:
        return
    bigip = self.get_bigip()
    self._sync_with_retries(bigip)

def _validate_bigip_version(bigip, hostname):
    """ Ensure the BIG-IP has sufficient version """
    major_version = bigip.system.get_major_version()
    if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
        raise f5ex.MajorVersionValidateFailed(
            'Device %s must be at least TMOS %s.%s'
            % (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
               f5const.MIN_TMOS_MINOR_VERSION))
    minor_version = bigip.system.get_minor_version()
    if minor_version < f5const.MIN_TMOS_MINOR_VERSION:
        raise f5ex.MinorVersionValidateFailed(
            'Device %s must be at least TMOS %s.%s'
            % (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
               f5const.MIN_TMOS_MINOR_VERSION))
    return major_version, minor_version

def create_clientssl_profile_for_certificate(
        self,
        certificate=None,
        parent_profile='/Common/clientssl',
        folder='Common'
):
    """ Create a tenant clientssl profile for the specified
        certificate in the given folder. """
    if not isinstance(certificate, Certificate):  # @UndefinedVariable
        raise Exception('certificate is not an instance of Certificate')
    profile_name = certificate.certificate_id
    use_default_parent = True
    if not parent_profile == '/Common/clientssl':
        parent_profile_name = os.path.basename(parent_profile)
        parent_profile_folder = os.path.dirname(parent_profile)
        if not self.client_profile_exists(name=parent_profile_name,
                                          folder=parent_profile_folder):
            raise ValueError('parent clientssl profile %s does not exist'
                             % parent_profile)
        use_default_parent = False
    if not self.client_profile_exists(name=profile_name, folder=folder):
        # import the certificate and key
        self.mgmt_keycert.certificate_import_from_pem(
            mode='MANAGEMENT_MODE_DEFAULT',
            cert_ids=[profile_name],
            pem_data=[certificate.certificate_data],
            overwrite=True
        )
        self.mgmt_keycert.key_import_from_pem(
            mode='MANAGEMENT_MODE_DEFAULT',
            key_ids=[profile_name],
            pem_data=[certificate.key_data],
            overwrite=True
        )
        # add SSL profile
        profile_string_cert = \
            self.lb_clientssl.typefactory.create('LocalLB.ProfileString')
        profile_string_cert.value = profile_name + ".crt"
        profile_string_cert.default_flag = False
        profile_string_key = \
            self.lb_clientssl.typefactory.create('LocalLB.ProfileString')
        profile_string_key.value = profile_name + ".key"
        profile_string_key.default_flag = False
        self.lb_clientssl.create_v2(
            profile_names=[profile_name],
            keys=[profile_string_key],
            certs=[profile_string_cert]
        )
        if not use_default_parent:
            profile_string_defaults = \
                self.lb_clientssl.typefactory.create('LocalLB.ProfileString')
            profile_string_defaults.value = parent_profile
            profile_string_defaults.default_flag = False
            self.lb_clientssl.set_default_profile(
                profile_names=[profile_name],
                defaults=[profile_string_defaults]
            )
        if certificate.__key_passphrase__:
            profile_string_passphrase = \
                self.lb_clientssl.typefactory.create('LocalLB.ProfileString')
            profile_string_passphrase.value = \
                certificate.__key_passphrase__
            profile_string_passphrase.default_flag = False
            self.lb_clientssl.set_passphrase(
                profile_names=[profile_name],
                passphrases=[profile_string_passphrase]
            )

def assure_service(self, service, traffic_group, all_subnet_hints):
    """ Assure that the service is configured """
    if not service['pool']:
        return
    self._check_monitor_delete(service)
    start_time = time()
    self._assure_pool_create(service['pool'])
    LOG.debug(" _assure_pool_create took %.5f secs" %
              (time() - start_time))
    start_time = time()
    self._assure_pool_monitors(service)
    LOG.debug(" _assure_pool_monitors took %.5f secs" %
              (time() - start_time))
    start_time = time()
    self._assure_members(service, all_subnet_hints)
    LOG.debug(" _assure_members took %.5f secs" %
              (time() - start_time))
    start_time = time()
    self._assure_vip(service, traffic_group, all_subnet_hints)
    LOG.debug(" _assure_vip took %.5f secs" %
              (time() - start_time))
    start_time = time()
    self._assure_pool_delete(service)
    LOG.debug(" _assure_pool_delete took %.5f secs" %
              (time() - start_time))
    return all_subnet_hints

def _assure_pool_create(self, pool):
    """ Provision Pool - Create/Update """
    # Service Layer (Shared Config)
    for bigip in self.driver.get_config_bigips():
        self.bigip_pool_manager.assure_bigip_pool_create(bigip, pool)

def _assure_pool_monitors(self, service):
    """ Provision Health Monitors - Create/Update """
    # Service Layer (Shared Config)
    for bigip in self.driver.get_config_bigips():
        self.bigip_pool_manager.assure_bigip_pool_monitors(bigip, service)

def _assure_members(self, service, all_subnet_hints):
    """ Provision Members - Create/Update """
    # Service Layer (Shared Config)
    for bigip in self.driver.get_config_bigips():
        subnet_hints = all_subnet_hints[bigip.device_name]
        self.bigip_pool_manager.assure_bigip_members(
            bigip, service, subnet_hints)
    # avoids race condition:
    # deletion of pool member objects must sync before we
    # remove the selfip from the peer bigips.
    self.driver.sync_if_clustered()

def _assure_vip(self, service, traffic_group, all_subnet_hints):
    """ Ensure the vip is on all bigips. """
    vip = service['vip']
    if 'id' not in vip:
        return
    bigips = self.driver.get_config_bigips()
    for bigip in bigips:
        subnet_hints = all_subnet_hints[bigip.device_name]
        subnet = vip['subnet']
        # if vip['status'] == plugin_const.PENDING_CREATE or \
        #         vip['status'] == plugin_const.PENDING_UPDATE:
        #     self.bigip_vip_manager.assure_bigip_create_vip(
        #         bigip, service, traffic_group)
        #     if subnet and subnet['id'] in \
        #             subnet_hints['check_for_delete_subnets']:
        #         del subnet_hints['check_for_delete_subnets'][subnet['id']]
        #     if subnet and subnet['id'] not in \
        #             subnet_hints['do_not_delete_subnets']:
        #         subnet_hints['do_not_delete_subnets'].append(subnet['id'])
        #
        # elif vip['status'] == plugin_const.PENDING_DELETE:
        #     self.bigip_vip_manager.assure_bigip_delete_vip(bigip, service)
        #     if subnet and subnet['id'] not in \
        #             subnet_hints['do_not_delete_subnets']:
        #         subnet_hints['check_for_delete_subnets'][subnet['id']] = \
        #             {'network': vip['network'],
        #              'subnet': subnet,
        #              'is_for_member': False}
        if vip['status'] != plugin_const.PENDING_DELETE:
            self.bigip_vip_manager.assure_bigip_create_vip(
                bigip, service, traffic_group)
            if subnet and subnet['id'] in \
                    subnet_hints['check_for_delete_subnets']:
                del subnet_hints['check_for_delete_subnets'][subnet['id']]
            if subnet and subnet['id'] not in \
                    subnet_hints['do_not_delete_subnets']:
                subnet_hints['do_not_delete_subnets'].append(subnet['id'])
        else:
            self.bigip_vip_manager.assure_bigip_delete_vip(bigip, service)
            if subnet and subnet['id'] not in \
                    subnet_hints['do_not_delete_subnets']:
                subnet_hints['check_for_delete_subnets'][subnet['id']] = \
                    {'network': vip['network'],
                     'subnet': subnet,
                     'is_for_member': False}
    # avoids race condition:
    # deletion of vip address must sync before we
    # remove the selfip from the peer bigips.
    self.driver.sync_if_clustered()

def _assure_pool_delete(self, service):
    """ Assure pool is deleted from big-ip """
    if service['pool']['status'] != plugin_const.PENDING_DELETE:
        return
    # Service Layer (Shared Config)
    for bigip in self.driver.get_config_bigips():
        self.bigip_pool_manager.assure_bigip_pool_delete(bigip, service)

def assure_bigip_selfip(self, bigip, service, subnetinfo):
    """ Create selfip on the BIG-IP """
    network = subnetinfo['network']
    if not network:
        LOG.error(_('Attempted to create selfip and snats'
                    ' for network with no id... skipping.'))
        return
    subnet = subnetinfo['subnet']
    tenant_id = service['pool']['tenant_id']
    # If we have already assured this subnet.. return.
    # Note this cache is periodically cleared in order to
    # force assurance that the configuration is present.
    if tenant_id in bigip.assured_tenant_snat_subnets and \
            subnet['id'] in bigip.assured_tenant_snat_subnets[tenant_id]:
        return
    selfip_address = self._get_bigip_selfip_address(bigip, subnet)
    selfip_address += '%' + str(network['route_domain_id'])
    if self.bigip_l2_manager.is_common_network(network):
        network_folder = 'Common'
    else:
        network_folder = service['pool']['tenant_id']
    (network_name, preserve_network_name) = \
        self.bigip_l2_manager.get_network_name(bigip, network)
    bigip.selfip.create(
        name="local-" + bigip.device_name + "-" + subnet['id'],
        ip_address=selfip_address,
        netmask=netaddr.IPNetwork(subnet['cidr']).netmask,
        vlan_name=network_name,
        floating=False,
        folder=network_folder,
        preserve_vlan_name=preserve_network_name)
    # TO DO: we need to only bind the local SelfIP to the
    # local device... not treat it as if it was floating
    if self.l3_binding:
        self.l3_binding.bind_address(subnet_id=subnet['id'],
                                     ip_address=selfip_address)

def _get_bigip_selfip_address(self, bigip, subnet):
    """ Get ip address for selfip to use on BIG-IP """
    selfip_name = "local-" + bigip.device_name + "-" + subnet['id']
    ports = self.driver.plugin_rpc.get_port_by_name(port_name=selfip_name)
    if len(ports) > 0:
        port = ports[0]
    else:
        port = self.driver.plugin_rpc.create_port_on_subnet(
            subnet_id=subnet['id'],
            mac_address=None,
            name=selfip_name,
            fixed_address_count=1)
    return port['fixed_ips'][0]['ip_address']
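
The self IP name doubles as the Neutron port name, which is what makes the lookup above idempotent across agent restarts; with hypothetical values:

selfip_name = 'local-' + 'bigip1' + '-' + 'subnet-uuid'
# 'local-bigip1-subnet-uuid' is both the BIG-IP SelfIP name and the
# Neutron port name that get_port_by_name() finds on the next pass.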

def assure_gateway_on_subnet(self, bigip, subnetinfo, traffic_group):
    """ Called for every bigip in replication mode;
        otherwise called only once. """
    subnet = subnetinfo['subnet']
    if subnet['id'] in bigip.assured_gateway_subnets:
        return
    network = subnetinfo['network']
    (network_name, preserve_network_name) = \
        self.bigip_l2_manager.get_network_name(bigip, network)
    if self.bigip_l2_manager.is_common_network(network):
        network_folder = 'Common'
        network_name = '/Common/' + network_name
    else:
        network_folder = subnet['tenant_id']
    # Select a traffic group for the floating SelfIP
    floating_selfip_name = "gw-" + subnet['id']
    netmask = netaddr.IPNetwork(subnet['cidr']).netmask
    bigip.selfip.create(name=floating_selfip_name,
                        ip_address=subnet['gateway_ip'],
                        netmask=netmask,
                        vlan_name=network_name,
                        floating=True,
                        traffic_group=traffic_group,
                        folder=network_folder,
                        preserve_vlan_name=preserve_network_name)
    if self.l3_binding:
        self.l3_binding.bind_address(subnet_id=subnet['id'],
                                     ip_address=subnet['gateway_ip'])
    # Setup a wild card ip forwarding virtual service for this subnet
    gw_name = "gw-" + subnet['id']
    bigip.virtual_server.create_ip_forwarder(
        name=gw_name, ip_address='0.0.0.0',
        mask='0.0.0.0',
        vlan_name=network_name,
        traffic_group=traffic_group,
        folder=network_folder,
        preserve_vlan_name=preserve_network_name)
    # Setup the IP forwarding virtual server to use the Self IPs
    # as the forwarding SNAT addresses
    bigip.virtual_server.set_snat_automap(name=gw_name,
                                          folder=network_folder)
    bigip.assured_gateway_subnets.append(subnet['id'])

def delete_gateway_on_subnet(self, bigip, subnetinfo):
    """ Called for every bigip in replication mode;
        otherwise called only once. """
    network = subnetinfo['network']
    if not network:
        LOG.error(_('Attempted to delete default gateway'
                    ' for network with no id... skipping.'))
        return
    subnet = subnetinfo['subnet']
    if self.bigip_l2_manager.is_common_network(network):
        network_folder = 'Common'
    else:
        network_folder = subnet['tenant_id']
    floating_selfip_name = "gw-" + subnet['id']
    if self.driver.conf.f5_populate_static_arp:
        bigip.arp.delete_by_subnet(subnet=subnetinfo['subnet']['cidr'],
                                   mask=None,
                                   folder=network_folder)
    bigip.selfip.delete(name=floating_selfip_name,
                        folder=network_folder)
    if self.l3_binding:
        self.l3_binding.unbind_address(subnet_id=subnet['id'],
                                       ip_address=subnet['gateway_ip'])
    gw_name = "gw-" + subnet['id']
    bigip.virtual_server.delete(name=gw_name,
                                folder=network_folder)
    if subnet['id'] in bigip.assured_gateway_subnets:
        bigip.assured_gateway_subnets.remove(subnet['id'])
    return gw_name

def _get_tunnel_name(network):
    """ BIG-IP object name for a tunnel """
    tunnel_type = network['provider:network_type']
    tunnel_id = network['provider:segmentation_id']
    return 'tunnel-' + str(tunnel_type) + '-' + str(tunnel_id)

def _get_tunnel_fake_mac(network, local_ip):
    """ create a fake mac for l2 records for tunnels """
    network_id = str(network['provider:segmentation_id']).rjust(4, '0')
    mac_prefix = '02:' + network_id[:2] + ':' + network_id[2:4] + ':'
    ip_parts = local_ip.split('.')
    if len(ip_parts) > 3:
        mac = [int(ip_parts[-3]),
               int(ip_parts[-2]),
               int(ip_parts[-1])]
    else:
        ip_parts = local_ip.split(':')
        if len(ip_parts) > 3:
            mac = [int('0x' + ip_parts[-3], 16),
                   int('0x' + ip_parts[-2], 16),
                   int('0x' + ip_parts[-1], 16)]
        else:
            mac = [random.randint(0x00, 0x7f),
                   random.randint(0x00, 0xff),
                   random.randint(0x00, 0xff)]
    return mac_prefix + ':'.join("%02x" % octet for octet in mac)
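
Both helpers above are pure functions, so a short worked example pins down their behavior (network values hypothetical):

network = {'provider:network_type': 'gre', 'provider:segmentation_id': 1234}

print(_get_tunnel_name(network))                  # 'tunnel-gre-1234'

# _get_tunnel_fake_mac(network, '10.1.2.3'):
#   segmentation id '1234'        -> mac_prefix '02:12:34:'
#   last three IPv4 octets 1.2.3  -> '01:02:03'
print(_get_tunnel_fake_mac(network, '10.1.2.3'))  # '02:12:34:01:02:03'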

def assure_bigip_network(self, bigip, network):
    """ Ensure bigip has configured network object """
    if not network:
        LOG.error(_(' assure_bigip_network: '
                    'Attempted to assure a network with no id..skipping.'))
        return
    if network['id'] in bigip.assured_networks:
        return
    if network['id'] in self.conf.common_network_ids:
        LOG.debug(_(' assure_bigip_network: '
                    'Network is a common global network... skipping.'))
        return
    LOG.debug(" assure_bigip_network network: %s" % str(network))
    start_time = time()
    if self.is_common_network(network):
        network_folder = 'Common'
    else:
        network_folder = network['tenant_id']
    # setup all needed L2 network segments
    if network['provider:network_type'] == 'flat':
        self._assure_device_network_flat(network, bigip, network_folder)
    elif network['provider:network_type'] == 'vlan':
        self._assure_device_network_vlan(network, bigip, network_folder)
    elif network['provider:network_type'] == 'vxlan':
        self._assure_device_network_vxlan(network, bigip, network_folder)
    elif network['provider:network_type'] == 'gre':
        self._assure_device_network_gre(network, bigip, network_folder)
    else:
        error_message = 'Unsupported network type %s.' \
            % network['provider:network_type'] + \
            ' Cannot setup network.'
        LOG.error(_(error_message))
        raise f5ex.InvalidNetworkType(error_message)
    bigip.assured_networks.append(network['id'])
    if time() - start_time > .001:
        LOG.debug(" assure bigip network took %.5f secs" %
                  (time() - start_time))

def _assure_device_network_flat(self, network, bigip, network_folder):
    """ Ensure bigip has configured flat vlan (untagged) """
    interface = self.interface_mapping['default']
    vlanid = 0
    # Do we have host specific mappings?
    net_key = network['provider:physical_network']
    if net_key + ':' + bigip.icontrol.hostname in \
            self.interface_mapping:
        interface = self.interface_mapping[
            net_key + ':' + bigip.icontrol.hostname]
    # Do we have a mapping for this network
    elif net_key in self.interface_mapping:
        interface = self.interface_mapping[net_key]
    vlan_name = self.get_vlan_name(network,
                                   bigip.icontrol.hostname)
    self._assure_vcmp_device_network(bigip,
                                     vlan={'name': vlan_name,
                                           'folder': network_folder,
                                           'id': vlanid,
                                           'interface': interface,
                                           'network': network})
    if self.vcmp_manager.get_vcmp_host(bigip):
        interface = None
    bigip.vlan.create(
        name=vlan_name, vlanid=0, interface=interface,
        folder=network_folder, description=network['id'],
        route_domain_id=network['route_domain_id'])

def _assure_device_network_vlan(self, network, bigip, network_folder):
    """ Ensure bigip has configured tagged vlan """
    # VLAN names are limited to 64 characters including
    # the folder name, so we name them foolish things.
    interface = self.interface_mapping['default']
    tagged = self.tagging_mapping['default']
    vlanid = 0
    # Do we have host specific mappings?
    net_key = network['provider:physical_network']
    if net_key + ':' + bigip.icontrol.hostname in \
            self.interface_mapping:
        interface = self.interface_mapping[
            net_key + ':' + bigip.icontrol.hostname]
        tagged = self.tagging_mapping[
            net_key + ':' + bigip.icontrol.hostname]
    # Do we have a mapping for this network
    elif net_key in self.interface_mapping:
        interface = self.interface_mapping[net_key]
        tagged = self.tagging_mapping[net_key]
    if tagged:
        vlanid = network['provider:segmentation_id']
    else:
        vlanid = 0
    vlan_name = self.get_vlan_name(network,
                                   bigip.icontrol.hostname)
    self._assure_vcmp_device_network(bigip,
                                     vlan={'name': vlan_name,
                                           'folder': network_folder,
                                           'id': vlanid,
                                           'interface': interface,
                                           'network': network})
    if self.vcmp_manager.get_vcmp_host(bigip):
        interface = None
    bigip.vlan.create(
        name=vlan_name, vlanid=vlanid, interface=interface,
        folder=network_folder, description=network['id'],
        route_domain_id=network['route_domain_id'])
    if self.vlan_binding:
        self.vlan_binding.allow_vlan(
            device_name=bigip.device_name,
            interface=interface,
            vlanid=vlanid
        )
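
A hypothetical shape for the two lookup tables consulted above. Keys are either a bare provider:physical_network name or 'physnet:hostname' for a per-host override; values are a TMOS interface (or trunk) name and a tagging flag (all values below are assumptions for illustration):

interface_mapping = {
    'default': '1.1',
    'physnet1': '1.2',
    'physnet1:bigip1.example.com': '1.3',    # host-specific override
}
tagging_mapping = {
    'default': True,
    'physnet1': True,
    'physnet1:bigip1.example.com': False,    # untagged on this host
}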

def _assure_device_network_gre(self, network, bigip, network_folder):
    """ Ensure bigip has configured gre tunnel """
    if not bigip.local_ip:
        error_message = 'Cannot create tunnel %s on %s' \
            % (network['id'], bigip.icontrol.hostname)
        error_message += ' no VTEP SelfIP defined.'
        LOG.error('L2GRE:' + error_message)
        raise f5ex.MissingVTEPAddress('L2GRE:' + error_message)
    tunnel_name = _get_tunnel_name(network)
    bigip.l2gre.create_multipoint_tunnel(
        name=tunnel_name,
        profile_name='gre_ovs',
        self_ip_address=bigip.local_ip,
        greid=network['provider:segmentation_id'],
        description=network['id'],
        folder=network_folder,
        route_domain_id=network['route_domain_id'])
    if self.fdb_connector:
        self.fdb_connector.notify_vtep_added(network, bigip.local_ip)
Python | def _assure_vcmp_device_network(self, bigip, vlan):
"""For vCMP Guests, add VLAN to vCMP Host, associate VLAN with
vCMP Guest, and remove VLAN from /Common on vCMP Guest."""
vcmp_host = self.vcmp_manager.get_vcmp_host(bigip)
if not vcmp_host:
return
# Create the VLAN on the vCMP Host
try:
vcmp_host['bigip'].vlan.create(
name=vlan['name'], vlanid=vlan['id'],
interface=vlan['interface'], folder='/Common',
description=vlan['network']['id'],
route_domain_id=vlan['network']['route_domain_id'])
LOG.debug(('Created VLAN %s on vCMP Host %s' %
(vlan['name'], vcmp_host['bigip'].icontrol.hostname)))
except VLANCreationException as exc:
LOG.error(
('Exception creating VLAN %s on vCMP Host %s:%s' %
(vlan['name'], vcmp_host['bigip'].icontrol.hostname, exc)))
# Determine if the VLAN is already associated with the vCMP Guest
if self._is_vlan_assoc_with_vcmp_guest(bigip, vlan):
return
# Associate the VLAN with the vCMP Guest
vcmp_guest = self.vcmp_manager.get_vcmp_guest(vcmp_host, bigip)
try:
vlan_seq = vcmp_host['bigip'].system.sys_vcmp.typefactory.\
create('Common.StringSequence')
vlan_seq.values = prefixed(vlan['name'])
vlan_seq_seq = vcmp_host['bigip'].system.sys_vcmp.typefactory.\
create('Common.StringSequenceSequence')
vlan_seq_seq.values = [vlan_seq]
vcmp_host['bigip'].system.sys_vcmp.add_vlan([vcmp_guest['name']],
vlan_seq_seq)
LOG.debug(('Associated VLAN %s with vCMP Guest %s' %
(vlan['name'], vcmp_guest['mgmt_addr'])))
except WebFault as exc:
LOG.error(('Exception associating VLAN %s to vCMP Guest %s: %s '
% (vlan['name'], vcmp_guest['mgmt_addr'], exc)))
# Wait for the VLAN to propagate to /Common on vCMP Guest
full_path_vlan_name = '/Common/' + prefixed(vlan['name'])
try:
vlan_created = False
for _ in range(0, 30):
if bigip.vlan.exists(name=vlan['name'], folder='/Common'):
vlan_created = True
break
                LOG.debug(('Waiting for VLAN %s to be created on vCMP Guest %s.'
                           % (full_path_vlan_name, vcmp_guest['mgmt_addr'])))
sleep(1)
if vlan_created:
LOG.debug(('VLAN %s exists on vCMP Guest %s.' %
(full_path_vlan_name, vcmp_guest['mgmt_addr'])))
else:
LOG.error(('VLAN %s does not exist on vCMP Guest %s.' %
(full_path_vlan_name, vcmp_guest['mgmt_addr'])))
except WebFault as exc:
LOG.error(('Exception waiting for vCMP Host VLAN %s to '
'be created on vCMP Guest %s: %s' %
(vlan['name'], vcmp_guest['mgmt_addr'], exc)))
except Exception as exc:
LOG.error(('Exception waiting for vCMP Host VLAN %s to '
'be created on vCMP Guest %s: %s' %
(vlan['name'], vcmp_guest['mgmt_addr'], exc)))
# Delete the VLAN from the /Common folder on the vCMP Guest
try:
bigip.vlan.delete(name=vlan['name'],
folder='/Common')
LOG.debug(('Deleted VLAN %s from vCMP Guest %s' %
(full_path_vlan_name, vcmp_guest['mgmt_addr'])))
except VLANDeleteException as exc:
LOG.error(('Exception deleting VLAN %s from vCMP Guest %s: %s' %
(full_path_vlan_name, vcmp_guest['mgmt_addr'], exc)))
except Exception as exc:
LOG.error(('Exception deleting VLAN %s from vCMP Guest %s: %s' %
                       (full_path_vlan_name, vcmp_guest['mgmt_addr'], exc)))
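
# Illustrative sketch only, not part of the driver: the VLAN-propagation wait
# above is a bounded poll (up to 30 one-second checks). A hypothetical generic
# restatement of that pattern:
from time import sleep

def wait_for(check, retries=30, interval=1):
    """ Return True as soon as check() succeeds, False on timeout """
    for _ in range(retries):
        if check():
            return True
        sleep(interval)
    return False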

def _delete_device_vlan(self, bigip, network, network_folder):
""" Delete tagged vlan on specific bigip """
vlan_name = self.get_vlan_name(network,
bigip.icontrol.hostname)
bigip.vlan.delete(name=vlan_name,
folder=network_folder)
if self.vlan_binding:
interface = self.interface_mapping['default']
tagged = self.tagging_mapping['default']
vlanid = 0
# Do we have host specific mappings?
net_key = network['provider:physical_network']
if net_key + ':' + bigip.icontrol.hostname in \
self.interface_mapping:
interface = self.interface_mapping[
net_key + ':' + bigip.icontrol.hostname]
tagged = self.tagging_mapping[
net_key + ':' + bigip.icontrol.hostname]
# Do we have a mapping for this network
elif net_key in self.interface_mapping:
interface = self.interface_mapping[net_key]
tagged = self.tagging_mapping[net_key]
if tagged:
vlanid = network['provider:segmentation_id']
else:
vlanid = 0
self.vlan_binding.prune_vlan(
device_name=bigip.device_name,
interface=interface,
vlanid=vlanid
)
        self._delete_vcmp_device_network(bigip, vlan_name)

def _delete_device_flat(self, bigip, network, network_folder):
""" Delete untagged vlan on specific bigip """
vlan_name = self.get_vlan_name(network,
bigip.icontrol.hostname)
bigip.vlan.delete(name=vlan_name,
folder=network_folder)
        self._delete_vcmp_device_network(bigip, vlan_name)

def _delete_device_vxlan(self, bigip, network, network_folder):
""" Delete vxlan tunnel on specific bigip """
tunnel_name = _get_tunnel_name(network)
bigip.vxlan.delete_all_fdb_entries(tunnel_name=tunnel_name,
folder=network_folder)
bigip.vxlan.delete_tunnel(name=tunnel_name,
folder=network_folder)
if self.fdb_connector:
            self.fdb_connector.notify_vtep_removed(network, bigip.local_ip)

def _delete_device_gre(self, bigip, network, network_folder):
""" Delete gre tunnel on specific bigip """
tunnel_name = _get_tunnel_name(network)
# for each known vtep_endpoints to this tunnel
bigip.l2gre.delete_all_fdb_entries(tunnel_name=tunnel_name,
folder=network_folder)
bigip.l2gre.delete_tunnel(name=tunnel_name,
folder=network_folder)
if self.fdb_connector:
            self.fdb_connector.notify_vtep_removed(network, bigip.local_ip)

def _delete_vcmp_device_network(self, bigip, vlan_name):
"""For vCMP Guests, disassociate VLAN from vCMP Guest and
delete VLAN from vCMP Host."""
vcmp_host = self.vcmp_manager.get_vcmp_host(bigip)
if not vcmp_host:
return
# Remove VLAN association from the vCMP Guest
vcmp_guest = self.vcmp_manager.get_vcmp_guest(vcmp_host, bigip)
try:
vlan_seq = vcmp_host['bigip'].system.sys_vcmp.typefactory.\
create('Common.StringSequence')
vlan_seq.values = prefixed(vlan_name)
vlan_seq_seq = vcmp_host['bigip'].system.sys_vcmp.typefactory.\
create('Common.StringSequenceSequence')
vlan_seq_seq.values = [vlan_seq]
vcmp_host['bigip'].system.sys_vcmp.remove_vlan(
[vcmp_guest['name']], vlan_seq_seq)
LOG.debug(('Removed VLAN %s association from vCMP Guest %s' %
(vlan_name, vcmp_guest['mgmt_addr'])))
except WebFault as webfault:
LOG.error(('Exception removing VLAN %s association from vCMP '
'Guest %s:%s' %
(vlan_name, vcmp_guest['mgmt_addr'], webfault)))
except Exception as exc:
LOG.error(('Exception removing VLAN %s association from vCMP '
'Guest %s:%s' %
(vlan_name, vcmp_guest['mgmt_addr'], exc)))
# Only delete VLAN if it is not in use by other vCMP Guests
if self.vcmp_manager.get_vlan_use_count(vcmp_host, vlan_name):
LOG.debug(('VLAN %s in use by other vCMP Guests on vCMP Host %s' %
(vlan_name, vcmp_host['bigip'].icontrol.hostname)))
return
# Delete VLAN from vCMP Host. This will fail if any other vCMP Guest
# is using this VLAN
try:
vcmp_host['bigip'].vlan.delete(name=vlan_name,
folder='/Common')
LOG.debug(('Deleted VLAN %s from vCMP Host %s' %
(vlan_name, vcmp_host['bigip'].icontrol.hostname)))
except WebFault as webfault:
LOG.error(('Exception deleting VLAN %s from vCMP Host %s:%s' %
(vlan_name, vcmp_host['bigip'].icontrol.hostname,
webfault)))
except Exception as exc:
LOG.error(('Exception deleting VLAN %s from vCMP Host %s:%s' %
                       (vlan_name, vcmp_host['bigip'].icontrol.hostname, exc)))

def add_bigip_fdbs(self, bigip, net_folder, fdb_info, vteps_by_type):
""" Add fdb records for a mac/ip with specified vteps """
network = fdb_info['network']
net_type = network['provider:network_type']
vteps_key = net_type + '_vteps'
if vteps_key in vteps_by_type:
vteps = vteps_by_type[vteps_key]
if net_type == 'gre':
self.add_gre_fdbs(bigip, net_folder, fdb_info, vteps)
elif net_type == 'vxlan':
                self.add_vxlan_fdbs(bigip, net_folder, fdb_info, vteps)
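
# Illustrative sketch only: vteps_by_type above is keyed by
# '<provider:network_type>_vteps'. With made-up endpoints:
example_vteps_by_type = {'gre_vteps': ['10.30.30.2'],
                         'vxlan_vteps': ['10.30.30.3']}
example_net_type = 'vxlan'
assert example_vteps_by_type.get(example_net_type + '_vteps') == ['10.30.30.3']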

def delete_bigip_fdbs(self, bigip, net_folder, fdb_info, vteps_by_type):
""" Delete fdb records for a mac/ip with specified vteps """
network = fdb_info['network']
net_type = network['provider:network_type']
vteps_key = net_type + '_vteps'
if vteps_key in vteps_by_type:
vteps = vteps_by_type[vteps_key]
if net_type == 'gre':
self.delete_gre_fdbs(bigip, net_folder, fdb_info, vteps)
elif net_type == 'vxlan':
                self.delete_vxlan_fdbs(bigip, net_folder, fdb_info, vteps)

def add_bigip_fdb(self, bigip, fdb):
""" Add entries from the fdb relevant to the bigip """
for fdb_operation in \
[{'network_type': 'vxlan',
'get_tunnel_folder': bigip.vxlan.get_tunnel_folder,
'fdb_method': bigip.vxlan.add_fdb_entries},
{'network_type': 'gre',
'get_tunnel_folder': bigip.l2gre.get_tunnel_folder,
'fdb_method': bigip.l2gre.add_fdb_entries}]:
            self._operate_bigip_fdb(bigip, fdb, fdb_operation)

def _operate_bigip_fdb(self, bigip, fdb, fdb_operation):
""" Add L2 records for MAC addresses behind tunnel endpoints.
Description of fdb structure:
{'<network_id>':
'segment_id': <int>
'ports': [ '<vtep>': ['<mac_address>': '<ip_address>'] ]
'<network_id>':
'segment_id':
'ports': [ '<vtep>': ['<mac_address>': '<ip_address>'] ] }
Sample real fdb structure:
{u'45bbbce1-191b-4f7b-84c5-54c6c8243bd2':
{u'segment_id': 1008,
u'ports':
{u'10.30.30.2': [[u'00:00:00:00:00:00', u'0.0.0.0'],
[u'fa:16:3e:3d:7b:7f', u'10.10.1.4']]},
u'network_type': u'vxlan'}}
"""
network_type = fdb_operation['network_type']
get_tunnel_folder = fdb_operation['get_tunnel_folder']
fdb_method = fdb_operation['fdb_method']
for network in fdb:
net_fdb = fdb[network]
if net_fdb['network_type'] == network_type:
net = {'name': network,
'provider:network_type': net_fdb['network_type'],
'provider:segmentation_id': net_fdb['segment_id']}
tunnel_name = _get_tunnel_name(net)
folder = get_tunnel_folder(tunnel_name=tunnel_name)
net_info = {'network': network,
'folder': folder,
'tunnel_name': tunnel_name,
'net_fdb': net_fdb}
fdbs = self._get_bigip_network_fdbs(bigip, net_info)
if len(fdbs) > 0:
                    fdb_method(fdb_entries=fdbs)
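
# Illustrative sketch only: a standalone walk of the fdb structure documented
# in _operate_bigip_fdb, selecting entries for one overlay type. The sample
# data mirrors the docstring above; _example_tunnel_name is a hypothetical
# stand-in for the driver's _get_tunnel_name().
_example_fdb = {
    u'45bbbce1-191b-4f7b-84c5-54c6c8243bd2': {
        u'segment_id': 1008,
        u'network_type': u'vxlan',
        u'ports': {u'10.30.30.2': [[u'00:00:00:00:00:00', u'0.0.0.0'],
                                   [u'fa:16:3e:3d:7b:7f', u'10.10.1.4']]}}}

def _example_tunnel_name(net):
    # assumed naming scheme, for illustration only
    return 'tunnel-%s-%s' % (net['provider:network_type'],
                             net['provider:segmentation_id'])

for _net_id, _net_fdb in _example_fdb.items():
    if _net_fdb['network_type'] != 'vxlan':
        continue  # each _operate_bigip_fdb pass handles a single overlay type
    _net = {'name': _net_id,
            'provider:network_type': _net_fdb['network_type'],
            'provider:segmentation_id': _net_fdb['segment_id']}
    assert _example_tunnel_name(_net) == 'tunnel-vxlan-1008'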

def _get_bigip_network_fdbs(self, bigip, net_info):
""" Get network fdb entries to add to a bigip """
if not net_info['folder']:
return {}
net_fdb = net_info['net_fdb']
fdbs = {}
for vtep in net_fdb['ports']:
# bigip does not need to set fdb entries for local addresses
if vtep == bigip.local_ip:
continue
# most net_info applies to the vtep
vtep_info = dict(net_info)
# but the network fdb is too broad so delete it
del vtep_info['net_fdb']
# use a slice of the fdb for the vtep instead
vtep_info['vtep'] = vtep
vtep_info['fdb_entries'] = net_fdb['ports'][vtep]
self._merge_vtep_fdbs(vtep_info, fdbs)
        return fdbs

def _merge_vtep_fdbs(self, vtep_info, fdbs):
""" Add L2 records for a specific network+vtep """
folder = vtep_info['folder']
tunnel_name = vtep_info['tunnel_name']
for entry in vtep_info['fdb_entries']:
mac_address = entry[0]
if mac_address == '00:00:00:00:00:00':
continue
ip_address = entry[1]
# create/get tunnel data
if tunnel_name not in fdbs:
fdbs[tunnel_name] = {}
tunnel_fdbs = fdbs[tunnel_name]
# update tunnel folder
tunnel_fdbs['folder'] = folder
# maybe create records for tunnel
if 'records' not in tunnel_fdbs:
tunnel_fdbs['records'] = {}
# add entry to records map keyed by mac address
tunnel_fdbs['records'][mac_address] = \
                {'endpoint': vtep_info['vtep'], 'ip_address': ip_address}
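
# Illustrative sketch only: the shape _merge_vtep_fdbs builds for one vtep,
# with made-up values. Note the all-zero flooding MAC entry is skipped.
_example_fdbs = {
    'tunnel-vxlan-1008': {
        'folder': 'uuid_tenant_folder',  # hypothetical folder name
        'records': {
            'fa:16:3e:3d:7b:7f': {'endpoint': '10.30.30.2',
                                  'ip_address': '10.10.1.4'}}}}
assert '00:00:00:00:00:00' not in _example_fdbs['tunnel-vxlan-1008']['records']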

def scrub_dead_agents(self, context, env, group, host=None):
"""Remove all non-alive or admin down agents"""
        LOG.debug('scrubbing dead agent bindings')
        with context.session.begin(subtransactions=True):
            try:
                self.scheduler.scrub_dead_agents(
                    self.plugin, context, env, group=group)
            except Exception as exc:
                LOG.error('scrub dead agents exception: %s' % str(exc))
return False
        return True

def _get_extended_pool(self, context, pool_id, global_routed_mode):
""" Get Pool from Neutron and add extended data """
# Start with neutron pool definition
try:
pool = self.plugin.get_pool(context, pool_id)
        except Exception:
LOG.error("get_service_by_pool_id: Pool not found %s" %
pool_id)
return None
# Populate extended pool attributes
if not global_routed_mode:
pool['subnet'] = self._get_subnet_cached(
context, pool['subnet_id'])
pool['network'] = self._get_network_cached(
context, pool['subnet']['network_id'])
else:
pool['subnet_id'] = None
pool['network'] = None
        return pool

def _get_subnet_cached(self, context, subnet_id):
""" subnet from cache or get from neutron """
if subnet_id not in self.subnet_cache:
subnet_dict = self._core_plugin().get_subnet(context, subnet_id)
self.subnet_cache[subnet_id] = subnet_dict
        return self.subnet_cache[subnet_id]

def _get_network_cached(self, context, network_id):
""" network from cache or get from neutron """
if network_id not in self.net_cache:
net_dict = self._core_plugin().get_network(context, network_id)
if 'provider:network_type' not in net_dict:
net_dict['provider:network_type'] = 'undefined'
if 'provider:segmentation_id' not in net_dict:
net_dict['provider:segmentation_id'] = 0
self.net_cache[network_id] = net_dict
        return self.net_cache[network_id]
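
# Illustrative sketch only: both cache helpers above follow the same
# read-through pattern, fetching from neutron once and serving repeats from a
# dict. A hypothetical generic restatement:
def _cached_lookup(cache, key, fetch):
    """ Fetch key once via fetch(), then serve it from cache """
    if key not in cache:
        cache[key] = fetch(key)
    return cache[key]

_example_cache = {}
assert _cached_lookup(_example_cache, 'net-1', lambda k: {'id': k}) == \
    {'id': 'net-1'}
assert 'net-1' in _example_cache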

def _populate_vip_network_vteps(self, context, vip):
""" put related tunnel endpoints in vip definiton """
vip['vxlan_vteps'] = []
vip['gre_vteps'] = []
if 'provider:network_type' not in vip['network']:
return
nettype = vip['network']['provider:network_type']
if nettype not in ['vxlan', 'gre']:
return
ports = self.get_ports_on_network(context,
network_id=vip['network']['id'])
vtep_hosts = []
for port in ports:
if 'binding:host_id' in port and \
port['binding:host_id'] not in vtep_hosts:
vtep_hosts.append(port['binding:host_id'])
for vtep_host in vtep_hosts:
if nettype == 'vxlan':
endpoints = self._get_vxlan_endpoints(context, vtep_host)
for ep in endpoints:
if ep not in vip['vxlan_vteps']:
vip['vxlan_vteps'].append(ep)
elif nettype == 'gre':
endpoints = self._get_gre_endpoints(context, vtep_host)
for ep in endpoints:
if ep not in vip['gre_vteps']:
                        vip['gre_vteps'].append(ep)

def _found_and_used_matching_addr(
self, adminctx, context, member, allocated, matching_keys):
""" Find a matching address that matches keys """
# first check list of allocated addresses in neutron
# that match the pool member and check those subnets
# first because we prefer to use a subnet that actually has
# a matching ip address on it.
if self._found_and_used_neutron_addr(
adminctx, context, member, allocated, matching_keys):
return True
# Perhaps the neutron network was deleted but the pool member
# was not. If we find a cached subnet definition that matches the
# deleted network it might help us tear down our configuration.
if self._found_and_used_cached_subnet(
adminctx, member, matching_keys):
return True
# Perhaps the neutron subnet was deleted but the pool member
# was not. Maybe the subnet was deleted and then added back
# with a different id. If we can find a matching subnet, it
# might help us tear down our configuration.
if self._found_and_used_neutron_subnet(
adminctx, member, matching_keys):
return True
        return False

def _found_and_used_neutron_addr(
self, adminctx, context, member, allocated, matching_keys):
""" Find a matching address that matches keys """
for alloc in allocated:
if matching_keys['subnet_id'] and \
alloc['subnet_id'] != matching_keys['subnet_id']:
continue
try:
net = self._get_network_cached(adminctx, alloc['network_id'])
            except Exception:
continue
if matching_keys['tenant_id'] and \
net['tenant_id'] != matching_keys['tenant_id']:
continue
if matching_keys['shared'] and not net['shared']:
continue
member['network'] = net
member['subnet'] = self._get_subnet_cached(
context, alloc['subnet_id'])
member['port'] = self._core_plugin().get_port(
adminctx, alloc['port_id'])
self._populate_member_network(context, member)
            return True
        return False

def _found_and_used_cached_subnet(
self, adminctx, member, matching_keys):
""" check our cache for missing network """
subnets_matched = []
na_add = netaddr.IPAddress(member['address'])
for subnet in self.subnet_cache:
c_subnet = self.subnet_cache[subnet]
na_net = netaddr.IPNetwork(c_subnet['cidr'])
if na_add in na_net:
if matching_keys['subnet_id'] and \
c_subnet['id'] != matching_keys['subnet_id']:
continue
if matching_keys['tenant_id'] and \
c_subnet['tenant_id'] != matching_keys['tenant_id']:
continue
if matching_keys['shared'] and not c_subnet['shared']:
continue
subnets_matched.append(subnet)
if len(subnets_matched) == 1:
member['subnet'] = self._get_subnet_cached(
adminctx, subnets_matched[0])
member['network'] = self._get_network_cached(
adminctx, member['subnet']['network_id'])
return True
        return False
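
# Illustrative sketch only: the CIDR membership test above relies on the
# netaddr package, e.g.:
import netaddr
assert netaddr.IPAddress('10.10.1.4') in netaddr.IPNetwork('10.10.1.0/24')
assert netaddr.IPAddress('10.20.1.4') not in netaddr.IPNetwork('10.10.1.0/24')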

def _populate_member_network(self, context, member):
""" Add networking info to pool member """
member['vxlan_vteps'] = []
member['gre_vteps'] = []
if 'provider:network_type' in member['network']:
nettype = member['network']['provider:network_type']
if nettype == 'vxlan':
if 'binding:host_id' in member['port']:
host = member['port']['binding:host_id']
member['vxlan_vteps'] = self._get_vxlan_endpoints(
context, host)
if nettype == 'gre':
if 'binding:host_id' in member['port']:
host = member['port']['binding:host_id']
member['gre_vteps'] = self._get_gre_endpoints(
context, host)
if 'provider:network_type' not in member['network']:
member['network']['provider:network_type'] = 'undefined'
if 'provider:segmentation_id' not in member['network']:
            member['network']['provider:segmentation_id'] = 0

def create_port_on_subnet_with_specific_ip(self, context, subnet_id=None,
mac_address=None, name=None,
ip_address=None, host=None):
""" Create port on subnet with specific ip address """
if subnet_id and ip_address:
subnet = self._core_plugin().get_subnet(context, subnet_id)
if not mac_address:
mac_address = attributes.ATTR_NOT_SPECIFIED
fixed_ip = {'subnet_id': subnet['id'], 'ip_address': ip_address}
if not host:
host = ''
if not name:
name = ''
port_data = {
'tenant_id': subnet['tenant_id'],
'name': name,
'network_id': subnet['network_id'],
'mac_address': mac_address,
'admin_state_up': True,
'device_id': str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host))),
'device_owner': 'network:f5lbaas',
'status': q_const.PORT_STATUS_ACTIVE,
'fixed_ips': [fixed_ip]
}
port_data[portbindings.HOST_ID] = host
port_data[portbindings.VIF_TYPE] = 'f5'
if 'binding:capabilities' in \
portbindings.EXTENDED_ATTRIBUTES_2_0['ports']:
port_data['binding:capabilities'] = {'port_filter': False}
port = self._core_plugin().create_port(
context, {'port': port_data})
# Because ML2 marks ports DOWN by default on creation
update_data = {
'status': q_const.PORT_STATUS_ACTIVE
}
self._core_plugin().update_port(
context, port['id'], {'port': update_data})
            return port
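
# Illustrative sketch only: the device_id above is a deterministic UUIDv5 of
# the agent host string, so the same host always maps to the same device_id
# and the driver can later find its own ports by it. The host value here is
# hypothetical.
import uuid
_host = 'lbaas-agent.example.org'
assert str(uuid.uuid5(uuid.NAMESPACE_DNS, _host)) == \
    str(uuid.uuid5(uuid.NAMESPACE_DNS, _host))  # stable across calls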

def allocate_fixed_address_on_subnet(self, context, subnet_id=None,
port_id=None, name=None,
fixed_address_count=1, host=None):
""" Allocate a fixed ip address on subnet """
if subnet_id:
subnet = self._core_plugin().get_subnet(context, subnet_id)
if not port_id:
port = self.create_port_on_subnet(
context,
subnet_id=subnet_id,
mac_address=None,
name=name,
fixed_address_count=fixed_address_count,
host=host
)
else:
port = self._core_plugin().get_port(context, port_id)
existing_fixed_ips = port['fixed_ips']
fixed_ip = {'subnet_id': subnet['id']}
if fixed_address_count > 1:
fixed_ips = []
for _ in range(0, fixed_address_count):
fixed_ips.append(fixed_ip)
else:
fixed_ips = [fixed_ip]
port['fixed_ips'] = existing_fixed_ips + fixed_ips
port = self._core_plugin().update_port(context, {'port': port})
new_fixed_ips = port['fixed_ips']
port['new_fixed_ips'] = []
for new_fixed_ip in new_fixed_ips:
ip_address = new_fixed_ip['ip_address']
is_new = True
for existing_fixed_ip in existing_fixed_ips:
if ip_address == existing_fixed_ip['ip_address']:
is_new = False
if is_new:
port['new_fixed_ips'].append(new_fixed_ip)
            return port
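
# Illustrative sketch only: the new_fixed_ips computation above is a
# membership diff between the port's fixed ips before and after the update,
# shown here with sample data:
_before = [{'ip_address': '10.0.0.5', 'subnet_id': 'subnet-1'}]
_after = _before + [{'ip_address': '10.0.0.9', 'subnet_id': 'subnet-1'}]
_known = set(ip['ip_address'] for ip in _before)
_new = [ip for ip in _after if ip['ip_address'] not in _known]
assert _new == [{'ip_address': '10.0.0.9', 'subnet_id': 'subnet-1'}]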

def allocate_specific_fixed_address_on_subnet(self, context,
subnet_id=None,
port_id=None, name=None,
ip_address=None,
host=None):
""" Allocate specific fixed ip address on subnet """
if subnet_id and ip_address:
subnet = self._core_plugin().get_subnet(context, subnet_id)
if not port_id:
port = self.create_port_on_subnet_with_specific_ip(
context,
subnet_id=subnet_id,
mac_address=None,
name=name,
ip_address=ip_address,
host=host
)
else:
port = self._core_plugin().get_port(context, port_id)
existing_fixed_ips = port['fixed_ips']
fixed_ip = {'subnet_id': subnet['id'],
'ip_address': ip_address}
port['fixed_ips'] = existing_fixed_ips + [fixed_ip]
port = self._core_plugin().update_port(context, {'port': port})
            return port

def deallocate_fixed_address_on_subnet(self, context, fixed_addresses=None,
subnet_id=None, host=None,
auto_delete_port=False):
""" Allocate fixed ip address on subnet """
if fixed_addresses:
if not isinstance(fixed_addresses, list):
fixed_addresses = [fixed_addresses]
# strip all route domain decorations if they exist
for i in range(len(fixed_addresses)):
try:
decorator_index = str(fixed_addresses[i]).index('%')
fixed_addresses[i] = fixed_addresses[i][:decorator_index]
                except ValueError:
pass
subnet = self._core_plugin().get_subnet(context, subnet_id)
# get all ports for this host on the subnet
filters = {
'network_id': [subnet['network_id']],
'tenant_id': [subnet['tenant_id']],
'device_id': [str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host)))]
}
ports = self._core_plugin().get_ports(context, filters=filters)
fixed_ips = {}
ok_to_delete_port = {}
for port in ports:
ok_to_delete_port[port['id']] = False
for fixed_ip in port['fixed_ips']:
fixed_ips[fixed_ip['ip_address']] = port['id']
# only get rid of associated fixed_ips
for fixed_ip in fixed_ips:
if fixed_ip in fixed_addresses:
self._core_plugin()._delete_ip_allocation(
context,
subnet['network_id'],
subnet_id,
fixed_ip
)
ok_to_delete_port[fixed_ips[fixed_ip]] = True
else:
ok_to_delete_port[fixed_ips[fixed_ip]] = False
if auto_delete_port:
for port in ok_to_delete_port:
if ok_to_delete_port[port]:
                        self.delete_port(context, port)
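
# Illustrative sketch only: BIG-IP addresses can carry a route domain
# decoration ('<ip>%<route_domain_id>'); the loop above strips it before
# comparing against neutron allocations:
def _strip_route_domain(address):
    return str(address).split('%')[0]

assert _strip_route_domain('10.0.0.5%2') == '10.0.0.5'
assert _strip_route_domain('10.0.0.5') == '10.0.0.5'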

def update_vip_status(self, context, vip_id=None,
status=constants.ERROR,
status_description=None,
host=None):
"""Agent confirmation hook to update VIP status."""
try:
vip = self.plugin.get_vip(context, vip_id)
if vip['status'] == constants.PENDING_DELETE:
status = constants.PENDING_DELETE
self.plugin.update_status(
context,
lb_db.Vip,
vip_id,
status,
status_description
)
except VipNotFound:
            pass

def update_pool_status(self, context, pool_id=None,
status=constants.ERROR, status_description=None,
host=None):
"""Agent confirmation hook to update pool status."""
try:
pool = self.plugin.get_pool(context, pool_id)
if pool['status'] == constants.PENDING_DELETE:
LOG.debug('Pool status is PENDING_DELETE. '
'Pool status was not updated. %s' % pool)
return
self.plugin.update_status(
context,
lb_db.Pool,
pool_id,
status,
status_description
)
except PoolNotFound:
            pass

def update_member_status(self, context, member_id=None,
status=constants.ERROR, status_description=None,
host=None):
"""Agent confirmation hook to update member status."""
try:
member = self.plugin.get_member(context, member_id)
if member['status'] == constants.PENDING_DELETE:
status = constants.PENDING_DELETE
self.plugin.update_status(
context,
lb_db.Member,
member_id,
status,
status_description
)
except MemberNotFound:
            pass

def member_destroyed(self, context, member_id=None, host=None):
"""Agent confirmation hook that a member has been destroyed."""
# delete the pool member from the data model
try:
self.plugin._delete_db_member(context, member_id)
except MemberNotFound:
            pass

def update_health_monitor_status(self, context, pool_id=None,
health_monitor_id=None,
status=constants.ERROR,
status_description=None,
host=None):
"""Agent confirmation hook to update healthmonitor status."""
try:
assoc = self.plugin._get_pool_health_monitor(
context, health_monitor_id, pool_id)
            assoc_status = getattr(assoc, 'status', None)
            if assoc_status == constants.PENDING_DELETE:
LOG.error("Attempt to update deleted health monitor %s" %
health_monitor_id)
return
self.plugin.update_pool_health_monitor(
context,
health_monitor_id,
pool_id,
status,
status_description
)
except HealthMonitorNotFound:
            pass

def health_monitor_destroyed(self, context, health_monitor_id=None,
pool_id=None, host=None):
"""Agent confirmation hook that a health has been destroyed."""
# delete the health monitor from the data model
# the plug-in does this sometimes so allow for an error.
try:
self.plugin._delete_db_pool_health_monitor(
context,
health_monitor_id,
pool_id
)
        except Exception:
            pass

def create_vip(self, context, vip, service, host):
""" Send message to agent to create vip """
return self.cast(
context,
self.make_msg('create_vip', vip=vip, service=service),
topic='%s.%s' % (self.topic, host)
        )

def update_vip(self, context, old_vip, vip, service, host):
""" Send message to agent to update vip """
return self.cast(
context,
self.make_msg('update_vip', old_vip=old_vip, vip=vip,
service=service),
topic='%s.%s' % (self.topic, host)
        )

def delete_vip(self, context, vip, service, host):
""" Send message to agent to create vip """
return self.cast(
context,
self.make_msg('delete_vip', vip=vip, service=service),
topic='%s.%s' % (self.topic, host)
        )

def create_pool(self, context, pool, service, host):
""" Send message to agent to create pool """
return self.cast(
context,
self.make_msg('create_pool', pool=pool, service=service),
topic='%s.%s' % (self.topic, host)
        )

def update_pool(self, context, old_pool, pool, service, host):
""" Send message to agent to update pool """
return self.cast(
context,
self.make_msg('update_pool', old_pool=old_pool, pool=pool,
service=service),
topic='%s.%s' % (self.topic, host)
        )

def delete_pool(self, context, pool, service, host):
""" Send message to agent to delete pool """
return self.cast(
context,
self.make_msg('delete_pool', pool=pool, service=service),
topic='%s.%s' % (self.topic, host)
        )

def create_member(self, context, member, service, host):
""" Send message to agent to create member """
return self.cast(
context,
self.make_msg('create_member', member=member, service=service),
topic='%s.%s' % (self.topic, host)
        )

def update_member(self, context, old_member, member, service, host):
""" Send message to agent to update member """
return self.cast(
context,
self.make_msg('update_member', old_member=old_member,
member=member, service=service),
topic='%s.%s' % (self.topic, host)
        )

def delete_member(self, context, member, service, host):
""" Send message to agent to delete member """
return self.cast(
context,
self.make_msg('delete_member', member=member, service=service),
topic='%s.%s' % (self.topic, host)
        )

def create_pool_health_monitor(self, context, health_monitor, pool,
service, host):
""" Send message to agent to create pool health monitor """
return self.cast(
context,
self.make_msg('create_pool_health_monitor',
health_monitor=health_monitor, pool=pool,
service=service),
topic='%s.%s' % (self.topic, host)
        )

def update_health_monitor(self, context, old_health_monitor,
health_monitor, pool, service, host):
""" Send message to agent to update pool health monitor """
return self.cast(
context,
self.make_msg('update_health_monitor',
old_health_monitor=old_health_monitor,
health_monitor=health_monitor,
pool=pool, service=service),
topic='%s.%s' % (self.topic, host)
        )

def delete_pool_health_monitor(self, context, health_monitor, pool,
service, host):
""" Send message to agent to delete pool health monitor """
return self.cast(
context,
self.make_msg('delete_pool_health_monitor',
health_monitor=health_monitor,
pool=pool, service=service),
topic='%s.%s' % (self.topic, host)
        )

def _set_callbacks(self):
""" Setup callbacks to receive calls from agent """
self.callbacks = LoadBalancerCallbacks(self.plugin,
self.env,
self.pool_scheduler)
topic = lbaasv1constants.TOPIC_PROCESS_ON_HOST
if self.env:
topic = topic + "_" + self.env
if PREJUNO:
self.conn = rpc.create_connection(new=True)
# register the callback consumer
self.conn.create_consumer(
topic,
self.callbacks.create_rpc_dispatcher(),
fanout=False)
self.conn.consume_in_thread()
else:
self.conn = q_rpc.create_connection(new=True) # @UndefinedVariable
self.conn.create_consumer(
topic,
[self.callbacks, agents_db.AgentExtRpcCallback(self.plugin)],
fanout=False)
            self.conn.consume_in_threads()

def create_vip(self, context, vip):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, vip['pool_id'])
vip['pool'] = self._get_pool(context, vip['pool_id'])
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=vip['pool_id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# Update the port for the VIP to show ownership by this driver
port_data = {
'admin_state_up': True,
'device_id': str(
uuid.uuid5(
uuid.NAMESPACE_DNS, str(agent['host'])
)
),
'device_owner': 'network:f5lbaas',
'status': q_const.PORT_STATUS_ACTIVE
}
port_data[portbindings.HOST_ID] = agent['host']
self._core_plugin().update_port(
context,
vip['port_id'],
{'port': port_data}
)
# call the RPC proxy with the constructed message
        self.agent_rpc.create_vip(context, vip, service, agent['host'])
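
# Sketch of the deterministic device_id derivation above: uuid5 hashes
# the agent host under the DNS namespace, so the same host always maps
# to the same port owner id (the host name below is made up).
import uuid

host = 'lbaas-agent-1.example.com'  # hypothetical agent host
device_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, host))
assert device_id == str(uuid.uuid5(uuid.NAMESPACE_DNS, host))  # stable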

def update_vip(self, context, old_vip, vip):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, vip['pool_id'])
old_vip['pool'] = self._get_pool(context, old_vip['pool_id'])
vip['pool'] = self._get_pool(context, vip['pool_id'])
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=vip['pool_id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
self.agent_rpc.update_vip(context, old_vip, vip,
                                  service, agent['host'])

def delete_vip(self, context, vip):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, vip['pool_id'])
vip['pool'] = self._get_pool(context, vip['pool_id'])
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=vip['pool_id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
        self.agent_rpc.delete_vip(context, vip, service, agent['host'])

def create_pool(self, context, pool):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.pool_scheduler.schedule(self.plugin, context,
pool, self.env)
if not agent:
raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id'])
if not PREJUNO:
agent = self.plugin._make_agent_dict(agent)
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=pool['id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
        self.agent_rpc.create_pool(context, pool, service, agent['host'])

def delete_pool(self, context, pool):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
try:
agent = self.get_pool_agent(context, pool['id'])
except lbaas_agentscheduler.NoActiveLbaasAgent:
            # if there is no agent for this pool, allow the data
            # model to delete it.
self.callbacks.pool_destroyed(context, pool['id'], None)
return
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=pool['id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
        self.agent_rpc.delete_pool(context, pool, service, agent['host'])

def create_member(self, context, member):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, member['pool_id'])
# populate a pool structure for the rpc message
pool = self._get_pool(context, member['pool_id'])
member['pool'] = pool
start_time = time()
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=member['pool_id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
LOG.debug("get_service took %.5f secs" % (time() - start_time))
this_member_count = 0
for service_member in service['members']:
if service_member['address'] == member['address'] and \
service_member['protocol_port'] == member['protocol_port']:
this_member_count += 1
if this_member_count > 1:
status_description = 'duplicate member %s:%s found in pool %s' \
% (
member['address'],
member['protocol_port'],
member['pool_id']
)
self.callbacks.update_member_status(
context,
member_id=member['id'],
status=constants.ERROR,
status_description=status_description,
host=agent['host']
)
# call the RPC proxy with the constructed message
        self.agent_rpc.create_member(context, member, service, agent['host'])
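
# Standalone sketch of the duplicate check above: a member is flagged
# when its address:port pair occurs more than once in the service.
def count_matching(members, address, port):
    return sum(1 for m in members
               if m['address'] == address and m['protocol_port'] == port)

members = [{'address': '10.0.0.5', 'protocol_port': 80},
           {'address': '10.0.0.5', 'protocol_port': 80}]
assert count_matching(members, '10.0.0.5', 80) > 1  # duplicate detected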

def update_member(self, context, old_member, member):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, member['pool_id'])
# populate a 'was' pool structure for the rpc message
old_pool = self._get_pool(context, old_member['pool_id'])
old_member['pool'] = old_pool
# populate a 'to be' pool structure for the rpc message
pool = self._get_pool(context, member['pool_id'])
member['pool'] = pool
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=member['pool_id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
self.agent_rpc.update_member(context, old_member, member,
service, agent['host'])
# if they moved members between pools, we need to send
# a service call to update the old pool to remove
# the pool member
        if old_member['pool_id'] != member['pool_id']:
# the member should not be in this pool in the db anymore
old_pool_service = self.callbacks.get_service_by_pool_id(
context,
pool_id=old_member['pool_id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
for service_member in old_pool_service['members']:
if service_member['id'] == old_member['id']:
service_member['status'] = 'MOVING'
self.agent_rpc.update_member(
context, old_member, member,
old_pool_service, agent['host']
            )
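
# Sketch of the pool-move bookkeeping above: when a member changes
# pools, its entry in the old pool's service document is marked MOVING
# so the agent removes it there (plain dicts stand in for the model).
old_pool_service = {'members': [{'id': 'm1', 'status': 'ACTIVE'}]}
old_member = {'id': 'm1'}
for service_member in old_pool_service['members']:
    if service_member['id'] == old_member['id']:
        service_member['status'] = 'MOVING'
assert old_pool_service['members'][0]['status'] == 'MOVING'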

def delete_member(self, context, member):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, member['pool_id'])
# populate a pool structure for the rpc message
pool = self._get_pool(context, member['pool_id'])
member['pool'] = pool
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=member['pool_id'],
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
self.agent_rpc.delete_member(context, member,
                                     service, agent['host'])

def create_pool_health_monitor(self, context, health_monitor, pool_id):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, pool_id)
        # populate a pool structure for the rpc message
pool = self._get_pool(context, pool_id)
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=pool_id,
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
self.agent_rpc.create_pool_health_monitor(context, health_monitor,
pool, service,
                                                  agent['host'])

def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, pool_id)
# populate a pool structure for the rpc message
pool = self._get_pool(context, pool_id)
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=pool_id,
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
self.agent_rpc.update_health_monitor(context, old_health_monitor,
health_monitor, pool,
                                             service, agent['host'])

def update_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, pool_id)
# populate a pool structure for the rpc message
pool = self._get_pool(context, pool_id)
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=pool_id,
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
self.agent_rpc.update_health_monitor(context, old_health_monitor,
health_monitor, pool,
                                             service, agent['host'])

def delete_pool_health_monitor(self, context, health_monitor, pool_id):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, pool_id)
# populate a pool structure for the rpc message
pool = self._get_pool(context, pool_id)
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=pool_id,
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
self.agent_rpc.delete_pool_health_monitor(context, health_monitor,
pool, service,
                                                  agent['host'])

def stats(self, context, pool_id):
""" Handle LBaaS method by passing to agent """
# which agent should handle provisioning
agent = self.get_pool_agent(context, pool_id)
# populate a pool structure for the rpc message
pool = self._get_pool(context, pool_id)
# get the complete service definition from the data model
service = self.callbacks.get_service_by_pool_id(
context,
pool_id=pool_id,
global_routed_mode=self._is_global_routed(agent),
host=agent['host']
)
# call the RPC proxy with the constructed message
        self.agent_rpc.get_pool_stats(context, pool, service, agent['host'])

def _get_vxlan_endpoints(self, context):
""" Get vxlan tunneling endpoints from all agents """
endpoints = []
if hasattr(self._core_plugin(), 'get_agents'):
agents = self._core_plugin().get_agents(context)
for agent in agents:
                conf = agent.get('configurations', {})
                if ('vxlan' in conf.get('tunnel_types', []) and
                        'tunneling_ip' in conf):
                    endpoints.append(conf['tunneling_ip'])
        return endpoints

def _get_gre_endpoints(self, context):
""" Get gre tunneling endpoints from all agents """
endpoints = []
if hasattr(self._core_plugin(), 'get_agents'):
agents = self._core_plugin().get_agents(context)
for agent in agents:
                conf = agent.get('configurations', {})
                if ('gre' in conf.get('tunnel_types', []) and
                        'tunneling_ip' in conf):
                    endpoints.append(conf['tunneling_ip'])
        return endpoints
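
# The two collectors above differ only in the tunnel type they look
# for; a shared helper could look like this sketch (not the driver's
# actual code):
def get_tunnel_endpoints(agents, tunnel_type):
    endpoints = []
    for agent in agents:
        conf = agent.get('configurations', {})
        if tunnel_type in conf.get('tunnel_types', []) and \
                'tunneling_ip' in conf:
            endpoints.append(conf['tunneling_ip'])
    return endpoints

agents = [{'configurations': {'tunnel_types': ['vxlan'],
                              'tunneling_ip': '192.0.2.10'}}]
assert get_tunnel_endpoints(agents, 'vxlan') == ['192.0.2.10']
assert get_tunnel_endpoints(agents, 'gre') == []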

def create_domain(
self, folder='Common', strict_route_isolation=False, is_aux=False):
""" Create route domain.
is_aux: whether it is an auxiliary route domain beyond the main
route domain for the folder """
folder = str(folder).replace('/', '')
        if folder != 'Common':
payload = dict()
payload['partition'] = '/' + folder
payload['id'] = self._get_next_domain_id()
payload['name'] = folder
if is_aux:
payload['name'] += '_aux_' + str(payload['id'])
if strict_route_isolation:
payload['strict'] = 'enabled'
else:
payload['strict'] = 'disabled'
payload['parent'] = '/Common/0'
request_url = self.bigip.icr_url + '/net/route-domain/'
response = self.bigip.icr_session.post(
request_url, data=json.dumps(payload),
timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
return payload['id']
elif response.status_code == 409:
return True
else:
Log.error('route-domain', response.text)
raise exceptions.RouteCreationException(response.text)
        # folder == 'Common': the default route domain already exists
        return False
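
# For reference, the route-domain payload assembled above for a folder
# 'tenantA' with id 2 and is_aux=True would look like this (the id is
# hypothetical; it normally comes from _get_next_domain_id()):
payload = {
    'partition': '/tenantA',
    'id': 2,
    'name': 'tenantA_aux_2',  # is_aux appends '_aux_<id>'
    'strict': 'disabled',     # strict_route_isolation=False
    'parent': '/Common/0',
}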

def __set_prop(self, name, value, mark_dirty=True, forced=False):
"""
Internal method to set the properties after validation
Args:
name (str): property name
value (str): property value
mark_dirty (bool): if True, property will be part of xml request
forced (bool): if True, set the value without validation
Returns:
None
"""
if not forced:
prop_meta = self.prop_meta[name]
if prop_meta.access != ucsccoremeta.MoPropertyMeta.READ_WRITE:
if getattr(self, name) is not None or \
prop_meta.access != \
ucsccoremeta.MoPropertyMeta.CREATE_ONLY:
raise ValueError("%s is not a read-write property." % name)
if value and not prop_meta.validate_property_value(value):
raise ValueError("Invalid Value Exception - "
"[%s]: Prop <%s>, Value<%s>. "
% (self.__class__.__name__,
name,
value))
# return False
if prop_meta.mask and mark_dirty:
self._dirty_mask |= prop_meta.mask
        object.__setattr__(self, name, value)
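
# Sketch of the access rule enforced above: CREATE_ONLY properties may
# be set while still None and never rewritten; READ_WRITE ones may
# always change. The constants are stand-ins for ucsccoremeta's.
READ_WRITE, CREATE_ONLY, READ_ONLY = range(3)

def may_set(access, current_value):
    if access == READ_WRITE:
        return True
    return access == CREATE_ONLY and current_value is None

assert may_set(CREATE_ONLY, None) is True
assert may_set(CREATE_ONLY, 'web-server') is False
assert may_set(READ_ONLY, None) is False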

def mark_dirty(self):
"""
This method marks the managed object dirty.
"""
if self.__class__.__name__ == "ManagedObject" and not self.is_dirty():
self._dirty_mask = ManagedObject.DUMMY_DIRTY
elif "mo_meta" in dir(self):
            self._dirty_mask = self.mo_meta.mask

def is_dirty(self):
"""
This method checks if managed object is dirty.
"""
        return self._dirty_mask != 0 or self.child_is_dirty()

def rn_is_special_case(self):
"""
Method to handle if rn pattern is different across UcsCentral Version
"""
if self.__class__.__name__ == "StorageLocalDiskPartition":
return True
        return False

def rn_get_special_case(self):
"""
Method to handle if rn pattern is different across UcsCentral Version
"""
if self.__class__.__name__ == "StorageLocalDiskPartition":
# some version of ucs central have rn "partition" instead of
# "partition-id"
return "partition" | def rn_get_special_case(self):
"""
Method to handle if rn pattern is different across UcsCentral Version
"""
if self.__class__.__name__ == "StorageLocalDiskPartition":
# some version of ucs central have rn "partition" instead of
# "partition-id"
return "partition" |

def make_rn(self):
"""
This method returns the Rn for a managed object.
"""
import re
rn_pattern = self.mo_meta.rn
for prop in re.findall(r"""\[([^\]]*)\]""", rn_pattern):
if prop in self.prop_meta:
if getattr(self, prop):
rn_pattern = re.sub(r"""\[%s\]""" % prop,
'%s' % getattr(self, prop), rn_pattern)
else:
log.debug('Property "%s" was None in make_rn' % prop)
if self.rn_is_special_case():
return self.rn_get_special_case()
raise UcscValidationException(
'Property "%s" was None in make_rn' % prop)
else:
log.debug(
'Property "%s" was not found in make_rn arguments' % prop)
if self.rn_is_special_case():
return self.rn_get_special_case()
raise UcscValidationException(
'Property "%s" was not found in make_rn arguments' % prop)
        return rn_pattern
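
# Standalone sketch of the substitution make_rn() performs: bracketed
# property names in the rn pattern are replaced with property values
# (the pattern and values here are illustrative, not from the metadata).
import re

rn_pattern = 'partition-[id]'
props = {'id': '3'}
for prop in re.findall(r"\[([^\]]*)\]", rn_pattern):
    rn_pattern = re.sub(r"\[%s\]" % prop, props[prop], rn_pattern)
assert rn_pattern == 'partition-3'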

def to_xml(self, xml_doc=None, option=None, elem_name=None):
"""
Method writes the xml representation of the managed object.
"""
if option == WriteXmlOption.DIRTY and not self.is_dirty():
log.debug("Object is not dirty")
return
xml_obj = self.elem_create(class_tag=self.mo_meta.xml_attribute,
xml_doc=xml_doc,
override_tag=elem_name)
for key in self.__dict__:
if key != 'rn' and key in self.prop_meta:
mo_prop_meta = self.prop_meta[key]
if (option != WriteXmlOption.DIRTY or (
mo_prop_meta.mask is not None and
self._dirty_mask & mo_prop_meta.mask != 0)):
value = getattr(self, key)
if value is not None:
xml_obj.set(mo_prop_meta.xml_attribute, value)
else:
if key not in self.__xtra_props:
# This is an internal property
# This should not be a part of the xml
continue
# This is an unknown property
# This should be a part of the xml
# The server might understand this property, even though
# the sdk does not
if option != WriteXmlOption.DIRTY or \
self.__xtra_props[key].is_dirty:
value = self.__xtra_props[key].value
if value is not None:
xml_obj.set(key, value)
if 'dn' not in xml_obj.attrib:
xml_obj.set('dn', self.dn)
self.child_to_xml(xml_obj, option)
        return xml_obj
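
# Minimal sketch of the dirty-mask gate used above: under the DIRTY
# write option a property is serialized only when its metadata mask bit
# is set in the object's dirty mask (mask values are made up).
PROP_MASK = 0x4           # hypothetical per-property mask bit
dirty_mask = 0x4 | 0x1    # this property was modified
assert dirty_mask & PROP_MASK != 0   # include it in the dirty XML
assert (0x1 & PROP_MASK) == 0        # an untouched property is skipped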

def from_xml(self, elem, handle=None):
"""
Method updates the object from the xml representation of the managed
object.
"""
self._handle = handle
if elem.attrib:
if self.__class__.__name__ != "ManagedObject":
for attr_name, attr_value in ucscgenutils.iteritems(
elem.attrib):
if attr_name in self.prop_map:
attr_name = self.prop_map[attr_name]
else:
self.__xtra_props[attr_name] = _GenericProp(
attr_name,
attr_value,
False)
object.__setattr__(self, attr_name, attr_value)
else:
for attr_name, attr_value in ucscgenutils.iteritems(
elem.attrib):
object.__setattr__(self, attr_name, attr_value)
if hasattr(self, 'rn') and not hasattr(self, 'dn'):
self._dn_set()
elif not hasattr(self, 'rn') and hasattr(self, 'dn'):
self.__set_prop("rn", os.path.basename(self.dn), forced=True)
self.mark_clean()
child_elems = list(elem)
if child_elems:
for child_elem in child_elems:
if not ET.iselement(child_elem):
continue
if self.__class__.__name__ != "ManagedObject" and (
child_elem.tag in self.mo_meta.field_names):
                    # field elements are handled by the generic child
                    # processing below; nothing special to do here
                    pass
class_id = ucscgenutils.word_u(child_elem.tag)
child_obj = ucsccoreutils.get_ucsc_obj(class_id,
child_elem,
self)
self.child_add(child_obj)
                child_obj.from_xml(child_elem, handle)
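
# Self-contained sketch of the attribute walk in from_xml(): element
# attributes become object attributes (a plain class stands in for the
# SDK's managed object; the XML snippet is illustrative).
import xml.etree.ElementTree as ET

elem = ET.fromstring('<lsServer dn="org-root/ls-web" descr="demo"/>')

class Stub(object):
    pass

obj = Stub()
for attr_name, attr_value in elem.attrib.items():
    object.__setattr__(obj, attr_name, attr_value)
assert obj.dn == 'org-root/ls-web'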