Python
def sync_client_commands(self) -> None:
    """Synchronizes the commands with the client."""
    if not self.client:
        return

    async def subcommand_caller(
        ctx: CommandContext,
        *args,
        sub_command_group: Optional[str] = None,
        sub_command: Optional[str] = None,
        **kwargs,
    ) -> Optional[Any]:
        """Calls all of the coroutines of the subcommand."""
        base_coro = self.base_coroutine
        if self._self:
            base_res = BaseResult(await base_coro(self._self, ctx, *args, **kwargs))
            if base_res() is StopCommand or isinstance(base_res(), StopCommand):
                return
            if self.data:
                if sub_command_group:
                    group_coro = self.coroutines[sub_command_group]
                    subcommand_coro = self.coroutines[f"{sub_command_group} {sub_command}"]
                    group_res = GroupResult(
                        await group_coro(self._self, ctx, base_res, *args, **kwargs), base_res
                    )
                    if group_res() is StopCommand or isinstance(group_res(), StopCommand):
                        return
                    return await subcommand_coro(self._self, ctx, group_res, *args, **kwargs)
                elif sub_command:
                    subcommand_coro = self.coroutines[sub_command]
                    return await subcommand_coro(self._self, ctx, base_res, *args, **kwargs)
            return base_res
        base_res = BaseResult(await base_coro(ctx, *args, **kwargs))
        if base_res() is StopCommand or isinstance(base_res(), StopCommand):
            return
        if self.data:
            if sub_command_group:
                group_coro = self.coroutines[sub_command_group]
                subcommand_coro = self.coroutines[f"{sub_command_group} {sub_command}"]
                group_res = GroupResult(
                    await group_coro(ctx, base_res, *args, **kwargs), base_res
                )
                if group_res() is StopCommand or isinstance(group_res(), StopCommand):
                    return
                return await subcommand_coro(ctx, group_res, *args, **kwargs)
            elif sub_command:
                subcommand_coro = self.coroutines[sub_command]
                return await subcommand_coro(ctx, base_res, *args, **kwargs)
        return base_res

    if self.scope in {None, MISSING} and not self.debug_scope:
        if isinstance(self.full_data, list):
            subcommand_caller._command_data = self.full_data[0]
        elif isinstance(self.full_data, dict):
            subcommand_caller._command_data = self.full_data
        else:
            subcommand_caller._command_data = self.full_data
    self.client._websocket._dispatch.events[f"command_{self.base}"] = [subcommand_caller]
    for i, coro in enumerate(self.client._Client__command_coroutines):
        if isinstance(coro._command_data, list):
            if coro._command_data[0]["name"] == self.base:
                del self.client._Client__command_coroutines[i]
        else:
            if coro._command_data["name"] == self.base:
                del self.client._Client__command_coroutines[i]
    self.client._Client__command_coroutines.append(subcommand_caller)
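To make the base -> group -> subcommand chaining above concrete, here is a minimal, self-contained sketch of the same control flow. The StopCommand and BaseResult classes below are stand-ins written for this example, not the real interactions-ext-enhanced implementations; only the chaining pattern mirrors the snippet above.

import asyncio
from typing import Any, Optional


class StopCommand:
    """Stand-in: returning this from a parent coroutine halts the chain."""


class BaseResult:
    """Stand-in: wraps a parent coroutine's return value; call it to read the value."""

    def __init__(self, result: Any):
        self.result = result

    def __call__(self) -> Any:
        return self.result


async def base(ctx) -> Optional[Any]:
    print("base ran")
    return "base-data"  # return StopCommand here to veto the subcommand


async def sub(ctx, base_res: BaseResult) -> str:
    return f"sub saw {base_res()!r}"


async def caller(ctx) -> Optional[Any]:
    base_res = BaseResult(await base(ctx))
    if base_res() is StopCommand or isinstance(base_res(), StopCommand):
        return  # the base coroutine vetoed the subcommand
    return await sub(ctx, base_res)


print(asyncio.run(caller(ctx=None)))  # base ran / sub saw 'base-data'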
Python
async def subcommand_caller(
    ctx: CommandContext,
    *args,
    sub_command_group: Optional[str] = None,
    sub_command: Optional[str] = None,
    **kwargs,
) -> Optional[Any]:
    """Calls all of the coroutines of the subcommand."""
    base_coro = self.base_coroutine
    if self._self:
        base_res = BaseResult(await base_coro(self._self, ctx, *args, **kwargs))
        if base_res() is StopCommand or isinstance(base_res(), StopCommand):
            return
        if self.data:
            if sub_command_group:
                group_coro = self.coroutines[sub_command_group]
                subcommand_coro = self.coroutines[f"{sub_command_group} {sub_command}"]
                group_res = GroupResult(
                    await group_coro(self._self, ctx, base_res, *args, **kwargs), base_res
                )
                if group_res() is StopCommand or isinstance(group_res(), StopCommand):
                    return
                return await subcommand_coro(self._self, ctx, group_res, *args, **kwargs)
            elif sub_command:
                subcommand_coro = self.coroutines[sub_command]
                return await subcommand_coro(self._self, ctx, base_res, *args, **kwargs)
        return base_res
    base_res = BaseResult(await base_coro(ctx, *args, **kwargs))
    if base_res() is StopCommand or isinstance(base_res(), StopCommand):
        return
    if self.data:
        if sub_command_group:
            group_coro = self.coroutines[sub_command_group]
            subcommand_coro = self.coroutines[f"{sub_command_group} {sub_command}"]
            group_res = GroupResult(
                await group_coro(ctx, base_res, *args, **kwargs), base_res
            )
            if group_res() is StopCommand or isinstance(group_res(), StopCommand):
                return
            return await subcommand_coro(ctx, group_res, *args, **kwargs)
        elif sub_command:
            subcommand_coro = self.coroutines[sub_command]
            return await subcommand_coro(ctx, base_res, *args, **kwargs)
    return base_res
Python
async def subcommand_caller(
    self,
    ctx: CommandContext,
    *args,
    sub_command_group: Optional[str] = None,
    sub_command: Optional[str] = None,
    **kwargs,
) -> Optional[Any]:
    """Calls all of the coroutines of the subcommand."""
    base_coro = self.base_coroutine
    if self._self:
        base_res = BaseResult(await base_coro(self._self, ctx, *args, **kwargs))
        if base_res() is StopCommand or isinstance(base_res(), StopCommand):
            return
        if self.data:
            if sub_command_group:
                group_coro = self.coroutines[sub_command_group]
                subcommand_coro = self.coroutines[f"{sub_command_group} {sub_command}"]
                group_res = GroupResult(
                    await group_coro(self._self, ctx, base_res, *args, **kwargs), base_res
                )
                if group_res() is StopCommand or isinstance(group_res(), StopCommand):
                    return
                return await subcommand_coro(self._self, ctx, group_res, *args, **kwargs)
            elif sub_command:
                subcommand_coro = self.coroutines[sub_command]
                return await subcommand_coro(self._self, ctx, base_res, *args, **kwargs)
        return base_res
    base_res = BaseResult(await base_coro(ctx, *args, **kwargs))
    if base_res() is StopCommand or isinstance(base_res(), StopCommand):
        return
    if self.data:
        if sub_command_group:
            group_coro = self.coroutines[sub_command_group]
            subcommand_coro = self.coroutines[f"{sub_command_group} {sub_command}"]
            group_res = GroupResult(await group_coro(ctx, base_res, *args, **kwargs), base_res)
            if group_res() is StopCommand or isinstance(group_res(), StopCommand):
                return
            return await subcommand_coro(ctx, group_res, *args, **kwargs)
        elif sub_command:
            subcommand_coro = self.coroutines[sub_command]
            return await subcommand_coro(ctx, base_res, *args, **kwargs)
    return base_res
Python
def command(
    self: Client,
    _coro: Optional[Coroutine] = MISSING,
    *,
    type: Optional[Union[int, ApplicationCommandType]] = ApplicationCommandType.CHAT_INPUT,
    name: Optional[str] = MISSING,
    description: Optional[str] = MISSING,
    scope: Optional[Union[int, Guild, List[int], List[Guild]]] = MISSING,
    options: Optional[Union[Dict[str, Any], List[Dict[str, Any]], Option, List[Option]]] = MISSING,
    debug_scope: Optional[bool] = True,
) -> Callable[..., Any]:
    """
    A modified decorator for creating slash commands.

    Makes `name` and `description` optional, and adds the ability to use `EnhancedOption`s.

    Full-blown example:

    ```py
    from interactions import OptionType, Channel
    from interactions.ext.enhanced import EnhancedOption
    from typing_extensions import Annotated

    @bot.command()
    async def options(
        ctx,
        option1: Annotated[str, EnhancedOption(description="...")],
        option2: Annotated[OptionType.MENTIONABLE, EnhancedOption(description="...")],
        option3: Annotated[Channel, EnhancedOption(description="...")],
    ):
        \"""Says something!\"""
        await ctx.send("something")
    ```

    Parameters:

    * `?type: int | ApplicationCommandType`: The type of application command. Defaults to `ApplicationCommandType.CHAT_INPUT`.
    * `?name: str`: The name of the command. Defaults to the function name.
    * `?description: str`: The description of the command. Defaults to the function docstring or `"No description"`.
    * `?scope: int | Guild | list[int] | list[Guild]`: The scope of the command.
    * `?options: list[Option]`: The options of the command.
    * `?debug_scope: bool`: Whether to use debug_scope for this command. Defaults to `True`.
    """

    def decorator(coro: Coroutine) -> Callable[..., Any]:
        _name = coro.__name__ if name is MISSING else name
        _description = (
            MISSING
            if type != ApplicationCommandType.CHAT_INPUT
            else getdoc(coro) or "No description"
            if description is MISSING
            else description
        )
        if isinstance(_description, str):
            _description = _description.split("\n")[0]
            if len(_description) > 100:
                raise ValueError("Description must be less than 100 characters.")
        _scope = (
            self.__debug_scope
            if scope is MISSING and hasattr(self, "__debug_scope") and debug_scope
            else scope
        )
        params = signature(coro).parameters
        _options = (
            coro.__decor_options
            if hasattr(coro, "__decor_options")
            else parameters_to_options(coro)
            if options is MISSING
            and len(params) > 1
            and any(
                isinstance(param.annotation, (EnhancedOption, _AnnotatedAlias))
                for _, param in params.items()
            )
            else options
        )
        log.debug(f"command: {_name=} {_description=} {_options=}")

        if not hasattr(coro, "manager") and type == ApplicationCommandType.CHAT_INPUT:
            coro.manager = Manager(coro, type, _name, _description, _scope, debug_scope, self)
            coro.subcommand = coro.manager.subcommand
            coro.group = coro.manager.group
            coro._original = True
            self.old_command(
                type=type,
                name=_name,
                description=_description,
                scope=_scope,
                options=_options,
            )(coro)
        elif type != ApplicationCommandType.CHAT_INPUT:
            self.old_command(
                type=type,
                name=_name,
                description=_description,
                scope=_scope,
                options=_options,
            )(coro)
        return coro

    if _coro is not MISSING:
        return decorator(_coro)
    return decorator
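A hedged usage sketch of the decorator above. The `subcommand` attribute is attached by the `Manager` in the decorator body; the `load(...)` setup step and the exact `@base.subcommand()` signature are assumptions from the extension's usual conventions, and the script is not runnable without a real bot token.

from interactions import Client
from interactions.ext.enhanced import EnhancedOption
from typing_extensions import Annotated

bot = Client(token="...")  # placeholder token
bot.load("interactions.ext.enhanced")  # assumed setup step for the extension

@bot.command()
async def base(ctx):
    """The base command; per subcommand_caller, its coroutine runs before any subcommand."""
    ...

@base.subcommand()
async def greet(ctx, name: Annotated[str, EnhancedOption(description="who to greet")]):
    """Greets someone."""
    await ctx.send(f"Hello, {name}!")

bot.start()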
Python
def delete_all_profiles_like(self, match=None, folder='Common'):
    """ Delete profiles that match by name """
    if not match:
        return False
    folder = str(folder).replace('/', '')
    request_url = self.bigip.icr_url + '/ltm/profile'
    response = self.bigip.icr_session.get(
        request_url, timeout=const.CONNECTION_TIMEOUT)
    if response.status_code < 400:
        response_obj = json.loads(response.text)
        if 'items' in response_obj:
            for p in response_obj['items']:
                type_link = self.bigip.icr_link(
                    p['reference']['link']
                ) + '&$select=name,selfLink&$filter=partition eq ' + \
                    folder
                pr_res = self.bigip.icr_session.get(
                    type_link, timeout=const.CONNECTION_TIMEOUT)
                if pr_res.status_code < 400:
                    pr_res_obj = json.loads(pr_res.text)
                    if 'items' in pr_res_obj:
                        for profile in pr_res_obj['items']:
                            if profile['name'].find(match) > -1:
                                # strip query parameters from the self link
                                profile['selfLink'] = \
                                    profile['selfLink'].split('?')[0]
                                del_resp = self.bigip.icr_session.delete(
                                    self.bigip.icr_link(
                                        profile['selfLink']),
                                    timeout=const.CONNECTION_TIMEOUT)
                                if del_resp.status_code > 399 and \
                                        del_resp.status_code != 404:
                                    Log.error('profile', del_resp.text)
                                    exps = exceptions
                                    exp = exps.VirtualServerDeleteException
                                    raise exp(del_resp.text)
                                else:
                                    self.folder_profiles = {}
                                    self.common_profiles = {}
                else:
                    Log.error('profile', pr_res.text)
                    raise exceptions.VirtualServerQueryException(
                        pr_res.text)
    elif response.status_code == 404:
        # nothing to delete; a missing collection counts as success
        pass
    else:
        raise exceptions.VirtualServerQueryException(response.text)
    return True
Python
def assure_bigip_create_vip(self, bigip, service, traffic_group):
    """ Called for every bigip only in replication mode,
        otherwise called once for autosync mode. """
    vip = service['vip']
    pool = service['pool']
    ip_address = vip['address']
    snat_pool_name = None
    network = vip['network']
    preserve_network_name = False

    if self.driver.conf.f5_global_routed_mode:
        network_name = None
    else:
        (network_name, preserve_network_name) = \
            self.bigip_l2_manager.get_network_name(bigip, network)
        if self.bigip_l2_manager.is_common_network(network):
            network_name = '/Common/' + network_name

    if self.driver.conf.f5_snat_mode and \
            self.driver.conf.f5_snat_addresses_per_subnet > 0:
        tenant_id = pool['tenant_id']
        snat_pool_name = bigip_interfaces.decorate_name(tenant_id, tenant_id)

    vip_info = {'network_name': network_name,
                'preserve_network_name': preserve_network_name,
                'ip_address': ip_address,
                'traffic_group': traffic_group,
                'snat_pool_name': snat_pool_name}
    just_added_vip = self._create_bigip_vip(bigip, service, vip_info)

    if vip['status'] == plugin_const.PENDING_CREATE or \
            vip['status'] == plugin_const.PENDING_UPDATE or \
            just_added_vip:
        self._update_bigip_vip(bigip, service)

    if self.l3_binding:
        self.l3_binding.bind_address(subnet_id=vip['subnet_id'],
                                     ip_address=ip_address)
Python
def assure_bigip_delete_vip(self, bigip, service):
    """ Remove vip from big-ip """
    vip = service['vip']
    bigip_vs = bigip.virtual_server

    LOG.debug(_('Vip: deleting VIP %s' % vip['id']))
    bigip_vs.remove_and_delete_persist_profile(
        name=vip['id'], folder=vip['tenant_id'])
    bigip_vs.delete(name=vip['id'], folder=vip['tenant_id'])

    bigip.rule.delete(name=RPS_THROTTLE_RULE_PREFIX + vip['id'],
                      folder=vip['tenant_id'])
    bigip_vs.delete_uie_persist_profile(
        name=APP_COOKIE_RULE_PREFIX + vip['id'],
        folder=vip['tenant_id'])
    bigip.rule.delete(name=APP_COOKIE_RULE_PREFIX + vip['id'],
                      folder=vip['tenant_id'])
    if self.l3_binding:
        self.l3_binding.unbind_address(subnet_id=vip['subnet_id'],
                                       ip_address=vip['address'])
Python
def _create_bigip_vip(self, bigip, service, vip_info):
    """ Create vip on big-ip """
    vip = service['vip']

    network_name = vip_info['network_name']
    preserve_network_name = vip_info['preserve_network_name']
    ip_address = vip_info['ip_address']
    vip_tg = vip_info['traffic_group']
    snat_pool_name = vip_info['snat_pool_name']

    bigip_vs = bigip.virtual_server

    # This is where you could decide to use a fastl4
    # or a standard virtual server. The problem is that
    # if someone updates the vip protocol or a session
    # persistence in a way that requires changing virtual
    # service types, you would have to make sure a virtual
    # of the wrong type does not already exist, or else
    # delete it first. That would cause a service
    # disruption. It would be better if the specification
    # did not allow you to update L7 attributes once you
    # have created an L4 service. You should have to
    # delete the vip and then create a new one. That way
    # the end user expects the service outage.
    virtual_type = 'fastl4'
    if 'protocol' in vip:
        if vip['protocol'] == 'HTTP':
            virtual_type = 'standard'
    if 'session_persistence' in vip:
        if vip['session_persistence'] == 'APP_COOKIE':
            virtual_type = 'standard'

    # Hard code to standard until we decide if we
    # want to handle the check/delete before create
    # and document the service outage associated
    # with deleting a virtual service. We'll leave
    # the steering logic for create in place.
    # Be aware the check/delete before create
    # is not in the logic below because it means
    # another set of interactions with the device
    # we don't need unless we decided to handle
    # shifting from L4 to L7 or from L7 to L4
    # virtual_type = 'standard'

    folder = vip['tenant_id']
    if '.' in ip_address:
        mask = '255.255.255.255'
    else:
        mask = None
    if virtual_type == 'standard':
        if bigip_vs.create(
                name=vip['id'],
                ip_address=ip_address,
                mask=mask,
                port=int(vip['protocol_port']),
                protocol=vip['protocol'],
                vlan_name=network_name,
                traffic_group=vip_tg,
                use_snat=self.driver.conf.f5_snat_mode,
                snat_pool=snat_pool_name,
                folder=folder,
                preserve_vlan_name=preserve_network_name
        ):
            return True
    else:
        if bigip_vs.create_fastl4(
                name=vip['id'],
                ip_address=ip_address,
                mask=mask,
                port=int(vip['protocol_port']),
                protocol=vip['protocol'],
                vlan_name=network_name,
                traffic_group=vip_tg,
                use_snat=self.driver.conf.f5_snat_mode,
                snat_pool=snat_pool_name,
                folder=folder,
                preserve_vlan_name=preserve_network_name
        ):
            return True
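The virtual-server type steering above, restated as a small pure function so the decision can be eyeballed and unit-tested without a device. It mirrors this snippet's direct string comparison for session_persistence (note that _update_bigip_vip reads a nested 'type' key instead, so only the create path's behavior is reproduced here).

def pick_virtual_type(vip):
    """Return 'standard' for L7 (HTTP / app-cookie) vips, else 'fastl4'."""
    virtual_type = 'fastl4'
    if vip.get('protocol') == 'HTTP':
        virtual_type = 'standard'
    if vip.get('session_persistence') == 'APP_COOKIE':
        virtual_type = 'standard'
    return virtual_type


print(pick_virtual_type({'protocol': 'HTTP'}))                   # standard
print(pick_virtual_type({'protocol': 'TCP'}))                    # fastl4
print(pick_virtual_type({'session_persistence': 'APP_COOKIE'}))  # standard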
Python
def _update_bigip_vip(self, bigip, service):
    """ Update vip on big-ip """
    vip = service['vip']
    pool = service['pool']
    bigip_vs = bigip.virtual_server

    desc = vip['name'] + ':' + vip['description']
    bigip_vs.set_description(name=vip['id'],
                             description=desc,
                             folder=pool['tenant_id'])

    bigip_vs.set_pool(name=vip['id'],
                      pool_name=pool['id'],
                      folder=pool['tenant_id'])
    if vip['admin_state_up']:
        bigip_vs.enable_virtual_server(name=vip['id'],
                                       folder=pool['tenant_id'])
    else:
        bigip_vs.disable_virtual_server(name=vip['id'],
                                        folder=pool['tenant_id'])

    if 'session_persistence' in vip and vip['session_persistence']:
        # branch on persistence type
        persistence_type = vip['session_persistence']['type']
        set_persist = bigip_vs.set_persist_profile
        set_fallback_persist = bigip_vs.set_fallback_persist_profile

        if persistence_type == 'SOURCE_IP':
            # add source_addr persistence profile
            LOG.debug('adding source_addr primary persistence')
            set_persist(name=vip['id'],
                        profile_name='/Common/source_addr',
                        folder=vip['tenant_id'])
            if pool['protocol'] == 'TCP' or pool['protocol'] == 'HTTPS':
                bigip_vs.remove_profile(name=vip['id'],
                                        profile_name='/Common/http',
                                        folder=vip['tenant_id'])
        elif persistence_type == 'HTTP_COOKIE':
            # HTTP cookie persistence requires an HTTP profile
            LOG.debug('adding http profile and primary cookie persistence')
            bigip_vs.add_profile(name=vip['id'],
                                 profile_name='/Common/http',
                                 folder=vip['tenant_id'])
            # add standard cookie persistence profile
            set_persist(name=vip['id'],
                        profile_name='/Common/cookie',
                        folder=vip['tenant_id'])
            if pool['lb_method'] == 'SOURCE_IP':
                set_fallback_persist(name=vip['id'],
                                     profile_name='/Common/source_addr',
                                     folder=vip['tenant_id'])
        elif persistence_type == 'APP_COOKIE':
            self._set_bigip_vip_cookie_persist(bigip, service)
    elif pool['lb_method'].upper() == 'SOURCE_IP':
        bigip_vs.set_persist_profile(name=vip['id'],
                                     profile_name='/Common/source_addr',
                                     folder=vip['tenant_id'])
    else:
        bigip_vs.remove_all_persist_profiles(name=vip['id'],
                                             folder=vip['tenant_id'])

    if vip['connection_limit'] > 0 and 'protocol' in vip:
        conn_limit = int(vip['connection_limit'])
        LOG.debug('setting connection limit')
        # if not HTTP.. use connection limits
        bigip_vs.set_connection_limit(name=vip['id'],
                                      connection_limit=conn_limit,
                                      folder=pool['tenant_id'])
    else:
        # clear the connection limits
        LOG.debug('removing connection limits')
        bigip_vs.set_connection_limit(name=vip['id'],
                                      connection_limit=0,
                                      folder=pool['tenant_id'])
Python
def initialize_tunneling(self, bigip):
    """ setup tunneling
        setup VTEP tunnels if needed """
    vtep_folder = self.conf.f5_vtep_folder
    vtep_selfip_name = self.conf.f5_vtep_selfip_name
    local_ips = []

    # for bigip in self.driver.get_all_bigips():
    if not vtep_folder or vtep_folder.lower() == 'none':
        vtep_folder = 'Common'

    if vtep_selfip_name and \
            not vtep_selfip_name.lower() == 'none':

        # profiles may already exist
        bigip.vxlan.create_multipoint_profile(
            name='vxlan_ovs', folder='Common')
        bigip.l2gre.create_multipoint_profile(
            name='gre_ovs', folder='Common')

        # find the IP address for the selfip for each box
        local_ip = bigip.selfip.get_addr(vtep_selfip_name, vtep_folder)
        if local_ip:
            bigip.local_ip = local_ip
            local_ips.append(local_ip)
        else:
            raise f5ex.MissingVTEPAddress(
                'device %s missing vtep selfip %s'
                % (bigip.device_name,
                   '/' + vtep_folder + '/' + vtep_selfip_name))
    return local_ips
Python
def prep_service_networking(self, service, traffic_group):
    """ Assure network connectivity is established on all
        bigips for the service. """
    if self.conf.f5_global_routed_mode or not service['pool']:
        return

    if self.conf.use_namespaces:
        self._annotate_service_route_domains(service)

    # Per Device Network Connectivity (VLANs or Tunnels)
    subnetsinfo = _get_subnets_to_assure(service)
    for (assure_bigip, subnetinfo) in \
            itertools.product(self.driver.get_all_bigips(), subnetsinfo):
        self.bigip_l2_manager.assure_bigip_network(
            assure_bigip, subnetinfo['network'])
        self.bigip_selfip_manager.assure_bigip_selfip(
            assure_bigip, service, subnetinfo)

    # L3 Shared Config
    assure_bigips = self.driver.get_config_bigips()
    for subnetinfo in subnetsinfo:
        if self.conf.f5_snat_addresses_per_subnet > 0:
            self._assure_subnet_snats(assure_bigips, service, subnetinfo)

        if subnetinfo['is_for_member'] and not self.conf.f5_snat_mode:
            self._allocate_gw_addr(subnetinfo)
            for assure_bigip in assure_bigips:
                # if we are not using SNATS, attempt to become
                # the subnet's default gateway.
                self.bigip_selfip_manager.assure_gateway_on_subnet(
                    assure_bigip, subnetinfo, traffic_group)
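The per-device pass above uses itertools.product to visit every (bigip, subnet) pair, so each device gets every subnet assured exactly once. A standalone illustration with plain strings:

import itertools

bigips = ['bigip-a', 'bigip-b']
subnets = ['subnet-1', 'subnet-2', 'subnet-3']

for bigip, subnet in itertools.product(bigips, subnets):
    print(bigip, subnet)  # 6 pairs: every bigip crossed with every subnet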
Python
def _annotate_service_route_domains(self, service):
    """ Add route domain notation to pool member and vip addresses. """
    LOG.debug("Service before route domains: %s" % service)
    tenant_id = service['pool']['tenant_id']
    self.update_rds_cache(tenant_id)

    if 'members' in service:
        for member in service['members']:
            LOG.debug("processing member %s" % member['address'])
            if 'address' in member:
                if 'network' in member and member['network']:
                    self.assign_route_domain(
                        tenant_id, member['network'], member['subnet'])
                    rd_id = '%' + str(member['network']['route_domain_id'])
                    member['address'] += rd_id
                else:
                    member['address'] += '%0'
    if 'vip' in service and 'address' in service['vip']:
        vip = service['vip']
        if 'network' in vip and vip['network']:
            self.assign_route_domain(
                tenant_id, vip['network'], vip['subnet'])
            rd_id = '%' + str(vip['network']['route_domain_id'])
            service['vip']['address'] += rd_id
        else:
            service['vip']['address'] += '%0'
    LOG.debug("Service after route domains: %s" % service)
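Route-domain annotation is just a '%<id>' suffix appended to the address string (BIG-IP route-domain notation). A minimal rendering of what the method does to a single member:

member = {'address': '10.0.0.5', 'network': {'route_domain_id': 2}}

rd_id = '%' + str(member['network']['route_domain_id'])
member['address'] += rd_id
print(member['address'])  # 10.0.0.5%2 -- members with no network get '%0'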
Python
def assign_route_domain(self, tenant_id, network, subnet):
    """ Assign route domain for a network """
    if self.bigip_l2_manager.is_common_network(network):
        network['route_domain_id'] = 0
        return

    LOG.debug("assign route domain get from cache %s" % network)
    route_domain_id = self.get_route_domain_from_cache(network)
    if route_domain_id is not None:
        network['route_domain_id'] = route_domain_id
        return

    LOG.debug("max namespaces: %s" % self.conf.max_namespaces_per_tenant)
    LOG.debug("max namespaces ==1: %s" %
              (self.conf.max_namespaces_per_tenant == 1))

    if self.conf.max_namespaces_per_tenant == 1:
        bigip = self.driver.get_bigip()
        LOG.debug("bigip before get_domain: %s" % bigip)
        tenant_rd = bigip.route.get_domain(folder=tenant_id)
        network['route_domain_id'] = tenant_rd
        return

    LOG.debug("assign route domain checking for available route domain")
    # need new route domain ?
    check_cidr = netaddr.IPNetwork(subnet['cidr'])
    placed_route_domain_id = None
    for route_domain_id in self.rds_cache[tenant_id]:
        LOG.debug("checking rd %s" % route_domain_id)
        rd_entry = self.rds_cache[tenant_id][route_domain_id]
        overlapping_subnet = None
        for net_shortname in rd_entry:
            LOG.debug("checking net %s" % net_shortname)
            net_entry = rd_entry[net_shortname]
            for exist_subnet_id in net_entry['subnets']:
                if exist_subnet_id == subnet['id']:
                    continue
                exist_subnet = net_entry['subnets'][exist_subnet_id]
                exist_cidr = exist_subnet['cidr']
                if check_cidr in exist_cidr or exist_cidr in check_cidr:
                    overlapping_subnet = exist_subnet
                    LOG.debug('rd %s: overlaps with subnet %s id: %s' %
                              (route_domain_id, exist_subnet, exist_subnet_id))
                    break
            if overlapping_subnet:
                # no need to keep looking
                break
        if not overlapping_subnet:
            placed_route_domain_id = route_domain_id
            break

    if placed_route_domain_id is None:
        if (len(self.rds_cache[tenant_id]) <
                self.conf.max_namespaces_per_tenant):
            placed_route_domain_id = self._create_aux_rd(tenant_id)
            self.rds_cache[tenant_id][placed_route_domain_id] = {}
            LOG.debug("Tenant %s now has %d route domains" %
                      (tenant_id, len(self.rds_cache[tenant_id])))
        else:
            raise Exception("Cannot allocate route domain")

    LOG.debug("Placed in route domain %s" % placed_route_domain_id)
    rd_entry = self.rds_cache[tenant_id][placed_route_domain_id]

    net_short_name = self.get_neutron_net_short_name(network)
    if net_short_name not in rd_entry:
        rd_entry[net_short_name] = {'subnets': {}}
    net_subnets = rd_entry[net_short_name]['subnets']
    net_subnets[subnet['id']] = {'cidr': check_cidr}
    network['route_domain_id'] = placed_route_domain_id
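The overlap test above relies on netaddr's containment semantics: one IPNetwork is "in" another when its address range is a subset, so checking both directions catches any nesting. A quick standalone check:

import netaddr

check_cidr = netaddr.IPNetwork('10.1.1.0/24')
exist_cidr = netaddr.IPNetwork('10.1.0.0/16')

print(check_cidr in exist_cidr)  # True  -> the /24 nests inside the /16
print(exist_cidr in check_cidr)  # False -> a /16 cannot fit in a /24
print(netaddr.IPNetwork('10.2.0.0/24') in exist_cidr)  # False -> disjoint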
Python
def update_rds_cache(self, tenant_id):
    """ Update the route domain cache from bigips """
    if tenant_id not in self.rds_cache:
        LOG.debug("rds_cache: adding tenant %s" % tenant_id)
        self.rds_cache[tenant_id] = {}
        for bigip in self.driver.get_all_bigips():
            self.update_rds_cache_bigip(tenant_id, bigip)
        LOG.debug("rds_cache updated: " + str(self.rds_cache))
Python
def update_rds_cache_bigip(self, tenant_id, bigip):
    """ Update the route domain cache for this tenant
        with information from bigip's vlan and tunnels """
    LOG.debug("rds_cache: processing bigip %s" % bigip.device_name)

    route_domain_ids = bigip.route.get_domain_ids(folder=tenant_id)
    # LOG.debug("rds_cache: got bigip route domains: %s" % route_domains)
    for route_domain_id in route_domain_ids:
        self.update_rds_cache_bigip_rd_vlans(
            tenant_id, bigip, route_domain_id)
Python
def update_rds_cache_bigip_rd_vlans(
        self, tenant_id, bigip, route_domain_id):
    """ Update the route domain cache with information
        from the bigip vlans and tunnels from this route domain """
    LOG.debug("rds_cache: processing bigip %s rd %s"
              % (bigip.device_name, route_domain_id))

    # this gets tunnels too
    rd_vlans = bigip.route.get_vlans_in_domain_by_id(
        folder=tenant_id, route_domain_id=route_domain_id)
    LOG.debug("rds_cache: bigip %s rd %s vlans: %s"
              % (bigip.device_name, route_domain_id, rd_vlans))
    if len(rd_vlans) == 0:
        return

    # make sure this rd has a cache entry
    tenant_entry = self.rds_cache[tenant_id]
    if route_domain_id not in tenant_entry:
        tenant_entry[route_domain_id] = {}

    # for every VLAN or TUNNEL on this bigip...
    for rd_vlan in rd_vlans:
        self.update_rds_cache_bigip_vlan(
            tenant_id, bigip, route_domain_id, rd_vlan)
Python
def update_rds_cache_bigip_vlan(
        self, tenant_id, bigip, route_domain_id, rd_vlan):
    """ Update the route domain cache with information
        from the bigip vlan or tunnel """
    LOG.debug("rds_cache: processing bigip %s rd %d vlan %s"
              % (bigip.device_name, route_domain_id, rd_vlan))
    net_short_name = self.get_bigip_net_short_name(
        bigip, tenant_id, rd_vlan)

    # make sure this net has a cache entry
    tenant_entry = self.rds_cache[tenant_id]
    rd_entry = tenant_entry[route_domain_id]
    if net_short_name not in rd_entry:
        rd_entry[net_short_name] = {'subnets': {}}
    net_subnets = rd_entry[net_short_name]['subnets']

    selfips = bigip.selfip.get_selfips(folder=tenant_id, vlan=rd_vlan)
    LOG.debug("rds_cache: got selfips: %s" % selfips)
    for selfip in selfips:
        LOG.debug("rds_cache: processing bigip %s rd %s vlan %s self %s"
                  % (bigip.device_name, route_domain_id, rd_vlan,
                     selfip['name']))
        if bigip.device_name not in selfip['name']:
            LOG.error("rds_cache: Found unexpected selfip %s for tenant %s"
                      % (selfip['name'], tenant_id))
            continue
        subnet_id = selfip['name'].split(bigip.device_name + '-')[1]

        # convert 10.1.1.1%1/24 to 10.1.1.1/24
        addr = selfip['address'].split('/')[0]
        addr = addr.split('%')[0]
        netbits = selfip['address'].split('/')[1]
        selfip['address'] = addr + '/' + netbits

        # selfip addresses will have slash notation: 10.1.1.1/24
        netip = netaddr.IPNetwork(selfip['address'])
        LOG.debug("rds_cache: updating subnet %s with %s"
                  % (subnet_id, str(netip.cidr)))
        net_subnets[subnet_id] = {'cidr': netip.cidr}
        LOG.debug("rds_cache: now %s" % self.rds_cache)
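The address normalization in the loop above strips BIG-IP route-domain notation (address%rd/prefix) down to plain CIDR before handing it to netaddr. The same three lines in isolation:

address = '10.1.1.1%1/24'

addr = address.split('/')[0]     # '10.1.1.1%1'
addr = addr.split('%')[0]        # '10.1.1.1'
netbits = address.split('/')[1]  # '24'
print(addr + '/' + netbits)      # 10.1.1.1/24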
Python
def remove_from_rds_cache(self, network, subnet):
    """ Remove a subnet from the route domain cache by network """
    net_short_name = self.get_neutron_net_short_name(network)
    for tenant_id in self.rds_cache:
        tenant_cache = self.rds_cache[tenant_id]
        for route_domain_id in tenant_cache:
            if net_short_name in tenant_cache[route_domain_id]:
                net_entry = tenant_cache[route_domain_id][net_short_name]
                # subnets live under the 'subnets' key of each network entry
                # (see assign_route_domain and update_rds_cache_bigip_vlan)
                if subnet['id'] in net_entry['subnets']:
                    del net_entry['subnets'][subnet['id']]
Python
def _assure_subnet_snats(self, assure_bigips, service, subnetinfo):
    """ Ensure snat for subnet exists on bigips """
    tenant_id = service['pool']['tenant_id']
    subnet = subnetinfo['subnet']
    assure_bigips = \
        [bigip for bigip in assure_bigips
         if tenant_id not in bigip.assured_tenant_snat_subnets or
         subnet['id'] not in
         bigip.assured_tenant_snat_subnets[tenant_id]]
    if len(assure_bigips):
        snat_addrs = self.bigip_snat_manager.get_snat_addrs(
            subnetinfo, tenant_id)
        for assure_bigip in assure_bigips:
            self.bigip_snat_manager.assure_bigip_snats(
                assure_bigip, subnetinfo, snat_addrs, tenant_id)
Python
def _allocate_gw_addr(self, subnetinfo):
    """ Create a name for the port and for the IP Forwarding
        Virtual Server as well as the floating Self IP which
        will answer ARP for the members """
    network = subnetinfo['network']
    if not network:
        LOG.error(_('Attempted to create default gateway'
                    ' for network with no id.. skipping.'))
        return

    subnet = subnetinfo['subnet']
    gw_name = "gw-" + subnet['id']
    ports = self.driver.plugin_rpc.get_port_by_name(port_name=gw_name)
    # initialize the flag so the check below never hits an unbound name
    need_port_for_gateway = False
    if len(ports) < 1:
        need_port_for_gateway = True

    # There was no port on this agent's host, so get one from Neutron
    if need_port_for_gateway:
        try:
            rpc = self.driver.plugin_rpc
            new_port = rpc.create_port_on_subnet_with_specific_ip(
                subnet_id=subnet['id'], mac_address=None,
                name=gw_name, ip_address=subnet['gateway_ip'])
            LOG.info(_('gateway IP for subnet %s will be port %s'
                       % (subnet['id'], new_port['id'])))
        except Exception as exc:
            ermsg = 'Invalid default gateway for subnet %s:%s - %s.' \
                % (subnet['id'], subnet['gateway_ip'], exc.message)
            ermsg += " SNAT will not function and load balancing"
            ermsg += " support will likely fail. Enable f5_snat_mode."
            LOG.error(_(ermsg))
    return True
Python
def post_service_networking(self, service, all_subnet_hints):
    """ Assure networks are deleted from big-ips """
    if self.conf.f5_global_routed_mode:
        return

    # L2toL3 networking layer
    # Non Shared Config - Local Per BIG-IP
    self.update_bigip_l2(service)

    # Delete shared config objects
    deleted_names = set()
    for bigip in self.driver.get_config_bigips():
        LOG.debug('    post_service_networking: calling '
                  '_assure_delete_networks del nets sh for bigip %s %s'
                  % (bigip.device_name, all_subnet_hints))
        subnet_hints = all_subnet_hints[bigip.device_name]
        deleted_names = deleted_names.union(
            self._assure_delete_nets_shared(bigip, service,
                                            subnet_hints))

    # avoids race condition:
    # deletion of shared ip objects must sync before we
    # remove the selfips or vlans from the peer bigips.
    self.driver.sync_if_clustered()

    # Delete non shared config objects
    for bigip in self.driver.get_all_bigips():
        LOG.debug('    post_service_networking: calling '
                  '_assure_delete_networks del nets ns for bigip %s'
                  % bigip.device_name)
        if self.conf.f5_sync_mode == 'replication':
            subnet_hints = all_subnet_hints[bigip.device_name]
        else:
            # If in autosync mode, then the IP operations were performed
            # on just the primary big-ip, and so that is where the subnet
            # hints are stored. So, just use those hints for every bigip.
            device_name = self.driver.get_bigip().device_name
            subnet_hints = all_subnet_hints[device_name]
        deleted_names = deleted_names.union(
            self._assure_delete_nets_nonshared(
                bigip, service, subnet_hints))

    for port_name in deleted_names:
        LOG.debug('    post_service_networking: calling '
                  'del port %s' % port_name)
        self.driver.plugin_rpc.delete_port_by_name(
            port_name=port_name)
def _assure_delete_nets_shared(self, bigip, service, subnet_hints): """ Assure shared configuration (which syncs) is deleted """ deleted_names = set() tenant_id = service['pool']['tenant_id'] delete_gateway = self.bigip_selfip_manager.delete_gateway_on_subnet for subnetinfo in _get_subnets_to_delete(bigip, service, subnet_hints): try: if not self.conf.f5_snat_mode: gw_name = delete_gateway(bigip, subnetinfo) deleted_names.add(gw_name) my_deleted_names, my_in_use_subnets = \ self.bigip_snat_manager.delete_bigip_snats( bigip, subnetinfo, tenant_id) deleted_names = deleted_names.union(my_deleted_names) for in_use_subnetid in my_in_use_subnets: subnet_hints['check_for_delete_subnets'].pop( in_use_subnetid, None) except NeutronException as exc: LOG.error("assure_delete_nets_shared: exception: %s" % str(exc.msg)) except Exception as exc: LOG.error("assure_delete_nets_shared: exception: %s" % str(exc.message)) return deleted_names
def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): """ Delete non shared base objects for networks """ deleted_names = set() for subnetinfo in _get_subnets_to_delete(bigip, service, subnet_hints): try: network = subnetinfo['network'] if self.bigip_l2_manager.is_common_network(network): network_folder = 'Common' else: network_folder = service['pool']['tenant_id'] subnet = subnetinfo['subnet'] if self.conf.f5_populate_static_arp: bigip.arp.delete_by_subnet(subnet=subnet['cidr'], mask=None, folder=network_folder) local_selfip_name = "local-" + bigip.device_name + \ "-" + subnet['id'] selfip_address = bigip.selfip.get_addr(name=local_selfip_name, folder=network_folder) bigip.selfip.delete(name=local_selfip_name, folder=network_folder) if self.l3_binding: self.l3_binding.unbind_address(subnet_id=subnet['id'], ip_address=selfip_address) deleted_names.add(local_selfip_name) self.bigip_l2_manager.delete_bigip_network(bigip, network) if subnet['id'] not in subnet_hints['do_not_delete_subnets']: subnet_hints['do_not_delete_subnets'].append(subnet['id']) self.remove_from_rds_cache(network, subnet) tenant_id = service['pool']['tenant_id'] if tenant_id in bigip.assured_tenant_snat_subnets: tenant_snat_subnets = \ bigip.assured_tenant_snat_subnets[tenant_id] if subnet['id'] in tenant_snat_subnets: tenant_snat_subnets.remove(subnet['id']) except NeutronException as exc: LOG.error("assure_delete_nets_nonshared: exception: %s" % str(exc.msg)) except Exception as exc: LOG.error("assure_delete_nets_nonshared: exception: %s" % str(exc.message)) return deleted_names
def _get_subnets_to_assure(service): """ Examine service and return active networks """ networks = dict() vip = service['vip'] if 'id' in vip and \ not vip['status'] == plugin_const.PENDING_DELETE: if 'network' in vip and vip['network']: network = vip['network'] subnet = vip['subnet'] networks[network['id']] = {'network': network, 'subnet': subnet, 'is_for_member': False} for member in service['members']: if not member['status'] == plugin_const.PENDING_DELETE: if 'network' in member and member['network']: network = member['network'] subnet = member['subnet'] networks[network['id']] = {'network': network, 'subnet': subnet, 'is_for_member': True} return networks.values()
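A point worth noting here: because the dict is keyed by network id, a member on the same network as the vip overwrites the vip's entry, flipping 'is_for_member' to True. A minimal sketch with fabricated data (run in the same module, so plugin_const is in scope; statuses just need to differ from plugin_const.PENDING_DELETE):

service = {
    'vip': {'id': 'vip-1', 'status': 'ACTIVE',
            'network': {'id': 'net-1'}, 'subnet': {'id': 'subnet-1'}},
    'members': [
        {'status': 'ACTIVE',
         'network': {'id': 'net-1'}, 'subnet': {'id': 'subnet-1'}},
    ],
}

for info in _get_subnets_to_assure(service):
    print(info['network']['id'], info['is_for_member'])
# net-1 True  <- the member entry replaced the vip entry for net-1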
def validate_pools_state(self, pools):
    """Get the status of a list of pool IDs in Neutron"""
    return self.call(
        self.context,
        self.make_msg('validate_pools_state', pools=pools, host=self.host),
        topic=self.topic
    )
def validate_vips_state(self, vips):
    """Get the status of a list of vip IDs in Neutron"""
    return self.call(
        self.context,
        self.make_msg('validate_vips_state', vips=vips, host=self.host),
        topic=self.topic
    )
def delete_by_mac(self, mac_address=None, folder='Common'): """ Delete an ARP static entry by MAC address """ if mac_address: arps = self.get_arps(None, folder) for arp in arps: for ip_address in arp: if arp[ip_address] == mac_address: self.delete(ip_address=ip_address, folder=folder)
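The loop above assumes get_arps returns a list of single-entry {ip_address: mac_address} dicts. A small sketch of the same lookup with fabricated data:

arps = [{'10.0.0.5': 'fa:16:3e:aa:bb:cc'},
        {'10.0.0.6': 'fa:16:3e:dd:ee:ff'}]
target_mac = 'fa:16:3e:aa:bb:cc'
to_delete = [ip for arp in arps
             for ip, mac in arp.items() if mac == target_mac]
print(to_delete)  # ['10.0.0.5']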
def delete_by_subnet(self, subnet=None, mask=None, folder='Common'):
    """ Delete ARP static entries on subnet """
    if subnet:
        mask_div = subnet.find('/')
        if mask_div > 0:
            try:
                # Strip any %<route-domain> decoration from the
                # address part ('addr%rd/mask' -> 'addr/mask') before
                # handing the CIDR to netaddr.
                rd_div = subnet.find('%')
                if rd_div > -1:
                    network = netaddr.IPNetwork(
                        subnet[0:mask_div][0:rd_div] + subnet[mask_div:])
                else:
                    network = netaddr.IPNetwork(subnet)
            except Exception as exc:
                Log.error('ARP', exc.message)
                return []
        elif not mask:
            return []
        else:
            try:
                # Same route-domain stripping for the separate
                # address-plus-mask form of the arguments.
                rd_div = subnet.find('%')
                if rd_div > -1:
                    network = netaddr.IPNetwork(
                        subnet[0:rd_div] + '/' + mask)
                else:
                    network = netaddr.IPNetwork(subnet + '/' + mask)
            except Exception as exc:
                Log.error('ARP', exc.message)
                return []

        return self._delete_by_network(folder, network)
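The slicing in that method strips a BIG-IP route-domain decoration (addr%rd) before netaddr parses the CIDR. The same transformation as a standalone sketch (the helper name is mine, not the driver's):

import netaddr

def strip_route_domain(cidr):
    """'10.1.0.1%2/24' -> IPNetwork('10.1.0.1/24')."""
    rd = cidr.find('%')
    mask = cidr.find('/')
    if rd > -1:
        # Keep the address up to '%' and re-attach the mask, if any.
        cidr = cidr[:rd] + (cidr[mask:] if mask > rd else '')
    return netaddr.IPNetwork(cidr)

print(strip_route_domain('10.1.0.1%2/24'))  # 10.1.0.1/24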
def _remove_route_domain_zero(self, ip_address): """ Remove route domain zero from ip_address """ decorator_index = ip_address.find('%0') if decorator_index > 0: ip_address = ip_address[:decorator_index] return ip_address
def add_folder(folder, name): """ Add a BIG-IP folder """ folder = str(folder).replace("/", "") if not str(name).startswith("/" + folder + "/"): return "/" + folder + "/" + name else: return name
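A quick usage note: the helper is idempotent on already-decorated names, and leading/trailing slashes in the folder argument are tolerated:

print(add_folder('Common', 'my-vs'))          # /Common/my-vs
print(add_folder('/Common/', 'my-vs'))        # /Common/my-vs
print(add_folder('Common', '/Common/my-vs'))  # /Common/my-vs (unchanged)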
def sync(self, name, force_now=False):
    """ Ensure local device in sync with group """
    sync_start_time = time.time()
    dev_name = self.get_local_device_name()
    sleep_delay = const.SYNC_DELAY

    attempts = 0
    if force_now:
        self.sync_local_device_to_group(name)
        time.sleep(sleep_delay)
        attempts += 1

    while attempts < const.MAX_SYNC_ATTEMPTS:
        state = self.get_sync_status()
        if state in ['Standalone', 'In Sync']:
            break

        elif state == 'Awaiting Initial Sync':
            attempts += 1
            Log.info(
                'Cluster',
                "Device %s - Synchronizing initial config to group %s"
                % (dev_name, name))
            self.sync_local_device_to_group(name)
            time.sleep(sleep_delay)

        elif state in ['Disconnected',
                       'Not All Devices Synced',
                       'Changes Pending']:
            attempts += 1

            last_log_time = 0
            now = time.time()
            wait_start_time = now
            # Keep checking the sync state in a quick loop.
            # We want to detect In Sync as quickly as possible.
            while now - wait_start_time < sleep_delay:
                # Only log once per second
                if now - last_log_time >= 1:
                    Log.info(
                        'Cluster',
                        'Device %s, Group %s not synced. '
                        % (dev_name, name) +
                        'Waiting. State is: %s'
                        % state)
                    last_log_time = now
                state = self.get_sync_status()
                if state in ['Standalone', 'In Sync']:
                    break
                time.sleep(.5)
                now = time.time()
            else:
                # If we didn't break out due to the group being in sync
                # then attempt to force a sync.
                self.sync_local_device_to_group(name)
                sleep_delay += const.SYNC_DELAY
                # No need to sleep here because we already spent the
                # sleep interval checking status.
                continue

            # Only a break from the inner while loop due to Standalone
            # or In Sync will reach here. Normal exit of the inner loop
            # reaches the else clause above, which continues the outer
            # loop.
            break

        elif state == 'Sync Failure':
            Log.info('Cluster',
                     "Device %s - Synchronization failed for %s"
                     % (dev_name, name))
            Log.debug('Cluster', 'SYNC SECONDS (Sync Failure): ' +
                      str(time.time() - sync_start_time))
            raise exceptions.BigIPClusterSyncFailure(
                'Device service group %s' % name +
                ' failed after ' +
                '%s attempts.' % const.MAX_SYNC_ATTEMPTS +
                ' Correct sync problem manually' +
                ' according to sol13946 on' +
                ' support.f5.com.')

        else:
            attempts += 1
            Log.info('Cluster',
                     "Device %s " % dev_name
                     + "Synchronizing config attempt %s to group %s:"
                     % (attempts, name)
                     + " current state: %s" % state)
            self.sync_local_device_to_group(name)
            time.sleep(sleep_delay)
            sleep_delay += const.SYNC_DELAY
    else:
        if state == 'Disconnected':
            Log.debug('Cluster',
                      'SYNC SECONDS (Disconnected): ' +
                      str(time.time() - sync_start_time))
            raise exceptions.BigIPClusterSyncFailure(
                'Device service group %s' % name +
                ' could not reach a sync state' +
                ' because its devices cannot communicate' +
                ' over the sync network. Please' +
                ' check connectivity.')
        else:
            Log.debug('Cluster',
                      'SYNC SECONDS (Timeout): ' +
                      str(time.time() - sync_start_time))
            raise exceptions.BigIPClusterSyncFailure(
                'Device service group %s' % name +
                ' could not reach a sync state after ' +
                '%s attempts.' % const.MAX_SYNC_ATTEMPTS +
                ' It is in %s state currently.' % state +
                ' Correct sync problem manually' +
                ' according to sol13946 on' +
                ' support.f5.com.')

    Log.debug('Cluster', 'SYNC SECONDS (Success): ' +
              str(time.time() - sync_start_time))
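Stripped of the BIG-IP specifics, the loop above is a force-then-poll pattern with a growing delay between attempts. A minimal generic sketch of that shape (function names and defaults are mine, not the driver's):

import time

def wait_for_state(get_state, force_action, good_states,
                   max_attempts=10, delay=30, backoff=30):
    """Act, then poll until get_state() reports one of good_states."""
    state = None
    for _ in range(max_attempts):
        state = get_state()
        if state in good_states:
            return state
        force_action()          # e.g. push the local config to the group
        time.sleep(delay)
        delay += backoff        # widen the window on each failed attempt
    raise RuntimeError('never reached %s; last state was %s'
                       % (good_states, state))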
def add_peer(self, name, mgmt_ip_address, username, password):
    """ Add a peer to the local trust group """
    if not self.peer_exists(name):
        if self.bigip.device.get_lock():
            local_device = self.get_local_device_name()
            local_mgmt_address = self.get_local_device_addr()
            root_mgmt_dict = {'root_device_name': local_device,
                              'root_device_mgmt_address':
                              local_mgmt_address}
            local_md = self.bigip.device.get_metadata()
            if local_md and 'root_device_name' in local_md.keys():
                md_device_name = os.path.basename(
                    local_md['root_device_name'])
                if md_device_name:
                    if not md_device_name == local_device:
                        raise exceptions.BigIPClusterPeerAddFailure(
                            'the device used to peer %s' % name +
                            ' was already itself peered from root' +
                            ' device: %s'
                            % local_md['root_device_name'])
            self.bigip.device.update_metadata(None, root_mgmt_dict)
            Log.info('Cluster', 'Device %s - adding peer %s'
                                % (local_device, name))
            self.mgmt_trust.add_authority_device(mgmt_ip_address,
                                                 username,
                                                 password,
                                                 name,
                                                 '', '',
                                                 '', '')
            attempts = 0
            while attempts < const.PEER_ADD_ATTEMPTS_MAX:
                if self.get_sync_status() == "OFFLINE":
                    self.mgmt_trust.remove_device([name])
                    self.mgmt_trust.add_authority_device(mgmt_ip_address,
                                                         username,
                                                         password,
                                                         name,
                                                         '', '',
                                                         '', '')
                else:
                    self.bigip.device.release_lock()
                    return
                time.sleep(const.PEER_ADD_ATTEMPT_DELAY)
                attempts += 1
            else:
                raise exceptions.BigIPClusterPeerAddFailure(
                    'Could not add peer device %s' % name +
                    ' as a trust for device %s'
                    % os.path.basename(self.mgmt_dev.get_local_device()) +
                    ' after %s attempts' % const.PEER_ADD_ATTEMPTS_MAX)
        else:
            raise exceptions.BigIPDeviceLockAcquireFailed(
                'Unable to obtain device lock for device %s'
                % os.path.basename(self.mgmt_dev.get_local_device())
            )
def enable_auto_sync(self, name): """ Enable autosync on a device group """ payload = dict() payload['autoSync'] = 'enabled' request_url = self.bigip.icr_url + '/cm/device-group/~Common~' request_url += name response = self.bigip.icr_session.put( request_url, data=json.dumps(payload), timeout=const.CONNECTION_TIMEOUT) if response.status_code < 400: return True else: Log.error('device-group', response.text) raise exceptions.ClusterUpdateException(response.text)
def disable_auto_sync(self, name): """ Disable autosync on a device group """ payload = dict() payload['autoSync'] = 'disabled' request_url = self.bigip.icr_url + '/cm/device-group/~Common~' request_url += name response = self.bigip.icr_session.put( request_url, data=json.dumps(payload), timeout=const.CONNECTION_TIMEOUT) if response.status_code < 400: return True else: Log.error('device-group', response.text) raise exceptions.ClusterUpdateException(response.text)
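Both helpers reduce to the same authenticated PUT against the device-group collection. A hedged sketch of that call with plain requests (host, credentials, and session setup here are assumptions, not taken from the driver):

import json
import requests

session = requests.Session()
session.auth = ('admin', 'admin')          # assumed credentials
session.verify = False                     # typical for lab BIG-IPs
session.headers.update({'Content-Type': 'application/json'})

response = session.put(
    'https://bigip.example.com/mgmt/tm/cm/device-group/~Common~my-group',
    data=json.dumps({'autoSync': 'disabled'}),
    timeout=30)
response.raise_for_status()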
def remove_all_devices(self, name): """ Remove all devices from device group """ request_url = self.bigip.icr_url + '/cm/device-group/~Common~' request_url += name payload = dict() payload['devices'] = list() response = self.bigip.icr_session.put( request_url, data=json.dumps(payload), timeout=const.CONNECTION_TIMEOUT) if response.status_code < 400: return True elif response.status_code == 404: return True else: Log.error('device-group', response.text) raise exceptions.ClusterQueryException(response.text)
def assure_tenant_cleanup(self, service, all_subnet_hints): """ Delete tenant partition. Called for every bigip only in replication mode, otherwise called once. """ for bigip in self.driver.get_config_bigips(): subnet_hints = all_subnet_hints[bigip.device_name] self._assure_bigip_tenant_cleanup(bigip, service, subnet_hints)
def _assure_bigip_tenant_cleanup(self, bigip, service, subnet_hints): """ if something was deleted check whether to do domain+folder teardown """ tenant_id = service['pool']['tenant_id'] if service['pool']['status'] == plugin_const.PENDING_DELETE or \ len(subnet_hints['check_for_delete_subnets']) > 0: existing_monitors = bigip.monitor.get_monitors(folder=tenant_id) existing_pools = bigip.pool.get_pools(folder=tenant_id) existing_vips = bigip.virtual_server.get_virtual_service_insertion( folder=tenant_id) if not (existing_monitors or existing_pools or existing_vips): if self.conf.f5_sync_mode == 'replication': self._remove_tenant_replication_mode(bigip, tenant_id) else: self._remove_tenant_autosync_mode(bigip, tenant_id)
def _remove_tenant_replication_mode(self, bigip, tenant_id): """ Remove tenant in replication sync-mode """ for domain_name in bigip.route.get_domain_names(folder=tenant_id): bigip.route.delete_domain(folder=tenant_id, name=domain_name) sudslog = std_logging.getLogger('suds.client') sudslog.setLevel(std_logging.FATAL) bigip.system.force_root_folder() sudslog.setLevel(std_logging.ERROR) try: bigip.system.delete_folder(folder=bigip.decorate_folder(tenant_id)) except f5ex.SystemDeleteException: bigip.system.purge_folder_contents( folder=bigip.decorate_folder(tenant_id)) bigip.system.delete_folder(folder=bigip.decorate_folder(tenant_id))
def _remove_tenant_autosync_mode(self, bigip, tenant_id): """ Remove tenant in autosync sync-mode """ # all domains must be gone before we attempt to delete # the folder or it won't delete due to not being empty for set_bigip in self.driver.get_all_bigips(): set_bigip.route.delete_domain(folder=tenant_id) sudslog = std_logging.getLogger('suds.client') sudslog.setLevel(std_logging.FATAL) set_bigip.system.force_root_folder() sudslog.setLevel(std_logging.ERROR) # we need to ensure that the following folder deletion # is clearly the last change that needs to be synced. self.driver.sync_if_clustered() greenthread.sleep(5) try: bigip.system.delete_folder(folder=bigip.decorate_folder(tenant_id)) except f5ex.SystemDeleteException: bigip.system.purge_folder_contents( folder=bigip.decorate_folder(tenant_id)) bigip.system.delete_folder(folder=bigip.decorate_folder(tenant_id)) # Need to make sure this folder delete syncs before # something else runs and changes the current folder to # the folder being deleted which will cause big problems. self.driver.sync_if_clustered()
def _assure_connector_and_tenant(self, service):
    """ Make sure bigiq connector and tenant exists """
    project_id = service['pool']['tenant_id']

    # The name of the OpenStack connectors will be based on the
    # OpenStack project ID
    connector_name = LBaaSBuilderBigiqIApp._connector_name(
        project_id)

    # We use a lock here to avoid creating multiple connectors that
    # have the same 'name' data member. This lock is required because
    # the plugin identifies a connector off of its 'name' data member
    # whereas BIG-IQ Cloud identifies a connector off of its
    # 'connectorId' data member (i.e. 'connectorId' is the primary and
    # natural key). If we don't lock here we have a race between when
    # we decide to make a connector for the first time and get the
    # response from BIG-IQ and map it vs. when we check if the
    # connector is created again.
    with self._connectors_lock:
        if not self._connectors_by_name.get(connector_name):
            LOG.info(_("Didn't see a connector with the name of '%s' on "
                       "the BIG-IQ. Creating a new connector and tenant."
                       % connector_name))

            connector = LBaaSBuilderBigiqIApp._cloud_connector(
                project_id,
                self.conf.openstack_keystone_uri,
                self.conf.openstack_admin_username,
                self.conf.openstack_admin_password)

            connector = self._bigiq.post_cloud_connector(
                bigiq_interface.BIGIQ.CC_TYPE_OPENSTACK, connector)

            LOG.info(_("Mapping a created connector with a 'connectorId' "
                       "of '%s' and a 'name' of '%s'"
                       % (connector['connectorId'], connector['name'])))

            self._connectors_by_name[connector['name']] = connector
            self._connectors_by_id[connector['connectorId']] = connector

            tenant = LBaaSBuilderBigiqIApp._tenant(
                project_id, connector['selfLink'])

            self._bigiq.post_tenant(tenant)

            LOG.debug("Sleeping until devices are discovered....")
            time.sleep(15)
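The connector bookkeeping above is a lock-guarded get-or-create keyed by name. Distilled into a standalone sketch (names are mine):

import threading

_cache = {}
_cache_lock = threading.Lock()

def get_or_create(name, factory):
    """Create-once semantics for objects whose natural key is assigned
    by the remote side but which we must look up locally by name."""
    with _cache_lock:
        obj = _cache.get(name)
        if obj is None:
            obj = factory()
            _cache[name] = obj
    return obj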
def _assure_managed_devices(self, service):
    """Ensures that any BIG-IP instances for the OpenStack project
    are managed

    OpenStack connectors periodically look for any BIG-IP instances
    in a project that they didn't previously know about. If a
    connector finds one it tries to manage the BIG-IP. The best that
    it can do is manage it as a cloud device that isn't fully
    discovered since its credentials weren't known.

    This method looks for those BIG-IPs that aren't fully managed
    yet and updates their credentials so that they become fully
    managed.

    :param dict service: A dictionary representing the OpenStack
        LBaaS service
    """
    project_id = service['pool']['tenant_id']

    connector = self._connectors_by_name[
        LBaaSBuilderBigiqIApp._connector_name(project_id)]

    # Get any BIG-IP instances associated with this OpenStack project
    # that we have tried to manage, whether they are truly managed or
    # not at this point.
    managed_devices = self._bigiq.get_related(
        LBaaSBuilderBigiqIApp._MANAGED_DEVICE_KIND,
        connector['selfLink'], True)
    LOG.debug("Got managed devices: %s" % str(managed_devices))

    undiscovered_devices = []

    # Find any devices that aren't truly managed. They will show up as
    # an undiscovered device and their autodiscover stat will say admin
    # credentials need to be provided to complete the discovery.
    for managed_device in managed_devices:
        # Skip any null JSON values (None in Python) or empty JSON
        # objects (empty dictionary in Python)
        if not managed_device:
            continue
        LOG.debug("process managed device: %s" % str(managed_device))
        if ('state' in managed_device and
                managed_device['state'] == 'UNDISCOVERED' and
                'selfLink' in managed_device and
                managed_device['selfLink']):
            # We convert the 'selfLink' of the device into a remote
            # URL for its stats, as we are eventually going to check
            # whether it wasn't discovered because it needs admin
            # credentials.
            managed_device['selfLink'] = (
                managed_device['selfLink'] + '/stats').replace(
                    'localhost', self._bigiq.hostname, 1)
            LOG.debug("found undiscovered device: %s"
                      % str(managed_device))
            undiscovered_devices.append(managed_device)

    npw = ('Please provide admin username and '
           'password to complete device discovery.')
    hsca = 'health.summary.cloud.autodiscover'

    for undiscovered_device in undiscovered_devices:
        get_result = self._bigiq.get(undiscovered_device['selfLink'])
        LOG.debug("bigiq.get(%s) returns %s" %
                  (undiscovered_device['selfLink'], str(get_result)))
        stats = get_result['entries']
        LOG.debug("stats: %s" % str(stats))
        if hsca in stats:
            if stats[hsca]['description'] == npw:
                LOG.debug("posting cloud device at %s with %s %s" %
                          (undiscovered_device['address'],
                           self.conf.bigip_management_username,
                           self.conf.bigip_management_password))
                self._bigiq.post_cloud_device(
                    undiscovered_device['address'],
                    self.conf.bigip_management_username,
                    self.conf.bigip_management_password)
            else:
                LOG.debug("non matching description: [%s] [%s]"
                          % (stats[hsca]['description'], npw))
def _assure_provider_template(self):
    """ Make sure template exists on bigiq """
    # We are interested in catching an exception here, as one being
    # thrown denotes that there is no provider template yet.
    try:
        self._bigiq.get_provider_template(
            LBaaSBuilderBigiqIApp._LBAAS_PROVIDER_TEMPLATE_NAME)
    except Exception:
        self._create_lbaas_provider_template()
def notify_vtep_added(self, network, vtep_ip_address): """ A client calls this when it has a local VTEP ip address that needs to be advertised into the fdb. """ raise NotImplementedError()
def notify_vtep_removed(self, network, vtep_ip_address): """ A client calls this when it has a local VTEP ip address that needs to be removed from the fdb. """ raise NotImplementedError()
def advertise_tunnel_ips(self, tunnel_ips): """ A client calls this periodically to advertise local VTEP ip addresses. """ raise NotImplementedError()
def delete(self, url):
    """Makes an HTTP DELETE request

    Makes an HTTP DELETE request to the argument provided to the
    'url' parameter using the HTTP session previously established
    when the instance of this BIGIQ type was created. Thus the URL
    is presumed to be a resource on the BIG-IQ.

    :param string url: The URL to perform an HTTP DELETE on
    """
    response = self.http_session.delete(url)
    response.raise_for_status()

    # no json to parse on delete response
    return
def post(self, url, body):
    """Makes an HTTP POST request

    Makes an HTTP POST request to the argument provided to the
    'url' parameter using the HTTP session previously established
    when the instance of this BIGIQ type was created. Thus the URL
    is presumed to be a resource on the BIG-IQ.

    The body posted is contained in the parameter 'body'. It will
    be serialized to JSON inside this method.

    :param string url: The URL to perform an HTTP POST on
    :param object body: An object that will be serialized to JSON
        for the body
    :return: The JSON response body
    """
    response = self.http_session.post(url, json.dumps(body))
    response.raise_for_status()

    return response.json()
def put(self, url, body):
    """Makes an HTTP PUT request

    Makes an HTTP PUT request to the argument provided to the
    'url' parameter using the HTTP session previously established
    when the instance of this BIGIQ type was created. Thus the URL
    is presumed to be a resource on the BIG-IQ.

    The body posted is contained in the parameter 'body'. It will
    be serialized to JSON inside this method.

    :param string url: The URL to perform an HTTP PUT on
    :param object body: An object that will be serialized to JSON
        for the body
    :return: The JSON response body
    """
    response = self.http_session.put(url, json.dumps(body))
    response.raise_for_status()

    return response.json()
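The delete/post/put helpers all lean on a pre-built http_session, whose construction isn't shown in this excerpt. A plausible sketch with requests (an assumption, not the class's actual __init__):

import requests

def make_bigiq_session(username, password, verify=False):
    # Assumption: basic auth plus a JSON content type, matching how
    # the helpers above serialize bodies with json.dumps().
    session = requests.Session()
    session.auth = (username, password)
    session.verify = verify
    session.headers.update({'Content-Type': 'application/json'})
    return session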
def build_bigiq_url(self, uri_path, query_component=None): """Builds a URL to a resource on the BIG-IQ The URL is that of a 'https' scheme. The URI path is presumed to be properly formed. The query component is presumed to be properly formed. :param string uri_path: The path of the URL :param string query_component: The query component of the URI. :return: URL """ url = BIGIQ.SCHEME_HTTPS + BIGIQ.SCHEME_SEPARATOR + \ self.hostname + uri_path if query_component: url += query_component return url
def build_remote_uri_path(*uri_segments):
    """Builds a URI path to a remote resource on a BIG-IQ from URI segments

    URI segments can include leading or trailing path separators.
    If the URI segment doesn't include a leading path separator one
    is added. If the URI segment does include a trailing path
    separator it is removed.

    URI segments in the list should be strings. The types of the
    objects provided in uri_segments aren't checked, so providing
    non-string objects may result in unexpected behavior with the
    possibility of an error occurring.

    The empty string will be returned if the list of URI segments
    is empty.

    The URI path returned will be prefixed with the 'mgmt' URI
    segment.

    :param list uri_segments: List of URI segments of object type string.
    :return: URI path
    """
    uri_path = ""

    if not uri_segments:
        return uri_path

    for uri_segment in uri_segments:
        # Skip the URI segment if it is empty
        if not uri_segment:
            continue

        # Add the URI segment with a leading '/' if it doesn't have one
        if uri_segment[0] == BIGIQ.PATH_SEPARATOR:
            uri_path += uri_segment
        else:
            uri_path += BIGIQ.PATH_SEPARATOR + uri_segment

        # Chop off the trailing '/' on the URI segment if it had one
        if uri_path[-1] == BIGIQ.PATH_SEPARATOR:
            uri_path = uri_path[:-1]

    start_path = BIGIQ.PATH_SEPARATOR + BIGIQ.MGMT_ENDPOINT_URI_SEGMENT
    if uri_path and not uri_path.startswith(start_path):
        uri_path = start_path + uri_path

    return uri_path
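Assuming build_remote_uri_path is exposed as a staticmethod on BIGIQ and MGMT_ENDPOINT_URI_SEGMENT is 'mgmt', a couple of illustrative calls:

BIGIQ.build_remote_uri_path('cm', 'cloud/', '/connectors')
# -> '/mgmt/cm/cloud/connectors'
BIGIQ.build_remote_uri_path()
# -> ''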
def build_query_component(**key_value_pairs): """Builds a query component to be used in a URL Takes a dictionary and from the KvPs in the dictionary builds a query string made out of the KvPs. :param dict key_value_pairs: The KvPs to turn into the query component :return: string that can be used as the query component in an URL """ if not key_value_pairs: return "" query_component = BIGIQ.QUERY_COMPONENT_STARTER for key, value in key_value_pairs.items(): # Skip the key if it is empty if not key: continue add_component = key + BIGIQ.QUERY_COMPONENT_KV_SEPARATOR + \ value + BIGIQ.QUERY_COMPONENT_KVP_SEPARATOR # Add the key value pair to the query string query_component += add_component # Chop off the trailing '&' on the query component query_component = query_component[:-1] # Terminate the query component with the '#' character query_component += BIGIQ.QUERY_COMPONENT_TERMINATOR return query_component
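Assuming the class constants are the conventional '?', '=', '&', and '#', a single-key call looks like this (several keys work too, but dict ordering decides their sequence):

BIGIQ.build_query_component(includeRelated='true')
# -> '?includeRelated=true#'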
def post_cloud_connector(self, connector_type, connector): """Creates a cloud connector of a specific type :param string connector_type: The type of the connector to create (e.g. 'openstack', 'ec2', etc.) :param dict connector: A dictionary representing the connector to be used in the POST body :return: Created connector serialized to JSON """ uri_path = BIGIQ.build_remote_uri_path( BIGIQ.NS_CM_URI_SEGMENT, BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT, BIGIQ.CLOUD_CONNECTORS_URI_SEGMENT, connector_type) url = self.build_bigiq_url(uri_path) LOG.debug("Posting Cloud Connector, URL: %s body: %s" % (url, connector)) return self.post(url, connector)
def post_cloud_device( self, ip_address, username, password, auto_update=True): """Adds a cloud device for management :param string ip_address: The address of the device :param string username: The username to use when authenticating the device :param string password: The password to use when authenticating the device :param boolean auto_update: Whether the device should be updated when managed (defaults to True) :return: The managed device serialized to JSON """ uri_path = BIGIQ.build_remote_uri_path( BIGIQ.NS_SHARED_URI_SEGMENT, BIGIQ.NS_RESOLVER_URI_SEGMENT, 'device-groups', 'cm-cloud-managed-devices', 'devices') url = self.build_bigiq_url(uri_path) body = {} body['address'] = ip_address body['userName'] = username body['password'] = password body['rootUser'] = 'root' body['rootPassword'] = 'default' body['automaticallyUpdateFramework'] = auto_update LOG.debug("Posting Cloud Device, URL: %s body: %s" % (url, body)) return self.post(url, body)
def put_tenant_service(self, tenant_name, service_name, service): """Updates a tenant service by full replacement :param string tenant_name: The name of the tenant to update a service for :param string service_name: The name of the service to update :param dict service: A dictionary representing the tenant service to be used in the PUT body :return: Updated tenant service serialized to JSON """ uri_path = BIGIQ.build_remote_uri_path( BIGIQ.NS_CM_URI_SEGMENT, BIGIQ.SUB_CM_NS_CLOUD_URI_SEGMENT, BIGIQ.CLOUD_TENANTS_URI_SEGMENT, tenant_name, BIGIQ.CLOUD_SERVICES_URI_SEGMENT, BIGIQ.CLOUD_IAPP_URI_SEGMENTS, service_name) url = self.build_bigiq_url(uri_path) return self.put(url, service)
def sync_state(self): """Synchronize device configuration from controller state.""" resync = False if hasattr(self, 'lbdriver'): if not self.lbdriver.backend_integrity(): return resync known_services = set() owned_services = set() for pool_id, service in self.cache.services.iteritems(): known_services.add(pool_id) if self.agent_host == service.agent_host: owned_services.add(pool_id) now = datetime.datetime.now() try: # Get pools from the environment which are bound to # this agent. active_pools = ( self.plugin_rpc.get_active_pools(host=self.agent_host) ) active_pool_ids = set( [pool['pool_id'] for pool in active_pools] ) LOG.debug("plugin produced the list of active pool ids: %s" % list(active_pool_ids)) LOG.debug("currently known pool ids before sync are: %s" % list(known_services)) # Validate each service we own, i.e. loadbalancers to which this # agent is bound, that does not exist in our service cache. for pool_id in active_pool_ids: if not self.cache.get_by_pool_id(pool_id): self.validate_service(pool_id) errored_pools = ( self.plugin_rpc.get_errored_pools(host=self.agent_host) ) errored_pool_ids = set( [pool['pool_id'] for pool in errored_pools] ) LOG.debug( "plugin produced the list of errored pool ids: %s" % list(errored_pool_ids)) LOG.debug("currently known pool ids before sync are: %s" % list(known_services)) # Validate each service we own, i.e. pools to which this # agent is bound, that does not exist in our service cache. for pool_id in errored_pool_ids: if not self.cache.get_by_pool_id(pool_id): self.validate_service(pool_id) # This produces a list of loadbalancers with pending tasks to # be performed. pending_pools = ( self.plugin_rpc.get_pending_pools(host=self.agent_host) ) pending_pool_ids = set( [pool['pool_id'] for pool in pending_pools] ) LOG.debug( "plugin produced the list of pending pool ids: %s" % list(pending_pool_ids)) for pool_id in pending_pool_ids: pool_pending = self.refresh_service(pool_id) if pool_pending: if pool_id not in self.pending_services: self.pending_services[pool_id] = now time_added = self.pending_services[pool_id] time_expired = ((now - time_added).seconds > self.conf.f5_pending_services_timeout) if time_expired: pool_pending = False self.service_timeout(pool_id) if not pool_pending: del self.pending_services[pool_id] # If there are services in the pending cache resync if self.pending_services: resync = True # Get a list of any cached service we now know after # refreshing services known_services = set() for (pool_id, service) in self.cache.services.iteritems(): if self.agent_host == service.agent_host: known_services.add(pool_id) LOG.debug("currently known pool ids after sync: %s" % list(known_services)) except Exception as e: LOG.error("Unable to sync state: %s" % e.message) resync = True return resync
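The pending-service handling in the middle of sync_state is its own small state machine: a pool's first pending sighting is timestamped, later sightings check the age, and expiry forces a timeout. Distilled into a sketch (names and the 300s default are mine, not the agent's):

import datetime

pending = {}

def track_pending(pool_id, still_pending, timeout_s=300):
    now = datetime.datetime.now()
    if still_pending:
        # First sighting records the timestamp; later ones reuse it.
        added = pending.setdefault(pool_id, now)
        if (now - added).seconds > timeout_s:
            still_pending = False  # expired; caller handles the timeout
    if not still_pending:
        pending.pop(pool_id, None)
    return still_pending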
def reload_pool(self, context, pool_id=None, host=None): """Handle RPC cast from plugin to reload a pool.""" if host and host == self.agent_host: if pool_id: self.refresh_service(pool_id)
def create_vip(self, context, vip, service): """Handle RPC cast from plugin to create_vip""" try: self.lbdriver.create_vip(vip, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("Exception: %s" % exc.message)
def update_vip(self, context, old_vip, vip, service): """Handle RPC cast from plugin to update_vip""" try: self.lbdriver.update_vip(old_vip, vip, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("Exception: %s" % exc.message)
def delete_vip(self, context, vip, service): """Handle RPC cast from plugin to delete_vip""" try: self.lbdriver.delete_vip(vip, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("Exception: %s" % exc.message)
def create_pool(self, context, pool, service): """Handle RPC cast from plugin to create_pool""" try: self.lbdriver.create_pool(pool, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("Exception: %s" % exc.message)
def update_pool(self, context, old_pool, pool, service): """Handle RPC cast from plugin to update_pool""" try: self.lbdriver.update_pool(old_pool, pool, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("Exception: %s" % exc.message)
def delete_pool(self, context, pool, service): """Handle RPC cast from plugin to delete_pool""" try: self.lbdriver.delete_pool(pool, service) self.cache.remove_by_pool_id(pool['id']) except NeutronException as exc: LOG.error("delete_pool: NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("delete_pool: Exception: %s" % exc.message)
def create_member(self, context, member, service): """Handle RPC cast from plugin to create_member""" try: self.lbdriver.create_member(member, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("create_member: NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("create_member: Exception: %s" % exc.message)
def update_member(self, context, old_member, member, service): """Handle RPC cast from plugin to update_member""" try: self.lbdriver.update_member(old_member, member, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("update_member: NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("update_member: Exception: %s" % exc.message)
def delete_member(self, context, member, service): """Handle RPC cast from plugin to delete_member""" try: self.lbdriver.delete_member(member, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("delete_member: NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("delete_member: Exception: %s" % exc.message)
def create_pool_health_monitor(self, context, health_monitor, pool, service): """Handle RPC cast from plugin to create_pool_health_monitor""" try: self.lbdriver.create_pool_health_monitor(health_monitor, pool, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error(_("create_pool_health_monitor: NeutronException: %s" % exc.msg)) except Exception as exc: LOG.error(_("create_pool_health_monitor: Exception: %s" % exc.message))
def update_health_monitor(self, context, old_health_monitor, health_monitor, pool, service): """Handle RPC cast from plugin to update_health_monitor""" try: self.lbdriver.update_health_monitor(old_health_monitor, health_monitor, pool, service) self.cache.put(service, self.agent_host) except NeutronException as exc: LOG.error("update_health_monitor: NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("update_health_monitor: Exception: %s" % exc.message)
def delete_pool_health_monitor(self, context, health_monitor, pool, service): """Handle RPC cast from plugin to delete_pool_health_monitor""" try: self.lbdriver.delete_pool_health_monitor(health_monitor, pool, service) except NeutronException as exc: LOG.error(_("delete_pool_health_monitor: NeutronException: %s" % exc.msg)) except Exception as exc: LOG.error(_("delete_pool_health_monitor: Exception: %s" % exc.message))
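The RPC handlers above all share the same try/except shape; a hedged refactor sketch of how that pattern could be factored out (the helper name is illustrative, not part of the agent):

def _call_driver(self, label, driver_method, service, *args):
    """Illustrative only: run a driver call with the common error handling."""
    try:
        driver_method(*(args + (service,)))
        self.cache.put(service, self.agent_host)
    except NeutronException as exc:
        LOG.error("%s: NeutronException: %s" % (label, exc.msg))
    except Exception as exc:
        LOG.error("%s: Exception: %s" % (label, exc.message))

# e.g. create_member could then reduce to:
#     self._call_driver('create_member', self.lbdriver.create_member,
#                       service, member)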
def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" if payload['admin_state_up'] != self.admin_state_up: self.admin_state_up = payload['admin_state_up'] if self.admin_state_up: self.needs_resync = True else: for pool_id in self.cache.get_pool_ids(): self.destroy_service(pool_id) LOG.info(_("agent_updated by server side %s!"), payload)
def tunnel_update(self, context, **kwargs): """Handle RPC cast from core to update tunnel definitions""" try: LOG.debug(_('received tunnel_update: %s' % kwargs)) self.lbdriver.tunnel_update(**kwargs) except NeutronException as exc: LOG.error("tunnel_update: NeutronException: %s" % exc.msg) except Exception as exc: LOG.error("tunnel_update: Exception: %s" % exc.message)
def update_fdb_entries(self, context, fdb_entries, host=None):
    """Handle RPC cast from core to update tunnel definitions"""
    try:
        LOG.debug(_('received update_fdb_entries: %s host: %s'
                    % (fdb_entries, host)))
        self.lbdriver.fdb_update(fdb_entries)
    except NeutronException as exc:
        LOG.error("update_fdb_entries: NeutronException: %s" % exc.msg)
    except Exception as exc:
        LOG.error("update_fdb_entries: Exception: %s" % exc.message)
def _get_snat_name(self, subnet, tenant_id):
    """ Get the snat name based on HA type """
    if self.driver.conf.f5_ha_type == 'standalone':
        return 'snat-traffic-group-local-only-' + subnet['id']
    elif self.driver.conf.f5_ha_type == 'pair':
        return 'snat-traffic-group-1-' + subnet['id']
    elif self.driver.conf.f5_ha_type == 'scalen':
        traffic_group = self.driver.tenant_to_traffic_group(tenant_id)
        base_traffic_group = os.path.basename(traffic_group)
        return 'snat-' + base_traffic_group + '-' + subnet['id']
    LOG.error(_('Invalid f5_ha_type: %s' % self.driver.conf.f5_ha_type))
    return ''
def _get_snat_traffic_group(self, tenant_id):
    """ Get the snat traffic group based on HA type """
    if self.driver.conf.f5_ha_type == 'standalone':
        return 'traffic-group-local-only'
    elif self.driver.conf.f5_ha_type == 'pair':
        return 'traffic-group-1'
    elif self.driver.conf.f5_ha_type == 'scalen':
        traffic_group = self.driver.tenant_to_traffic_group(tenant_id)
        return os.path.basename(traffic_group)
    LOG.error(_('Invalid f5_ha_type: %s' % self.driver.conf.f5_ha_type))
    return ''
def assure_bigip_snats(self, bigip, subnetinfo, snat_addrs, tenant_id):
    """ Ensure snat addresses are configured on a bigip.
        Called for every bigip only in replication mode,
        otherwise called once and synced. """
    network = subnetinfo['network']

    snat_info = {}
    if self.bigip_l2_manager.is_common_network(network):
        snat_info['network_folder'] = 'Common'
    else:
        snat_info['network_folder'] = tenant_id
    snat_info['pool_name'] = tenant_id
    snat_info['pool_folder'] = tenant_id
    snat_info['addrs'] = snat_addrs

    self._assure_bigip_snats(bigip, subnetinfo, snat_info, tenant_id)
def _assure_bigip_snats(self, bigip, subnetinfo, snat_info, tenant_id): """ Configure the ip addresses for snat """ network = subnetinfo['network'] subnet = subnetinfo['subnet'] if tenant_id not in bigip.assured_tenant_snat_subnets: bigip.assured_tenant_snat_subnets[tenant_id] = [] if subnet['id'] in bigip.assured_tenant_snat_subnets[tenant_id]: return snat_name = self._get_snat_name(subnet, tenant_id) for i in range(self.driver.conf.f5_snat_addresses_per_subnet): ip_address = snat_info['addrs'][i] + \ '%' + str(network['route_domain_id']) index_snat_name = snat_name + "_" + str(i) if self.bigip_l2_manager.is_common_network(network): index_snat_name = '/Common/' + index_snat_name snat_traffic_group = self._get_snat_traffic_group(tenant_id) bigip.snat.create(name=index_snat_name, ip_address=ip_address, traffic_group=snat_traffic_group, snat_pool_name=snat_info['pool_name'], folder=snat_info['network_folder'], snat_pool_folder=snat_info['pool_folder']) if self.l3_binding: self.l3_binding.bind_address(subnet_id=subnet['id'], ip_address=ip_address) bigip.assured_tenant_snat_subnets[tenant_id].append(subnet['id'])
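The '%' suffix above is BIG-IP route-domain notation: an address is scoped to a route domain by appending '%<route_domain_id>'. For example:

snat_addr = '10.1.0.5'
route_domain_id = 2
ip_address = snat_addr + '%' + str(route_domain_id)  # -> '10.1.0.5%2'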
def delete_bigip_snats(self, bigip, subnetinfo, tenant_id):
    """ Assure shared snat configuration (which syncs) is deleted.
        Called for every bigip only in replication mode,
        otherwise called once. """
    if not subnetinfo['network']:
        LOG.error(_('Attempted to delete selfip and snats'
                    ' for missing network ... skipping.'))
        # Match the (deleted_names, in_use_subnets) tuple returned
        # by _delete_bigip_snats below.
        return set(), set()

    return self._delete_bigip_snats(bigip, subnetinfo, tenant_id)
def _remove_assured_tenant_snat_subnet(self, bigip, tenant_id, subnet):
    """ Remove ref for the subnet for this tenant """
    if tenant_id in bigip.assured_tenant_snat_subnets:
        tenant_snat_subnets = \
            bigip.assured_tenant_snat_subnets[tenant_id]
        if tenant_snat_subnets and subnet['id'] in tenant_snat_subnets:
            LOG.debug(_(
                'Remove subnet id %s from '
                'bigip.assured_tenant_snat_subnets for tenant %s' %
                (subnet['id'], tenant_id)))
            tenant_snat_subnets.remove(subnet['id'])
        else:
            LOG.debug(_(
                'Subnet id %s does not exist in '
                'bigip.assured_tenant_snat_subnets for tenant %s' %
                (subnet['id'], tenant_id)))
    else:
        LOG.debug(_(
            'Tenant id %s does not exist in '
            'bigip.assured_tenant_snat_subnets' % tenant_id))
def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id): """ Assure snats deleted in standalone mode """ network = subnetinfo['network'] subnet = subnetinfo['subnet'] deleted_names = set() in_use_subnets = set() # Delete SNATs on traffic-group-local-only snat_name = self._get_snat_name(subnet, tenant_id) for i in range(self.driver.conf.f5_snat_addresses_per_subnet): index_snat_name = snat_name + "_" + str(i) if self.bigip_l2_manager.is_common_network(network): tmos_snat_name = '/Common/' + index_snat_name else: tmos_snat_name = index_snat_name if self.l3_binding: ip_address = bigip.snat.get_snat_ipaddress( folder=tenant_id, snataddress_name=index_snat_name) self.l3_binding.unbind_address(subnet_id=subnet['id'], ip_address=ip_address) # Remove translation address from tenant snat pool bigip.snat.remove_from_pool( name=tenant_id, member_name=tmos_snat_name, folder=tenant_id) # Delete snat pool if empty (no members) LOG.debug(_('Check if snat pool is empty')) if not len(bigip.snat.get_snatpool_members(name=tenant_id, folder=tenant_id)): LOG.debug(_('Snat pool is empty - delete snatpool')) bigip.snat.delete_snatpool(name=tenant_id, folder=tenant_id) # Check if subnet in use by any tenants/snatpools. If in use, # add subnet to hints list of subnets in use. self._remove_assured_tenant_snat_subnet(bigip, tenant_id, subnet) LOG.debug(_( 'Check cache for subnet %s in use by other tenant' % \ subnet['id'])) in_use_count = 0 for loop_tenant_id in bigip.assured_tenant_snat_subnets: tenant_snat_subnets = \ bigip.assured_tenant_snat_subnets[loop_tenant_id] if subnet['id'] in tenant_snat_subnets: LOG.debug(_( 'Subnet %s in use (tenant %s)' % \ (subnet['id'], loop_tenant_id))) in_use_count += 1 if in_use_count: in_use_subnets.add(subnet['id']) else: LOG.debug(_('Check subnet in use by any tenant')) if bigip.snat.get_snatpool_member_use_count(subnet['id']): LOG.debug(_('Subnet in use - do not delete')) in_use_subnets.add(subnet['id']) else: LOG.debug(_('Subnet not in use - delete')) # Check if trans addr in use by any snatpool. If not in use, # okay to delete associated neutron port. LOG.debug(_('Check trans addr %s in use.' % tmos_snat_name)) if not bigip.snat.get_snatpool_member_use_count(tmos_snat_name): LOG.debug(_('Trans addr not in use - delete')) deleted_names.add(index_snat_name) else: LOG.debug(_('Trans addr in use - do not delete')) return deleted_names, in_use_subnets
def _check_monitor_delete(self, service):
    """If the pool is being deleted, then delete related objects"""
    if service['pool']['status'] == plugin_const.PENDING_DELETE:
        # Everything needs to go with the pool, so overwrite
        # service state to appropriately remove all elements
        service['vip']['status'] = plugin_const.PENDING_DELETE
        for member in service['members']:
            member['status'] = plugin_const.PENDING_DELETE
        for monitor in service['pool']['health_monitors_status']:
            monitor['status'] = plugin_const.PENDING_DELETE
def _get_all_subnets(service):
    """ Examine service and return active subnets keyed by subnet id """
    subnets = dict()
    if 'id' in service['vip']:
        vip = service['vip']
        if 'network' in vip and vip['network']:
            network = service['vip']['network']
            subnet = service['vip']['subnet']
            subnets[subnet['id']] = {'network': network,
                                     'subnet': subnet,
                                     'is_for_member': False}

    for member in service['members']:
        if 'network' in member and member['network']:
            network = member['network']
            subnet = member['subnet']
            subnets[subnet['id']] = {'network': network,
                                     'subnet': subnet,
                                     'is_for_member': True}

    return subnets
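A small, self-contained illustration of the result shape, using a made-up service dict:

service = {
    'vip': {'id': 'vip-1',
            'network': {'id': 'net-1'},
            'subnet': {'id': 'subnet-1'}},
    'members': [{'network': {'id': 'net-2'},
                 'subnet': {'id': 'subnet-2'}}],
}
subnets = _get_all_subnets(service)
# subnets['subnet-1']['is_for_member'] -> False  (VIP subnet)
# subnets['subnet-2']['is_for_member'] -> True   (member subnet)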
def fill_in_pool_info(self, tenant_service, os_service): """ Fill in pool info on tenant service """ # { # ..., # "vars": # [ # ..., # { # "name":"pool__lb_method", # "value":"round-robin" # }, # { # "name":"pool__monitor", # "value":"http" # }, # ... # ], # ... # } # The 'vars' key and the list for its value should have already been # created on 'tenant_service' os_pool = os_service.get('pool') # This isn't required per the f5.lbaas iApp template pool_lb_method_var = LBaaSBuilderIApp._pool_lb_method_var(os_pool) if pool_lb_method_var: tenant_service[self.varkey].append(pool_lb_method_var) # This isn't required per the f5.lbaas iApp template pool_monitor_var = \ LBaaSBuilderIApp._pool_monitor_var(os_service) if pool_monitor_var: tenant_service[self.varkey].append(pool_monitor_var)
def fill_in_vip_info(self, tenant_service, os_service,
                     bigiq_workaround=False):
    """ Fill in vip info on tenant service """
    # {
    #     ...,
    #     "vars":
    #     [
    #         ...,
    #         {
    #             "name":"vip__addr",
    #             "value":"0.0.0.0"
    #         },
    #         {
    #             "name":"vip__persist",
    #             "value":"http-cookie"
    #         },
    #         {
    #             "name":"vip__cookie",
    #             "value":"jsessionid"
    #         },
    #         {
    #             "name":"vip__port",
    #             "value":"80"
    #         },
    #         {
    #             "name":"vip__protocol",
    #             "value":"http"
    #         },
    #         {
    #             "name":"vip__state",
    #             "value":"enabled"
    #         },
    #         ...
    #     ],
    #     ...
    # }

    # The 'vars' key and the list for its value should have already
    # been created on 'tenant_service'
    have_vip = ('vip' in os_service and
                'id' in os_service['vip'] and
                'address' in os_service['vip'] and
                os_service['vip']['address'] and
                os_service['vip']['status'] != plugin_const.PENDING_DELETE)
    if not have_vip:
        vip_state_var = get_tenant_service_var('vip__state', 'delete')
        tenant_service[self.varkey].append(vip_state_var)
        return

    os_vip = os_service.get('vip')

    # This is required per the f5.lbaas iApp template
    vip_addr_var = self._vip_addr_var(os_vip)
    tenant_service[self.varkey].append(vip_addr_var)

    vip_mask_var = self._vip_mask_var(os_vip)
    tenant_service[self.varkey].append(vip_mask_var)

    # This is a workaround where we add an additional var named
    # 'pool__addr' to the tenant service we are POSTing/PUTting.
    # This is because the IAppServiceWorker.java on the BIG-IP queries
    # for iApp service info via the IAppServiceMcpHelper.java. The
    # iApp service helper has the method named 'appScalarVarsToPojo'
    # where it looks at all of the app vars and tries to determine which
    # ones correspond to the VIP address and port. If it finds them it
    # then updates the server tier references. Specifically the iApp
    # service helper is looking for the vars named 'pool__addr',
    # 'basic__addr', 'pool__port', and 'basic__port'. The f5.lbaas
    # template uses the vars 'vip__addr' and 'vip__port', and as a result
    # the iApp service worker doesn't get a list of updated server tiers.
    # Consequently, when the VirtualServerStatsAggregationWorker.java
    # queries for the iApp service info (via views and the cache workers)
    # it can't correlate any stats to any virtual servers on the
    # BIG-IQ. Thus there are no virtual server stats.
    # We also aren't getting app stats, which we believe is a result of us
    # not getting virtual server stats. In adding the 'pool__addr' var
    # we hope that it gets stored in MCP, the iApp server helper sees it
    # and can correctly update the server tier info which will hopefully
    # give us stats. We don't change the 'vip__addr' var name to
    # 'pool__addr' as we want to leave the presentation
    # and implementation of the f5.lbaas iApp the same.
    if bigiq_workaround:
        tenant_service[self.varkey].append(
            get_tenant_service_var('pool__addr', vip_addr_var['value']))

    vip_persist_var = self._vip_persist_var(os_vip)
    # The f5.lbaas iApp template doesn't require this variable to be
    # filled in for us to deploy it. If the method returns None
    # we skip adding it to the template we will deploy.
    if vip_persist_var:
        tenant_service[self.varkey].append(vip_persist_var)

    vip_cookie_var = self._vip_cookie_var(os_vip)
    # The f5.lbaas iApp template doesn't require this variable to be
    # filled in for us to deploy it. If the method returns None
    # we skip adding it to the template we will deploy.
    if vip_cookie_var:
        tenant_service[self.varkey].append(vip_cookie_var)

    vip_port_var = self._vip_port_var(os_vip)
    # The f5.lbaas iApp template doesn't require this variable to be
    # filled in for us to deploy it. If the method returns None
    # we skip adding it to the template we will deploy.
    if vip_port_var:
        tenant_service[self.varkey].append(vip_port_var)

        # The 'pool__port' var has the same story as the 'pool__addr'
        # var from above.
        tenant_service[self.varkey].append(
            get_tenant_service_var('pool__port', vip_port_var['value']))

    vip_protocol_var = self._vip_protocol_var(os_vip)
    # The f5.lbaas iApp template doesn't require this variable to be
    # filled in for us to deploy it. If the method returns None
    # we skip adding it to the template we will deploy.
    if vip_protocol_var:
        tenant_service[self.varkey].append(vip_protocol_var)

    vip_state_var = self._vs_state_var(os_vip)
    # The f5.lbaas iApp template doesn't require this variable to be
    # filled in for us to deploy it. If the method returns None
    # we skip adding it to the template we will deploy.
    if vip_state_var:
        tenant_service[self.varkey].append(vip_state_var)
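get_tenant_service_var is used throughout but defined elsewhere; judging by its call sites, it plausibly builds the {"name", "value"} dicts that the iApp 'vars' list expects (a sketch, not the actual helper):

def get_tenant_service_var(var_name, var_value):
    """Illustrative only: build one entry for the iApp 'vars' list."""
    return {'name': var_name, 'value': var_value}

# get_tenant_service_var('vip__state', 'delete')
# -> {'name': 'vip__state', 'value': 'delete'}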
def add_fdb_entries(self, fdb_entries=None): """ Add fdb entries for a tunnel """ for tunnel_name in fdb_entries: folder = fdb_entries[tunnel_name]['folder'] if folder != 'Common': folder = prefixed(folder) request_url = self.bigip.icr_url + '/net/fdb/tunnel/' request_url += '~' + folder + '~' + self.OBJ_PREFIX + \ tunnel_name + '?ver=11.5.0' existing_records = self.get_fdb_entry(tunnel_name=tunnel_name, mac=None, folder=folder) new_records = [] new_mac_addresses = [] new_arp_addresses = {} tunnel_records = fdb_entries[tunnel_name]['records'] for mac in tunnel_records: fdb_entry = dict() fdb_entry['name'] = mac fdb_entry['endpoint'] = tunnel_records[mac]['endpoint'] new_records.append(fdb_entry) new_mac_addresses.append(mac) if tunnel_records[mac]['ip_address']: new_arp_addresses[mac] = tunnel_records[mac]['ip_address'] for record in existing_records: if not record['name'] in new_mac_addresses: new_records.append(record) else: # This fdb entry exists and is not being updated. # So, do not update the ARP record either. if record['name'] in new_arp_addresses: del new_arp_addresses[record['name']] payload = dict() payload['records'] = new_records response = self.bigip.icr_session.patch( request_url, data=json.dumps(payload), timeout=const.CONNECTION_TIMEOUT) if response.status_code < 400: if const.FDB_POPULATE_STATIC_ARP: for mac in new_arp_addresses: try: self.bigip.arp.create( ip_address=new_arp_addresses[mac], mac_address=mac, folder=folder) except Exception as exc: Log.error('L2GRE', 'could not create static arp: %s' % exc.message) return True return False
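The fdb_entries structure this method consumes, as implied by the lookups above (all values are illustrative):

fdb_entries = {
    'tunnel-1': {
        'folder': 'tenant-folder',
        'records': {
            'fa:16:3e:00:00:01': {
                'endpoint': '10.2.0.7',       # VTEP address for the MAC
                'ip_address': '192.168.1.5',  # neutron IP; may be empty
            },
        },
    },
}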
def delete_fdb_entry(self,
                     tunnel_name=None,
                     mac_address=None,
                     arp_ip_address=None,
                     folder='Common'):
    """ Delete fdb entry for a tunnel """
    folder = str(folder).replace('/', '')
    if const.FDB_POPULATE_STATIC_ARP:
        if arp_ip_address:
            self.bigip.arp.delete(ip_address=arp_ip_address,
                                  folder=folder)
    request_url = self.bigip.icr_url + '/net/fdb/tunnel/'
    request_url += '~' + folder + '~' + tunnel_name + '?ver=11.5.0'
    records = self.get_fdb_entry(tunnel_name=tunnel_name,
                                 mac=None,
                                 folder=folder)
    if not records:
        return False
    original_len = len(records)
    records = [record for record in records
               if record.get('name') != mac_address]
    if original_len != len(records):
        if len(records) == 0:
            records = None
        payload = dict()
        payload['records'] = records
        response = self.bigip.icr_session.patch(
            request_url, data=json.dumps(payload),
            timeout=const.CONNECTION_TIMEOUT)
        if response.status_code < 400 or response.status_code == 404:
            return True
        Log.error('L2GRE', response.text)
        raise exceptions.L2GRETunnelUpdateException(response.text)
    return False
def delete_fdb_entries(self, tunnel_name=None, fdb_entries=None):
    """ Delete fdb entries for a tunnel """
    for tunnel_name in fdb_entries:
        folder = fdb_entries[tunnel_name]['folder']
        if folder != 'Common':
            folder = prefixed(folder)
        request_url = self.bigip.icr_url + '/net/fdb/tunnel/'
        request_url += '~' + folder + '~' + tunnel_name + '?ver=11.5.0'
        existing_records = self.get_fdb_entry(tunnel_name=tunnel_name,
                                              mac=None,
                                              folder=folder)
        arps_to_delete = {}
        new_records = []

        tunnel_records = fdb_entries[tunnel_name]['records']
        for record in existing_records:
            for mac in tunnel_records:
                if record['name'] == mac:
                    # This record is being deleted; remember its ARP
                    # entry (if any) so it can be removed as well.
                    # Note: look the address up in the record dict,
                    # not on the MAC string itself.
                    if tunnel_records[mac]['ip_address']:
                        arps_to_delete[mac] = \
                            tunnel_records[mac]['ip_address']
                    break
            else:
                new_records.append(record)

        if len(new_records) == 0:
            new_records = None
        payload = dict()
        payload['records'] = new_records
        response = self.bigip.icr_session.patch(
            request_url, data=json.dumps(payload),
            timeout=const.CONNECTION_TIMEOUT)
        if response.status_code < 400:
            if const.FDB_POPULATE_STATIC_ARP:
                for mac in arps_to_delete:
                    self.bigip.arp.delete(ip_address=arps_to_delete[mac],
                                          folder='Common')
            return True
        return False
def delete_all_fdb_entries(self, tunnel_name=None, folder='Common'):
    """ Delete all fdb entries for a tunnel """
    folder = str(folder).replace('/', '')
    request_url = self.bigip.icr_url + '/net/fdb/tunnel/'
    request_url += '~' + folder + '~' + tunnel_name + '?ver=11.5.0'
    response = self.bigip.icr_session.patch(
        request_url, data=json.dumps({'records': None}),
        timeout=const.CONNECTION_TIMEOUT)
    if response.status_code < 400 or response.status_code == 404:
        return True
    Log.error('L2GRE', response.text)
    raise exceptions.L2GRETunnelUpdateException(response.text)
def prefixed(name): """ Put object prefix in front of name """ if not name.startswith(OBJ_PREFIX): name = OBJ_PREFIX + name return name
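Usage is straightforward; prefixed is idempotent because it checks for the prefix first. The 'uuid_' value below is an assumption, since OBJ_PREFIX is defined elsewhere:

# assuming OBJ_PREFIX = 'uuid_':
#   prefixed('tenant1')      -> 'uuid_tenant1'
#   prefixed('uuid_tenant1') -> 'uuid_tenant1'  (unchanged)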
def icontrol_folder(method):
    """ Returns the iControl folder + object name if
        a kwarg name is 'name' or else ends in '_name'.

        The folder and the name will be prefixed with the global
        prefix OBJ_PREFIX. If preserve_vlan_name=True is an argument,
        then the 'vlan_name' argument will not be prefixed but the
        other matching arguments will.

        It also sets the iControl active folder to the folder kwarg,
        assuring get_list returns just the appropriate objects for the
        specific administrative partition. It does this for kwargs
        named 'name', ending in '_name', or named 'named_address'.

        If the value in the name already includes '/Common/' the
        decoration honors that full path.
    """
    def wrapper(*args, **kwargs):
        """ Necessary wrapper """
        instance = args[0]
        preserve_vlan_name = False
        if 'preserve_vlan_name' in kwargs:
            preserve_vlan_name = kwargs['preserve_vlan_name']
        if 'folder' in kwargs and kwargs['folder']:
            if kwargs['folder'].find('~') > -1:
                kwargs['folder'] = kwargs['folder'].replace('~', '/')
            kwargs['folder'] = os.path.basename(kwargs['folder'])
            if not kwargs['folder'] == 'Common':
                kwargs['folder'] = prefixed(kwargs['folder'])
        if 'name' in kwargs and kwargs['name']:
            if isinstance(kwargs['name'], basestring):
                if kwargs['name'].find('~') > -1:
                    kwargs['name'] = kwargs['name'].replace('~', '/')
                if kwargs['name'].startswith('/Common/'):
                    kwargs['name'] = os.path.basename(kwargs['name'])
                    kwargs['name'] = prefixed(kwargs['name'])
                    kwargs['name'] = instance.bigip.set_folder(
                        kwargs['name'], 'Common')
                else:
                    kwargs['name'] = os.path.basename(kwargs['name'])
                    kwargs['name'] = prefixed(kwargs['name'])
                    kwargs['name'] = instance.bigip.set_folder(
                        kwargs['name'], kwargs['folder'])
        if 'named_address' in kwargs and kwargs['named_address']:
            # check the named_address value itself, not kwargs['name']
            if isinstance(kwargs['named_address'], basestring):
                if kwargs['named_address'].find('~') > -1:
                    kwargs['named_address'] = \
                        kwargs['named_address'].replace('~', '/')
                if kwargs['named_address'].startswith('/Common/'):
                    kwargs['named_address'] = \
                        os.path.basename(kwargs['named_address'])
                    kwargs['named_address'] = \
                        instance.bigip.set_folder(kwargs['named_address'],
                                                  'Common')
                else:
                    kwargs['named_address'] = \
                        os.path.basename(kwargs['named_address'])
                    kwargs['named_address'] = \
                        instance.bigip.set_folder(kwargs['named_address'],
                                                  kwargs['folder'])
        for name in kwargs:
            if name.find('_folder') > 0 and kwargs[name]:
                if kwargs[name].find('~') > -1:
                    kwargs[name] = kwargs[name].replace('~', '/')
                kwargs[name] = os.path.basename(kwargs[name])
                if not kwargs[name] == 'Common':
                    kwargs[name] = prefixed(kwargs[name])
            if name.find('_name') > 0 and kwargs[name]:
                # check the current kwarg's value, not kwargs['name']
                if isinstance(kwargs[name], basestring):
                    if kwargs[name].find('~') > -1:
                        kwargs[name] = kwargs[name].replace('~', '/')
                    if kwargs[name].startswith('/Common/'):
                        kwargs[name] = os.path.basename(kwargs[name])
                        if name != 'vlan_name' or not preserve_vlan_name:
                            kwargs[name] = prefixed(kwargs[name])
                        kwargs[name] = instance.bigip.set_folder(
                            kwargs[name], 'Common')
                    else:
                        name_prefix = name[0:name.index('_name')]
                        specific_folder_name = name_prefix + "_folder"
                        folder = kwargs['folder']
                        if specific_folder_name in kwargs:
                            folder = kwargs[specific_folder_name]
                        kwargs[name] = os.path.basename(kwargs[name])
                        if name != 'vlan_name' or not preserve_vlan_name:
                            kwargs[name] = prefixed(kwargs[name])
                        kwargs[name] = instance.bigip.set_folder(
                            kwargs[name], folder)
        instance.bigip.set_folder(None, kwargs['folder'])
        return method(*args, **kwargs)
    return wrapper
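A hedged usage sketch of the decorator (the class and method here are illustrative, not from the driver): every matching kwarg is normalized before the wrapped method runs.

class VirtualServerInterface(object):
    """Illustrative only."""
    def __init__(self, bigip):
        self.bigip = bigip

    @icontrol_folder
    def delete(self, name=None, folder='Common'):
        # by the time this body runs, 'name' is prefixed and
        # folder-qualified, and the active folder has been set
        pass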
def icontrol_rest_folder(method): """ Returns iControl REST folder + object name if a kwarg name is 'name' or else ends in '_name'. The folder and the name will be prefixed with the global prefix OBJ_PREFIX. """ def wrapper(*args, **kwargs): """ Necessary wrapper """ preserve_vlan_name = False if 'preserve_vlan_name' in kwargs: preserve_vlan_name = kwargs['preserve_vlan_name'] # Here we make sure the name or folder is not REST formatted, # which uses '~' instead of '/'. We change them back to '/'. # We normalize the object names to their base name (with no # / in the name at all) and then use a common prefix. if 'folder' in kwargs and kwargs['folder']: if kwargs['folder'] != '/' and kwargs['folder'].find('Common') < 0: temp = kwargs['folder'].replace('~', '/') kwargs['folder'] = prefixed(os.path.basename(temp)) if 'name' in kwargs and kwargs['name']: if isinstance(kwargs['name'], basestring): temp = kwargs['name'].replace('~', '/') kwargs['name'] = prefixed(os.path.basename(temp)) else: LOG.warn('attempting to normalize non basestring name. ' 'Argument: val: ' + str(kwargs['name'])) for name in kwargs: if name.find('_folder') > 0 and kwargs[name]: kwargs[name] = kwargs[name].replace('~', '/') kwargs[name] = os.path.basename(kwargs[name]) if not kwargs[name] == 'Common': kwargs[name] = prefixed(kwargs[name]) if name.find('_name') > 0 and kwargs[name]: if isinstance(kwargs[name], basestring): kwargs[name] = kwargs[name].replace('~', '/') kwargs[name] = os.path.basename(kwargs[name]) if name != 'vlan_name' or not preserve_vlan_name: kwargs[name] = prefixed(kwargs[name]) else: LOG.warn('attempting to normalize non basestring name. ' ' Argument: name: ' + str(name) + ' val:' + str(kwargs[name])) return method(*args, **kwargs) return wrapper
Python
def decorate_name(name=None, folder='Common', use_prefix=True):
    """ Add "namespace" prefix to names """
    folder = os.path.basename(folder)
    if not folder == 'Common':
        folder = prefixed(folder)
    if name.startswith('/Common/'):
        name = os.path.basename(name)
        if use_prefix:
            name = prefixed(name)
        name = '/Common/' + name
    else:
        name = os.path.basename(name)
        if use_prefix:
            name = prefixed(name)
        name = '/' + folder + '/' + name
    return name
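For illustration, a few expected results, assuming prefixed() prepends an OBJ_PREFIX value of 'uuid_' (the real prefix is defined elsewhere in this module):

# Assuming prefixed('x') -> 'uuid_x' (hypothetical OBJ_PREFIX value):
#
#   decorate_name('my_pool', folder='tenant1')
#       -> '/uuid_tenant1/uuid_my_pool'
#   decorate_name('/Common/my_pool')
#       -> '/Common/uuid_my_pool'          # '/Common/' path is honored
#   decorate_name('my_pool', folder='tenant1', use_prefix=False)
#       -> '/uuid_tenant1/my_pool'         # folder is still prefixed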
Python
def log(method):
    """Decorator helping to log method calls."""
    def wrapper(*args, **kwargs):
        """ Necessary wrapper """
        instance = args[0]
        LOG.debug('%s::%s called with args: %s kwargs: %s',
                  instance.__class__.__name__,
                  method.__name__,
                  args[1:],
                  kwargs)
        return method(*args, **kwargs)
    return wrapper
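A short sketch of the decorator in use; the class and method here are hypothetical. Note the wrapper assumes the first positional argument is the instance, so it only suits instance methods:

class MemberManager(object):  # hypothetical class for illustration
    @log
    def delete(self, name=None, folder='Common'):
        pass

# MemberManager().delete(name='m1', folder='t1') logs at DEBUG level:
#   MemberManager::delete called with args: () kwargs:
#   {'name': 'm1', 'folder': 't1'}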
Python
def serialized(method_name):
    """Outer wrapper in order to specify the method name"""
    def real_serialized(method):
        """Decorator to serialize calls to configure via iControl"""
        def wrapper(*args, **kwargs):
            """ Necessary wrapper """
            # args[0] must be an instance of iControlDriver
            service_queue = args[0].service_queue
            my_request_id = uuid.uuid4()

            service = None
            if len(args) > 0:
                last_arg = args[-1]
                if isinstance(last_arg, dict) and ('pool' in last_arg):
                    service = last_arg
            if 'service' in kwargs:
                service = kwargs['service']

            # Consolidate create_member requests for the same pool.
            #
            # NOTE: The following block of code alters the state of
            # a queue that other greenthreads are waiting behind.
            # This code assumes it will not be preempted by another
            # greenthread while running. It does not do I/O or call any
            # other monkey-patched code which might cause a context switch.
            # To avoid race conditions, DO NOT add logging to this code
            # block.
            # num_requests = len(service_queue)
            # queue optimization
            # if num_requests > 1 and method_name == 'create_member':
            #     cur_pool_id = service['pool']['id']
            #     cur_index = num_requests - 1
            #     # do not attempt to replace the first entry (index 0)
            #     # because it may already be in process.
            #     while cur_index > 0:
            #         (check_request, check_method, check_service) = \
            #             service_queue[cur_index]
            #         if check_service['pool']['id'] != cur_pool_id:
            #             cur_index -= 1
            #             continue
            #         if check_method != 'create_member':
            #             break
            #         # move this request up in the queue and return
            #         # so that the existing thread can handle it
            #         service_queue[cur_index] = \
            #             (check_request, check_method, service)
            #         return
            # End of code block which assumes no preemption.

            req = (my_request_id, method_name, service)
            service_queue.append(req)
            reqs_ahead_of_us = request_index(service_queue, my_request_id)
            while reqs_ahead_of_us != 0:
                if reqs_ahead_of_us == 1:
                    # it is almost our turn. get ready
                    waitsecs = .01
                else:
                    waitsecs = reqs_ahead_of_us * .5
                if waitsecs > .01:
                    LOG.debug('%s request %s is blocking'
                              ' for %.2f secs - queue depth: %d'
                              % (str(method_name), my_request_id,
                                 waitsecs, len(service_queue)))
                greenthread.sleep(waitsecs)
                reqs_ahead_of_us = request_index(service_queue,
                                                 my_request_id)
            try:
                LOG.debug('%s request %s is running with queue depth: %d'
                          % (str(method_name), my_request_id,
                             len(service_queue)))
                start_time = time()
                result = method(*args, **kwargs)
                LOG.debug('%s request %s took %.5f secs'
                          % (str(method_name), my_request_id,
                             time() - start_time))
            except:
                LOG.error('%s request %s FAILED'
                          % (str(method_name), my_request_id))
                raise
            finally:
                service_queue.pop(0)
            return result
        return wrapper
    return real_serialized
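A sketch of how a driver method would opt into serialization; the class body here is an assumption based on the wrapper's requirements (a service_queue list on the instance, and a service dict as the last positional argument or the 'service' kwarg):

class iControlDriver(object):  # hypothetical minimal driver
    def __init__(self):
        self.service_queue = []  # wrapper reads args[0].service_queue

    @serialized('create_member')
    def create_member(self, member, service):
        # Runs only after every earlier queued request has popped
        # itself off service_queue; 'service' must carry a 'pool' key
        # for the commented-out create_member consolidation to apply.
        pass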
Python
def real_serialized(method):
    """Decorator to serialize calls to configure via iControl"""
    # NOTE: method_name is a free variable here; it is supplied by the
    # enclosing serialized() factory shown above.
    def wrapper(*args, **kwargs):
        """ Necessary wrapper """
        # args[0] must be an instance of iControlDriver
        service_queue = args[0].service_queue
        my_request_id = uuid.uuid4()

        service = None
        if len(args) > 0:
            last_arg = args[-1]
            if isinstance(last_arg, dict) and ('pool' in last_arg):
                service = last_arg
        if 'service' in kwargs:
            service = kwargs['service']

        # Consolidate create_member requests for the same pool.
        #
        # NOTE: The following block of code alters the state of
        # a queue that other greenthreads are waiting behind.
        # This code assumes it will not be preempted by another
        # greenthread while running. It does not do I/O or call any
        # other monkey-patched code which might cause a context switch.
        # To avoid race conditions, DO NOT add logging to this code
        # block.
        # num_requests = len(service_queue)
        # queue optimization
        # if num_requests > 1 and method_name == 'create_member':
        #     cur_pool_id = service['pool']['id']
        #     cur_index = num_requests - 1
        #     # do not attempt to replace the first entry (index 0)
        #     # because it may already be in process.
        #     while cur_index > 0:
        #         (check_request, check_method, check_service) = \
        #             service_queue[cur_index]
        #         if check_service['pool']['id'] != cur_pool_id:
        #             cur_index -= 1
        #             continue
        #         if check_method != 'create_member':
        #             break
        #         # move this request up in the queue and return
        #         # so that the existing thread can handle it
        #         service_queue[cur_index] = \
        #             (check_request, check_method, service)
        #         return
        # End of code block which assumes no preemption.

        req = (my_request_id, method_name, service)
        service_queue.append(req)
        reqs_ahead_of_us = request_index(service_queue, my_request_id)
        while reqs_ahead_of_us != 0:
            if reqs_ahead_of_us == 1:
                # it is almost our turn. get ready
                waitsecs = .01
            else:
                waitsecs = reqs_ahead_of_us * .5
            if waitsecs > .01:
                LOG.debug('%s request %s is blocking'
                          ' for %.2f secs - queue depth: %d'
                          % (str(method_name), my_request_id,
                             waitsecs, len(service_queue)))
            greenthread.sleep(waitsecs)
            reqs_ahead_of_us = request_index(service_queue, my_request_id)
        try:
            LOG.debug('%s request %s is running with queue depth: %d'
                      % (str(method_name), my_request_id,
                         len(service_queue)))
            start_time = time()
            result = method(*args, **kwargs)
            LOG.debug('%s request %s took %.5f secs'
                      % (str(method_name), my_request_id,
                         time() - start_time))
        except:
            LOG.error('%s request %s FAILED'
                      % (str(method_name), my_request_id))
            raise
        finally:
            service_queue.pop(0)
        return result
    return wrapper
Python
def request_index(request_queue, request_id):
    """ Get the index of a request in the request queue """
    for index, request in enumerate(request_queue):
        if request[0] == request_id:
            return index
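A quick worked example, using the (request_id, method_name, service) tuple shape that serialized() appends to the queue:

queue = [('id-a', 'create_pool', None),
         ('id-b', 'create_member', None)]
assert request_index(queue, 'id-a') == 0
assert request_index(queue, 'id-b') == 1
assert request_index(queue, 'id-z') is None  # no match falls through to None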
Python
def assure_bigip_pool_delete(self, bigip, service):
    """ Assure pool is deleted from big-ip """
    LOG.debug(_('Deleting Pool %s' % service['pool']['id']))
    bigip.pool.delete(name=service['pool']['id'],
                      folder=service['pool']['tenant_id'])
Python
def assure_bigip_members(self, bigip, service, subnet_hints):
    """ Ensure pool members are on bigip """
    pool = service['pool']
    start_time = time()
    # Does the pool exist? If not, don't bother.
    if not bigip.pool.exists(name=pool['id'], folder=pool['tenant_id']):
        return
    # Current members on the BIG-IP
    pool['existing_members'] = bigip.pool.get_members(
        name=pool['id'], folder=pool['tenant_id'])
    # Flag if we need to change the pool's LB method to
    # include weighting by the ratio attribute
    any_using_ratio = False
    # Members according to Neutron
    for member in service['members']:
        member_hints = \
            self._assure_bigip_member(bigip, subnet_hints, pool, member)
        if member_hints['using_ratio']:
            any_using_ratio = True
        # Remove member from the list of members bigip needs to remove
        if member_hints['found_existing']:
            pool['existing_members'].remove(member_hints['found_existing'])

    LOG.debug(_("Pool: %s removing members %s"
                % (pool['id'], pool['existing_members'])))
    # remove any members which are no longer in the service
    for need_to_delete in pool['existing_members']:
        bigip.pool.remove_member(name=pool['id'],
                                 ip_address=need_to_delete['addr'],
                                 port=int(need_to_delete['port']),
                                 folder=pool['tenant_id'])

    # if members are using weights, change the LB method to RATIO
    if any_using_ratio:
        # LOG.debug(_("Pool: %s changing to ratio based lb"
        #             % pool['id']))
        if pool['lb_method'] == lb_const.LB_METHOD_LEAST_CONNECTIONS:
            bigip.pool.set_lb_method(name=pool['id'],
                                     lb_method='RATIO_LEAST_CONNECTIONS',
                                     folder=pool['tenant_id'])
        else:
            bigip.pool.set_lb_method(name=pool['id'],
                                     lb_method='RATIO',
                                     folder=pool['tenant_id'])
    else:
        # We must update the pool lb_method for the case where
        # the pool object was not updated, but the member
        # used to have a weight (setting ratio) and now does not.
        bigip.pool.set_lb_method(name=pool['id'],
                                 lb_method=pool['lb_method'],
                                 folder=pool['tenant_id'])
    if time() - start_time > .001:
        LOG.debug("    _assure_members setting pool lb method" +
                  " took %.5f secs" % (time() - start_time))
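The resulting LB-method decision, summarized for illustration (the lb_const value is assumed to come from the Neutron LBaaS constants module):

# Summary of the branch above (illustrative, not additional logic):
#   weighted member present + LB_METHOD_LEAST_CONNECTIONS
#       -> 'RATIO_LEAST_CONNECTIONS'
#   weighted member present + any other method
#       -> 'RATIO'
#   no weighted members
#       -> pool['lb_method'] reapplied, covering members whose
#          weight was removed since the last sync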
Python
def _assure_bigip_member(self, bigip, subnet_hints, pool, member):
    """ Ensure pool member is on bigip """
    start_time = time()

    network = member['network']
    subnet = member['subnet']
    member_hints = {'found_existing': None,
                    'using_ratio': False,
                    'deleted_members': []}

    ip_address = member['address']
    for existing_member in pool['existing_members']:
        if ip_address.startswith(existing_member['addr']) and \
                (member['protocol_port'] == existing_member['port']):
            member_hints['found_existing'] = existing_member
            break

    # Delete those pending delete
    if member['status'] == plugin_const.PENDING_DELETE:
        self._assure_bigip_delete_member(bigip, pool, member, ip_address)
        member_hints['deleted_members'].append(member)
        if subnet and \
                subnet['id'] not in subnet_hints['do_not_delete_subnets']:
            subnet_hints['check_for_delete_subnets'][subnet['id']] = \
                {'network': network,
                 'subnet': subnet,
                 'is_for_member': True}
    else:
        just_added = False
        if not member_hints['found_existing']:
            add_start_time = time()
            port = int(member['protocol_port'])
            if bigip.pool.add_member(name=pool['id'],
                                     ip_address=ip_address,
                                     port=port,
                                     folder=pool['tenant_id'],
                                     no_checks=True):
                just_added = True
            LOG.debug("           bigip.pool.add_member %s took %.5f" %
                      (ip_address, time() - add_start_time))
        if just_added or member['status'] == plugin_const.PENDING_UPDATE:
            member_info = {'pool': pool, 'member': member,
                           'ip_address': ip_address,
                           'just_added': just_added}
            member_hints['using_ratio'] = \
                self._assure_update_member(bigip, member_info)
        if subnet and \
                subnet['id'] in subnet_hints['check_for_delete_subnets']:
            del subnet_hints['check_for_delete_subnets'][subnet['id']]
        if subnet and \
                subnet['id'] not in subnet_hints['do_not_delete_subnets']:
            subnet_hints['do_not_delete_subnets'].append(subnet['id'])

    if time() - start_time > .001:
        LOG.debug("        assuring member %s took %.5f secs" %
                  (member['address'], time() - start_time))
    return member_hints
Python
def _assure_update_member(self, bigip, member_info):
    """ Update properties of pool member on bigip """
    pool = member_info['pool']
    member = member_info['member']
    ip_address = member_info['ip_address']
    just_added = member_info['just_added']

    using_ratio = False
    # Is it enabled or disabled?
    # no_checks because we add the member above if not found
    start_time = time()
    member_port = int(member['protocol_port'])
    if member['admin_state_up']:
        bigip.pool.enable_member(name=pool['id'],
                                 ip_address=ip_address,
                                 port=member_port,
                                 folder=pool['tenant_id'],
                                 no_checks=True)
    else:
        bigip.pool.disable_member(name=pool['id'],
                                  ip_address=ip_address,
                                  port=member_port,
                                  folder=pool['tenant_id'],
                                  no_checks=True)
    LOG.debug("            member enable/disable took %.5f secs" %
              (time() - start_time))
    # Do we have weights for ratios?
    if member['weight'] > 1:
        # The original guard here, 'if not just_added or just_added:',
        # was a tautology, so the ratio is simply always set for
        # weighted members.
        start_time = time()
        set_ratio = bigip.pool.set_member_ratio
        set_ratio(name=pool['id'],
                  ip_address=ip_address,
                  port=member_port,
                  ratio=int(member['weight']),
                  folder=pool['tenant_id'],
                  no_checks=True)
        if time() - start_time > .0001:
            LOG.debug("            member set ratio took %.5f secs" %
                      (time() - start_time))
        using_ratio = True

    return using_ratio
Python
def _assure_bigip_delete_member(self, bigip, pool, member, ip_address):
    """ Ensure pool member is deleted from bigip """
    member_port = int(member['protocol_port'])
    bigip.pool.remove_member(name=pool['id'],
                             ip_address=ip_address,
                             port=member_port,
                             folder=pool['tenant_id'])
Python
def _init_bigip_managers(self):
    """ Setup the managers that create big-ip configurations. """
    # self.vcmp_manager = VcmpManager(self)
    # NOTE: VcmpManager creation is commented out above, so default the
    # attribute; it is referenced below when building the L2 manager.
    self.vcmp_manager = None
    self.tenant_manager = BigipTenantManager(
        self.conf, self)

    # Default the binding drivers so the attributes always exist, even
    # when no driver is configured or the import fails.
    self.vlan_binding = None
    self.l3_binding = None

    if self.conf.vlan_binding_driver:
        try:
            self.vlan_binding = importutils.import_object(
                self.conf.vlan_binding_driver, self.conf, self)
        except ImportError:
            LOG.error(_('Failed to import VLAN binding driver: %s'
                        % self.conf.vlan_binding_driver))

    if self.conf.l3_binding_driver:
        try:
            self.l3_binding = importutils.import_object(
                self.conf.l3_binding_driver, self.conf, self)
        except ImportError:
            LOG.error(_('Failed to import L3 binding driver: %s'
                        % self.conf.l3_binding_driver))
    else:
        LOG.debug(_('No L3 binding driver configured.'
                    ' No L3 binding will be done.'))

    if self.conf.f5_global_routed_mode:
        self.bigip_l2_manager = None
    else:
        self.fdb_connector = FDBConnectorML2(self.conf)
        self.bigip_l2_manager = BigipL2Manager(
            self.conf, self.vcmp_manager, self.fdb_connector,
            self.vlan_binding
        )

    # Direct means creating vlans, selfips, etc. directly
    # rather than via an iApp.
    self.network_builder = NetworkBuilderDirect(
        self.conf, self, self.bigip_l2_manager, self.l3_binding
    )

    # Directly to the BIG-IP rather than through BIG-IQ.
    self.lbaas_builder_bigip_iapp = LBaaSBuilderBigipIApp(
        self.conf, self, self.bigip_l2_manager
    )
    # Object signifies creating vips and pools with iControl
    # rather than using an iApp.
    self.lbaas_builder_bigip_objects = LBaaSBuilderBigipObjects(
        self.conf, self, self.bigip_l2_manager, self.l3_binding
    )
    try:
        self.lbaas_builder_bigiq_iapp = LBaaSBuilderBigiqIApp(
            self.conf, self
        )
    except NeutronException as exc:
        LOG.debug(_('Not using bigiq: %s' % exc.msg))