def post_event_graph(self, event, graph_nodes):
"""Post a new event graph into the system.
A graph is a collection of events to be
executed in a certain manner. Use the
commonly defined 'Event' class to define
the events of the graph.
:param event: Object of 'Event' class.
Returns: None
"""
# Post events for all the graph nodes
for node in graph_nodes:
self.post_event(node)
event = super(NfpController, self).post_event_graph(event)
message = "(event - %s) - New event" % (event.identify())
LOG.debug(message)
if self.PROCESS_TYPE == "worker":
# Event posted in worker context, send it to parent process
message = ("(event - %s) - new event in worker"
"posting to distributor process") % (event.identify())
LOG.debug(message)
# Send it to the distributor process
self.pipe_send(self._pipe, event)
else:
message = ("(event - %s) - new event in distributor"
"processing event") % (event.identify())
LOG.debug(message)
self._manager.process_events([event])
def post_event(self, event):
"""Post a new event into the system.
If the distributor (main) process posts an event, it
is delivered to a worker.
If a worker posts an event, it is delivered to the
distributor for processing, where it can decide
to load balance & sequence events.
:param event: Object of 'Event' class.
Returns: None
"""
event = super(NfpController, self).post_event(event)
message = "(event - %s) - New event" % (event.identify())
LOG.debug(message)
if self.PROCESS_TYPE == "worker":
# Event posted in worker context, send it to parent process
message = ("(event - %s) - new event in worker"
"posting to distributor process") % (event.identify())
LOG.debug(message)
# Send it to the distributor process
self.pipe_send(self._pipe, event)
else:
message = ("(event - %s) - new event in distributor"
"processing event") % (event.identify())
LOG.debug(message)
self._manager.process_events([event])
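
The worker/distributor handoff above ultimately travels over an inter-process pipe. Below is a minimal, self-contained sketch of that idea using multiprocessing.Pipe; it is an illustrative stand-in, not the NFP controller (the event payload, function names and wiring here are assumptions), but it shows roughly what pipe_send() on the worker side and the receive-then-process step on the distributor side amount to.

import multiprocessing

def worker(conn):
    # In the real controller a worker calls pipe_send(self._pipe, event);
    # here we just push a picklable payload through the pipe.
    conn.send({'id': 'EVENT_X', 'data': 'payload'})
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=worker, args=(child_conn,))
    p.start()
    # Distributor side: receive the event and hand it to the manager,
    # analogous to self._manager.process_events([event]).
    event = parent_conn.recv()
    print("distributor received:", event)
    p.join()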
def stash_event(self, event):
"""Stash an event.
This is invoked by a worker process.
The event is put into a queue; the distributor
will pick it up.
Executor: worker-process
"""
if self.PROCESS_TYPE == "distributor":
message = "(event - %s) - distributor cannot stash" % (
event.identify())
LOG.error(message)
else:
message = "(event - %s) - stashed" % (event.identify())
LOG.debug(message)
self._stashq.put(event)
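
For the stash path, a hedged sketch using a plain multiprocessing.Queue as a stand-in for self._stashq: workers put events on the queue and the distributor picks them up later. The event contents and function names are made up for illustration.

import multiprocessing

def worker(stashq):
    # Worker side: stash an event for the distributor to pick up.
    stashq.put({'id': 'EVENT_STASHED'})

if __name__ == '__main__':
    stashq = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(stashq,))
    p.start()
    # Distributor side: pick up whatever the worker stashed.
    print("picked up:", stashq.get())
    p.join()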
def load_nfp_modules(conf, controller):
"""Load all nfp modules from the configured directory."""
pymodules = []
try:
base_module = __import__(conf.nfp_modules_path,
globals(), locals(), ['modules'], -1)
modules_dir = base_module.__path__[0]
try:
files = os.listdir(modules_dir)
for pyfile in set([f for f in files if f.endswith(".py")]):
try:
pymodule = __import__(conf.nfp_modules_path,
globals(), locals(),
[pyfile[:-3]], -1)
pymodule = eval('pymodule.%s' % (pyfile[:-3]))
try:
pymodule.nfp_module_init(controller, conf)
pymodules += [pymodule]
message = "(module - %s) - Initialized" % (
identify(pymodule))
LOG.debug(message)
except AttributeError as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = "Traceback: %s" % (exc_traceback)
LOG.error(message)
message = ("(module - %s) - does not implement"
"nfp_module_init()") % (identify(pymodule))
LOG.warn(message)
except ImportError:
message = "Failed to import module %s" % (pyfile)
LOG.error(message)
except OSError:
message = "Failed to read files from %s" % (modules_dir)
LOG.error(message)
except ImportError:
message = "Failed to import module from path %s" % (
conf.nfp_modules_path)
LOG.error(message)
return pymodules
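
The loader above relies on Python 2 era constructs (__import__ with level -1, eval of the submodule attribute). As a rough, hedged sketch only, the same idea on Python 3 could be written with importlib; the nfp_module_init hook and the modules-path argument are taken from the snippet, while the helper name and error handling are assumptions.

import importlib
import os

def load_modules(modules_path, conf, controller):
    """Import every .py file in a package and call its nfp_module_init hook."""
    loaded = []
    package = importlib.import_module(modules_path)
    modules_dir = package.__path__[0]
    for pyfile in sorted(f for f in os.listdir(modules_dir) if f.endswith('.py')):
        name = pyfile[:-3]
        if name == '__init__':
            continue
        try:
            module = importlib.import_module('%s.%s' % (modules_path, name))
        except ImportError:
            continue  # skip modules that fail to import
        init = getattr(module, 'nfp_module_init', None)
        if init is None:
            continue  # module does not implement the init hook
        init(controller, conf)
        loaded.append(module)
    return loaded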
def _allocate_snat_ip(self, context, host_or_vrf, network, es_name):
"""Allocate a SNAT IP for a host on an external network."""
snat_subnets = self._get_subnets(context,
filters={'name': [HOST_SNAT_POOL],
'network_id': [network['id']]})
if not snat_subnets:
LOG.info(_LI("Subnet for SNAT-pool could not be found "
"for external network %(net_id)s. SNAT will not "
"function on this network"), {'net_id': network['id']})
return {}
else:
snat_ports = self._get_ports(context,
filters={'name': [HOST_SNAT_POOL_PORT],
'network_id': [network['id']],
'device_id': [host_or_vrf]})
snat_ip = None
if not snat_ports:
# Note that the following port is created only for getting
# an IP assignment in the SNAT-pool subnet.
attrs = {'device_id': host_or_vrf,
'device_owner': DEVICE_OWNER_SNAT_PORT,
'binding:host_id': host_or_vrf,
'binding:vif_type': portbindings.VIF_TYPE_UNBOUND,
'tenant_id': network['tenant_id'],
'name': HOST_SNAT_POOL_PORT,
'network_id': network['id'],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': [{'subnet_id': snat_subnets[0]['id']}],
'admin_state_up': False}
port = self._create_port(context, attrs)
if port and port['fixed_ips']:
snat_ip = port['fixed_ips'][0]['ip_address']
else:
LOG.warning(_LW("SNAT-port creation failed for subnet "
"%(subnet_id)s on external network "
"%(net_id)s. SNAT will not function on"
"host or vrf %(host_or_vrf)s for this "
"network"),
{'subnet_id': snat_subnets[0]['id'],
'net_id': network['id'],
'host_or_vrf': host_or_vrf})
return {}
elif snat_ports[0]['fixed_ips']:
snat_ip = snat_ports[0]['fixed_ips'][0]['ip_address']
else:
LOG.warning(_LW("SNAT-port %(port)s for external network "
"%(net)s on host or VRF %(host_or_vrf)s doesn't "
"have an IP-address"),
{'port': snat_ports[0]['id'],
'net': network['id'],
'host_or_vrf': host_or_vrf})
return {}
return {'external_segment_name': es_name,
'host_snat_ip': snat_ip,
'gateway_ip': snat_subnets[0]['gateway_ip'],
'prefixlen':
netaddr.IPNetwork(snat_subnets[0]['cidr']).prefixlen}
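
For illustration only, the mapping returned above has the shape sketched below; all values are invented, and the stdlib ipaddress module is used here simply to mirror what netaddr.IPNetwork(...).prefixlen computes for the pool's CIDR.

import ipaddress

cidr = '192.168.200.0/24'            # hypothetical HOST_SNAT_POOL subnet CIDR
prefixlen = ipaddress.ip_network(cidr).prefixlen

host_snat_ip_allocation = {
    'external_segment_name': 'es-1',   # hypothetical external segment name
    'host_snat_ip': '192.168.200.10',  # IP taken from the pool port's fixed_ips
    'gateway_ip': '192.168.200.1',
    'prefixlen': prefixlen,            # 24 for a /24 pool
}
print(host_snat_ip_allocation)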
def _get_ip_mapping_details(self, context, port_id, l3_policy, pt=None,
owned_addresses=None, host=None):
"""Add information about IP mapping for DNAT/SNAT."""
if not l3_policy['external_segments']:
return [], [], []
fips_filter = [port_id]
if pt:
# For each owned address, we must pass the FIPs of the original
# owning port.
# REVISIT(ivar): should be done for allowed_address_pairs in
# general?
ptg_pts = self._get_policy_targets(
context, {'policy_target_group_id':
[pt['policy_target_group_id']]})
ports = self._get_ports(context,
{'id': [x['port_id'] for x in ptg_pts]})
for port in ports:
# Whenever an owned address belongs to a port, steal its FIPs
if owned_addresses & set([x['ip_address'] for x in
port['fixed_ips'] + port.get(
'allowed_address_pairs', [])]):
fips_filter.append(port['id'])
fips = self._get_fips(context, filters={'port_id': fips_filter})
ipms = []
# Populate host_snat_ips in the format:
# [ {'external_segment_name': <ext_segment_name1>,
# 'host_snat_ip': <ip_addr>, 'gateway_ip': <gateway_ip>,
# 'prefixlen': <prefix_length_of_host_snat_pool_subnet>},
# {..}, ... ]
host_snat_ips = []
ess = context._plugin.get_external_segments(context._plugin_context,
filters={'id': l3_policy['external_segments'].keys()})
for es in ess:
if not self._is_nat_enabled_on_es(es):
continue
ext_info = self.apic_manager.ext_net_dict.get(es['name'])
if ext_info and self._is_edge_nat(ext_info):
continue
nat_epg_tenant, nat_epg_name = self._determine_nat_epg_for_es(
context, es, l3_policy)
nat_epg_tenant = self.apic_manager.apic.fvTenant.name(
nat_epg_tenant)
fips_in_es = []
if es['subnet_id']:
subnet = self._get_subnet(context._plugin_context,
es['subnet_id'])
ext_net_id = subnet['network_id']
fips_in_es = filter(
lambda x: x['floating_network_id'] == ext_net_id, fips)
ext_network = self._get_network(context._plugin_context,
ext_net_id)
if host:
host_snat_ip_allocation = (
self._allocate_snat_ip(
context._plugin_context, host, ext_network,
es['name']))
if host_snat_ip_allocation:
host_snat_ips.append(host_snat_ip_allocation)
if not fips_in_es:
ipms.append({'external_segment_name': es['name'],
'nat_epg_name': nat_epg_name,
'nat_epg_tenant': nat_epg_tenant,
'next_hop_ep_tenant': (
self.apic_manager.apic.fvTenant.name(
self._tenant_by_sharing_policy(es)))})
for f in fips_in_es:
f['nat_epg_name'] = nat_epg_name
f['nat_epg_tenant'] = nat_epg_tenant
return fips, ipms, host_snat_ips
def nat_pool_iterator(self, context, tenant_id, floatingip):
"""Get NAT pool for floating IP associated with external-network."""
fip = floatingip['floatingip']
f_net_id = fip['floating_network_id']
subnets = self._get_subnets(context.elevated(),
{'network_id': [f_net_id]})
ext_seg = self.gbp_plugin.get_external_segments(context.elevated(),
{'subnet_id': [s['id'] for s in subnets]}) if subnets else []
context._plugin = self.gbp_plugin
context._plugin_context = context
for es in ext_seg:
for nat_pool in self._gen_nat_pool_in_ext_seg(context,
tenant_id, es):
yield nat_pool
def _use_implicit_subnet(self, context, force_add=False):
"""Implicit subnet for the APIC driver.
The first PTG of a given BD will allocate a new subnet from the L3P.
Any subsequent PTG in the same BD will use the same subnet.
More subnets will be allocated whenever the existing ones run out of
addresses.
"""
l2p_id = context.current['l2_policy_id']
with lockutils.lock(l2p_id, external=True):
subs = self._get_l2p_subnets(context._plugin_context, l2p_id)
subs = set([x['id'] for x in subs])
added = []
# Always add a new subnet to L3 proxies
is_proxy = bool(context.current.get('proxied_group_id'))
force_add = force_add or is_proxy
if not subs or force_add:
l2p = context._plugin.get_l2_policy(context._plugin_context,
l2p_id)
if is_proxy:
name = APIC_OWNED_RES + context.current['id']
else:
name = APIC_OWNED + l2p['name']
added = super(
ApicMappingDriver, self)._use_implicit_subnet(
context, subnet_specifics={'name': name},
is_proxy=is_proxy)
context.add_subnets(subs - set(context.current['subnets']))
if added:
l3p_id = l2p['l3_policy_id']
l3p = context._plugin.get_l3_policy(context._plugin_context,
l3p_id)
for subnet in added:
self.process_subnet_added(context._plugin_context, subnet)
if not is_proxy:
for router_id in l3p['routers']:
for subnet in added:
self._plug_router_to_subnet(
nctx.get_admin_context(),
subnet['id'], router_id)
def _get_router_ext_subnet_for_l3p(self, context, l3policy):
"""Get a dict mapping external subnets to the routers of the L3 policy."""
rtr_sn = {}
routers = self._get_routers(context._plugin_context,
{'id': l3policy['routers']})
for r in routers:
if (not r['external_gateway_info'] or
not r['external_gateway_info']['external_fixed_ips']):
continue
for ip in r['external_gateway_info']['external_fixed_ips']:
rtr_sn[ip['subnet_id']] = r['id']
return rtr_sn
def _is_master_owner(self, plugin_context, pt, master_pt=None,
owned_ips=None):
"""Verify whether the port owns the master address.
Returns the master MAC address, or False otherwise.
"""
if pt['cluster_id']:
master_pt = master_pt or self._get_pt_cluster_master(
plugin_context, pt)
# Get the IPs owned by the PT, and verify that at least one of them
# belongs to the cluster master.
owned_addresses = owned_ips or self._get_owned_addresses(
plugin_context, pt['port_id'])
master_port = self._get_port(plugin_context, master_pt['port_id'])
master_addresses = set([x['ip_address'] for x in
master_port['fixed_ips']])
master_mac = master_port['mac_address']
if bool(owned_addresses & master_addresses):
return master_mac
return False
def _disable_port_on_shadow_subnet(self, context):
"""Disable certain kinds of ports in shadow-network."""
port = context.current
if (port['device_owner'] == n_constants.DEVICE_OWNER_DHCP and
port['admin_state_up'] is True and
self._shadow_network_id_to_ptg(context, port['network_id'])):
self._update_port(context._plugin_context.elevated(),
port['id'],
{'admin_state_up': False})
def select_network_function_device(self, devices, device_data,
network_handler=None):
""" Select an NFD which is eligible for sharing.
:param devices: NFDs
:type devices: list
:param device_data: NFD data
:type device_data: dict
:returns: dict -- the NFD which is eligible for sharing, or
None when device sharing is not supported or
no device is eligible for sharing
:raises: exceptions.IncompleteData
"""
if (
any(key not in device_data
for key in ['ports']) or
type(device_data['ports']) is not list or
any(key not in port
for port in device_data['ports']
for key in ['id',
'port_classification',
'port_model']) or
type(devices) is not list or
any(key not in device
for device in devices
for key in ['interfaces_in_use'])
):
raise exceptions.IncompleteData()
token = self._get_token(device_data.get('token'))
if not token:
return None
image_name = self._get_image_name(device_data)
if image_name:
self._update_vendor_data(device_data,
device_data.get('token'))
if not self._is_device_sharing_supported():
return None
def plug_network_function_device_interfaces(self, device_data,
network_handler=None):
""" Attach the network interfaces for an NFD.
:param device_data: NFD
:type device_data: dict
:returns: True on success and None on failure
:raises: exceptions.IncompleteData,
exceptions.ComputePolicyNotSupported
"""
if (
any(key not in device_data
for key in ['id',
'service_details',
'ports']) or
type(device_data['service_details']) is not dict or
any(key not in device_data['service_details']
for key in ['service_vendor',
'device_type',
'network_mode']) or
type(device_data['ports']) is not list or
any(key not in port
for port in device_data['ports']
for key in ['id',
'port_classification',
'port_model'])
):
raise exceptions.IncompleteData()
if (
device_data['service_details']['device_type'] !=
nfp_constants.NOVA_MODE
):
raise exceptions.ComputePolicyNotSupported(
compute_policy=device_data['service_details']['device_type'])
token = device_data['token']
tenant_id = device_data['tenant_id']
try:
executor = nfp_executor.TaskExecutor(jobs=10)
for port in device_data['ports']:
if port['port_classification'] == nfp_constants.PROVIDER:
service_type = device_data[
'service_details']['service_type'].lower()
if service_type.lower() in [
nfp_constants.FIREWALL.lower(),
nfp_constants.VPN.lower()]:
executor.add_job(
'SET_PROMISCUOS_MODE',
network_handler.set_promiscuos_mode_fast,
token, port['id'])
executor.add_job(
'ATTACH_INTERFACE',
self.compute_handler_nova.attach_interface,
token, tenant_id, device_data['id'],
port['id'])
break
for port in device_data['ports']:
if port['port_classification'] == nfp_constants.CONSUMER:
service_type = device_data[
'service_details']['service_type'].lower()
if service_type.lower() in [
nfp_constants.FIREWALL.lower(),
nfp_constants.VPN.lower()]:
executor.add_job(
'SET_PROMISCUOS_MODE',
network_handler.set_promiscuos_mode_fast,
token, port['id'])
executor.add_job(
'ATTACH_INTERFACE',
self.compute_handler_nova.attach_interface,
token, tenant_id, device_data['id'],
port['id'])
break
executor.fire()
except Exception as e:
LOG.error(_LE('Failed to plug interface(s) to the device. '
'Error: %(error)s'), {'error': e})
return None
else:
return True
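
The method queues jobs on an nfp_executor.TaskExecutor and then calls fire(). As a rough illustration of that add_job()/fire() pattern only, and not the real NFP executor, a thread-pool based stand-in might look like the following; the class name, error behaviour and the dummy callables are all assumptions.

from concurrent.futures import ThreadPoolExecutor

class SimpleTaskExecutor(object):
    """Collect named jobs and run them together, loosely mimicking add_job()/fire()."""

    def __init__(self, jobs=10):
        self._max_workers = jobs
        self._pending = []

    def add_job(self, name, func, *args, **kwargs):
        self._pending.append((name, func, args, kwargs))

    def fire(self):
        results = {}
        with ThreadPoolExecutor(max_workers=self._max_workers) as pool:
            futures = {name: pool.submit(func, *args, **kwargs)
                       for name, func, args, kwargs in self._pending}
            for name, future in futures.items():
                results[name] = future.result()
        self._pending = []
        return results

# Usage sketch with dummy callables standing in for the network/compute handlers.
if __name__ == '__main__':
    executor = SimpleTaskExecutor(jobs=10)
    executor.add_job('SET_PROMISCUOS_MODE',
                     lambda port: 'promiscuous mode set on %s' % port, 'port-1')
    executor.add_job('ATTACH_INTERFACE',
                     lambda dev, port: 'attached %s to %s' % (port, dev),
                     'device-1', 'port-1')
    print(executor.fire())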
def unplug_network_function_device_interfaces(self, device_data,
network_handler=None):
""" Detach the network interfaces for an NFD.
:param device_data: NFD
:type device_data: dict
:returns: True on success and None on failure
:raises: exceptions.IncompleteData,
exceptions.ComputePolicyNotSupported
"""
if (
any(key not in device_data
for key in ['id',
'service_details',
'ports']) or
type(device_data['service_details']) is not dict or
any(key not in device_data['service_details']
for key in ['service_vendor',
'device_type',
'network_mode']) or
any(key not in port
for port in device_data['ports']
for key in ['id',
'port_classification',
'port_model'])
):
raise exceptions.IncompleteData()
if (
device_data['service_details']['device_type'] !=
nfp_constants.NOVA_MODE
):
raise exceptions.ComputePolicyNotSupported(
compute_policy=device_data['service_details']['device_type'])
image_name = self._get_image_name(device_data)
if image_name:
self._update_vendor_data(device_data,
device_data.get('token'))
token = self._get_token(device_data.get('token'))
if not token:
return None
try:
for port in device_data['ports']:
port_id = network_handler.get_port_id(token, port['id'])
self.compute_handler_nova.detach_interface(
token,
self._get_admin_tenant_id(token=token),
device_data['id'],
port_id)
except Exception as e:
LOG.error(_LE('Failed to unplug interface(s) from the device. '
'Error: %(error)s'), {'error': e})
return None
else:
return True
def _sendjson(self, method, url, headers, obj=None):
"""Send JSON to the ODL controller."""
medium = self._convert2ascii(obj) if obj else None
url = self._convert2ascii(url)
data = (
jsonutils.dumps(medium, indent=4, sort_keys=True) if medium
else None
)
LOG.debug("=========================================================")
LOG.debug("Sending METHOD (%(method)s) URL (%(url)s)",
{'method': method, 'url': url})
LOG.debug("(%(data)s)", {'data': data})
LOG.debug("=========================================================")
r = requests.request(
method,
url=url,
headers=headers,
data=data,
auth=auth.HTTPBasicAuth(self._username,
self._password)
)
r.raise_for_status()
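
As a minimal usage sketch of the same request pattern with plain requests and the stdlib json module: the URL, credentials and payload below are placeholders, not values from the source, and the helper name is made up.

import json
import requests
from requests import auth

def send_json(method, url, headers, obj, username, password):
    # Serialize the payload the same way the driver does before sending it.
    data = json.dumps(obj, indent=4, sort_keys=True) if obj else None
    r = requests.request(method, url=url, headers=headers, data=data,
                         auth=auth.HTTPBasicAuth(username, password))
    r.raise_for_status()
    return r

# Hypothetical call against a local controller endpoint:
# send_json('PUT', 'http://localhost:8181/restconf/config/example',
#           {'Content-Type': 'application/json'}, {'key': 'value'},
#           'admin', 'admin')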
def _register_drivers(self):
"""Register all servicechain drivers.
This method should only be called once in the DriverManager
constructor.
"""
for ext in self:
self.drivers[ext.name] = ext
self.ordered_drivers.append(ext)
LOG.info(_LI("Registered servicechain drivers: %s"),
[driver.name for driver in self.ordered_drivers])
def _call_on_drivers(self, method_name, context):
"""Helper method for calling a method across all servicechain drivers.
:param method_name: name of the method to call
:param context: context parameter to pass to each method call
:raises: neutron.services.servicechain.common.ServiceChainDriverError
if any servicechain driver call fails.
"""
error = False
for driver in self.ordered_drivers:
try:
getattr(driver.obj, method_name)(context)
except sc_exc.ServiceChainException:
# This is an exception for the user.
raise
except Exception:
# This is an internal failure.
LOG.exception(
_LE("ServiceChain driver '%(name)s' failed in %(method)s"),
{'name': driver.name, 'method': method_name}
)
error = True
if error:
raise sc_exc.ServiceChainDriverError(
method=method_name
)
def events_init(sc, conf):
"""Register event with its handler."""
evs = [
Event(id='PULL_NOTIFICATIONS',
handler=pull.PullNotification(sc, conf))]
sc.register_events(evs)
def nfp_module_post_init(sc, conf):
"""Post an event for pull notification after each periodic_task_interval."""
ev = sc.new_event(id='PULL_NOTIFICATIONS',
key='PULL_NOTIFICATIONS')
sc.post_event(ev)
def _create_network(self, fmt, name, admin_state_up, **kwargs):
"""Override the routine for allowing the router:external attribute."""
# attributes containing a colon should be passed with
# a double underscore
new_args = dict(itertools.izip(map(lambda x: x.replace('__', ':'),
kwargs),
kwargs.values()))
arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
return super(ResourceMappingTestCase, self)._create_network(
fmt, name, admin_state_up, arg_list=arg_list, **new_args)
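
The kwargs munging in this test helper simply turns double underscores back into colons so an attribute such as router:external can be passed as a Python keyword argument. A small, hedged illustration of just that conversion, written in Python 3 style since itertools.izip is Python 2 only:

def colonify(kwargs):
    """Rewrite double underscores in keyword names back into colons."""
    return {key.replace('__', ':'): value for key, value in kwargs.items()}

# 'router__external' stands in for the router:external attribute.
print(colonify({'router__external': True, 'shared': False}))
# -> {'router:external': True, 'shared': False}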
def run(self):
"""Run one pass to find a timed-out event."""
q = self._queue
timefunc = self.timefunc
pop = heapq.heappop
if q:
time, priority, action, argument = checked_event = q[0]
now = timefunc()
if now < time:
return
else:
event = pop(q)
# Verify that the event was not removed or altered
# by another thread after we last looked at q[0].
if event is checked_event:
action(*argument)
else:
heapq.heappush(q, event)
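
A self-contained sketch of the same heapq pattern: the earliest entry is peeked at q[0] and its action only fires once the deadline has passed. The (time, priority, action, argument) entry layout is taken from the snippet; the surrounding driver code is an assumption for illustration.

import heapq
import time

queue = []

def schedule(delay, priority, action, argument=()):
    heapq.heappush(queue, (time.time() + delay, priority, action, argument))

def run_once():
    """Fire the earliest scheduled action if its deadline has passed."""
    if not queue:
        return
    deadline, priority, action, argument = queue[0]
    if time.time() < deadline:
        return  # nothing has timed out yet
    heapq.heappop(queue)
    action(*argument)

schedule(0.1, 0, print, ('event timed out',))
time.sleep(0.2)
run_once()  # prints 'event timed out'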
def _register_policy_drivers(self):
"""Register all policy drivers.
This method should only be called once in the PolicyDriverManager
constructor.
"""
for ext in self:
self.policy_drivers[ext.name] = ext
self.ordered_policy_drivers.append(ext)
self.reverse_ordered_policy_drivers = self.ordered_policy_drivers[::-1]
LOG.info(_LI("Registered policy drivers: %s"),
[driver.name for driver in self.ordered_policy_drivers])
def _call_on_drivers(self, method_name, context,
continue_on_failure=False):
"""Helper method for calling a method across all policy drivers.
:param method_name: name of the method to call
:param context: context parameter to pass to each method call
:param continue_on_failure: whether or not to continue to call
all policy drivers once one has raised an exception
:raises: neutron.services.group_policy.common.GroupPolicyDriverError
if any policy driver call fails.
"""
error = False
drivers = (self.ordered_policy_drivers if not
method_name.startswith('delete') else
self.reverse_ordered_policy_drivers)
for driver in drivers:
try:
getattr(driver.obj, method_name)(context)
except gp_exc.GroupPolicyException:
# This is an exception for the user.
raise
except Exception:
# This is an internal failure.
LOG.exception(
_LE("Policy driver '%(name)s' failed in %(method)s"),
{'name': driver.name, 'method': method_name}
)
error = True
if not continue_on_failure:
break
if error:
raise gp_exc.GroupPolicyDriverError(
method=method_name
)
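
The manager walks drivers in registration order, except for delete_* methods, which walk the reversed list so teardown happens in the opposite order of setup. A minimal sketch of that ordering rule; the driver names are hypothetical.

drivers = ['driver_a', 'driver_b', 'driver_c']  # registration order
reverse_drivers = drivers[::-1]

def dispatch_order(method_name):
    """Delete-style methods are dispatched in reverse registration order."""
    return reverse_drivers if method_name.startswith('delete') else drivers

print(dispatch_order('create_policy_target_group'))  # ['driver_a', 'driver_b', 'driver_c']
print(dispatch_order('delete_policy_target_group'))  # ['driver_c', 'driver_b', 'driver_a']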
def _check_segment(self, segment):
"""Verify a segment is valid for the OpenDaylight MechanismDriver.
Verify the requested segment is supported by ODL and return True or
False to indicate this to callers.
"""
network_type = segment[api.NETWORK_TYPE]
return network_type in [constants.TYPE_VXLAN, ]
def _agent_bind_port(self, context, agent_list, bind_strategy):
"""Attempt port binding per agent.
Perform the port binding for a given agent.
Returns True if bound successfully.
"""
for agent in agent_list:
LOG.debug("Checking agent: %s", agent)
if agent['alive']:
for segment in context.segments_to_bind:
if bind_strategy(context, segment, agent):
LOG.debug("Bound using segment: %s", segment)
return True
else:
LOG.warning(_LW("Refusing to bind port %(pid)s to dead agent: "
"%(agent)s"),
{'pid': context.current['id'], 'agent': agent})
return False
def bind_port(self, context):
"""Get port binding per host.
This is similar to the one defined in the
AgentMechanismDriverBase class, but is modified
to support multiple L2 agent types (DVS and OpFlex).
"""
LOG.debug("Attempting to bind port %(port)s on "
"network %(network)s",
{'port': context.current['id'],
'network': context.network.current['id']})
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
if vnic_type not in [portbindings.VNIC_NORMAL]:
LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
vnic_type)
return
# Attempt to bind ports for DVS agents for nova-compute daemons
# first. This allows having network agents (dhcp, metadata)
# that typically run on a network node using an OpFlex agent to
# co-exist with nova-compute daemons for ESX, which host DVS agents.
if context.current['device_owner'].startswith('compute:'):
agent_list = context.host_agents(AGENT_TYPE_DVS)
if self._agent_bind_port(context, agent_list, self._bind_dvs_port):
return
# It either wasn't a DVS binding, or there wasn't a DVS
# agent on the binding host (could be the case in a hybrid
# environment supporting KVM and ESX compute). Go try for
# OpFlex agents.
agent_list = context.host_agents(ofcst.AGENT_TYPE_OPFLEX_OVS)
self._agent_bind_port(context, agent_list, self._bind_opflex_port)
def _bind_dvs_port(self, context, segment, agent):
"""Populate VIF type and details for DVS VIFs.
For DVS VIFs, provide the portgroup along
with the security groups setting.
"""
if self._check_segment_for_agent(segment, agent):
port = context.current
# We only handle details for ports that are PTs in PTGs
ptg, pt = self.apic_gbp._port_id_to_ptg(context._plugin_context,
port['id'])
if ptg is None:
LOG.warning(_LW("PTG for port %s does not exist"), port['id'])
return False
mapper = self.apic_gbp.name_mapper.name_mapper
ptg_name = mapper.policy_target_group(context, ptg['name'])
network_id = port.get('network_id')
network = self.apic_gbp._get_network(context._plugin_context,
network_id)
project_name = self.apic_gbp._tenant_by_sharing_policy(network)
apic_tenant_name = self.apic_gbp.apic_manager.apic.fvTenant.name(
project_name)
profile = self.apic_gbp.apic_manager.app_profile_name
# Use default security groups from MD
vif_details = {portbindings.CAP_PORT_FILTER: False}
vif_details['dvs_port_group_name'] = (apic_tenant_name +
'|' + str(profile) +
'|' + str(ptg_name))
currentcopy = copy.copy(context.current)
currentcopy['portgroup_name'] = (
vif_details['dvs_port_group_name'])
booked_port_key = None
if self.dvs_notifier:
booked_port_key = self.dvs_notifier.bind_port_call(
currentcopy,
context.network.network_segments,
context.network.current,
context.host
)
if booked_port_key:
vif_details['dvs_port_key'] = booked_port_key
context.set_binding(segment[api.ID],
VIF_TYPE_DVS, vif_details,
n_constants.PORT_STATUS_ACTIVE)
return True
else:
return False
def _bind_opflex_port(self, context, segment, agent):
"""Populate VIF type and details for OpFlex VIFs.
For OpFlex VIFs, we just report the OVS VIF type,
along with security groups setting, which were
set when this mechanism driver was instantiated.
"""
if self._check_segment_for_agent(segment, agent):
context.set_binding(segment[api.ID],
portbindings.VIF_TYPE_OVS,
{portbindings.CAP_PORT_FILTER: False,
portbindings.OVS_HYBRID_PLUG: False})
return True
else:
return False
def _check_segment_for_agent(self, segment, agent):
"""Check support for OpFlex type segments.
The agent has the ability to limit the segments in OpFlex
networks by specifying the mappings in its config. If no
mapping is specified, then all OpFlex segments are
supported.
"""
network_type = segment[api.NETWORK_TYPE]
if network_type == ofcst.TYPE_OPFLEX:
opflex_mappings = agent['configurations'].get('opflex_networks')
LOG.debug("Checking segment: %(segment)s "
"for physical network: %(mappings)s ",
{'segment': segment, 'mappings': opflex_mappings})
return (opflex_mappings is None or
segment[api.PHYSICAL_NETWORK] in opflex_mappings)
elif network_type == 'local':
return True
else:
return False
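
A hedged, standalone illustration of that check: the segment and agent dictionaries below are invented examples, and plain strings stand in for the ofcst/api constants used above.

OPFLEX_TYPE = 'opflex'  # stands in for ofcst.TYPE_OPFLEX

def supports_segment(segment, agent):
    """Return True if the agent's opflex_networks mapping allows this segment."""
    if segment['network_type'] == OPFLEX_TYPE:
        mappings = agent['configurations'].get('opflex_networks')
        return mappings is None or segment['physical_network'] in mappings
    return segment['network_type'] == 'local'

agent = {'configurations': {'opflex_networks': ['physnet1']}}
print(supports_segment({'network_type': 'opflex', 'physical_network': 'physnet1'}, agent))  # True
print(supports_segment({'network_type': 'opflex', 'physical_network': 'physnet2'}, agent))  # False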
def mapper(name_type):
"""Decorator collecting the operations common to all mappers."""
def wrap(func):
def inner(inst, session, resource_id, resource_name=None):
saved_name = inst.db.get_apic_name(session,
resource_id,
name_type)
if saved_name:
result = saved_name[0]
return result
name = ''
try:
name = func(inst, session, resource_id, resource_name)
except Exception as e:
LOG.warn(("Exception in looking up name %s"), name_type)
LOG.error(e.message)
purged_id = re.sub(r"-+", "-", resource_id)
result = purged_id[:inst.min_suffix]
if name:
name = re.sub(r"-+", "-", name)
# Keep as many uuid chars as possible
id_suffix = "_" + result
max_name_length = MAX_APIC_NAME_LENGTH - len(id_suffix)
result = truncate(name, max_name_length) + id_suffix
result = truncate(result, MAX_APIC_NAME_LENGTH)
# Remove forbidden whitespaces
result = result.replace(' ', '')
result = inst._grow_id_if_needed(
session, purged_id, name_type, result,
start=inst.min_suffix)
else:
result = purged_id
inst.db.add_apic_name(session, resource_id,
name_type, result)
return result
return inner
return wrap
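
The core of the mapper is how it builds an APIC-safe name: collapse runs of dashes, keep a short chunk of the resource id as a suffix, and truncate the human-readable name so the combination fits MAX_APIC_NAME_LENGTH. Below is a standalone sketch of just that naming logic; the length limit of 46 and the five-character id suffix are assumptions for illustration, not values from the source.

import re

MAX_APIC_NAME_LENGTH = 46  # assumed limit, for illustration only
MIN_SUFFIX = 5             # assumed number of id characters kept

def truncate(string, max_length):
    return string[:max_length] if max_length >= 0 else ''

def build_apic_name(resource_id, resource_name):
    purged_id = re.sub(r"-+", "-", resource_id)
    suffix_base = purged_id[:MIN_SUFFIX]
    if not resource_name:
        return purged_id
    name = re.sub(r"-+", "-", resource_name)
    id_suffix = "_" + suffix_base
    max_name_length = MAX_APIC_NAME_LENGTH - len(id_suffix)
    result = truncate(name, max_name_length) + id_suffix
    result = truncate(result, MAX_APIC_NAME_LENGTH)
    return result.replace(' ', '')

print(build_apic_name('1234-5678--abcd', 'my network  name'))
# -> 'mynetworkname_1234-'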
def create_network_function(self, context, network_function):
'''Create a Network Function.
Invoked in an RPC call. Returns the Network Function DB object
created. Results in an event for async processing of the Network
Function Instance.
'''
service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
return service_orchestrator.create_network_function(
context, network_function)
def update_network_function(self, context, network_function_id,
config):
'''Update Network Function Configuration.
Invoked in an RPC call. Return the updated Network function DB object.
Results in an Event for async processing of Network Function Instance.
'''
service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.update_network_function(
context, network_function_id, config)
def delete_network_function(self, context, network_function_id):
'''Delete the network Function.
Invoked in an RPC call. Return the updated Network function DB object.
Results in an Event for async processing of Network Function Instance.
'''
service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.delete_network_function(
context, network_function_id)
def policy_target_added_notification(self, context, network_function_id,
policy_target):
'''Update Configuration to react to member addition.
Invoked in an RPC call. Return the updated Network function DB object.
Results in an Event for async processing of Network Function Instance.
'''
service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_policy_target_added(
context, network_function_id, policy_target)
Python | def policy_target_removed_notification(self, context, network_function_id,
policy_target):
'''Update Configuration to react to member deletion.
Invoked in an RPC call. Return the updated Network function DB object.
Results in an Event for async processing of Network Function Instance.
'''
service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_policy_target_removed(
            context, network_function_id, policy_target)
Python | def consumer_ptg_added_notification(self, context, network_function_id,
policy_target_group):
'''Update Configuration to react to consumer PTG creation.
Invoked in an RPC call. Return the updated Network function DB object.
Results in an Event for async processing of Network Function Instance.
'''
service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_consumer_ptg_added(
            context, network_function_id, policy_target_group)
Python | def consumer_ptg_removed_notification(self, context, network_function_id,
policy_target_group):
'''Update Configuration to react to consumer PTG deletion.
Invoked in an RPC call. Return the updated Network function DB object.
Results in an Event for async processing of Network Function Instance.
'''
service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_consumer_ptg_removed(
            context, network_function_id, policy_target_group)
Python | def handle_update_user_config(self, event):
'''
        Handler to apply any updates in user config.
        First checks with the config driver whether update is supported for
        the service type. If it is not supported, the existing config is
        deleted first (user config deletion is tracked via the
        UPDATE_USER_CONFIG_PREPARING_TO_START event) and then recreated with
        the new changes via the UPDATE_USER_CONFIG_STILL_IN_PROGRESS event.
        If update is supported, the corresponding user config is
        updated/created in the UPDATE_USER_CONFIG_IN_PROGRESS event.
'''
request_data = event.data
network_function_details = self.get_network_function_details(
request_data['network_function_id'])
        stack_id = network_function_details['network_function']['heat_stack_id']
network_function = network_function_details['network_function']
service_profile_id = network_function['service_profile_id']
service_type = self._get_service_type(service_profile_id)
if not self.config_driver.is_update_config_supported(service_type):
service_chain_id = network_function['service_chain_id']
admin_token = self.keystoneclient.get_admin_token()
servicechain_instance = self.gbpclient.get_servicechain_instance(
admin_token,
service_chain_id)
provider_ptg_id = servicechain_instance['provider_ptg_id']
provider_ptg = self.gbpclient.get_policy_target_group(
admin_token,
provider_ptg_id)
provider_tenant_id = provider_ptg['tenant_id']
stack_id = self.config_driver.delete_config(stack_id,
provider_tenant_id)
request_data = {
'heat_stack_id': stack_id,
'network_function_id': network_function['id'],
'tenant_id': provider_tenant_id,
'action': 'update',
'operation': request_data['operation']
}
self._create_event('UPDATE_USER_CONFIG_PREPARING_TO_START',
event_data=request_data,
is_poll_event=True, original_event=event)
else:
self._create_event('UPDATE_USER_CONFIG_IN_PROGRESS',
event_data=event.data,
is_internal_event=True,
                               original_event=event)
Python | def simulation_2(self):
        '''
        1. Initialize the information source
        and call the source encoder
        '''
        fuente_info = source.image_source(path)
        x, y, z = fuente_info.shape
        vT, bfT, r = source.source_encoder(fuente_info)
        '''
        2. Dimensional parameters: m is the number
        of 1xk packets obtained from the source
        encoder, in bits; n > k
        '''
        m, k = bfT.shape
        n = 16
        '''
        3. Initialize the system object, which provides
        the channel encoder, the binary symmetric channel
        and the channel decoder with error correction.
        '''
        sistema = channel.com_sys(m, k, n)  # Create the system object
        bcT = sistema.channel_encoder(bfT)
        bcR = sistema.bin_symmetrical_channel(bcT)
        bfR = np.array([bcR[i][8:] for i in range(len(bcR))])
        bfR = bfR.astype(str)
        '''
        4. Call the source decoder and simulate
        the sink to recover the transmitted image.
        '''
vR = source.source_Decoder(bfR, x, y, z, 8)
sumidero = Image.fromarray(vR)
sumidero.show()
        #sumidero.save('./salida.jpg')
Python | def simulation_3(self):
        '''
        1. Initialize the information source
        and call the source encoder
        '''
        fuente_info = source.image_source(path)
        x, y, z = fuente_info.shape
        vT, bfT, r = source.source_encoder(fuente_info)
        '''
        2. Dimensional parameters: m is the number
        of 1xk packets obtained from the source
        encoder, in bits; n > k
        '''
        m, k = bfT.shape
        n = 16
        '''
        3. Initialize the system object, which provides
        the channel encoder, the binary symmetric channel
        and the channel decoder with error correction.
        '''
        sistema = channel.com_sys(m, k, n)  # Create the system object
        bcT = sistema.channel_encoder(bfT)
        bcR = sistema.bin_symmetrical_channel(bcT)
        bfR = sistema.channel_decoder(bcR)
        '''
        4. Call the source decoder and simulate
        the sink to recover the transmitted image.
        '''
vR = source.source_Decoder(bfR, x, y, True, 8)
sumidero = Image.fromarray(vR)
sumidero.show()
        #sumidero.save('./salida.jpg')
Python | def simulation_4(self):
        '''
        1. Initialize the information source
        and call the source encoder
        '''
        fuente_info = source.image_source(path)
        x, y, z = fuente_info.shape
        vT, bfT, r = source.source_encoder(fuente_info)
        '''
        2. Dimensional parameters: m is the number
        of 1xk packets obtained from the source
        encoder, in bits; n > k
        '''
        m, k = bfT.shape
        n = 16
        '''
        3. Initialize the system object, which provides
        the channel encoder and the binary symmetric channel
        '''
        # Create the system object:
        sistema = channel.com_sys(m, k, n)
        bcT = sistema.channel_encoder(bfT)
        '''
        4. Initialize the modulator, the ideal
        medium and the demodulator
        '''
        modem = modulation.modulation(m, k, n)
        an = modem.symbol_modulator(bcT)
        modem.PAM(an)
        yn = modem.symbol_demodulator(an)
        '''
        5. Initialize the channel decoder.
        '''
        bfR = sistema.channel_decoder(yn)
        '''
        6. Call the source decoder and simulate
        the sink to recover the transmitted image.
        '''
vR = source.source_Decoder(bfR, x, y, True, 8)
sumidero = Image.fromarray(vR)
        sumidero.show()
Python | def simulation_5(self):
        '''
        1. Initialize the information source
        and call the source encoder
        '''
        fuente_info = source.image_source(path)
        x, y, z = fuente_info.shape
        vT, bfT, r = source.source_encoder(fuente_info)
        '''
        2. Dimensional parameters: m is the number
        of 1xk packets obtained from the source
        encoder, in bits; n > k
        '''
        m, k = bfT.shape
        n = 16
        '''
        3. Initialize the system object, which provides
        the channel encoder and the binary symmetric channel
        '''
        sistema = channel.com_sys(m, k, n)  # Create the system object
        bcT = sistema.channel_encoder(bfT)
        '''
        4. Initialize the modulator, the transmission
        medium with 20 % noise and the
        demodulator
        '''
        modem = modulation.modulation(m, k, n)
        an = modem.symbol_modulator(bcT)
        modem.PAM(an)
        an = modem.noised_transmitter(an, 20)
        yn = modem.symbol_demodulator(an)
        '''
        5. Initialize the channel decoder.
        '''
        bfR = sistema.channel_decoder(yn)
        '''
        6. Call the source decoder and simulate
        the sink to recover the transmitted image.
        '''
vR = source.source_Decoder(bfR, x, y, True, 8)
sumidero = Image.fromarray(vR)
sumidero.show()
        #sumidero.save('./salida2.jpg')
Python | def simulation_6(self):
        '''
        1. Initialize the information source
        and call the source encoder
        '''
        fuente_info = source.image_source(path)
        x, y, z = fuente_info.shape
        vT, bfT, r = source.source_encoder(fuente_info)
        '''
        2. Dimensional parameters: m is the number
        of 1xk packets obtained from the source
        encoder, in bits; n > k
        '''
        m, k = bfT.shape
        n = 16
        '''
        3. Initialize the system object, which provides
        the channel encoder and the binary symmetric channel
        '''
        # Create the system object:
        sistema = channel.com_sys(m, k, n)
        bcT = sistema.channel_encoder(bfT)
        '''
        4. Initialize the modulator, the ideal
        medium and the demodulator
        '''
        modem = modulation.modulation(m, k, n)
        an = modem.symbol_modulator(bcT)
        sT = modem.ask(an)
        yn = modem.ask_demodulator(sT)
        '''
        5. Initialize the channel decoder.
        '''
        bfR = sistema.channel_decoder(yn)
        '''
        6. Call the source decoder and simulate
        the sink to recover the transmitted image.
        '''
vR = source.source_Decoder(bfR, x, y, True, 8)
sumidero = Image.fromarray(vR)
sumidero.show()
        #sumidero.save('noised.jpg')
Python | def simulation_7(self):
        '''
        1. Initialize the information source
        and call the source encoder
        '''
        fuente_info = source.image_source(path)
        x, y, z = fuente_info.shape
        vT, bfT, r = source.source_encoder(fuente_info)
        '''
        2. Dimensional parameters: m is the number
        of 1xk packets obtained from the source
        encoder, in bits; n > k
        '''
        m, k = bfT.shape
        n = 16
        '''
        3. Initialize the system object, which provides
        the channel encoder and the binary symmetric channel
        '''
        # Create the system object:
        sistema = channel.com_sys(m, k, n)
        bcT = sistema.channel_encoder(bfT)
        '''
        4. Initialize the modulator, the noisy ASK
        transmission medium (50 % noise) and the demodulator
        '''
        modem = modulation.modulation(m, k, n)
        an = modem.symbol_modulator(bcT)
        sT = modem.ask(an)
        sN = modem.ask_noised_transmitter(sT, 50)
        yn = modem.ask_demodulator(sN)
        '''
        5. Initialize the channel decoder.
        '''
        bfR = sistema.channel_decoder(yn)
        '''
        6. Call the source decoder and simulate
        the sink to recover the transmitted image.
        '''
vR = source.source_Decoder(bfR, x, y, True, 8)
sumidero = Image.fromarray(vR)
sumidero.show()
        #sumidero.save('noised.jpg')
Python | def generate_nodeNumbers(self, sourceNodeName, sinkNodeName, nodesNumber, nodesList1, nodesList2, del_nodeNums, is_edgeShort=False):
"""
Generates initial ordinal number for nodes
"""
# 1: for short edge's nodes
if is_edgeShort:
self.shorts += 1
            # (1) one node already has an ordinal number, the other doesn't: reuse the existing number for both nodes
if sourceNodeName in nodesList1.keys() and sinkNodeName not in nodesList1.keys():
self.nodes += 1
sourceNodeNumber = nodesList1[sourceNodeName]
sinkNodeNumber = sourceNodeNumber
nodesList1[sinkNodeName] = sinkNodeNumber
nodesList2[sinkNodeNumber] = nodesList2.get(sinkNodeNumber, list()) + [sinkNodeName]
elif sourceNodeName not in nodesList1.keys() and sinkNodeName in nodesList1.keys():
self.nodes += 1
sinkNodeNumber = nodesList1[sinkNodeName]
sourceNodeNumber = sinkNodeNumber
nodesList1[sourceNodeName] = sourceNodeNumber
nodesList2[sourceNodeNumber] = nodesList2.get(sourceNodeNumber, list()) + [sourceNodeName]
            # (2) both nodes already have ordinal numbers: drop the larger number and keep the smaller one for both nodes
elif sourceNodeName in nodesList1.keys() and sinkNodeName in nodesList1.keys():
NodeNumber = min(nodesList1[sourceNodeName], nodesList1[sinkNodeName])
del_nodeNum = max(nodesList1[sourceNodeName], nodesList1[sinkNodeName])
del_nodeNums.append(del_nodeNum)
# print("{} is del".format(del_nodeNum))
if NodeNumber == nodesList1[sourceNodeName]:
nodesList2[NodeNumber] = nodesList2.get(NodeNumber, list()) + [sinkNodeName]
nodesList2[del_nodeNum].remove(sinkNodeName)
nodesList1[sinkNodeName] = NodeNumber
else:
nodesList2[NodeNumber] = nodesList2.get(NodeNumber, list()) + [sourceNodeName]
nodesList2[del_nodeNum].remove(sourceNodeName)
nodesList1[sourceNodeName] = NodeNumber
            # (3) neither node has an ordinal number yet: generate a single number shared by both nodes
else:
self.nodes += 2
nodesNumber += 1
nodesList1[sourceNodeName] = nodesNumber
nodesList1[sinkNodeName] = nodesNumber
nodesList2[nodesNumber] = nodesList2.get(nodesNumber, list()) + [sourceNodeName, sinkNodeName]
        # 2: for a non-short edge, assign a separate number to each node that does not have one yet
else:
for NodeName in [sourceNodeName, sinkNodeName]:
if NodeName not in nodesList1.keys():
self.nodes += 1
nodesNumber += 1
nodesList1[NodeName] = nodesNumber
nodesList2[nodesNumber] = nodesList2.get(nodesNumber, list()) + [NodeName]
        return nodesNumber, nodesList1, nodesList2, del_nodeNums
Python | def convert_network_into_graph(self, filename, filepath=""):
"""
Preprocessing of Spice files and build the grid data structure.
:return:
"""
print("\n-----------Convert powerGridFile into graph-----------")
print(filepath+filename)
file = open(filepath+filename, 'r')
self.name = filename.split(".")[0]
nodesNumber = 0
nodesList1 = {"0": 0} # {nodeName:nodeNumber}
nodesList2 = {0: ["0"]} # {nodeNumber:[nodeName1,nodeName2,……]}
        del_nodeNums = []  # node numbers left without corresponding nodes because of short circuits
        temp = []  # temporarily stores every non-short edge: [[sourceNodeName, sinkNodeName, branchValue, edgeType], ...]
for line in file.readlines():
is_edgeShort = False
if ".end" in line:
break
elif ".op" in line or "*" in line:
continue
else:
self.edges += 1
edgeName, sourceNodeName, sinkNodeName, branchValue = line.split()
edgeType = edgeName[0].upper()
branchValue = float(branchValue)
if branchValue == 0.0 and edgeType == "V":
# print('short edge:', line)
is_edgeShort = True
else:
temp.append([sourceNodeName, sinkNodeName, branchValue, edgeType])
nodesNumber, nodesList1, nodesList2, del_nodeNums = self.generate_nodeNumbers(sourceNodeName, sinkNodeName,
nodesNumber, nodesList1, nodesList2, del_nodeNums, is_edgeShort)
self.nodes += 1
file.close()
print("------------read powerGridFile over------------")
# generate nodeDict from nodeList2,nodeList1
if len(del_nodeNums):
print("length of deleted nodeNumbers is not 0, but {}".format(len(del_nodeNums)))
print("------------sort again for nodeNumber----------")
nodesList3 = list(nodesList2.items())
nodesList3.sort(key=lambda x: x[0], reverse=False)
i = 0
for num_nodeName in nodesList3:
if num_nodeName[1]:
if num_nodeName[0] != i:
for nodeName in num_nodeName[1]:
nodesList1[nodeName] = i
self.nodeDict[i] = self.nodeDict.get(i, list()) + [nodeName]
else:
for nodeName in num_nodeName[1]:
self.nodeDict[i] = self.nodeDict.get(i, list()) + [nodeName]
i += 1
else:
for nodeName in nodesList1:
self.nodeDict[nodesList1[nodeName]] = self.nodeDict.get(nodesList1[nodeName], list()) + [nodeName]
# add edge to graph
print("------------add edge to graph------------------")
for edge in temp:
sourceNodeName, sinkNodeName, branchValue, edgeType = edge
sourceNodeNumber = nodesList1[sourceNodeName]
sinkNodeNumber = nodesList1[sinkNodeName]
if edgeType not in "RIV":
raise TypeError("edge is not in proper format!")
else:
self.add_edge_to_graph(sourceNodeNumber, sinkNodeNumber, branchValue, edgeType)
# print(sourceNodeName, ":", sourceNodeNumber, sinkNodeName, ":", sinkNodeNumber, branchValue, edgeType)
print("Parse over!")
print("Total number of Nodes(include shorts):", self.nodes)
print("Total number of Edges(include shorts):", self.edges)
print("Total number of short Edges = ", self.shorts)
print("Total number of Current Source = ", self.currentSource)
print("Total number of Voltage Source(not include shorts):", self.voltageSource)
# print("nodesDict:\n", self.nodeDict)
print("-------------------------------------------------------\n") | def convert_network_into_graph(self, filename, filepath=""):
"""
Preprocessing of Spice files and build the grid data structure.
:return:
"""
print("\n-----------Convert powerGridFile into graph-----------")
print(filepath+filename)
file = open(filepath+filename, 'r')
self.name = filename.split(".")[0]
nodesNumber = 0
nodesList1 = {"0": 0} # {nodeName:nodeNumber}
nodesList2 = {0: ["0"]} # {nodeNumber:[nodeName1,nodeName2,……]}
del_nodeNums = [] # Some Nodenumbers do not have corresponding nodes due to short circuit
temp = [] # Temporarily stores information for non-short-circuited "V" edge: [[sourceNodeName, sinkNodeName, branchValue, edgeType],[……],……]
for line in file.readlines():
is_edgeShort = False
if ".end" in line:
break
elif ".op" in line or "*" in line:
continue
else:
self.edges += 1
edgeName, sourceNodeName, sinkNodeName, branchValue = line.split()
edgeType = edgeName[0].upper()
branchValue = float(branchValue)
if branchValue == 0.0 and edgeType == "V":
# print('short edge:', line)
is_edgeShort = True
else:
temp.append([sourceNodeName, sinkNodeName, branchValue, edgeType])
nodesNumber, nodesList1, nodesList2, del_nodeNums = self.generate_nodeNumbers(sourceNodeName, sinkNodeName,
nodesNumber, nodesList1, nodesList2, del_nodeNums, is_edgeShort)
self.nodes += 1
file.close()
print("------------read powerGridFile over------------")
# generate nodeDict from nodeList2,nodeList1
if len(del_nodeNums):
print("length of deleted nodeNumbers is not 0, but {}".format(len(del_nodeNums)))
print("------------sort again for nodeNumber----------")
nodesList3 = list(nodesList2.items())
nodesList3.sort(key=lambda x: x[0], reverse=False)
i = 0
for num_nodeName in nodesList3:
if num_nodeName[1]:
if num_nodeName[0] != i:
for nodeName in num_nodeName[1]:
nodesList1[nodeName] = i
self.nodeDict[i] = self.nodeDict.get(i, list()) + [nodeName]
else:
for nodeName in num_nodeName[1]:
self.nodeDict[i] = self.nodeDict.get(i, list()) + [nodeName]
i += 1
else:
for nodeName in nodesList1:
self.nodeDict[nodesList1[nodeName]] = self.nodeDict.get(nodesList1[nodeName], list()) + [nodeName]
# add edge to graph
print("------------add edge to graph------------------")
for edge in temp:
sourceNodeName, sinkNodeName, branchValue, edgeType = edge
sourceNodeNumber = nodesList1[sourceNodeName]
sinkNodeNumber = nodesList1[sinkNodeName]
if edgeType not in "RIV":
raise TypeError("edge is not in proper format!")
else:
self.add_edge_to_graph(sourceNodeNumber, sinkNodeNumber, branchValue, edgeType)
# print(sourceNodeName, ":", sourceNodeNumber, sinkNodeName, ":", sinkNodeNumber, branchValue, edgeType)
print("Parse over!")
print("Total number of Nodes(include shorts):", self.nodes)
print("Total number of Edges(include shorts):", self.edges)
print("Total number of short Edges = ", self.shorts)
print("Total number of Current Source = ", self.currentSource)
print("Total number of Voltage Source(not include shorts):", self.voltageSource)
# print("nodesDict:\n", self.nodeDict)
print("-------------------------------------------------------\n") |
Python | def init_sparse_matrix(self):
"""
Initialize the sparse matrix in DOK format.
"""
print("-------------Initialize the sparse matrix--------------")
order = self.nodes - self.shorts - 1
if order > 0:
self.order = order
print("order:", order)
self.sparseMatrix = dok_matrix((order, order))
self.currentVector = np.zeros(order)
else:
raise ValueError("sparseMatrix's order is <= 0!") | def init_sparse_matrix(self):
"""
Initialize the sparse matrix in DOK format.
"""
print("-------------Initialize the sparse matrix--------------")
order = self.nodes - self.shorts - 1
if order > 0:
self.order = order
print("order:", order)
self.sparseMatrix = dok_matrix((order, order))
self.currentVector = np.zeros(order)
else:
raise ValueError("sparseMatrix's order is <= 0!") |
Python | def fill_sparse_matrix(self):
"""
Convert it into a linear system solving problem: Ax=b.
Generate sparse matrix A in DOK format and then convert it to CSC format.
"""
self.init_sparse_matrix()
print("----------------fill the sparse matrix-----------------")
# "V" edgeType
print("for \"V\" edgeType")
for nodeNumber, nodeVolt in self.voltNodeDict.items():
self.sparseMatrix[nodeNumber - 1, nodeNumber - 1] = 1
self.currentVector[nodeNumber - 1] = nodeVolt
# 'I' edgeType
print("for \"I\" edgeType")
for edge in self.currentList:
sourceNodeNo = edge.sourceNodeNumber
sinkNodeNo = edge.sinkNodeNumber
if sinkNodeNo == 0:
self.currentVector[sourceNodeNo - 1] -= edge.branchValue
elif sourceNodeNo == 0:
self.currentVector[sinkNodeNo - 1] += edge.branchValue
else:
self.currentVector[sourceNodeNo - 1] -= edge.branchValue # current out from sourceNode of I: -
self.currentVector[sinkNodeNo - 1] += edge.branchValue # current in to sinkNode of I: +
# 'R' edgeType
print("for \"R\" edgeType")
for nodeNumber in self.edgeResNodeDict:
if nodeNumber in self.voltNodeDict.keys():
continue
else:
for edge in self.edgeResNodeDict[nodeNumber]:
if edge.is_sourceNode:
otherNodeNo = edge.sinkNodeNumber
else:
otherNodeNo = edge.sourceNodeNumber
self.sparseMatrix[nodeNumber - 1, nodeNumber - 1] += edge.branchValue
if otherNodeNo == 0:
continue
elif otherNodeNo in self.voltNodeDict.keys():
self.currentVector[nodeNumber - 1] += self.voltNodeDict[otherNodeNo] * edge.branchValue
else:
self.sparseMatrix[nodeNumber - 1, otherNodeNo - 1] -= edge.branchValue
self.sparseMatrix = csc_matrix(self.sparseMatrix)
# save_npz('sparse_matrix_{}.npz'.format(self.name), self.sparseMatrix)
# ----------Display MNA matrix and current vector-----------
# print("MNA sparseMatrix:")
# print(self.sparseMatrix)
# print("currentVector:")
# print(self.currentVector)
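As a worked miniature of the stamping pattern used above (values and names are made up, not the project's API): a 1 kΩ resistor between nodes 1 and 2, a 2 kΩ resistor from node 2 to ground and a 1 mA source injected into node 1 give a 2x2 system that can be assembled in DOK form and solved after conversion to CSC.

import numpy as np
from scipy.sparse import dok_matrix
from scipy.sparse.linalg import spsolve

G = dok_matrix((2, 2))
b = np.zeros(2)

g12, g20 = 1.0 / 1e3, 1.0 / 2e3   # conductances of the two resistors
# stamp the resistor between node 1 and node 2
G[0, 0] += g12
G[1, 1] += g12
G[0, 1] -= g12
G[1, 0] -= g12
# stamp the resistor between node 2 and ground (only the diagonal entry remains)
G[1, 1] += g20
# 1 mA current source injected into node 1
b[0] += 1e-3

v = spsolve(G.tocsc(), b)
print(v)   # node voltages; for these values [3.0, 2.0]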
Python | def node_voltage_solver(self):
"""
Interface realization of solving module of linear equations
"""
print("\n-----------------Node voltage solution-----------------")
current_vector = self.currentVector
matrix = self.sparseMatrix
method = self.method
ordering_method = self.orderingMethod
if method == 'LU':
print("--------------1: LU Decomposition--------------")
if ordering_method is None:
LU = splu(matrix)
elif ordering_method == "MMD_AT_PLUS_A":
LU = splu(matrix, permc_spec=ordering_method, diag_pivot_thresh=0.0, options=dict(SymmetricMode=True))
else:
LU = splu(matrix, permc_spec=ordering_method)
self.solVec = LU.solve(current_vector)
elif method == 'CG':
print("------------2: Conjugate Gradient--------------")
self.solVec, exitCode = cg(matrix, current_vector)
if exitCode == 0:
print('0 : successful')
elif exitCode > 0:
print('>0 : convergence to tolerance not achieved, number of iterations:{}'.format(exitCode))
else:
print('<0 : illegal input or breakdown')
elif method == 'cholesky':
print("----------3: Cholesky Decomposition------------")
if ordering_method is None:
factor = cholesky(matrix)
else:
factor = cholesky(matrix, ordering_method=ordering_method)
self.solVec = factor(current_vector)
else:
raise NotImplementedError("no method \"{}\"".format(method)) | def node_voltage_solver(self):
"""
Interface realization of solving module of linear equations
"""
print("\n-----------------Node voltage solution-----------------")
current_vector = self.currentVector
matrix = self.sparseMatrix
method = self.method
ordering_method = self.orderingMethod
if method == 'LU':
print("--------------1: LU Decomposition--------------")
if ordering_method is None:
LU = splu(matrix)
elif ordering_method == "MMD_AT_PLUS_A":
LU = splu(matrix, permc_spec=ordering_method, diag_pivot_thresh=0.0, options=dict(SymmetricMode=True))
else:
LU = splu(matrix, permc_spec=ordering_method)
self.solVec = LU.solve(current_vector)
elif method == 'CG':
print("------------2: Conjugate Gradient--------------")
self.solVec, exitCode = cg(matrix, current_vector)
if exitCode == 0:
print('0 : successful')
elif exitCode > 0:
print('>0 : convergence to tolerance not achieved, number of iterations:{}'.format(exitCode))
else:
print('<0 : illegal input or breakdown')
elif method == 'cholesky':
print("----------3: Cholesky Decomposition------------")
if ordering_method is None:
factor = cholesky(matrix)
else:
factor = cholesky(matrix, ordering_method=ordering_method)
self.solVec = factor(current_vector)
else:
raise NotImplementedError("no method \"{}\"".format(method)) |
Python | def print_solution(self, filepath=""):
"""
Output node voltage values to the solution file
"""
print("\n--------------------Print solution---------------------")
print("---------------------Voltage-------------------")
if self.orderingMethod is None:
solution_file = open(filepath + self.name + '_' + self.method + ".solution", 'w')
else:
solution_file = open(filepath + self.name + '_' + self.method + '_' + self.orderingMethod + ".solution", 'w')
# print(self.nodeDict)
for nodeName in self.nodeDict[0]:
solution_file.write("{} {:e}\n".format(nodeName, 0))
for i in range(len(self.nodeDict) - 1):
for nodeName in self.nodeDict[i + 1]:
solution_file.write("{} {:e}\n".format(nodeName, self.solVec[i]))
        solution_file.close()
Python | def read_excel(self, fname):
"""Read Excel file data and load into the scenario.
Parameters
----------
fname : string
path to file
"""
funcs = {
'set': self.add_set,
'par': self.add_par,
}
dfs = pd_read(fname, sheet_name=None)
# get item-type mapping
df = dfs['ix_type_mapping']
ix_types = dict(zip(df['item'], df['ix_type']))
# fill in necessary items first (only sets for now)
col = 0 # special case for prefill set Series
def is_prefill(x):
return dfs[x].columns[0] == col and len(dfs[x].columns) == 1
prefill = [x for x in dfs if is_prefill(x)]
for name in prefill:
data = list(dfs[name][col])
if len(data) > 0:
ix_type = ix_types[name]
funcs[ix_type](name, data)
# fill all other pars and sets, skipping those already done
skip_sheets = ['ix_type_mapping'] + prefill
for sheet_name, df in dfs.items():
if sheet_name not in skip_sheets and not df.empty:
ix_type = ix_types[sheet_name]
                funcs[ix_type](sheet_name, df)
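The reader above expects a workbook with one sheet per item plus an 'ix_type_mapping' sheet that maps each sheet name to 'set' or 'par', and treats a sheet whose only column is named 0 as a "prefill" set. A hedged sketch of producing such a file with plain pandas (sheet names, items and values are an assumed example of the layout, not taken from the real exporter):

import pandas as pd

mapping = pd.DataFrame({'item': ['node', 'demand'], 'ix_type': ['set', 'par']})
node = pd.DataFrame({0: ['Vienna', 'Graz']})            # single column named 0 -> prefill set
demand = pd.DataFrame({'node': ['Vienna', 'Graz'],
                       'value': [10.0, 4.0],
                       'unit': ['GWa', 'GWa']})

with pd.ExcelWriter('scenario_data.xlsx') as writer:
    mapping.to_excel(writer, sheet_name='ix_type_mapping', index=False)
    node.to_excel(writer, sheet_name='node', index=False)
    demand.to_excel(writer, sheet_name='demand', index=False)

# read_excel(...) above would then call add_set('node', [...]) and add_par('demand', <DataFrame>).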
Python | async def call(self, fn: Callable[[any], any], key: str, *args, **kwargs) -> any:
"""
        Asynchronously call `fn` with the given `*args` and `**kwargs` exactly once.
        `key` is used to detect and coalesce duplicate calls.
        `key` is only held for the duration of this function; after that it is removed and `key` can be used again.
"""
if not isinstance(key, str):
raise TypeError("Key should be a str")
if not isinstance(fn, Callable):
raise TypeError("fn should be a callable")
        # this part does not use a with-statement
        # because the object that needs to be awaited differs (self.lock vs self.m[key].ev)
await self.lock.acquire()
if key in self.m:
# key exists here means
# another thread is currently making the call
# just need to wait
cl = self.m[key]
self.lock.release()
await cl.ev.wait()
if cl.err:
raise cl.err
return cl.res
cl = CallLockAsync()
self.m[key] = cl
self.lock.release()
try:
cl.res = await fn(*args, **kwargs)
cl.err = None
except Exception as e:
cl.res = None
cl.err = e
finally:
cl.ev.set()
# delete the calllock, so next call
# with same key can pass through
async with self.lock:
del(self.m[key])
if cl.err is not None:
raise cl.err
        return cl.res
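A small usage sketch of the coalescing behaviour, assuming the method above lives on a class exposed as, say, SingleflightAsync (the class name is an assumption; only the call signature is taken from the code above):

import asyncio

async def fetch_profile(user_id):
    # stand-in for an expensive backend call
    await asyncio.sleep(0.1)
    print("backend hit for", user_id)
    return {"id": user_id}

async def main():
    sf = SingleflightAsync()   # assumed wrapper class holding the `call` method above
    # three concurrent callers share one key, so the backend is hit only once
    results = await asyncio.gather(
        sf.call(fetch_profile, "user:42", 42),
        sf.call(fetch_profile, "user:42", 42),
        sf.call(fetch_profile, "user:42", 42),
    )
    print(results)

asyncio.run(main())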
Python | def call(self, fn: Callable[[any], any], key: str, *args, **kwargs) -> any:
"""
        Call `fn` with the given `*args` and `**kwargs` exactly once.
        `key` is used to detect and coalesce duplicate calls.
        `key` is only held for the duration of this function; after that it is removed and `key` can be used again.
"""
if not isinstance(key, str):
raise TypeError("Key should be a str")
if not isinstance(fn, Callable):
raise TypeError("fn should be a callable")
        # this part does not use a with-statement
        # because the object that needs to be waited on differs (self.lock vs self.m[key].ev)
self.lock.acquire(True)
if key in self.m:
# key exists here means
# another thread is currently making the call
# just need to wait
cl = self.m[key]
self.lock.release()
cl.ev.wait()
if cl.err:
raise cl.err
return cl.res
cl = CallLockGevent()
self.m[key] = cl
self.lock.release()
try:
cl.res = fn(*args, **kwargs)
cl.err = None
except Exception as e:
cl.res = None
cl.err = e
finally:
cl.ev.set()
# delete the calllock, so next call
# with same key can pass through
with self.lock:
del(self.m[key])
if cl.err is not None:
raise cl.err
        return cl.res
Python | def complete_discarded(
cls, tasks: Iterable['InferenceTask'], results: Iterable['InferenceResult'],
) -> Iterator['InferenceResult']:
"""
Generate InferenceResults based on successful inference results and
fallback results of discarded tasks.
"""
iterable_results = iter(results)
try:
for task in tasks:
if task.is_discarded:
yield task.error
else:
yield next(iterable_results)
except StopIteration:
raise StopIteration(
                'The results do not match the number of tasks'
            ) from None
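Illustration of how discarded tasks are zipped back together with the successful results, using simple stand-in objects rather than the real InferenceTask/InferenceResult classes:

from types import SimpleNamespace

tasks = [
    SimpleNamespace(is_discarded=False, error=None),
    SimpleNamespace(is_discarded=True, error="fallback: bad input"),
    SimpleNamespace(is_discarded=False, error=None),
]
results = ["result-A", "result-C"]   # only the two non-discarded tasks produced results

iterable_results = iter(results)
merged = [task.error if task.is_discarded else next(iterable_results) for task in tasks]
print(merged)   # ['result-A', 'fallback: bad input', 'result-C']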
Python | def discard(self, err_msg="", **context):
"""
Discard this task. All subsequent steps will be skipped.
Parameters
----------
err_msg: str
The reason why this task got discarded. It would be the body of
HTTP Response, a field in AWS lambda event or CLI stderr message.
*other contexts
Other contexts of the fallback ``InferenceResult``
"""
self.is_discarded = True
self.error = InferenceError(err_msg=err_msg, **context)
        return self
Python | def exists(self, arg):
"""
Determine if arg is to be considered existing.
:param arg: Either a model instance or (possibly invalid!) data object.
:return: Whether we treat this as non-existing instance.
"""
        raise NotImplementedError
Python | def _set_all_record_contents(rrset: models.RRset, rrs):
"""
Updates this RR set's resource records, discarding any old values.
:param rrset: the RRset at which we overwrite all RRs
:param rrs: list of RR representations
"""
record_contents = [rr['content'] for rr in rrs]
try:
rrset.save_records(record_contents)
except django.core.exceptions.ValidationError as e:
        raise serializers.ValidationError(e.messages, code='record-content')
Python | def filter_queryset(self, value, queryset, field_name):
"""
Filter the queryset to all instances matching the given value on the specified lookup field.
"""
filter_kwargs = {'%s__%s' % (self.lookup_field or field_name, self.lookup): value}
        return qs_filter(queryset, **filter_kwargs)
Python | def api_anon() -> DeSECAPIV1Client:
"""
Anonymous access to the API.
"""
    return DeSECAPIV1Client()
Python | def api_user() -> DeSECAPIV1Client:
"""
Access to the API with a fresh user account (zero domains, one token). Authorization header
is preconfigured, email address and password are randomly chosen.
"""
api = DeSECAPIV1Client()
email = random_email()
password = random_password()
api.register(email, password)
api.login(email, password)
    return api
Python | def api_user_domain(api_user) -> DeSECAPIV1Client:
"""
Access to the API with a fresh user account that owns a domain with random name. The domain has
no records other than the default ones.
"""
api_user.domain_create(random_domainname())
    return api_user
Python | def extract_words(text, word_count):
"""
Extract a list of words from a text in sequential order.
:param text: source text, tokenized
:param word_count: number of words to return
:return: list list of words
"""
text_length = len(text)
if word_count > text_length:
raise RuntimeError('Cannot extract {} words from a text of {} words.'.format(word_count, text_length))
# Determine start index
    max_range = text_length - word_count
    # randrange's upper bound is exclusive; +1 keeps the last window reachable
    # and avoids randrange(0) when word_count == text_length
    start_range = random.randrange(max_range + 1)
    return text[start_range:start_range + word_count]
Python | def initialize_glyph_cache(glyph_set, glyph_source):
"""
Initialize the glyph cache variable given a glyph set and the location of glyph files.
:param glyph_set: the glyphs to cache
:param glyph_source: the location of glyph files
:return: None
"""
regex = re.compile(r'[^.]+\.(jpg|png)$')
for glyph_index, glyph in enumerate(glyph_set):
# Load directory
directory = str(glyph)
get_glyph_file.cache[directory] = {}
glyph_directory = os.path.join(glyph_source, directory)
files = os.listdir(glyph_directory)
glyphs = list(filter(lambda x: regex.match(x), files))
for glyph in glyphs:
            get_glyph_file.cache[directory][glyph] = None
Python | def estimate_word_width(word, glyph_set, glyph_width_probabilities):
"""
Given a word, estimate its width given the glyph average width.
:param word: word to estimate the width
:param glyph_set: list supported glyphs
:param glyph_width_probabilities: glyph width (average width + number of samples)
:return: int the expected word width
"""
width = 0
glyph_map = {}
for glyph_index, glyph in enumerate(glyph_set):
glyph_map[chr(glyph)] = glyph_index
for glyph in word:
index = glyph_map[glyph]
glyph_width = glyph_width_probabilities[index, 0]
width += glyph_width
    return int(width)
Python | def extract_nouns(string):
"""Extract all the nouns from the given string"""
nouns = []
doc = nlp(string)
for token in doc:
if token.pos_ == "PROPN" or token.pos_ == "NOUN":
nouns.append(token.text)
    return nouns
Python | def display_image(file_id):
"""Display the image with the provided file_id"""
filepath = file_id_to_fname(file_id)
print(filepath)
image = mpimg.imread(filepath)
plt.imshow(image)
    plt.show()
Python | def parse(cls, data, **kwargs):
"""Parse a JSON object into a model instance."""
data = data or {}
item = cls() if data else None
content = deepcopy(data)
setattr(item, "content", content)
data.update(kwargs)
for key, value in data.items():
try:
setattr(item, key, value)
except AttributeError:
pass
        return item
Python | def parse_list(cls, data, **kwargs):
"""Parse a list of JSON objects into a result set of model instances."""
results = ResultSet()
data = data or []
for obj in data:
if obj:
results.append(cls.parse(obj, **kwargs))
        return results
Python | def parse_dict(cls, data, **kwargs):
"""Parse a dict of JSON objects into a result set of model instances."""
results = ResultSet()
data = data or {}
for obj in data.keys():
if obj:
results.append(cls.parse(data[obj], **kwargs))
        return results
Python | def _list_groups(request, template, query):
"""Lists groups from given query."""
sort_form = SortForm(request.GET)
show_pagination = False
if sort_form.is_valid():
query = query.order_by(sort_form.cleaned_data['sort'], 'name')
else:
query = query.order_by('name')
paginator = Paginator(query, settings.ITEMS_PER_PAGE)
page = request.GET.get('page', 1)
try:
groups = paginator.page(page)
except PageNotAnInteger:
groups = paginator.page(1)
except EmptyPage:
groups = paginator.page(paginator.num_pages)
if paginator.count > settings.ITEMS_PER_PAGE:
show_pagination = True
data = dict(groups=groups, page=page, sort_form=sort_form, show_pagination=show_pagination)
return render(request, template, data) |
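The pagination fallback used by _list_groups (and again in show below) is standard Django Paginator behaviour; a small standalone illustration on a plain list, assuming only that Django is installed:

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator

def get_page(items, requested_page, per_page=2):
    paginator = Paginator(items, per_page)
    try:
        return paginator.page(requested_page)
    except PageNotAnInteger:
        return paginator.page(1)                     # non-numeric page -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)   # page too large -> last page

page = get_page(['a', 'b', 'c', 'd', 'e'], 'not-a-number')
print(page.number, list(page.object_list))  # 1 ['a', 'b']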
Python | def index_groups(request):
"""Lists all public groups (in use) on Mozillians."""
query = (Group.objects.filter(members__is_vouched=True)
.annotate(num_members=Count('members')))
template = 'groups/index_groups.html'
return _list_groups(request, template, query) |
Python | def index_skills(request):
"""Lists all public groups (in use) on Mozillians."""
query = (Skill.objects.filter(members__is_vouched=True)
.annotate(num_members=Count('members')))
template = 'groups/index_skills.html'
return _list_groups(request, template, query) |
Python | def search(request, searched_object=Group):
"""Simple wildcard search for a group using a GET parameter.
Used for group/skill/language auto-completion.
"""
term = request.GET.get('term', None)
if request.is_ajax() and term:
groups = searched_object.search(term).values_list('name', flat=True)
return HttpResponse(json.dumps(list(groups)),
mimetype='application/json')
return HttpResponseBadRequest() |
Python | def show(request, url, alias_model, template):
"""List all vouched users with this group."""
group_alias = get_object_or_404(alias_model, url=url)
if group_alias.alias.url != url:
return redirect('groups:show_group', url=group_alias.alias.url)
group = group_alias.alias
in_group = group.members.filter(user=request.user).exists()
profiles = group.members.vouched()
page = request.GET.get('page', 1)
paginator = Paginator(profiles, settings.ITEMS_PER_PAGE)
try:
people = paginator.page(page)
except PageNotAnInteger:
people = paginator.page(1)
except EmptyPage:
people = paginator.page(paginator.num_pages)
show_pagination = paginator.count > settings.ITEMS_PER_PAGE
profile = request.user.userprofile
hide_leave_group_button = (hasattr(group, 'steward') and
profile == group.steward)
data = dict(people=people,
group=group,
in_group=in_group,
show_pagination=show_pagination,
hide_leave_group_button=hide_leave_group_button)
if isinstance(group, Group) and group.steward:
""" Get the most globally popular skills that appear in the group
Sort them with most members first
"""
skills = (Skill.objects
.filter(members__in=profiles)
.annotate(no_users=Count('members'))
.order_by('-no_users'))
data.update(skills=skills)
data.update(irc_channels=group.irc_channel.split(' '))
data.update(members=profiles.count())
return render(request, template, data) |
Python | def toggle_group_subscription(request, url):
"""Toggle the current user's membership of a group."""
group = get_object_or_404(Group, url=url)
profile = request.user.userprofile
# We don't operate on system groups using this view.
if not group.system:
if profile.groups.filter(id=group.id).exists():
profile.groups.remove(group)
else:
profile.groups.add(group)
update_basket_task.delay(profile.id)
return redirect(reverse('groups:show_group', args=[group.url])) |
Python | def toggle_skill_subscription(request, url):
"""Toggle the current user's membership of a group."""
skill = get_object_or_404(Skill, url=url)
profile = request.user.userprofile
if profile.skills.filter(id=skill.id).exists():
profile.skills.remove(skill)
else:
profile.skills.add(skill)
return redirect(reverse('groups:show_skill', args=[skill.url])) |
Python | def create(cls, **kwargs):
"""After creating User object, update ElasticSearch index."""
user = super(UserFactory, cls).create(**kwargs)
UserProfile.refresh_index(public_index=False)
UserProfile.refresh_index(public_index=True)
return user |
Python | def gravatar(email, default_avatar_url=settings.DEFAULT_AVATAR_URL,
size=175, rating='pg'):
"""Return the Gravatar URL for an email address."""
url = GRAVATAR_URL.format(emaildigest=md5(email).hexdigest())
url = urlparams(url, d=utils.absolutify(default_avatar_url),
s=size, r=rating)
return url |
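Stripped of the site-specific urlparams and absolutify helpers, the URL built above amounts to an MD5 digest of the email plus query parameters; a standalone sketch (the d, s and r parameters follow Gravatar's public API, the default image URL here is illustrative):

import hashlib
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode        # Python 2

def gravatar_url(email, default='https://example.com/default.png',
                 size=175, rating='pg'):
    digest = hashlib.md5(email.strip().lower().encode('utf-8')).hexdigest()
    query = urlencode({'d': default, 's': size, 'r': rating})
    return 'https://www.gravatar.com/avatar/{0}?{1}'.format(digest, query)

print(gravatar_url('someone@example.com'))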
Python | def field_with_attrs(bfield, **kwargs):
"""Allows templates to dynamically add html attributes to bound
fields from django forms.
Copied from bedrock.
"""
if kwargs.get('label', None):
bfield.label = kwargs['label']
bfield.field.widget.attrs.update(kwargs)
return bfield |
Python | def bootstrap(element):
"""Renders bootstrap forms in jinja2.
Takes an element that is either a field or an entire form and
renders the appropriate bootstrap elements.
"""
element_type = element.__class__.__name__.lower()
if element_type == 'boundfield':
template = get_template("bootstrapform/field.html")
context = Context({'field': element})
else:
template = get_template("bootstrapform/form.html")
context = Context({'form': element})
return mark_safe(template.render(context)) |
Python | def export_as_csv(modeladmin, request, queryset):
"""
Generic csv export admin action.
based on http://djangosnippets.org/snippets/1697/
"""
opts = modeladmin.model._meta
field_names = set([field.name for field in opts.fields])
if fields:
fieldset = set(fields)
field_names = field_names & fieldset
elif exclude:
excludeset = set(exclude)
field_names = field_names - excludeset
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = ('attachment; filename=%s.csv' %
unicode(opts).replace('.', '_'))
writer = csv.writer(response, delimiter=';')
if header:
writer.writerow(list(field_names))
for obj in queryset:
writer.writerow([unicode(getattr(obj, field)).encode('utf-8')
for field in field_names])
return response |
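export_as_csv reads fields, exclude and header without defining them, which suggests it is the inner function of an admin-action factory, as in the djangosnippets recipe it cites; a hedged sketch of the expected wrapper:

def export_as_csv_action(description="Export selected objects as CSV file",
                         fields=None, exclude=None, header=True):
    """Return an admin action that exports the selected rows as CSV.

    fields, exclude and header are captured by the closure below.
    """
    def export_as_csv(modeladmin, request, queryset):
        # Body as shown above, reading fields, exclude and header
        # from the enclosing scope.
        raise NotImplementedError

    export_as_csv.short_description = description
    return export_as_csv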
Python | def subscribe_to_basket(modeladmin, request, queryset):
"""Subscribe to Basket or update details of already subscribed."""
ts = [(mozillians.users.tasks.update_basket_task
.subtask(args=[user.userprofile.id]))
for user in queryset]
TaskSet(ts).apply_async()
messages.success(request, 'Basket update started.') |
Python | def index_profiles(self, request):
"""Fire an Elastic Search Index Profiles task."""
index_all_profiles()
messages.success(request, 'Profile indexing started.')
return HttpResponseRedirect(reverse('admin:auth_user_changelist')) |
Python | def _get_privacy_fields(self, privacy_level):
"""Helper which returns a dict with privacy fields set to privacy_level"""
data = {}
for field in UserProfilePrivacyModel._meta._fields():
data[field.name] = privacy_level
# privacy_tshirt field has only one level of privacy available
data['privacy_tshirt'] = PRIVILEGED
return data |
Python | def calculate_username(email):
"""Calculate username from email address.
Import modules here to avoid circular import problems.
"""
from django.contrib.auth.models import User
email = email.split('@')[0]
username = re.sub(r'[^\w.@+-]', '-', email)
username = username[:USERNAME_MAX_LENGTH]
suggested_username = username
count = 0
while User.objects.filter(username=suggested_username).exists():
count += 1
suggested_username = '%s%d' % (username, count)
if len(suggested_username) > USERNAME_MAX_LENGTH:
# We failed to calculate a name for you, default to an
# email digest.
return base64.urlsafe_b64encode(
hashlib.sha1(email).digest()).rstrip('=')
return suggested_username |
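Ignoring the database uniqueness loop, the derivation above reduces to slugging the local part of the email address; a standalone illustration (the length limit value is an assumption, not taken from the original settings):

import re

USERNAME_MAX_LENGTH = 30  # assumed limit

def suggest_username(email):
    local_part = email.split('@')[0]
    username = re.sub(r'[^\w.@+-]', '-', local_part)
    return username[:USERNAME_MAX_LENGTH]

print(suggest_username('first.last@example.com'))   # first.last
print(suggest_username('weird name!@example.com'))  # weird-name-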
Python | def update_basket_task(instance_id):
"""Update Basket Task.
This task subscribes a user to Basket if they are not already subscribed,
and then updates their data on the Phonebook DataExtension. The task
retries on failure at most BASKET_TASK_MAX_RETRIES times and if it
finally doesn't complete successfully, it emails the
settings.BASKET_MANAGERS with details.
"""
from models import UserProfile
instance = UserProfile.objects.get(pk=instance_id)
if not BASKET_ENABLED or not instance.is_vouched:
return
data = {}
for group in Group.objects.exclude(steward=None):
name = group.name.upper().replace(' ', '_')
data[name] = 'N'
if instance.groups.filter(pk=group.id).exists():
data[name] = 'Y'
if instance.country:
data['country'] = instance.country
if instance.city:
data['city'] = instance.city
token = instance.basket_token
try:
if not token:
result = basket.subscribe(instance.user.email,
settings.BASKET_NEWSLETTER,
trigger_welcome='N')
token = result['token']
(UserProfile.objects
.filter(pk=instance_id).update(basket_token=token))
request('post', 'custom_update_phonebook',
token=token, data=data)
except (requests.exceptions.RequestException,
basket.BasketException), exception:
try:
update_basket_task.retry()
except (MaxRetriesExceededError, basket.BasketException):
_email_basket_managers('subscribe', instance.user.email,
exception.message) |
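The group flags sent to Basket above are simply uppercased group names mapped to 'Y' or 'N'; a minimal sketch of that transformation on plain data, with no ORM involved:

def group_flags(all_group_names, member_group_names):
    """Mark every known group 'Y' or 'N' depending on membership."""
    members = set(member_group_names)
    return {name.upper().replace(' ', '_'): 'Y' if name in members else 'N'
            for name in all_group_names}

print(group_flags(['web dev', 'l10n'], ['l10n']))
# {'WEB_DEV': 'N', 'L10N': 'Y'}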
Python | def remove_from_basket_task(email, basket_token):
"""Remove from Basket Task.
This task unsubscribes a user from Basket. The task retries on
failure at most BASKET_TASK_MAX_RETRIES times and if it finally
doesn't complete successfully, it emails the
settings.BASKET_MANAGERS with details.
"""
if not BASKET_ENABLED or not basket_token:
return
try:
basket.unsubscribe(basket_token, email,
newsletters=settings.BASKET_NEWSLETTER)
except (requests.exceptions.RequestException,
basket.BasketException), exception:
try:
remove_from_basket_task.retry()
except (MaxRetriesExceededError, basket.BasketException):
_email_basket_managers('subscribe', email, exception.message) |
Python | def remove_incomplete_accounts(days=INCOMPLETE_ACC_MAX_DAYS):
"""Remove incomplete accounts older than INCOMPLETE_ACC_MAX_DAYS old."""
now = datetime.now() - timedelta(days=days)
(User.objects.filter(date_joined__lt=now)
.filter(userprofile__full_name='').delete()) |
Python | def clean_groups(self):
"""Groups are saved in lowercase because it's easy and
consistent.
"""
if not re.match(r'^[a-zA-Z0-9 .:,-]*$', self.cleaned_data['groups']):
raise forms.ValidationError(_(u'Groups can only contain '
'alphanumeric characters, dashes, '
'spaces.'))
system_groups = [g.name for g in self.instance.groups.all()
if g.system]
groups = self.cleaned_data['groups']
new_groups = filter(lambda x: x,
map(lambda x: x.strip() or False,
groups.lower().split(',')))
return system_groups + new_groups |
Python | def save(self):
"""Save the data to profile."""
self.instance.set_membership(Group, self.cleaned_data['groups'])
self.instance.set_membership(Skill, self.cleaned_data['skills'])
self.instance.set_membership(Language, self.cleaned_data['languages'])
super(ProfileForm, self).save() |
Python | def vouched_member_count(self, obj):
"""Return number of vouched members in group"""
# Annotated field, could be None or a float
if obj.vouched_member_count:
return int(obj.vouched_member_count)
return 0 |
Python | def _create_thumbnail(self, source_image, geometry_string, options,
thumbnail):
"""
Creates the thumbnail by using default.engine
"""
ratio = default.engine.get_image_ratio(source_image)
geometry = parse_geometry(geometry_string, ratio)
image = default.engine.create(source_image, geometry, options)
default.engine.write(image, options, thumbnail)
# It's much cheaper to set the size here
size = default.engine.get_image_size(image)
thumbnail.set_size(size) |
Python | def stringify_groups(groups):
"""Change a list of Group (or skills or languages) objects into a
comma-delimited string.
"""
return u','.join([group.name for group in groups]) |
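stringify_groups is the inverse of the comma-splitting done in clean_groups further up; a quick round-trip illustration using a stand-in namedtuple instead of the Group model:

from collections import namedtuple

FakeGroup = namedtuple('FakeGroup', 'name')

def stringify_groups(groups):
    return u','.join([group.name for group in groups])

serialized = stringify_groups([FakeGroup('web dev'), FakeGroup('l10n')])
print(serialized)  # web dev,l10n
print([g.strip() for g in serialized.lower().split(',') if g.strip()])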
Python | def assign_autocomplete_to_groups():
"""Set auto_complete to True when member count is larger than
AUTO_COMPLETE_COUNT.
Note: no_members includes both vouched and unvouched users ATM. We
should count only vouched users.
"""
for model in [Group, Language, Skill]:
groups = (model.objects
.filter(always_auto_complete=False)
.annotate(no_members=Count('members'))
.filter(no_members__gte=AUTO_COMPLETE_COUNT))
if model is Group:
groups = groups.filter(system=False)
model.objects.update(auto_complete=False)
# Converting the ValuesListQuerySet to list is required to
# avoid mysql refusing to update the same tables used in the
# SELECT part.
(model.objects
.filter(pk__in=list(groups.values_list('id', flat=True)))
.update(auto_complete=True)) |
Python | def create(self, image, geometry, options):
"""
Processing conductor, returns the thumbnail as an image engine instance
"""
image = self.orientation(image, geometry, options)
image = self.colorspace(image, geometry, options)
image = self.scale(image, geometry, options)
image = self.crop(image, geometry, options)
return image |
Python | def privacy_fields(cls):
"""
Return a dictionary whose keys are the names of the fields in this
model that are privacy-controlled, and whose values are the default
values to use for those fields when the user is not privileged to
view their actual value.
"""
# Cache on the class object
if cls.CACHED_PRIVACY_FIELDS is None:
privacy_fields = {}
field_names = cls._meta.get_all_field_names()
for name in field_names:
if name.startswith('privacy_') or not 'privacy_%s' % name in field_names:
# skip privacy fields and uncontrolled fields
continue
field = cls._meta.get_field(name)
# Okay, this is a field that is privacy-controlled
# Figure out a good default value for it (to show to users
# who aren't privileged to see the actual value)
if isinstance(field, ManyToManyField):
default = field.related.parent_model.objects.none()
else:
default = field.get_default()
privacy_fields[name] = default
# HACK: There's not really an email field on UserProfile, but it's faked with a property
if 'privacy_email' in field_names:
privacy_fields['email'] = u''
cls.CACHED_PRIVACY_FIELDS = privacy_fields
return cls.CACHED_PRIVACY_FIELDS |
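The dictionary returned by privacy_fields is the set of safe defaults shown to viewers who are not allowed to see a field's real value; a simplified, framework-free sketch of that substitution (the check is illustrative, not the project's actual privacy-level comparison):

def visible_value(profile, field, viewer_is_privileged, defaults):
    """Return the real value only for privileged viewers, else the safe default."""
    if viewer_is_privileged:
        return getattr(profile, field)
    return defaults[field]

class FakeProfile(object):
    full_name = 'Jane Doe'

defaults = {'full_name': u''}  # what privacy_fields() would yield for full_name
print(visible_value(FakeProfile(), 'full_name', True, defaults))   # Jane Doe
print(visible_value(FakeProfile(), 'full_name', False, defaults))  # (empty string)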
Python | def search(cls, query, include_non_vouched=False, public=False):
"""Sensible default search for UserProfiles."""
query = query.lower().strip()
fields = ('username', 'bio__text', 'email', 'ircname',
'country__text', 'country__text_phrase',
'region__text', 'region__text_phrase',
'city__text', 'city__text_phrase',
'fullname__text', 'fullname__text_phrase',
'fullname__prefix', 'fullname__fuzzy',
'groups__text')
s = PrivacyAwareS(cls)
if public:
s = s.privacy_level(PUBLIC)
s = s.indexes(cls.get_index(public))
if query:
q = dict((field, query) for field in fields)
s = (s.boost(fullname__text_phrase=5, username=5, email=5,
ircname=5, fullname__text=4, country__text_phrase=4,
region__text_phrase=4, city__text_phrase=4,
fullname__prefix=3, fullname__fuzzy=2,
bio__text=2).query(or_=q))
s = s.order_by('_score', 'name')
if not include_non_vouched:
s = s.filter(is_vouched=True)
return s |
Python | def is_public_indexable(self):
"""For profile to be public indexable should have at least
full_name OR ircname OR email set to PUBLIC.
"""
for field in PUBLIC_INDEXABLE_FIELDS:
if (getattr(self, 'privacy_%s' % field, None) == PUBLIC and
getattr(self, field, None)):
return True
return False |
Python | def add_to_staff_group(self):
"""Keep users in the staff group if they're autovouchable."""
email = self.user.email
staff, created = Group.objects.get_or_create(name='staff', system=True)
if any(email.endswith('@' + x) for x in
settings.AUTO_VOUCH_DOMAINS):
self.groups.add(staff)
elif self.groups.filter(pk=staff.pk).exists():
self.groups.remove(staff) |
Python | def _email_now_vouched(self):
"""Email this user, letting them know they are now vouched."""
subject = _(u'You are now vouched on Mozillians!')
message = _(u'You\'ve now been vouched on Mozillians.org. '
u'You\'ll now be able to search, vouch '
u'and invite other Mozillians onto the site.')
send_mail(subject, message, settings.FROM_NOREPLY,
[self.user.email]) |