query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Inserts Qt Creator markers around method docs. In order to display the method signature inside Qt Creator's tooltip, the method signature is also inserted as text inside a hidden div. | def insert_methods_markers(soup):
# look for all memitem divs
divs = soup.find_all("div", "memitem")
for div in divs:
# look for method name and signature
memdoc = div.find("div", "memdoc")
memname_td = div.find("td","memname")
memname_table = div.find("table", "memname")
if not memdoc or not memname_td or not memname_table:
continue
# extract method name
fullmethodname = memname_td.get_text().strip()
methodname = fullmethodname.split("::")[-1]
# extract method signature
signature = memname_table.get_text().replace("\n", "")
compactsignature = signature.replace(" ","")
# insert Qt Creator markers around the memdoc div
memdoc.insert_before(method_start(methodname, compactsignature))
memdoc.insert_after(method_end(methodname))
# create a hidden div and insert method signature
hiddendiv = soup.new_tag("div", style="display: none")
hiddendiv.string = signature
memdoc.insert(0, hiddendiv) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_doc(func, doc):\r\n func.__doc__ = doc",
"def _add_doc(func, doc):\r\n func.__doc__ = doc",
"def _add_doc(func, doc):\r\n func.__doc__ = doc",
"def show_popup(self, view, docstring, location=None):",
"def signature(self) -> global___SnippetSignature:",
"def _add_doc(func, doc):\n func.__doc__ = doc",
"def methodHelp(self, req, method):\n p = self.get_method(method)\n return '\\n'.join((p.signature, '', p.description))",
"def __doc__(self, ???):",
"def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''",
"def Help():\n names=api_method_dict.keys()\n names.sort()\n return ''.join(['**** ' + api_method_dict[name].__name__ + '\\n' + api_method_dict[name].__doc__ + '\\n'\n for name in names])",
"def docstring_hack():\n pass",
"def main_docstring():",
"def DocString():\n return",
"def func_doc():",
"def old_function_with_docstring(x, y):\n return x + y",
"def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring",
"def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func",
"def documentation_only():\n pass",
"def get_documentation(self, *args, **dargs):\n pass",
"def docstrings(param1, param2):\n return \"example string\"",
"def format_signature(sig: inspect.Signature, colon: bool) -> str:\n # First get a list with all params as strings.\n result = pdoc.doc._PrettySignature._params(sig) # type: ignore\n return_annot = pdoc.doc._PrettySignature._return_annotation_str(sig) # type: ignore\n\n multiline = (\n sum(len(x) + 2 for x in result) + len(return_annot)\n > pdoc.doc._PrettySignature.MULTILINE_CUTOFF\n )\n\n def _try_highlight(code: str) -> str:\n \"\"\"Try to highlight a piece of code using pygments, but return the input as-is if pygments detects errors.\"\"\"\n pretty = pygments.highlight(code, lexer, signature_formatter).strip()\n if '<span class=\"err\">' not in pretty:\n return pretty\n else:\n return html.escape(code)\n\n # Next, individually highlight each parameter using pygments and wrap it in a span.param.\n # This later allows us to properly control line breaks.\n pretty_result = []\n for i, param in enumerate(result):\n pretty = _try_highlight(param)\n if multiline:\n pretty = f\"\"\"<span class=\"param\">\\t{pretty},</span>\"\"\"\n else:\n pretty = f\"\"\"<span class=\"param\">{pretty}, </span>\"\"\"\n pretty_result.append(pretty)\n\n # remove last comma.\n if pretty_result:\n pretty_result[-1] = pretty_result[-1].rpartition(\",\")[0] + \"</span>\"\n\n # Add return annotation.\n anno = \")\"\n if return_annot:\n anno += f\" -> {_try_highlight(return_annot)}\"\n if colon:\n anno += \":\"\n if return_annot or colon:\n anno = f'<span class=\"return-annotation\">{anno}</span>'\n\n rendered = \"(\" + \"\".join(pretty_result) + anno\n\n if multiline:\n rendered = f'<span class=\"signature pdoc-code multiline\">{rendered}</span>'\n else:\n rendered = f'<span class=\"signature pdoc-code condensed\">{rendered}</span>'\n\n return Markup(rendered)",
"def print_hint(self):\n print(self.__doc__)",
"def add_function_signature_help(specification: dict) -> dict:\n for f in specification[\"functions\"][\"signatures\"]:\n for argset_idx, argset in enumerate(\n specification[\"functions\"][\"signatures\"][f][\"signatures\"]\n ):\n args_summary = \"\"\n args_list = []\n for arg in specification[\"functions\"][\"signatures\"][f][\"signatures\"][argset_idx][\n \"arguments\"\n ]:\n if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n vals = [\n specification[\"functions\"][\"to_short\"].get(\n val, specification[\"functions\"][\"to_short\"].get(val)\n )\n for val in arg[\"values\"]\n ]\n args_summary += \"|\".join(vals) + \"()\"\n\n if arg.get(\"optional\", False) and arg.get(\"multiple\", False) is False:\n args_summary += \"?\"\n text = f'Zero or one of each function(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg.get(\"optional\", False):\n args_summary += \"*\"\n text = f'Zero or more of each function(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'One of following function(s): {\", \".join([val for val in arg[\"values\"]])}'\n\n elif arg[\"type\"] in [\"NSArg\", \"StrArg\", \"StrArgNSArg\"]:\n args_summary += f'{arg[\"type\"]}'\n if arg.get(\"optional\", False) and arg.get(\"multiple\", False) is False:\n args_summary += \"?\"\n if arg[\"type\"] in [\"NSArg\"]:\n text = f'Zero or one namespace argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg[\"type\"] == \"StrArgNSArg\":\n text = f'Zero or one namespace argument or default namespace argument (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'Zero or one string argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg.get(\"optional\", False):\n args_summary += \"*\"\n if arg[\"type\"] in [\"NSArg\"]:\n text = f'Zero or more namespace arguments of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg[\"type\"] == \"StrArgNSArg\":\n text = f'Zero or more namespace arguments or default namespace arguments (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'Zero or more of string arguments of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n if arg[\"type\"] in [\"NSArg\"]:\n text = f'Namespace argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg[\"type\"] == \"StrArgNSArg\":\n text = f'Namespace argument or default namespace argument (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'String argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n\n args_summary += \", \"\n args_list.append(text)\n\n args_summary = re.sub(\", $\", \"\", args_summary)\n specification[\"functions\"][\"signatures\"][f][\"signatures\"][argset_idx][\n \"argument_summary\"\n ] = f\"{f}({args_summary})\"\n specification[\"functions\"][\"signatures\"][f][\"signatures\"][argset_idx][\n \"argument_help_listing\"\n ] = args_list\n\n return specification",
"def add_documentation(cls, documentation):\n cls.__doc__ = documentation.CBAMLibrary\n methods = list(filter(lambda x: not x.startswith(\"_\"), dir(cls)))\n for method_name in methods:\n method = getattr(cls, method_name)\n if callable(method):\n name = method.__name__\n if hasattr(documentation, name):\n getattr(cls, name).__doc__ = getattr(documentation, name)",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\n\t\treturn self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__"
] | [
"0.63913465",
"0.63913465",
"0.63913465",
"0.6378863",
"0.6372265",
"0.6335496",
"0.63318473",
"0.62927926",
"0.62695724",
"0.62648576",
"0.61829376",
"0.6147917",
"0.6072649",
"0.605651",
"0.60020053",
"0.59880006",
"0.59529907",
"0.5952288",
"0.58864117",
"0.58829296",
"0.5819526",
"0.5817608",
"0.5804194",
"0.5800327",
"0.57929367",
"0.5786908",
"0.5779416",
"0.5779416",
"0.5779416",
"0.5779416"
] | 0.6983738 | 0 |
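
Each row's metadata declares a single `"triplet": [["query", "document", "negatives"]]` objective, so a record like the one above maps directly onto a contrastive training example. The sketch below is illustrative only: it assumes `row` is one parsed record with the columns named in the header, and the `top_k` cutoff is an arbitrary choice rather than something the dataset prescribes.

```python
def to_triplet(row, top_k=5):
    """Turn one record into an (anchor, positive, hard negatives) triplet."""
    # pair each of the 30 negatives with its score and keep the hardest ones first
    ranked = sorted(
        zip(row["negatives"], (float(s) for s in row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return {
        "anchor": row["query"],            # natural-language description
        "positive": row["document"],       # code snippet that answers it
        "negatives": [code for code, _ in ranked[:top_k]],
    }
```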
Creates the subcloud config file for a subcloud. | def _create_subcloud_config_file(self, context, subcloud, payload):
DEFAULT_STR = '<EDIT>'
pxe_cidr = payload.get(
'pxe-subnet', DEFAULT_STR)
management_vlan = payload.get(
'management-vlan', DEFAULT_STR)
management_interface_mtu = payload.get(
'management-interface-mtu', DEFAULT_STR)
management_interface_ports = payload.get(
'management-interface-port', DEFAULT_STR)
oam_cidr = payload.get(
'oam-subnet', DEFAULT_STR)
oam_gateway = payload.get(
'oam-gateway-ip', DEFAULT_STR)
oam_ip_floating_address = payload.get(
'oam-floating-ip', DEFAULT_STR)
oam_ip_unit_0_address = payload.get(
'oam-unit-0-ip', DEFAULT_STR)
oam_ip_unit_1_address = payload.get(
'oam-unit-1-ip', DEFAULT_STR)
oam_interface_mtu = payload.get(
'oam-interface-mtu', DEFAULT_STR)
oam_interface_ports = payload.get(
'oam-interface-port', DEFAULT_STR)
system_mode = payload.get(
'system-mode', DEFAULT_STR)
management_address_pool = self._get_management_address_pool(context)
systemcontroller_subnet = "%s/%d" % (
management_address_pool.network,
management_address_pool.prefix)
sc_mgmt_floating_ip = management_address_pool.floating_address
subcloud_config = ""
if system_mode in [SYSTEM_MODE_SIMPLEX, SYSTEM_MODE_DUPLEX,
SYSTEM_MODE_DUPLEX_DIRECT]:
subcloud_config += (
"[SYSTEM]\n"
"SYSTEM_MODE={}\n".format(system_mode))
if system_mode == SYSTEM_MODE_SIMPLEX:
subcloud_oamip_config = (
"IP_ADDRESS = {oam_ip_floating_address}\n"
).format(
oam_ip_floating_address=oam_ip_floating_address,
)
else:
subcloud_oamip_config = (
"IP_FLOATING_ADDRESS = {oam_ip_floating_address}\n"
"IP_UNIT_0_ADDRESS = {oam_ip_unit_0_address}\n"
"IP_UNIT_1_ADDRESS = {oam_ip_unit_1_address}\n"
).format(
oam_ip_floating_address=oam_ip_floating_address,
oam_ip_unit_0_address=oam_ip_unit_0_address,
oam_ip_unit_1_address=oam_ip_unit_1_address,
)
MIN_MANAGEMENT_SUBNET_SIZE = 8
tmp_management_subnet = validate_network_str(
subcloud.management_subnet,
minimum_size=MIN_MANAGEMENT_SUBNET_SIZE)
is_ipv6_mgmt = (tmp_management_subnet.version == 6)
# If ipv6 then we need pxe subnet and management_vlan.
# If user specified pxe boot subnet, then management vlan is required
# and vice versa
if is_ipv6_mgmt or (pxe_cidr != DEFAULT_STR) or \
(management_vlan != DEFAULT_STR):
subcloud_config += (
"[REGION2_PXEBOOT_NETWORK]\n"
"PXEBOOT_CIDR = {pxe_cidr}\n"
"[MGMT_NETWORK]\n"
"VLAN = {management_vlan}\n"
).format(
pxe_cidr=pxe_cidr,
management_vlan=management_vlan,
)
else:
subcloud_config += "[MGMT_NETWORK]\n"
subcloud_config += (
"CIDR = {management_cidr}\n"
"GATEWAY = {management_gateway}\n"
"IP_START_ADDRESS = {management_ip_start_address}\n"
"IP_END_ADDRESS = {management_ip_end_address}\n"
"DYNAMIC_ALLOCATION = Y\n"
"LOGICAL_INTERFACE = LOGICAL_INTERFACE_1\n"
"[LOGICAL_INTERFACE_1]\n"
"LAG_INTERFACE = N\n"
"INTERFACE_MTU = {management_interface_mtu}\n"
"INTERFACE_PORTS = {management_interface_ports}\n"
"[OAM_NETWORK]\n"
"CIDR = {oam_cidr}\n"
"GATEWAY = {oam_gateway}\n" +
subcloud_oamip_config +
"LOGICAL_INTERFACE = LOGICAL_INTERFACE_2\n"
"[LOGICAL_INTERFACE_2]\n"
"LAG_INTERFACE = N\n"
"INTERFACE_MTU = {oam_interface_mtu}\n"
"INTERFACE_PORTS = {oam_interface_ports}\n"
"[SHARED_SERVICES]\n"
"SYSTEM_CONTROLLER_SUBNET = {systemcontroller_subnet}\n"
"SYSTEM_CONTROLLER_FLOATING_ADDRESS = {sc_mgmt_floating_ip}\n"
"REGION_NAME = SystemController\n"
"ADMIN_PROJECT_NAME = admin\n"
"ADMIN_USER_NAME = admin\n"
"ADMIN_PASSWORD = {admin_password}\n"
"KEYSTONE_ADMINURL = {keystone_adminurl}\n"
"KEYSTONE_SERVICE_NAME = keystone\n"
"KEYSTONE_SERVICE_TYPE = identity\n"
"GLANCE_SERVICE_NAME = glance\n"
"GLANCE_SERVICE_TYPE = image\n"
"GLANCE_CACHED = True\n"
"[REGION_2_SERVICES]\n"
"REGION_NAME = {region_2_name}\n"
"[VERSION]\n"
"RELEASE = {release}\n"
).format(
management_cidr=subcloud.management_subnet,
management_gateway=subcloud.management_gateway_ip,
management_ip_start_address=subcloud.management_start_ip,
management_ip_end_address=subcloud.management_end_ip,
management_interface_mtu=management_interface_mtu,
management_interface_ports=management_interface_ports,
oam_cidr=oam_cidr,
oam_gateway=oam_gateway,
oam_interface_mtu=oam_interface_mtu,
oam_interface_ports=oam_interface_ports,
systemcontroller_subnet=systemcontroller_subnet,
sc_mgmt_floating_ip=sc_mgmt_floating_ip,
admin_password=cfg.CONF.cache.admin_password,
keystone_adminurl=cfg.CONF.cache.auth_uri,
region_2_name=subcloud.name,
release=subcloud.software_version,
)
return subcloud_config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateConfig(run,subrun,conditions):\n \n configname = (conditions.numcdir + \"/\" + str(run) + \"/\" + str(subrun)\n + \"/numc_config_\" + str(run) + \"_\" + str(subrun) + \".cfg\")\n \n configContents = \"\"\n \n configContents += \"[software]\\n\"\n if conditions.oldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/setup.sh\\n\"\n elif conditions.newoldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/setup.sh\\n\"\n else:\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280/src/neutgeom/setup.sh\\n\"\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280_wBBBA05/src/neutgeom/setup.sh\\n\"\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/setup.sh\\n\"\n \n configContents += \"[geometry]\\n\"\n\n configContents += \"baseline = \" + conditions.geometry +\"\\n\"\n if conditions.waterair == \"water\":\n configContents += \"p0d_water_fill = 1\\n\"\n else:\n configContents += \"p0d_water_fill = 0\\n\"\n \n configContents += \"\"\"\n \n[configuration]\nmodule_list = neutMC\n\n[filenaming]\n\"\"\"\n configContents += \"comment = \" + conditions.comment + \"\\n\"\n configContents += \"run_number = \" + str(run) +\"\\n\"\n configContents += \"subrun = \" + str(subrun) + \"\\n\"\n\n if conditions.oldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/neut.card\n\"\"\"\n elif conditions.newoldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/neut.card\n\"\"\"\n else:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/neut.card\n\"\"\"\n\n configContents += \"flux_file = \" + conditions.ram_disk + \"/\" + conditions.flux_base + \"\\n\"\n\n#flux_file = flux_file\n#\"\"\"\n\n# configContents += \"flux_file_path = \" + conditions.ram_disk + \"/\" + conditions.flux_base\n\n# configContents += \"\"\" \n#flux_file_start = 1\n#flux_file_stop = 300\n#\"\"\"\n\n configContents += \"maxint_file = \" + conditions.maxint_file_local + \"\\n\"\n\n# default: 5e17 but for basket MC special production higher\n configContents += \"\"\" \npot = 5.0e17\nneutrino_type = beam\n\"\"\"\n if conditions.baskmagn == \"basket\":\n configContents += \"\"\" \nflux_region = basket\nmaster_volume = Basket \nrandom_start = 1\n\"\"\"\n elif conditions.baskmagn == \"magnet\":\n configContents += \"\"\" \nflux_region = magnet\nmaster_volume = Magnet \nrandom_start = 1\n\"\"\"\n else:\n print \"Unknown basket/magnet condition\"\n \n\n configContents += \"random_seed = \" + str(getRandom()) +\"\\n\"\n configContents += \"neut_seed1 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed2 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed3 = \" + str(getRandom())+\"\\n\" \n\n configContents += \"\\n\"\n configContents += \"[nd280mc]\\n\"\n configContents += \"mc_type=Neut_RooTracker \\n\"\n\n #print configContents\n\n try:\n macFile = open(configname,\"w\")\n macFile.write(configContents)\n \n except:\n print \"can't write config file\" \n \n\n return configname",
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out1\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n },\n {\n \"l3out\": {\n \"name\": \"l3out2\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file_after(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file_before(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\",\n },\n {\n \"contract_name\": \"contract-2\",\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def handle_cloudconfig(cfg, target=None):\n # check that cfg is dict\n if not isinstance(cfg, dict):\n raise ValueError(\"cloudconfig configuration is not in dict format\")\n\n # for each item in the dict\n # generate a path based on item key\n # if path is already in the item, LOG warning, and use generated path\n for cfgname, cfgvalue in cfg.items():\n cfgpath = \"50-cloudconfig-%s.cfg\" % cfgname\n if 'path' in cfgvalue:\n LOG.warning(\"cloudconfig ignoring 'path' key in config\")\n cfgvalue['path'] = cfgpath\n\n # re-use write_files format and adjust target to prepend\n LOG.debug('Calling write_files with cloudconfig @ %s', target)\n LOG.debug('Injecting cloud-config:\\n%s', cfg)\n write_files({'write_files': cfg}, target)",
"def _createConfigFile(self):\n configFile = self._configFile()\n try:\n with open(configFile) as fh:\n pass\n except IOError:\n try:\n with open(configFile, 'w') as fh:\n fh.write(\"[settings]\\n\")\n fh.write(\"debug = false\\n\")\n fh.write(\"hidefilenames = false\\n\")\n except IOError:\n pass",
"def post(self, subcloud_ref=None, qualifier=None):\n\n context = restcomm.extract_context_from_environ()\n\n if subcloud_ref is None:\n payload = eval(request.body)\n if not payload:\n pecan.abort(400, _('Body required'))\n name = payload.get('name')\n if not name:\n pecan.abort(400, _('name required'))\n management_subnet = payload.get('management-subnet')\n if not management_subnet:\n pecan.abort(400, _('management-subnet required'))\n management_start_ip = payload.get('management-start-ip')\n if not management_start_ip:\n pecan.abort(400, _('management-start-ip required'))\n management_end_ip = payload.get('management-end-ip')\n if not management_end_ip:\n pecan.abort(400, _('management-end-ip required'))\n management_gateway_ip = payload.get('management-gateway-ip')\n if not management_gateway_ip:\n pecan.abort(400, _('management-gateway-ip required'))\n systemcontroller_gateway_ip = \\\n payload.get('systemcontroller-gateway-ip')\n if not systemcontroller_gateway_ip:\n pecan.abort(400, _('systemcontroller-gateway-ip required'))\n\n self._validate_subcloud_config(context,\n name,\n management_subnet,\n management_start_ip,\n management_end_ip,\n management_gateway_ip,\n systemcontroller_gateway_ip)\n\n try:\n # Ask dcmanager-manager to add the subcloud.\n # It will do all the real work...\n return self.rpc_client.add_subcloud(context, payload)\n except RemoteError as e:\n pecan.abort(422, e.value)\n except Exception as e:\n LOG.exception(e)\n pecan.abort(500, _('Unable to create subcloud'))\n elif qualifier:\n if qualifier == 'config':\n subcloud = None\n\n if subcloud_ref.isdigit():\n # Look up subcloud as an ID\n try:\n subcloud = db_api.subcloud_get(context, subcloud_ref)\n except exceptions.SubcloudNotFound:\n pecan.abort(404, _('Subcloud not found'))\n else:\n # Look up subcloud by name\n try:\n subcloud = db_api.subcloud_get_by_name(context,\n subcloud_ref)\n except exceptions.SubcloudNameNotFound:\n pecan.abort(404, _('Subcloud not found'))\n\n payload = dict()\n if request.body:\n payload = eval(request.body)\n config_file = self._create_subcloud_config_file(\n context, subcloud, payload)\n result = dict()\n result['config'] = config_file\n return result\n else:\n pecan.abort(400, _('Invalid request'))\n else:\n pecan.abort(400, _('Invalid request'))",
"def create_config(self) -> None:\n pass",
"def create_config(self) -> None:\n pass",
"def create(ctx, **kwargs):\n # creates and activates pf9-express config file\n\n pf9_exp_conf_dir = ctx.obj['pf9_exp_conf_dir']\n \n # Backup existing config if one exist\n if os.path.exists(pf9_exp_conf_dir + 'express.conf'):\n with open(pf9_exp_conf_dir + 'express.conf', 'r') as current:\n lines = current.readlines()\n current.close()\n for line in lines:\n if 'config_name|' in line:\n line = line.strip()\n name = line.replace('config_name|','')\n\n filename = name + '.conf'\n shutil.copyfile(pf9_exp_conf_dir + 'express.conf', pf9_exp_conf_dir + filename)\n\n if not os.path.exists(pf9_exp_conf_dir):\n try:\n access_rights = 0o700\n os.makedirs(pf9_exp_conf_dir, access_rights)\n except Exception:\n click.echo(\"Creation of the directory %s failed\" % pf9_exp_conf_dir)\n else:\n click.echo(\"Successfully created the directory %s \" % pf9_exp_conf_dir)\n\n with open(pf9_exp_conf_dir + 'express.conf', 'w') as file:\n for k,v in ctx.params.items():\n file.write(k + '|' + str(v) + '\\n')\n click.echo('Successfully wrote Platform9 management plane configuration')",
"def add_subconfig(self, name, arg_kws=None, func=None):\n if name in self.subconfig:\n raise ValueError(\"Subconfig '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict()\n argparser = self.subparsers.add_parser(name, **arg_kws)\n subconfig = SubConfig(argparser, self.config, name, func)\n self.subconfig[name] = subconfig\n return subconfig",
"def create_settings_file():\n with open('./cfg/settings.cfg'.replace(\"/\", os.path.sep), 'w') as cfg:\n cfg.write('[report]\\nlogo = ./cfg/logo.png\\ncompany =\\nrecord =\\nunit =\\nexaminer =\\nnotes =\\n\\n[auth]\\ngmail = [email protected]\\npassw = yourpassword\\ndevid = 1234567887654321\\ncelnumbr = BackupPhoneNunmber\\n\\n[app]\\npkg = com.whatsapp\\nsig = 38a0f7d505fe18fec64fbf343ecaaaf310dbd799\\n\\n[client]\\npkg = com.google.android.gms\\nsig = 38918a453d07199354f8b19af05ec6562ced5788\\nver = 9877000'.replace(\"/\", os.path.sep))",
"def _CreateCfgFile():\n default_cfg = \"\"\"\nproject: \"fake_project\"\nzone: \"fake_zone\"\nstorage_bucket_name: \"fake_bucket\"\nclient_id: \"fake_client_id\"\nclient_secret: \"fake_client_secret\"\n\"\"\"\n return default_cfg",
"def setup_cloud(self, config):\n # Just write out the template to the pwd for right now\n template_file = './openstack.yaml'\n with open(template_file, 'w', encoding='utf-8') as fp:\n fp.write(config)\n self._cloud.create_stack(self._stack_name, template_file=template_file, wait=True)",
"def make_config(outfile, sample_rate, numof_channels, mode, server = 'localhost', shape = 'None',\n security_mode = False, saving_mode = False, data_file = 'Nofile', format = 'binary',\n resolution = 0.1, returning_speed = 8, channels = 0,\n color_bg = 'white', color_trigger = 'black',\n size_window = (1000, 800)):\n\n\n config = ConfigParser.RawConfigParser()\n\n config.add_section('technics')\n config.add_section('visualization')\n config.add_section('security')\n config.add_section('data')\n\n config.set('technics', 'sample_rate', sample_rate)\n config.set('technics', 'numof_channels', numof_channels)\n config.set('technics', 'server', server)\n config.set('technics', 'resolution', resolution)\n config.set('technics', 'speed', returning_speed)\n if channels == 0:\n channels = range(numof_channels+1)[1:numof_channels+1]\n config.set('technics', 'channels', channels)\n\n config.set('visualization', 'mode', mode)\n config.set('visualization', 'color_bg', color_bg)\n config.set('visualization', 'color_trigger', color_trigger)\n config.set('visualization', 'size_window', size_window)\n\n config.set('security', 'security_mode', security_mode)\n\n config.set('data', 'saving_mode', saving_mode)\n config.set('data', 'file', data_file)\n config.set('data', 'format', format)\n\n config_file = open(outfile, 'w+')\n\n config.write(config_file)",
"def create_custom_config(watch, start_cmd, stop_cmd, match_cmd):\n with open(TEMPLATE_LOCATION) as template:\n output = template.read()\n output = output.format(\n process_name=watch, match_clause='MATCHING {}'.format(match_cmd),\n group=watch, start_line=start_cmd, stop_line=stop_cmd)\n\n config_file = os.path.join(MONIT_CONFIG_DIR, 'appscale-{}.cfg'.format(watch))\n file_io.write(config_file, output)",
"def create_configfile():\n config = ConfigParser.ConfigParser()\n config.add_section('Common')\n config.set('Common', 'renewal days', 20)\n config.set('Common', 'delayed installation days', 5)\n config.set('Common', 'include chain', True)\n config.set('Common', 'account key', './config/key.pem')\n config.add_section('Load Balancer')\n config.set('Load Balancer', 'cluster', True)\n config.set('Load Balancer', 'Host 1', 'lb1.example.com')\n config.set('Load Balancer', 'Host 2', 'lb2.example.com')\n config.set('Load Balancer', 'username', 'admin')\n config.set('Load Balancer', 'password', 'password01')\n config.set('Load Balancer', 'datagroup', 'acme_responses_dg')\n config.set('Load Balancer', 'datagroup partition', 'Common')\n config.add_section('Certificate Authority')\n config.set('Certificate Authority', 'Directory URL',\n 'https://acme-v01.api.letsencrypt.org/directory')\n config.set('Certificate Authority', 'use proxy', False)\n config.set('Certificate Authority', 'proxy',\n 'http://proxy.example.com:8080')\n\n # As the config file contains password, we should be careful with permissions\n with os.fdopen(os.open(CONFIG_FILE, os.O_WRONLY | os.O_CREAT, 0o660), 'w') as config_file:\n config.write(config_file)",
"def _create_config(env_path):\n s2e_yaml = 's2e.yaml'\n version_path = os.path.join(os.path.dirname(__file__), '..', 'dat', 'VERSION')\n\n with open(version_path, 'r', encoding='utf-8') as fp:\n context = {\n 'creation_time': str(datetime.datetime.now()),\n 'version': fp.read().strip(),\n }\n\n render_template(context, s2e_yaml, os.path.join(env_path, s2e_yaml))",
"def _init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\")\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related",
"def _init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\",)\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related",
"def configureOCS(self,icpdInstallLogFile):\n methodName = \"configureOCS\"\n TR.info(methodName,\" Start configuration of OCS for CPD\")\n workerocs = \"/ibm/templates/ocs/workerocs.yaml\"\n workerocs_1az = \"/ibm/templates/ocs/workerocs1AZ.yaml\"\n if(len(self.zones)==1):\n shutil.copyfile(workerocs_1az,workerocs)\n self.updateTemplateFile(workerocs,'${az1}', self.zones[0])\n self.updateTemplateFile(workerocs,'${ami_id}', self.amiID)\n self.updateTemplateFile(workerocs,'${instance-type}', self.OCSInstanceType)\n self.updateTemplateFile(workerocs,'${instance-count}', self.NumberOfOCS)\n self.updateTemplateFile(workerocs,'${region}', self.region)\n self.updateTemplateFile(workerocs,'${cluster-name}', self.ClusterName)\n self.updateTemplateFile(workerocs, 'CLUSTERID', self.clusterID)\n self.updateTemplateFile(workerocs,'${subnet-1}',self.PrivateSubnet1ID)\n \n\n if(len(self.zones)>1):\n self.updateTemplateFile(workerocs,'${az2}', self.zones[1])\n self.updateTemplateFile(workerocs,'${az3}', self.zones[2])\n self.updateTemplateFile(workerocs,'${subnet-2}',self.PrivateSubnet2ID)\n self.updateTemplateFile(workerocs,'${subnet-3}',self.PrivateSubnet3ID)\n\n create_ocs_nodes_cmd = \"oc create -f \"+workerocs\n TR.info(methodName,\"Create OCS nodes\")\n try:\n retcode = check_output(['bash','-c', create_ocs_nodes_cmd])\n time.sleep(600)\n TR.info(methodName,\"Created OCS nodes %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n \n ocs_nodes = []\n get_ocs_nodes = \"oc get nodes --show-labels | grep storage-node |cut -d' ' -f1 \"\n try:\n ocs_nodes = check_output(['bash','-c',get_ocs_nodes])\n nodes = ocs_nodes.split(\"\\n\")\n TR.info(methodName,\"OCS_NODES %s\"%nodes)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n i =0\n while i < len(nodes)-1:\n TR.info(methodName,\"Labeling for OCS node %s \" %nodes[i])\n label_cmd = \"oc label nodes \"+nodes[i]+\" cluster.ocs.openshift.io/openshift-storage=''\"\n try: \n retcode = check_output(['bash','-c', label_cmd])\n TR.info(methodName,\"Label for OCS node %s returned %s\" %(nodes[i],retcode))\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n i += 1\n\n\n deploy_olm_cmd = \"oc create -f /ibm/templates/ocs/deploy-with-olm.yaml\"\n TR.info(methodName,\"Deploy OLM\")\n try:\n retcode = check_output(['bash','-c', deploy_olm_cmd]) \n time.sleep(300)\n TR.info(methodName,\"Deployed OLM %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n create_storage_cluster_cmd = \"oc create -f /ibm/templates/ocs/ocs-storagecluster.yaml\"\n TR.info(methodName,\"Create Storage Cluster\")\n try:\n retcode = check_output(['bash','-c', create_storage_cluster_cmd]) \n time.sleep(600)\n TR.info(methodName,\"Created Storage Cluster %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n install_ceph_tool_cmd = \"curl -s https://raw.githubusercontent.com/rook/rook/release-1.1/cluster/examples/kubernetes/ceph/toolbox.yaml|sed 's/namespace: rook-ceph/namespace: openshift-storage/g'| oc apply -f -\"\n TR.info(methodName,\"Install ceph toolkit\")\n try:\n retcode = 
check_output(['bash','-c', install_ceph_tool_cmd]) \n TR.info(methodName,\"Installed ceph toolkit %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n TR.info(methodName,\"Configuration of OCS for CPD completed\")",
"def write_to_file(self, file_name=None, sub_path=None) -> None:\n super().write_to_file(file_name, settings.ARTILLERY_FOLDER)\n\n self.set_yaml_config()\n self.write_file_to_output(\n settings.ARTILLERY_YAML, self.yaml_config, append_mode=False, project_sub_folder=settings.ARTILLERY_FOLDER\n )",
"def create_bucket_config(args):\n\n sanitised_group = args.group.replace('/', '-')\n\n full_bucket_name = sanitised_group + \"-\" + args.bucket\n bucket_config = [\n \"# Plugin for \" + args.bucket + \" bucket\\n\",\n \"glb.locplugin[]: /usr/lib64/ugr/libugrlocplugin_s3.so \" + full_bucket_name + \" 15 s3s://s3.echo.stfc.ac.uk/\" + args.bucket + \"\\n\",\n \"locplugin.\" + full_bucket_name + \".xlatepfx: /\" + sanitised_group + \"/\" + args.bucket + \" /\\n\",\n \"locplugin.\" + full_bucket_name + \".s3.priv_key: \" + args.private_key + \"\\n\",\n \"locplugin.\" + full_bucket_name + \".s3.pub_key: \" + args.public_key + \"\\n\",\n \"locplugin.\" + full_bucket_name + \".s3.writable: true\\n\",\n \"locplugin.\" + full_bucket_name + \".s3.alternate: true\\n\",\n \"locplugin.\" + full_bucket_name + \".s3.ca_path: /etc/grid-security/certificates/\\n\",\n \"\\n\"\n ]\n\n with open(\"/etc/ugr/conf.d/\" + sanitised_group + \".conf\", \"a\") as f:\n f.writelines(bucket_config)",
"def generate_config(context):\n resources = []\n\n # Create an initial 'STARTED' pubsub notification.\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n depends_on=[],\n status_string='STARTED',\n ))\n\n # Required properties.\n billing_account_id = context.properties['billingAccountId']\n parent_organization = context.properties['parentOrganization']\n project_id = context.properties['projectId']\n\n # Optional properties, with defaults.\n high_security_network = context.properties.get('highSecurityNetwork', False)\n private_ip_google_access = context.properties.get('privateIpGoogleAccess', False)\n storage_bucket_lifecycle = context.properties.get('storageBucketLifecycle', 180)\n billing_account_friendly_name = context.properties.get('billingAccountFriendlyName', billing_account_id)\n # Use a project name if given, otherwise it's safe to fallback to use the\n # project ID as the name.\n project_name = context.properties.get('projectName', project_id)\n labels_obj = context.properties.get('labels', {})\n\n # Save this template's version number and all parameters inputs to the project metadata to keep track of what\n # operations were performed on a project.\n labels_obj.update({\n \"firecloud-project-template-version\" : str(FIRECLOUD_PROJECT_TEMPLATE_VERSION_ID)\n })\n\n for k, v in context.properties.items():\n label_k, label_v = satisfy_label_requirements('param--' + str(k), v)\n labels_obj.update({\n label_k: label_v\n })\n\n\n if high_security_network:\n labels_obj.update({\n \"vpc-network-name\" : FIRECLOUD_VPC_NETWORK_NAME,\n \"vpc-subnetwork-name\" : FIRECLOUD_VPC_SUBNETWORK_NAME\n })\n\n if 'parentFolder' in context.properties:\n parent_obj = {\n 'id': context.properties['parentFolder'],\n 'type': 'folder',\n }\n else:\n parent_obj = {\n 'id': context.properties['parentOrganization'],\n 'type': 'organization',\n }\n\n # Create the main project resource.\n resources.append({\n 'type': 'templates/project.py',\n 'name': 'fc-project',\n 'properties': {\n 'activateApis': FIRECLOUD_REQUIRED_APIS,\n 'billingAccountId': billing_account_id,\n 'billingAccountFriendlyName': billing_account_friendly_name,\n 'iamPolicies': create_iam_policies(context),\n 'labels': labels_obj,\n 'name': project_name,\n # The project parent. For FireCloud, this should refer to the\n # firecloud.org (or equivalent) GCP organization ID.\n 'parent': parent_obj,\n 'projectId': project_id,\n # If true, this would remove the default compute egine service\n # account. 
FireCloud doesn't use this SA, but we're leaving this set\n # to False to avoid changing any legacy behavior, at least initially.\n 'removeDefaultSA': False,\n # Removes the default VPC network for projects requiring stringent\n # network security configurations.\n 'removeDefaultVPC': high_security_network,\n 'createUsageExportBucket': False,\n # Always set up the storage logs and cromwell auth buckets for Firecloud\n 'storageLogsBucket': True,\n 'storageBucketLifecycle': storage_bucket_lifecycle,\n 'cromwellAuthBucket': True\n }\n })\n\n if high_security_network:\n resources.extend(create_high_security_network(context))\n resources.extend(create_firewall(context))\n if private_ip_google_access:\n resources.extend(create_private_google_access_dns_zone(context))\n else:\n resources.extend(create_default_network(context))\n\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n # This is somewhat hacky, but we can't simply collect the name of each\n # collected resource since template call nodes aren't \"real\" resources\n # that can be part of a dependsOn stanza. So instead, we collect the\n # names of all resources that are output by the network (which itself\n # depends on the project). It doesn't seem to be possible to concatenate\n # dependsOn arrays within the reference syntax, otherwise we could make\n # this depend explicitly on all resources from the template nodes.\n depends_on='$(ref.fc-network.resourceNames)',\n status_string='COMPLETED'))\n\n return {'resources': resources}",
"def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())",
"def makeconf():\n conf = {}\n available_locations = []\n print('Hi. We will help you make a config file.')\n input('Press ENTER to continue or CTRL+C to quit.... ')\n path = input('Please enter the path you want to save the config file. Default . (4nt direcoty) : ')\n time = input('How long, in hours, you would like the instance to leave? Default 24 (hours) : ')\n flavor = input('What is the Instance Flavor ID you would like to spawn? Default 201 (CPU:1, MEM:1gb, SSD:25gb) : ')\n operating_system = input('What is the Instance operating system ID you would like to install? Default 167 (CentOS 7) : ')\n currency = input('Which currency would you like to pay with? \"bitcoin, ethereum, litecoin or bitcoincash\". Default (bitcoin)')\n all_flavor = api_get('flavor')\n all_os = api_get('os')\n\n path = path if path else './instance-conf.json'\n time = time if time else '24'\n flavor = flavor if flavor else '201'\n operating_system = operating_system if operating_system else '167'\n currency = currency if currency else 'bitcoin'\n\n if not all_flavor.get('status') == 200 or not all_os.get('status') == 200:\n print(Bcolors.FAIL + 'ERROR: Something went wrong requesting facelesscloud server.' + Bcolors.ENDC)\n sys.exit(2)\n\n if not all_flavor['result'].get(flavor):\n print(Bcolors.FAIL + 'ERROR: flavor ID entered does not exist.' + Bcolors.ENDC)\n sys.exit(2)\n if not all_os['result'].get(operating_system):\n print(Bcolors.FAIL + 'ERROR: operating system ID entered does not exist.' + Bcolors.ENDC)\n sys.exit(2)\n\n available_locations = all_flavor['result'][flavor].get('available_locations')\n if not available_locations:\n print(Bcolors.FAIL + 'ERROR: No available location found for specified flavor.' + Bcolors.ENDC)\n sys.exit(2)\n\n i = 1\n location_num = {}\n for location in available_locations:\n location_num.update({i: location})\n i = i + 1\n\n region = input('Please select region to deploy instance ' + str(location_num) + ' : ')\n\n if int(region) not in location_num:\n print(Bcolors.FAIL + 'ERROR: Region ID selected not in displayed choice. Exiting no configuration file created.' + Bcolors.ENDC)\n sys.exit(2)\n\n region_id = get_region_id(location_num.get(int(region)))\n\n sshkey_path = input('Please enter the Public SSH Key path. (Let it blank if None.) : ')\n sshkey = file_to_string(sshkey_path) if sshkey_path else None\n kickstart_path = input('Please enter the kickstart Bash script path. (Let it blank if None.) : ')\n kickstart = file_to_string(kickstart_path) if kickstart_path else None\n\n if sshkey and not validate_ssh_key(sshkey):\n print(Bcolors.FAIL + 'ERROR: SSH-KEY format is bad ! Exiting no configuration file created.' + Bcolors.ENDC)\n sys.exit(2)\n\n conf.update({'hours_time': time, 'flavor': flavor, 'operating_system': operating_system, 'region': region_id, 'ssh_key': sshkey, 'kickstart': kickstart, 'currency': currency})\n try:\n with open(path, 'w') as conf_file:\n json.dump(conf, conf_file)\n conf_file.close()\n except FileNotFoundError as err: # Sublime give an error, but it's not.\n print(Bcolors.FAIL + 'ERROR: Config File path entered not found.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n except PermissionError as err:\n print(Bcolors.FAIL + 'ERROR: Config File path entered, Permission Denied.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n\n print(Bcolors.OKGREEN + 'SUCCESS, Config file writen to ' + path + Bcolors.ENDC)",
"def _setup_configfiles(self, Testboard):\n\n # Delete all root files which are already in the directory\n root_files = glob.glob(Testboard.testdir+'/*.root')\n for f in root_files:\n os.remove(f)\n # Change testboard name\n\tif Testboard.DTB and os.path.isfile(Testboard.testdir + \"/tb\"):\n self._config_file_content_substitute(Testboard.testdir + \"/tb\", {\"id\":Testboard.address})\n else:\n self._config_file_content_substitute(Testboard.testdir + \"/configParameters.dat\", {\"testboardName\":Testboard.address})\n\n # Get test specific config parameters (if available)\n params = ()\n try:\n params = self.init.items(\"Test \" + self.test.testname)\n except:\n return\n for par in params:\n file = par[0]\n if '.cfg' in file:\n section,pair = par[1].split(':')\n key,value = pair.split('=')\n config_file = BetterConfigParser()\n config_file.read(Testboard.testdir + \"/\" + file)\n config_file.set(section,key,value)\n write_file = open(Testboard.testdir + \"/\" + file, 'write')\n config_file.write(write_file)\n write_file.close()\n continue\n # Check for valid keys that represent config files\n elif \"testParameters\" in file or \"dacParameters\" in file or \"configParameters\" in file:\n pass\n elif \"tbmParameters\" in file or \"tbParameters\" in file:\n pass\n else:\n continue\n\n encoded_keys = par[1].split(\",\")\n keys = {}\n for key in encoded_keys:\n key = key.split(\"=\", 2)\n if len(key) != 2:\n continue\n keys[key[0]] = key[1]\n if len(file) < 4 or file[-4:] != \".dat\":\n file += \".dat\"\n self._config_file_content_substitute(Testboard.testdir + \"/\" + file, keys)",
"def generate_config(context):\n\n enable_flow_logs = context.properties.get('enableFlowLogs', False)\n\n subnetwork_resource = {\n 'name': context.properties['resourceName'],\n 'type': 'gcp-types/compute-beta:subnetworks',\n 'properties': {\n # Required properties.\n 'name':\n context.properties['name'],\n 'network':\n context.properties['network'],\n 'ipCidrRange':\n context.properties['ipCidrRange'],\n 'region':\n context.properties['region'],\n 'project':\n context.properties['projectId'],\n\n # Optional properties, with defaults.\n 'enableFlowLogs':\n enable_flow_logs,\n 'privateIpGoogleAccess':\n context.properties.get('privateIpGoogleAccess', False),\n 'secondaryIpRanges':\n context.properties.get('secondaryIpRanges', []),\n }\n }\n \n if enable_flow_logs:\n # If flow logs are enabled, we want to adjust the default config in two ways:\n # (1) Increase the sampling ratio (defaults to 0.5) so we sample all traffic.\n # (2) Reduce the aggregation interval to 30 seconds (default is 5secs) to save on\n # storage.\n subnetwork_resource['properties']['logConfig'] = {\n 'aggregationInterval': 'INTERVAL_30_SEC',\n 'enable': True,\n 'flowSampling': 1.0,\n 'metadata': 'INCLUDE_ALL_METADATA',\n }\n\n # Pass the 'dependsOn' property to the subnetwork resource if present.\n if 'dependsOn' in context.properties:\n subnetwork_resource['metadata'] = {\n 'dependsOn': context.properties['dependsOn']\n }\n\n output = [\n {\n 'name': 'name',\n 'value': subnetwork_resource['name'],\n },\n {\n 'name': 'selfLink',\n 'value': '$(ref.{}.selfLink)'.format(subnetwork_resource['name']),\n },\n ]\n\n return {'resources': [subnetwork_resource], 'outputs': output}",
"def cloud_config():\n return Namespace(\n project=os.environ.get('GCLOUD_PROJECT'),\n storage_bucket=os.environ.get('CLOUD_STORAGE_BUCKET'),\n client_secrets=os.environ.get('GOOGLE_CLIENT_SECRETS'),\n bigtable_instance=os.environ.get('BIGTABLE_CLUSTER'),\n api_key=os.environ.get('API_KEY'))"
] | [
"0.6121712",
"0.6021425",
"0.5970042",
"0.58997214",
"0.57222944",
"0.565262",
"0.5609398",
"0.55893815",
"0.55608094",
"0.55608094",
"0.5529405",
"0.5481049",
"0.5470068",
"0.5454739",
"0.5422953",
"0.5411979",
"0.53847975",
"0.53629035",
"0.5360652",
"0.5286819",
"0.5279199",
"0.5261233",
"0.5261053",
"0.5235409",
"0.52056396",
"0.52040553",
"0.51935893",
"0.5161139",
"0.516051",
"0.51563823"
] | 0.8010254 | 0 |
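
Because `_create_subcloud_config_file` above returns plain INI-style text, its output can be sanity-checked with the standard `configparser` module. The snippet below is only a sketch: the section and key names come from the document above, but the sample values (and the idea of round-tripping through `configparser` at all) are assumptions, not part of the original code.

```python
import configparser

# a tiny stand-in for the string the method returns; real values come from the payload
subcloud_config = (
    "[SYSTEM]\n"
    "SYSTEM_MODE=simplex\n"
    "[MGMT_NETWORK]\n"
    "CIDR = 192.168.101.0/24\n"
    "GATEWAY = 192.168.101.1\n"
)

parser = configparser.ConfigParser(interpolation=None)  # values such as passwords may contain '%'
parser.read_string(subcloud_config)
print(parser["MGMT_NETWORK"]["CIDR"])   # -> 192.168.101.0/24
```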
Get the subcloud users and passwords from keyring | def _get_subcloud_users(self):
DEFAULT_SERVICE_PROJECT_NAME = 'services'
# First entry is openstack user name, second entry is the user stored
# in keyring. Not sure why heat_admin uses a different keystone name.
SUBCLOUD_USERS = [
('nova', 'nova'),
('placement', 'placement'),
('sysinv', 'sysinv'),
('patching', 'patching'),
('heat', 'heat'),
('ceilometer', 'ceilometer'),
('vim', 'vim'),
('aodh', 'aodh'),
('panko', 'panko'),
('mtce', 'mtce'),
('cinder', 'cinder'),
('glance', 'glance'),
('neutron', 'neutron'),
('heat_admin', 'heat-domain'),
('gnocchi', 'gnocchi'),
('fm', 'fm')
]
user_list = list()
for user in SUBCLOUD_USERS:
password = keyring.get_password(user[1],
DEFAULT_SERVICE_PROJECT_NAME)
if password:
user_dict = dict()
user_dict['name'] = user[0]
user_dict['password'] = password
user_list.append(user_dict)
else:
LOG.error("User %s not found in keyring as %s" % (user[0],
user[1]))
pecan.abort(500, _('System configuration error'))
return user_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_credentials(service_name=\"dataforSeo\", uname=\"[email protected]\"):\n pw = keyring.get_password(service_name, uname)\n return [uname, pw]",
"def GetUserCredentials(self):\r\n # Create a local alias to the email variable to avoid Python's crazy\r\n # scoping rules.\r\n global keyring\r\n email = self.email\r\n if email is None:\r\n email = GetEmail(\"Email (login for uploading to %s)\" % self.server)\r\n password = None\r\n if keyring and not email in self.accounts_seen:\r\n try:\r\n password = keyring.get_password(self.host, email)\r\n except:\r\n # Sadly, we have to trap all errors here as\r\n # gnomekeyring.IOError inherits from object. :/\r\n print \"Failed to get password from keyring\"\r\n keyring = None\r\n if password is not None:\r\n print \"Using password from system keyring.\"\r\n self.accounts_seen.add(email)\r\n else:\r\n password = getpass.getpass(\"Password for %s: \" % email)\r\n if keyring:\r\n answer = raw_input(\"Store password in system keyring?(y/N) \").strip()\r\n if answer == \"y\":\r\n keyring.set_password(self.host, email, password)\r\n self.accounts_seen.add(email)\r\n return (email, password)",
"def get_user_credentials(connection):\n\n response = connection.get_json('user')\n user_data = response.get('user', None)\n if user_data is None:\n raise SAPCliError('gCTS response does not contain \\'user\\'')\n\n config_data = user_data.get('config', None)\n if config_data is None:\n return []\n\n user_credentials = [cred for cred in config_data if cred['key'] == 'USER_AUTH_CRED_ENDPOINTS']\n return json.loads(user_credentials[0]['value'])",
"def get_creds():\n with open(CREDS_PATH, 'r') as creds_file:\n creds = json.load(creds_file)\n return creds['uname'], creds['pword']",
"def get_auth(self):\n # Only return accepted keys from the auth_keys dictionary\n # This is to prevent exceptions thrown from keystone session\n returnDict = {}\n for key in self.creds:\n if key in self.auth_keys[self.api_version]:\n returnDict[key] = self.creds[key]\n return returnDict",
"def get_auth():\n config = configparser.RawConfigParser()\n config.read(\"speech.cfg\")\n apikey = config.get('auth', 'apikey')\n return (\"apikey\", apikey)",
"def auth(self):\n return self.creds(\"[email protected]\", cookie=\"USERTOKEN: authcookie\")",
"def get_appengine_credentials():\n return get_credentials()",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def get_list_keys(rpc_user, rpc_pwd):\n data = '{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"listkeys\"}'\n return call_rpc(rpc_user, rpc_pwd, data)",
"def take_auth_data():\n home = str(Path.home())\n path_to_keys = '/Documents/twitter/keys/'\n\n files = [f for f in listdir(home+path_to_keys) if '.DS' not in f]\n\n tokens = []\n for f in files:\n with open(home+path_to_keys+f, 'r') as lines:\n ln = lines.readline().replace(\" \", \"\")\n tokens.append(ln)\n\n auth_data = dict(zip(files, tokens))\n return auth_data",
"def get_authentication_data():\n\n sigrhe_login = config_parser.get(\"sigrhe\", \"login\")\n sigrhe_password = config_parser.get(\"sigrhe\", \"password\")\n\n return sigrhe_login, sigrhe_password",
"def build_passwords(self, project_update, runtime_passwords):\n passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)\n if project_update.credential:\n passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')\n passwords['scm_username'] = project_update.credential.get_input('username', default='')\n passwords['scm_password'] = project_update.credential.get_input('password', default='')\n return passwords",
"def credentials(self):\n return CurrentProject().config.credentials[self.key]",
"def newcred(self):\n return {'login': input('username: '),\n 'password': getpass.getpass()}",
"def list_credentials():\n creds = load_auth()\n max_username_len = max([len(c.username) for c in creds]) if len(creds) > 0 else 1\n long_format = f\"{{:{max_username_len}}} for {{}}\"\n for cred in creds:\n if len(cred.hostname) > 0:\n print(str.format(long_format, cred.username, cred.hostname))\n else:\n print(cred.username)\n if len(creds) == 0 and os.isatty(1):\n print(\"No credentials configured\")",
"def get_users():\n conn = pymongo.Connection(\"localhost\",27017)\n db = conn[\"paperDB\"]\n userRecords = db.users\n\n users = dict()\n for user in userRecords.find():\n users[user['user']] = user['password']\n\n print \"%d users loaded.\" % (len(users))\n return users",
"def get_user_password(sockfile):\n return (\"root\", \"fnxm\")",
"def get_saucelabs_username_and_key():\r\n return {\"username\": settings.SAUCE.get('USERNAME'), \"access-key\": settings.SAUCE.get('ACCESS_ID')}",
"def get_leaf_credentials(self):\n credentials = {}\n with open('setup1.json', 'r') as f:\n json_text = f.read()\n setup = json.loads(json_text)\n for value in setup['topology']['nodes']:\n if value['role'] == 'LEAF':\n credentials.update({value['mgmt_ip_address'] : value['credential']})\n return credentials",
"def credentials(self) -> Mapping:",
"def auth(self):\n return auth.get_auth()",
"def __iter__(self):\n return self.cli.passwords.keys().__iter__()",
"def get_all_credentials():\n session = db.get_session()\n return (session.query(network_models_v2.Credential).all())",
"def test_get_user_api_keys(self):\n pass",
"def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()",
"def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)",
"def get_spine_credentials(self):\n credentials = {}\n with open('setup1.json', 'r') as f:\n json_text = f.read()\n setup = json.loads(json_text)\n for value in setup['topology']['nodes']:\n if value['role'] == 'SPINE':\n credentials.update({value['mgmt_ip_address'] : value['credential']})\n return credentials",
"def user_access(self):\n results = {}\n self.cache_hosts_wts_tokens(self.download_list)\n for hostname in self.known_hosts.keys():\n if self.known_hosts[hostname].available is False:\n logger.critical(\n f\"Was unable to get user authorization from {hostname}.\"\n )\n continue\n access_token = self.known_hosts[hostname].access_token\n authz = get_user_auth(hostname, access_token)\n results[hostname] = authz\n\n return results",
"def get_password_testing():\n if settings.CLOUD:\n return [os.environ.get('passwordtest')]\n with open('env.yaml') as file_name:\n data = yaml.safe_load(file_name)\n return (data['test_variables']['password'],)"
] | [
"0.65946317",
"0.6310419",
"0.6153865",
"0.605549",
"0.60493493",
"0.6014239",
"0.59450334",
"0.592866",
"0.5894807",
"0.58728194",
"0.58414346",
"0.5836289",
"0.5806845",
"0.5778761",
"0.57776064",
"0.5770482",
"0.5766917",
"0.5747226",
"0.57193136",
"0.57034826",
"0.5702921",
"0.5668885",
"0.56638867",
"0.5656928",
"0.5647715",
"0.5611182",
"0.56091535",
"0.5605522",
"0.5604878",
"0.56000894"
] | 0.68357867 | 0 |
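
`_get_subcloud_users` above only returns an entry when `keyring.get_password(<keyring name>, 'services')` finds a stored secret, and aborts with a 500 otherwise. A minimal sketch of seeding and reading one such entry is shown below; the password value is a placeholder, and in a real deployment these entries are created by the platform rather than by hand.

```python
import keyring

# store a secret for the 'sysinv' service under the 'services' project name
keyring.set_password("sysinv", "services", "example-password")   # placeholder value

# this is the lookup _get_subcloud_users performs for each service
print(keyring.get_password("sysinv", "services"))                 # -> example-password
```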
Get the system controller's management address pool | def _get_management_address_pool(self, context):
session = KeystoneClient().endpoint_cache.get_session_from_token(
context.auth_token, context.project)
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME, session)
return sysinv_client.get_management_address_pool() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def customer_owned_ipv4_pool(self) -> str:\n return pulumi.get(self, \"customer_owned_ipv4_pool\")",
"def get_pool():\n app = get_app()\n return app['pool']",
"def getManageIp(self):\n dev = self.device()\n if dev: return dev.getManageIp()\n return \"\"",
"def pool(self):\n return self._properties.get('pool')",
"def systemManagerAddress(self):\n return self._systemManagerAddress.value",
"def systemManagerAddress(self):\n return self._systemManagerAddress.value",
"def get_pool ( self ):\n if self._poolstack:\n return self._poolstack[-1]\n else:\n return self.get_new_pool ( force=True )",
"def get_device_pool(arn=None):\n pass",
"def get_manager_ip():\n return os.environ[MANAGER_IP_KEY]",
"def getPools(self):\n data = self.connect('get','pools',None)\n return data",
"def get_consumed_management_ips():\n all_ips = list()\n\n # let's also grab consumed management ips as well\n if configuration.deployment_backend == \"openstack\":\n if openstackUtils.connect_to_openstack():\n dhcp_leases = openstackUtils.get_consumed_management_ips()\n else:\n return all_ips\n else:\n dhcp_leases = osUtils.get_dhcp_leases()\n # let's also grab current dhcp reservations\n dhcp_reservations = osUtils.get_dhcp_reservations()\n for dr in dhcp_reservations:\n ip = str(dr[\"ip-address\"])\n last_octet = ip.split('.')[-1]\n all_ips.append(int(last_octet))\n\n for lease in dhcp_leases:\n ip = str(lease[\"ip-address\"])\n logger.debug(\"adding active lease %s\" % ip)\n last_octet = ip.split('.')[-1]\n all_ips.append(int(last_octet))\n\n return all_ips",
"def getSSIDPool(self):\n return self.request('getPool')",
"def get_mgmt_address_for_pid(pid):\n return _gen_addr(MGMT_ADDR_BASE, pid)",
"def resource_pool(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_pool\")",
"def _determine_resource_pool(session, vm_):\n resource_pool = \"\"\n if \"resource_pool\" in vm_.keys():\n resource_pool = _get_pool(vm_[\"resource_pool\"], session)\n else:\n pool = session.xenapi.pool.get_all()\n if not pool:\n resource_pool = None\n else:\n first_pool = session.xenapi.pool.get_all()[0]\n resource_pool = first_pool\n pool_record = session.xenapi.pool.get_record(resource_pool)\n log.debug(\"resource pool: %s\", pool_record[\"name_label\"])\n return resource_pool",
"def pool(self) -> NodePool:\n\n return self._pool",
"def ip_address_pools(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"ip_address_pools\")",
"def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)",
"def get_default_resource_pool(self):\n try:\n return self.client.list_resource_pools()[0]['resource_pool']\n except VMwareError as e:\n raise VMwareBackendError(e)",
"def agent_pool(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"agent_pool\")",
"def _get_pool (self, event):\n return self.pool",
"def getOsmHost(self):\n return self.osm_host",
"def get_master_address(self):\n if self.master_address:\n return self.master_address\n return super(CelerySentinelConnectionPool, self).get_master_address()",
"def pool(self) -> asyncpg.pool.Pool:\n return self.bot.pool",
"def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs",
"def getAddress(self):\r\n return self._container.getAddress()",
"def get_management_address(self, ensure_configuration=False):\n primary = self.get_interface(GENERIC_IFNAME + '0')\n prefix, prefix_len = ULA_PREFIX.split('/', 1)\n eui = netaddr.EUI(primary.lladdr)\n ip_str = str(eui.ipv6_link_local()).replace('fe80::', prefix[:-1])\n\n if not primary.is_up:\n self.up(primary)\n\n ip = netaddr.IPNetwork('%s/%s' % (ip_str, prefix_len))\n if ensure_configuration and ip not in primary.addresses:\n primary.addresses.append(ip)\n self.update_interface(primary)\n return ip_str",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def get_pools():\n pools = ch_core.hookenv.action_get('pools')\n if pools:\n return [p.strip() for p in pools.split(',')]\n return None"
] | [
"0.6537697",
"0.6467163",
"0.6289136",
"0.6148119",
"0.59776145",
"0.59776145",
"0.59086293",
"0.59005976",
"0.58809096",
"0.57775706",
"0.57616085",
"0.5676692",
"0.56216216",
"0.5545167",
"0.5511758",
"0.5487965",
"0.54841584",
"0.54414916",
"0.54396343",
"0.5414244",
"0.54076976",
"0.54067034",
"0.5398969",
"0.5386937",
"0.53736806",
"0.53665113",
"0.5364621",
"0.5315709",
"0.5315709",
"0.5303296"
] | 0.8022331 | 0 |
Get details about subcloud. | def get(self, subcloud_ref=None, qualifier=None):
context = restcomm.extract_context_from_environ()
if subcloud_ref is None:
# List of subclouds requested
subclouds = db_api.subcloud_get_all_with_status(context)
result = dict()
result['subclouds'] = []
first_time = True
subcloud_list = []
subcloud_status_list = []
# We get back a subcloud, subcloud_status pair for every
# subcloud_status entry corresponding to a subcloud. (Subcloud
# info repeats)
# Aggregate all the sync status for each of the
# endpoints per subcloud into an overall sync status
for subcloud, subcloud_status in subclouds:
subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
subcloud_status_dict = db_api.subcloud_status_db_model_to_dict(
subcloud_status)
subcloud_dict.update(subcloud_status_dict)
if not first_time:
if subcloud_list[-1]['id'] == subcloud_dict['id']:
# We have a match for this subcloud id already,
# check if we have a same sync_status
if subcloud_list[-1][consts.SYNC_STATUS] != \
subcloud_dict[consts.SYNC_STATUS]:
subcloud_list[-1][consts.SYNC_STATUS] = \
consts.SYNC_STATUS_OUT_OF_SYNC
if subcloud_status:
subcloud_status_list.append(
db_api.subcloud_endpoint_status_db_model_to_dict( # noqa
subcloud_status))
subcloud_list[-1][
consts.ENDPOINT_SYNC_STATUS] = subcloud_status_list
else:
subcloud_status_list = []
if subcloud_status:
subcloud_status_list.append(
db_api.subcloud_endpoint_status_db_model_to_dict( # noqa
subcloud_status))
subcloud_list.append(subcloud_dict)
else:
if subcloud_status:
subcloud_status_list.append(
db_api.subcloud_endpoint_status_db_model_to_dict(
subcloud_status))
subcloud_list.append(subcloud_dict)
first_time = False
for s in subcloud_list:
result['subclouds'].append(s)
return result
else:
# Single subcloud requested
subcloud = None
subcloud_dict = dict()
subcloud_status_list = []
endpoint_sync_dict = dict()
if subcloud_ref.isdigit():
# Look up subcloud as an ID
try:
subcloud = db_api.subcloud_get(context, subcloud_ref)
except exceptions.SubcloudNotFound:
pecan.abort(404, _('Subcloud not found'))
else:
# Look up subcloud by name
try:
subcloud = db_api.subcloud_get_by_name(context,
subcloud_ref)
except exceptions.SubcloudNameNotFound:
pecan.abort(404, _('Subcloud not found'))
subcloud_id = subcloud.id
if qualifier:
# Configuration for this subcloud requested.
# Encrypt before sending.
if qualifier == 'config':
result = dict()
user_list = self._get_subcloud_users()
# Use a hash of the subcloud name + management subnet
# as the encryption key
hashstring = subcloud.name + subcloud.management_subnet
h = MD5.new()
h.update(hashstring)
encryption_key = h.hexdigest()
user_list_string = json.dumps(user_list)
user_list_encrypted = crypt.urlsafe_encrypt(
encryption_key,
user_list_string)
result['users'] = user_list_encrypted
return result
else:
pecan.abort(400, _('Invalid request'))
else:
# Data for this subcloud requested
# Build up and append a dictionary of the endpoints
# sync status to the result.
for subcloud, subcloud_status in db_api. \
subcloud_get_with_status(context, subcloud_id):
subcloud_dict = db_api.subcloud_db_model_to_dict(
subcloud)
# may be empty subcloud_status entry, account for this
if subcloud_status:
subcloud_status_list.append(
db_api.subcloud_endpoint_status_db_model_to_dict(
subcloud_status))
endpoint_sync_dict = {consts.ENDPOINT_SYNC_STATUS:
subcloud_status_list}
subcloud_dict.update(endpoint_sync_dict)
return subcloud_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cloud_information(self):\n url = \"%s/state/teams/%s/cloud\" % (self.url, self.identifier, )\n return perform_request(url)",
"def show_private_cloud(client, private_cloud, location):\n return client.get(private_cloud, location)",
"def getting_info(self, cloud_path):\n\t\telog(\"getting info on {}\".format(cloud_path))",
"def subProject(self):\n logger.debug(\"Func: subProject/getter\")\n return self._subProjectsList[self.currentSubIndex]",
"def get_cloud_detail(sky):\n debug(\"Getting cloud details\")\n clouds = cloud_map(sky)\n debug(\"There are {} clouds listed in the Metar\".format(len(clouds)))\n thickest = thickest_clouds(clouds)\n debug(\"Found thickest clouds: thick: {} -- base {}\".format(thickest[0], thickest[1]))\n return {\n \"thickness\": thickest[0],\n \"base\": thickest[1]\n }",
"def got_info(self, cloud_obj):",
"def show(sub, arg):\n\n arg = int(arg)\n\n if sub == 'collection':\n res = api.get_collection(arg)\n elif sub == 'domain':\n res = api.get_domain(arg)\n elif sub == 'problem':\n res = api.get_problem(arg)\n elif sub == 'plan':\n res = api.get_plan(arg)\n else:\n print(\"Error: Unrecognized sub-command, {0}\".format(sub))\n exit(1)\n\n pprint.pprint(res)",
"def __get_general_subscr_info(self):\n query = (\"SELECT d.datname, r.rolname, s.subenabled, \"\n \"s.subconninfo, s.subslotname, s.subsynccommit, \"\n \"s.subpublications FROM pg_catalog.pg_subscription s \"\n \"JOIN pg_catalog.pg_database d \"\n \"ON s.subdbid = d.oid \"\n \"JOIN pg_catalog.pg_roles AS r \"\n \"ON s.subowner = r.oid \"\n \"WHERE s.subname = %(name)s AND d.datname = %(db)s\")\n\n result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)\n if result:\n return result[0]\n else:\n return False",
"def getSubInterface(self,subname,numNodesSub):\n subOptionInfo_p = []\n subSchemInfo_p = []\n filename_t = subname + '.sub'\n data_p = self.readNetlist(filename_t)\n subOptionInfo_p, subSchemInfo_p = self.separateNetlistInfo(data_p)\n if len(subOptionInfo_p) > 0:\n newline = subOptionInfo_p[0]\n newline = newline.split('.subckt '+ subname) \n intLine = newline[1].split()\n newindex = numNodesSub[subname]\n nodesInfoLine = intLine[0:newindex]\n return nodesInfoLine",
"def list(self):\n SubDets = namedtuple(\"SubDetails\", [\"subscription_id\", \"name\"])\n return [SubDets(\"123\", \"sub1\")]",
"def query_subregion(self, subregion):\n return self.sql_query(\"\"\" select from Subregion where name = \"{}\" \"\"\".format(subregion))",
"def test_get_cloud(self):\n pass",
"def get_cloud_from_controller():\n\n cmd = ['juju', 'show-controller', '--format=yaml']\n output = subprocess.check_output(cmd)\n if six.PY3:\n output = output.decode('utf-8')\n cloud_config = yaml.load(output)\n # There will only be one top level controller from show-controller,\n # but we do not know its name.\n assert len(cloud_config) == 1\n try:\n return list(cloud_config.values())[0]['details']['cloud']\n except KeyError:\n raise KeyError(\"Failed to get cloud information from the controller\")",
"def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList",
"def do_overcloud_show(tuskar, args, outfile=sys.stdout):\n overcloud = utils.find_resource(tuskar.overclouds, args.overcloud)\n print_overcloud_detail(overcloud, outfile=outfile)",
"def print_overcloud_detail(overcloud, outfile=sys.stdout):\n\n formatters = {\n 'attributes': fmt.attributes_formatter,\n 'counts': fmt.counts_formatter,\n }\n overcloud_dict = overcloud.to_dict()\n fmt.print_dict(overcloud_dict, formatters, outfile=outfile)",
"def get_subvols(self, refresh=False):\n if not refresh and hasattr(self, \"subvols\"):\n return\n self.subvols = {}\n cmd = ['btrfs', 'subvol', 'list', '-p', self.path]\n out, err, ret = self.justcall(cmd)\n if ret != 0:\n raise InitError(\"error running btrfs subvol list %s:\\n\"%self.path+err)\n\n for line in out.split(\"\\n\"):\n if len(line) == 0:\n continue\n l = line.split()\n subvol = {}\n subvol['id'] = l[1]\n subvol['parent_id'] = l[3]\n subvol['top'] = l[6]\n subvol['path'] = line[line.index(\" path \")+6:]\n self.subvols[subvol['id']] = subvol",
"def _get_subclient_properties(self):\r\n super(SalesforceSubclient, self)._get_subclient_properties()\r\n\r\n if 'cloudAppsSubClientProp' in self._subclient_properties:\r\n self._cloud_apps_subclient_prop = self._subclient_properties['cloudAppsSubClientProp']\r\n if 'salesforceSubclient' in self._cloud_apps_subclient_prop:\r\n sfsubclient = self._cloud_apps_subclient_prop['salesforceSubclient']\r\n self._objects = sfsubclient.get('backupSfObjects')\r\n self._files = sfsubclient.get('backupFileObjects')\r\n self._metadata = sfsubclient.get('backupSFMetadata')\r\n self._archived_deleted = sfsubclient.get('backupArchivedandDeletedRecs')",
"def post(self, subcloud_ref=None, qualifier=None):\n\n context = restcomm.extract_context_from_environ()\n\n if subcloud_ref is None:\n payload = eval(request.body)\n if not payload:\n pecan.abort(400, _('Body required'))\n name = payload.get('name')\n if not name:\n pecan.abort(400, _('name required'))\n management_subnet = payload.get('management-subnet')\n if not management_subnet:\n pecan.abort(400, _('management-subnet required'))\n management_start_ip = payload.get('management-start-ip')\n if not management_start_ip:\n pecan.abort(400, _('management-start-ip required'))\n management_end_ip = payload.get('management-end-ip')\n if not management_end_ip:\n pecan.abort(400, _('management-end-ip required'))\n management_gateway_ip = payload.get('management-gateway-ip')\n if not management_gateway_ip:\n pecan.abort(400, _('management-gateway-ip required'))\n systemcontroller_gateway_ip = \\\n payload.get('systemcontroller-gateway-ip')\n if not systemcontroller_gateway_ip:\n pecan.abort(400, _('systemcontroller-gateway-ip required'))\n\n self._validate_subcloud_config(context,\n name,\n management_subnet,\n management_start_ip,\n management_end_ip,\n management_gateway_ip,\n systemcontroller_gateway_ip)\n\n try:\n # Ask dcmanager-manager to add the subcloud.\n # It will do all the real work...\n return self.rpc_client.add_subcloud(context, payload)\n except RemoteError as e:\n pecan.abort(422, e.value)\n except Exception as e:\n LOG.exception(e)\n pecan.abort(500, _('Unable to create subcloud'))\n elif qualifier:\n if qualifier == 'config':\n subcloud = None\n\n if subcloud_ref.isdigit():\n # Look up subcloud as an ID\n try:\n subcloud = db_api.subcloud_get(context, subcloud_ref)\n except exceptions.SubcloudNotFound:\n pecan.abort(404, _('Subcloud not found'))\n else:\n # Look up subcloud by name\n try:\n subcloud = db_api.subcloud_get_by_name(context,\n subcloud_ref)\n except exceptions.SubcloudNameNotFound:\n pecan.abort(404, _('Subcloud not found'))\n\n payload = dict()\n if request.body:\n payload = eval(request.body)\n config_file = self._create_subcloud_config_file(\n context, subcloud, payload)\n result = dict()\n result['config'] = config_file\n return result\n else:\n pecan.abort(400, _('Invalid request'))\n else:\n pecan.abort(400, _('Invalid request'))",
"def get(self):\r\n url = '{0}/subdomains/{1}'.format(self.parent.get_url(),\r\n port.to_u(self.object_id))\r\n return http.Request('GET', url), parsers.parse_json",
"def detail(self):\n info = self.info()\n info[u'services'] = {}\n for item in self.get_endpoints():\n try:\n info[u'services'][item.service].append(item.endpoint)\n except:\n info[u'services'][item.service] = [item.endpoint]\n return info",
"def subdataset(self):\n return self._clip_metadata.get(\"subdataset\")",
"def server_info(subresource, server, proxyfilename, baseurl):\n server = HTTPRequests(url=server, localcert=proxyfilename, localkey=proxyfilename, version='HC')\n\n dictresult, status, reason = server.get(baseurl, {'subresource' : subresource})\n\n return dictresult['result'][0]",
"def _get_subcloud_users(self):\n DEFAULT_SERVICE_PROJECT_NAME = 'services'\n # First entry is openstack user name, second entry is the user stored\n # in keyring. Not sure why heat_admin uses a different keystone name.\n SUBCLOUD_USERS = [\n ('nova', 'nova'),\n ('placement', 'placement'),\n ('sysinv', 'sysinv'),\n ('patching', 'patching'),\n ('heat', 'heat'),\n ('ceilometer', 'ceilometer'),\n ('vim', 'vim'),\n ('aodh', 'aodh'),\n ('panko', 'panko'),\n ('mtce', 'mtce'),\n ('cinder', 'cinder'),\n ('glance', 'glance'),\n ('neutron', 'neutron'),\n ('heat_admin', 'heat-domain'),\n ('gnocchi', 'gnocchi'),\n ('fm', 'fm')\n ]\n\n user_list = list()\n for user in SUBCLOUD_USERS:\n password = keyring.get_password(user[1],\n DEFAULT_SERVICE_PROJECT_NAME)\n if password:\n user_dict = dict()\n user_dict['name'] = user[0]\n user_dict['password'] = password\n user_list.append(user_dict)\n else:\n LOG.error(\"User %s not found in keyring as %s\" % (user[0],\n user[1]))\n pecan.abort(500, _('System configuration error'))\n\n return user_list",
"def get_details(self):",
"def _get_subclient_content_(self):\r\n return self.content",
"def tag_cloud():\n\n return LOAD('plugin_wiki','cloud')",
"def main():\n # Take login credentials from the command line if provided\n # Otherwise, take them from your environment variables file ~/.profile\n description = ('Simple application that logs on to the APIC'\n ' and displays all of the Subnets.')\n creds = Credentials('apic', description)\n creds.add_argument('--tenant', help='The name of Tenant')\n args = creds.get()\n\n # Login to APIC\n session = Session(args.url, args.login, args.password)\n resp = session.login()\n if not resp.ok:\n print('%% Could not login to APIC')\n\n # Download all of the tenants, app profiles, and Subnets\n # and store the names as tuples in a list\n tenants = Tenant.get(session)\n for tenant in tenants:\n check_longest_name(tenant.name, \"Tenant\")\n if args.tenant is None:\n get_subnet(session, tenant)\n else:\n if tenant.name == args.tenant:\n get_subnet(session, tenant)\n\n # Display the data downloaded\n template = '{0:' + str(longest_names[\"Tenant\"]) + '} ' \\\n '{1:' + str(longest_names[\"Bridge Domain\"]) + '} ' \\\n '{2:' + str(longest_names[\"Subnet\"]) + '} ' \\\n '{3:' + str(longest_names[\"Scope\"]) + '}'\n print(template.format(\"Tenant\", \"Bridge Domain\", \"Subnet\", \"Scope\"))\n print(template.format('-' * longest_names[\"Tenant\"],\n '-' * longest_names[\"Bridge Domain\"],\n '-' * longest_names[\"Subnet\"],\n '-' * longest_names[\"Scope\"]))\n for rec in sorted(data):\n print(template.format(*rec))",
"def action_view_subcontract(self):\n action = self.env.ref('subcontract.subcontract_rfq')\n result = action.read()[0]\n\n # Remvove the context since the action basically display RFQ and not PO.\n result['context'] = {}\n order_line_ids = self.env['subcontract.order.line'].search([('orderpoint_id', '=', self.id)])\n subcontract_ids = order_line_ids.mapped('order_id')\n\n result['domain'] = \"[('id','in',%s)]\" % (subcontract_ids.ids)\n\n return result",
"def info(self):\n return self.client.call('GET', self.name + 'info')"
] | [
"0.6381916",
"0.6156021",
"0.6005347",
"0.5974986",
"0.59666353",
"0.5932248",
"0.5835864",
"0.56580114",
"0.5640536",
"0.5608603",
"0.5590534",
"0.55747557",
"0.5502725",
"0.54266745",
"0.53677464",
"0.5365587",
"0.5317455",
"0.5309048",
"0.52923965",
"0.52414227",
"0.5190467",
"0.5181201",
"0.5168413",
"0.51450825",
"0.5118707",
"0.5093468",
"0.5077918",
"0.5067912",
"0.5067366",
"0.5008514"
] | 0.65140164 | 0 |
Create a new subcloud. | def post(self, subcloud_ref=None, qualifier=None):
context = restcomm.extract_context_from_environ()
if subcloud_ref is None:
payload = eval(request.body)
if not payload:
pecan.abort(400, _('Body required'))
name = payload.get('name')
if not name:
pecan.abort(400, _('name required'))
management_subnet = payload.get('management-subnet')
if not management_subnet:
pecan.abort(400, _('management-subnet required'))
management_start_ip = payload.get('management-start-ip')
if not management_start_ip:
pecan.abort(400, _('management-start-ip required'))
management_end_ip = payload.get('management-end-ip')
if not management_end_ip:
pecan.abort(400, _('management-end-ip required'))
management_gateway_ip = payload.get('management-gateway-ip')
if not management_gateway_ip:
pecan.abort(400, _('management-gateway-ip required'))
systemcontroller_gateway_ip = \
payload.get('systemcontroller-gateway-ip')
if not systemcontroller_gateway_ip:
pecan.abort(400, _('systemcontroller-gateway-ip required'))
self._validate_subcloud_config(context,
name,
management_subnet,
management_start_ip,
management_end_ip,
management_gateway_ip,
systemcontroller_gateway_ip)
try:
# Ask dcmanager-manager to add the subcloud.
# It will do all the real work...
return self.rpc_client.add_subcloud(context, payload)
except RemoteError as e:
pecan.abort(422, e.value)
except Exception as e:
LOG.exception(e)
pecan.abort(500, _('Unable to create subcloud'))
elif qualifier:
if qualifier == 'config':
subcloud = None
if subcloud_ref.isdigit():
# Look up subcloud as an ID
try:
subcloud = db_api.subcloud_get(context, subcloud_ref)
except exceptions.SubcloudNotFound:
pecan.abort(404, _('Subcloud not found'))
else:
# Look up subcloud by name
try:
subcloud = db_api.subcloud_get_by_name(context,
subcloud_ref)
except exceptions.SubcloudNameNotFound:
pecan.abort(404, _('Subcloud not found'))
payload = dict()
if request.body:
payload = eval(request.body)
config_file = self._create_subcloud_config_file(
context, subcloud, payload)
result = dict()
result['config'] = config_file
return result
else:
pecan.abort(400, _('Invalid request'))
else:
pecan.abort(400, _('Invalid request')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_overcloud_create(tuskar, args, outfile=sys.stdout):\n overcloud_roles = tuskar.overcloud_roles.list()\n overcloud_dict = create_overcloud_dict(args, overcloud_roles)\n overcloud = tuskar.overclouds.create(**overcloud_dict)\n print_overcloud_detail(overcloud, outfile=outfile)",
"def createSubCollection(self, newcollection='/EDSZone/home/jtilson/testdir'):\n with self._open_connection() as session:\n utilities.log.info('Attempt to create the collection {}'.format(newcollection))\n try: \n coll = session.collections.create(newcollection)\n utilities.log.info('Created a subcollection with id {} and path {}'.format(coll.id, coll.path))\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n utilities.log.warn('IRODS subcollection: {}'.format(message))\n #sys.exit(1)\n print(coll.path)\n return coll.path",
"def _create_subcloud_config_file(self, context, subcloud, payload):\n DEFAULT_STR = '<EDIT>'\n\n pxe_cidr = payload.get(\n 'pxe-subnet', DEFAULT_STR)\n management_vlan = payload.get(\n 'management-vlan', DEFAULT_STR)\n management_interface_mtu = payload.get(\n 'management-interface-mtu', DEFAULT_STR)\n management_interface_ports = payload.get(\n 'management-interface-port', DEFAULT_STR)\n oam_cidr = payload.get(\n 'oam-subnet', DEFAULT_STR)\n oam_gateway = payload.get(\n 'oam-gateway-ip', DEFAULT_STR)\n oam_ip_floating_address = payload.get(\n 'oam-floating-ip', DEFAULT_STR)\n oam_ip_unit_0_address = payload.get(\n 'oam-unit-0-ip', DEFAULT_STR)\n oam_ip_unit_1_address = payload.get(\n 'oam-unit-1-ip', DEFAULT_STR)\n oam_interface_mtu = payload.get(\n 'oam-interface-mtu', DEFAULT_STR)\n oam_interface_ports = payload.get(\n 'oam-interface-port', DEFAULT_STR)\n system_mode = payload.get(\n 'system-mode', DEFAULT_STR)\n\n management_address_pool = self._get_management_address_pool(context)\n systemcontroller_subnet = \"%s/%d\" % (\n management_address_pool.network,\n management_address_pool.prefix)\n sc_mgmt_floating_ip = management_address_pool.floating_address\n\n subcloud_config = \"\"\n if system_mode in [SYSTEM_MODE_SIMPLEX, SYSTEM_MODE_DUPLEX,\n SYSTEM_MODE_DUPLEX_DIRECT]:\n subcloud_config += (\n \"[SYSTEM]\\n\"\n \"SYSTEM_MODE={}\\n\".format(system_mode))\n\n if system_mode == SYSTEM_MODE_SIMPLEX:\n subcloud_oamip_config = (\n \"IP_ADDRESS = {oam_ip_floating_address}\\n\"\n ).format(\n oam_ip_floating_address=oam_ip_floating_address,\n )\n else:\n subcloud_oamip_config = (\n \"IP_FLOATING_ADDRESS = {oam_ip_floating_address}\\n\"\n \"IP_UNIT_0_ADDRESS = {oam_ip_unit_0_address}\\n\"\n \"IP_UNIT_1_ADDRESS = {oam_ip_unit_1_address}\\n\"\n ).format(\n oam_ip_floating_address=oam_ip_floating_address,\n oam_ip_unit_0_address=oam_ip_unit_0_address,\n oam_ip_unit_1_address=oam_ip_unit_1_address,\n )\n\n MIN_MANAGEMENT_SUBNET_SIZE = 8\n tmp_management_subnet = validate_network_str(\n subcloud.management_subnet,\n minimum_size=MIN_MANAGEMENT_SUBNET_SIZE)\n\n is_ipv6_mgmt = (tmp_management_subnet.version == 6)\n\n # If ipv6 then we need pxe subnet and management_vlan.\n # If user specified pxe boot subnet, then management vlan is required\n # and vice versa\n if is_ipv6_mgmt or (pxe_cidr != DEFAULT_STR) or \\\n (management_vlan != DEFAULT_STR):\n subcloud_config += (\n \"[REGION2_PXEBOOT_NETWORK]\\n\"\n \"PXEBOOT_CIDR = {pxe_cidr}\\n\"\n \"[MGMT_NETWORK]\\n\"\n \"VLAN = {management_vlan}\\n\"\n ).format(\n pxe_cidr=pxe_cidr,\n management_vlan=management_vlan,\n )\n else:\n subcloud_config += \"[MGMT_NETWORK]\\n\"\n\n subcloud_config += (\n \"CIDR = {management_cidr}\\n\"\n \"GATEWAY = {management_gateway}\\n\"\n \"IP_START_ADDRESS = {management_ip_start_address}\\n\"\n \"IP_END_ADDRESS = {management_ip_end_address}\\n\"\n \"DYNAMIC_ALLOCATION = Y\\n\"\n \"LOGICAL_INTERFACE = LOGICAL_INTERFACE_1\\n\"\n \"[LOGICAL_INTERFACE_1]\\n\"\n \"LAG_INTERFACE = N\\n\"\n \"INTERFACE_MTU = {management_interface_mtu}\\n\"\n \"INTERFACE_PORTS = {management_interface_ports}\\n\"\n \"[OAM_NETWORK]\\n\"\n \"CIDR = {oam_cidr}\\n\"\n \"GATEWAY = {oam_gateway}\\n\" +\n subcloud_oamip_config +\n \"LOGICAL_INTERFACE = LOGICAL_INTERFACE_2\\n\"\n \"[LOGICAL_INTERFACE_2]\\n\"\n \"LAG_INTERFACE = N\\n\"\n \"INTERFACE_MTU = {oam_interface_mtu}\\n\"\n \"INTERFACE_PORTS = {oam_interface_ports}\\n\"\n \"[SHARED_SERVICES]\\n\"\n \"SYSTEM_CONTROLLER_SUBNET = {systemcontroller_subnet}\\n\"\n \"SYSTEM_CONTROLLER_FLOATING_ADDRESS = 
{sc_mgmt_floating_ip}\\n\"\n \"REGION_NAME = SystemController\\n\"\n \"ADMIN_PROJECT_NAME = admin\\n\"\n \"ADMIN_USER_NAME = admin\\n\"\n \"ADMIN_PASSWORD = {admin_password}\\n\"\n \"KEYSTONE_ADMINURL = {keystone_adminurl}\\n\"\n \"KEYSTONE_SERVICE_NAME = keystone\\n\"\n \"KEYSTONE_SERVICE_TYPE = identity\\n\"\n \"GLANCE_SERVICE_NAME = glance\\n\"\n \"GLANCE_SERVICE_TYPE = image\\n\"\n \"GLANCE_CACHED = True\\n\"\n \"[REGION_2_SERVICES]\\n\"\n \"REGION_NAME = {region_2_name}\\n\"\n \"[VERSION]\\n\"\n \"RELEASE = {release}\\n\"\n ).format(\n management_cidr=subcloud.management_subnet,\n management_gateway=subcloud.management_gateway_ip,\n management_ip_start_address=subcloud.management_start_ip,\n management_ip_end_address=subcloud.management_end_ip,\n management_interface_mtu=management_interface_mtu,\n management_interface_ports=management_interface_ports,\n oam_cidr=oam_cidr,\n oam_gateway=oam_gateway,\n oam_interface_mtu=oam_interface_mtu,\n oam_interface_ports=oam_interface_ports,\n systemcontroller_subnet=systemcontroller_subnet,\n sc_mgmt_floating_ip=sc_mgmt_floating_ip,\n admin_password=cfg.CONF.cache.admin_password,\n keystone_adminurl=cfg.CONF.cache.auth_uri,\n region_2_name=subcloud.name,\n release=subcloud.software_version,\n )\n return subcloud_config",
"def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)",
"def createSubproject(self, nameOfSubProject):\n logger.debug(\"Func: createSubproject\")\n\n if nameOfSubProject in self._subProjectsList:\n msg = \"%s is already in sub-projects list\" % nameOfSubProject\n # logger.warning(msg)\n # raise Exception([340, msg])\n self._exception(340, msg)\n return\n\n self._subProjectsList.append(nameOfSubProject)\n self._saveSubprojects(self._subProjectsList)\n self.currentSubIndex = len(self._subProjectsList)-1\n return self._subProjectsList",
"def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)",
"def _add_subport(self, trunk, subport, df_parent):\n model = trunk_models.ChildPortSegmentation(\n id=self._get_subport_id(trunk, subport),\n topic=trunk.project_id,\n parent=trunk.port_id,\n port=subport.port_id,\n segmentation_type=subport.segmentation_type,\n segmentation_id=subport.segmentation_id,\n )\n self.nb_api.create(model)",
"def create():",
"def create():",
"def create(cls, body: CloudAccount):\n\t\tpass",
"def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)",
"def create(self):\n\n if self.call(method='addSubdomain', args=[self.domainname, self.subdomain]):\n return self",
"def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i",
"def create_database(args):\n Print.GN('Creating and configuring Cloud SQL.')\n connection_name = '%s:%s:%s' % (\n args.cloud_sql_project, args.cloud_sql_region, args.cloud_sql_name)\n subprocess.check_output(\n ['gcloud', 'sql', 'instances', 'create',\n args.cloud_sql_name,\n '--project', args.cloud_sql_project,\n '--activation-policy', 'ALWAYS',\n '--tier', 'db-n1-standard-1',\n '--assign-ip',\n '--no-backup',\n '--database-version', 'MYSQL_5_6',\n '--region', 'us-west1',\n '--storage-auto-increase',\n '--storage-size', '10',\n '--storage-type', 'HDD'])\n subprocess.check_output(['gcloud', 'sql', 'users', 'set-password',\n 'root', '%',\n '--password', args.cloud_sql_password,\n '--instance', args.cloud_sql_name,\n '--project', args.cloud_sql_project])\n subprocess.check_output(['gcloud', 'sql', 'databases', 'create',\n 'leonardo',\n '--instance', args.cloud_sql_name,\n '--project', args.cloud_sql_project])\n print('Successfully created instance: %s' % connection_name)\n return connection_name",
"def init_cloud_api(self, args=None):\n pass",
"def create_epic():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/epics\".format(STORED_ID['project_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"name\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['epic_id'] = response.json()['id']",
"def _create_subscription(self):\n try:\n self.client.create_subscription(\n name=self.subscription_path, topic=self.topic_path\n )\n except NotFound:\n # suitable topic does not exist in the Pitt-Google project\n raise ValueError(\n (\n f\"A subscription named {self.subscription_name} does not exist\"\n \"in the Google Cloud Platform project \"\n f\"{settings.GOOGLE_CLOUD_PROJECT}, \"\n \"and one cannot be automatically create because Pitt-Google \"\n \"does not publish a public topic with the same name.\"\n )\n )\n else:\n self._log_and_print(f\"Created subscription: {self.subscription_path}\")",
"def create(*args):",
"def create_cloud_provider(providername):\n backend_name = request.get_json().get(\"backend\")\n service_name = request.get_json().get(\"service\")\n response = jsonify(\n admin.create_provider(\n current_app.scoped_session(),\n providername,\n backend=backend_name,\n service=service_name,\n )\n )\n return response",
"def create_subscription(post, user, sub_type=None, update=False):\n subs = Subscription.objects.filter(post=post.root, user=user)\n sub = subs.first()\n\n default = Subscription.TYPE_MAP.get(user.profile.message_prefs,\n Subscription.LOCAL_MESSAGE)\n\n empty = sub_type is None\n # Get the current sub type from what's given or the existing sub\n sub_type = None if empty else sub_type\n # No type has been given so default\n sub_type = sub_type or default\n\n # Ensure the sub type is not set to something wrote\n if sub and update:\n # Update an existing subscription\n sub.type = sub_type\n sub.save()\n else:\n # Drop all existing subscriptions for the user by default.\n subs.delete()\n Subscription.objects.create(post=post.root, user=user, type=sub_type)\n\n # Recompute subscription count\n subs_count = Subscription.objects.filter(post=post.root).exclude(type=Subscription.NO_MESSAGES).count()\n\n # Update root subscription counts.\n Post.objects.filter(pk=post.root.pk).update(subs_count=subs_count)",
"def create_pubsub_subscription(client, project, topic, name):\n topic_name = pubsub.topic_name(project, topic)\n full_name = pubsub.subscription_name(project, name)\n if client.get_subscription(full_name):\n return\n\n client.create_subscription(full_name, topic_name)",
"def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))",
"def create(subnetModeDetails):\n\n # Remove id as it's created automatically\n if 'id' in subnetModeDetails:\n del subnetModeDetails['id']\n\n schema = SubnetModeSchema()\n new_subnetMode = schema.load(subnetModeDetails, session=db.session)\n db.session.add(new_subnetMode)\n db.session.commit()\n\n # Serialize and return the newly created deployment\n # in the response\n data = schema.dump(new_subnetMode)\n return data, 201",
"def __init__(__self__,\n resource_name: str,\n args: PrivateCloudArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def create(ctx):\n pass",
"def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True",
"def create():\n pass",
"def test_register_cloud(self):\n pass",
"def setup(args):\n\n start = time.time()\n \n SETUP_SUMMARY_FILE = os.path.splitext(os.path.basename(args.setup.name))[0] + '.cid'\n print SETUP_SUMMARY_FILE\n\n with shell.Step(1):\n print \"Cloud setup validation:\"\n\n # Load cloud configuration\n print \"* Parsing the cloud XML definition file\"\n config = etree.parse(args.setup)\n\n # Validate the configuration file\n print \"* Validating the cloud XML definition against the XML schema\"\n conf.schema('cloud-setup').assertValid(config)\n\n cloud = config.getroot().attrib\n\n # Raise an error if an unmanaged cloud is requested\n print \"* Checking for supported setup type\"\n if 'manager' not in cloud:\n raise NotImplementedError(\"Unmanaged clouds are not yet supported\")\n\n # Instantiate connections\n with shell.Step(2):\n print \"Instantiation of the cloud manager connection:\"\n print \"* Connecting to the VPC manager\"\n c = boto.VPCConnection(args.access_key_id, args.secret_key)\n\n with shell.Step(3):\n print \"Creation and setup of the virtual private cloud:\"\n # Get max vpc size (16) using the cloud subnet IP range\n print \"* Getting or creating the VPC\"\n vpc, created = c.get_or_create(str(cidr.CIDR(cloud['cidr'], 16)))\n subnet_cidr = cidr.CIDR(cloud['cidr'])\n if created:\n print \" └ New VPC created with ID '{0}'\".format(vpc.id)\n print \"* Waiting for VPC creation to complete\",\n vpc = shell.wait(vpc, 'available', interval=0)\n else:\n print \" └ Using existing VPC with ID '{0}'\".format(vpc.id)\n print \"* Checking for valid CIDR block of the existing VPC\"\n vpc_cidr = cidr.CIDR(vpc.cidr_block)\n if subnet_cidr.base not in vpc_cidr:\n raise ValueError(\"The requested subnet CIDR block base \" \\\n \"address ({0}) falls outside the VPC CIDR \" \\\n \"block ({1!s}).\\nAcceptable values are in \" \\\n \"the range {1.base} - {1.last}.\".format(\n subnet_cidr.base, vpc_cidr))\n\n if subnet_cidr.size > vpc_cidr.size:\n raise ValueError(\"The requested subnet CIDR size (/{0.block},\"\\\n \" {0.size} IPs) is too big for the \" \\\n \"existing VPC CIDR size (/{1.block}, {1.size}\"\\\n \" IPs).\".format(subnet_cidr, vpc_cidr))\n\n with shell.Step(4):\n print \"Subnet, gateway, addressing and routing setup:\"\n\n print \"* Getting or creating subnet\"\n subnet, created = vpc.get_or_create_subnet(str(subnet_cidr))\n if created:\n print \" └ New subnet created with ID '{0}'\".format(subnet.id)\n else:\n print \" └ Using existing subnet with ID '{0}'\".format(subnet.id)\n\n print \"* Getting or creating internet gateway\"\n gateway, created = vpc.get_or_create_gateway()\n if created:\n print \" └ New gateway created with ID '{0}'\".format(gateway.id)\n else:\n print \" └ Using existing gateway with ID '{0}'\".format(gateway.id)\n\n print \"* Getting public IP address\"\n address, created = c.get_or_create_address()\n if created:\n print \" └ New address created with IP '{0.public_ip}'\".format(\n address\n )\n else:\n print \" └ Using existing address with IP '{0.public_ip}'\".format(\n address\n )\n\n print \"* Setting up routing\"\n print \" └ Getting route table\"\n route_table = c.get_all_route_tables()[0]\n print \" └ Associating route table with subnet\"\n route_table.associate(subnet)\n print \" └ Creating route to internet gateway\"\n route_table.route('0.0.0.0/0', gateway=gateway)\n\n with shell.Step(5):\n print \"Security resources setup:\"\n\n print \"* Creating temporary security group\"\n group = vpc.create_security_group(\n 'pop-' + random_string(16),\n 'Temporary security group for a POP application'\n )\n print 
\" └ New security group created with ID '{0.id}'\".format(group)\n\n print \"* Authorizing all internal traffic\"\n group.authorize(-1, 0, 65535, src_group=group)\n\n print \"* Authorizing external SSH access\"\n group.authorize('tcp', 22, 22, \"0.0.0.0/0\")\n\n print \"* Creating key pair\"\n key = c.create_key_pair('pop-' + random_string(16))\n print \" └ New key pair created with name '{0.name}'\".format(key)\n\n with shell.Step(6):\n print \"Virtual machines boot process:\"\n\n print \"* Getting needed images\"\n images = c.get_all_images(config.xpath('//setup/machine/@image'))\n images = dict([(image.id, image) for image in images])\n\n print \"* Launching instances\"\n reservations = {}\n for machine in config.xpath('//setup/machine'):\n machine = machine.attrib\n image = images[machine['image']]\n res = image.run(\n key_name=key.name,\n security_groups=[group.id,],\n instance_type=machine.get('type', DEFAULT_MACHINE_TYPE),\n subnet_id=subnet.id,\n private_ip_address=machine['ip'],\n )\n\n print \" └ New reservation (ID: {0}, IP: {1})\".format(\n res.id,\n machine['ip']\n )\n reservations[machine['ip']] = machine, res.instances[0]\n\n print \"* Waiting for machines to boot\"\n for ip, (machine, instance) in reservations.iteritems():\n print \" └ Waiting for machine @ {0} to boot\".format(ip),\n shell.wait(instance, 'running', interval=.5)\n\n print \"* Associating public IP address to POP application manager\"\n address.associate(reservations[cloud['manager']][1])\n\n print \"* Waiting for manager to come online\",\n shell.wait(ConnectionAttempt(address.public_ip, 22), 'connected', interval=.8)\n\n with shell.Step(7):\n print \"Local environment setup:\"\n\n print \"* Saving private key to disk\"\n with open(KEY_FILENAME, 'w') as fh:\n fh.write(key.material)\n os.chmod(KEY_FILENAME, stat.S_IRUSR | stat.S_IWUSR)\n print \" └ Private key written to '{0}'\".format(KEY_FILENAME)\n\n print \"* Generating local fabfile\"\n \n local = os.path.join(os.path.dirname(fabfiles.__file__), 'local.pyt')\n with open(local, 'r') as rfh:\n with open('fabfile.py', 'w') as wfh:\n wfh.write(rfh.read().format(**{\n 'gendate': datetime.today(),\n 'mgraddress': address.public_ip,\n 'remoteuser': USER,\n 'cloudsetup': SETUP_SUMMARY_FILE,\n 'keyfilename': KEY_FILENAME,\n }))\n \n with open('cloud.i.xml', 'w') as fh:\n fh.write(xml.format_document(config))\n\n print \"* Saving cloud setup to XML file\"\n cloud.update({\n 'vpc': vpc.id,\n 'subnet': subnet.id,\n 'gateway': gateway.id,\n 'security-group': group.id,\n 'key-pair': key.name,\n 'public-address': address.public_ip,\n 'key-filename': KEY_FILENAME,\n })\n for machine, instance in reservations.itervalues():\n machine['instance-id'] = instance.id\n machine['launch-time'] = instance.launch_time\n\n with open(SETUP_SUMMARY_FILE, 'w') as fh:\n fh.write(xml.format_document(config))\n\n print \"* Removing old public key from known hosts (if present)\"\n\n try:\n with open(KNOWN_HOSTS, 'r') as fh:\n known_hosts = fh.read()\n except:\n print \" └ Could not read {0}\".format(KNOWN_HOSTS)\n else:\n known_hosts, count = re.subn(\n '\\n{0} .*'.format(re.escape(address.public_ip)),\n '',\n known_hosts\n )\n if count:\n try:\n with open(KNOWN_HOSTS, 'w') as fh:\n fh.write(known_hosts)\n except:\n print \" └ Could not write changes back to {0}\".format(\n KNOWN_HOSTS\n )\n else:\n print \" └ Public key for IP {0} removed\".format(\n address.public_ip\n )\n else:\n print \" └ No public key matching IP {0} found\".format(\n address.public_ip\n )\n\n duration = 
int(time.time() - start)\n duration = '{0:.0f}m {1:.0f}s'.format(duration // 60, duration % 60)\n\n with shell.Wrapper(72):\n print\n print \"Cloud setup completed in {0}; you can manually connect to the \"\\\n \"manager using the following command:\\n\".format(duration)\n\n print shell.hilite(\n \" ssh -i {0} {1}@{2}\".format(KEY_FILENAME, USER, address.public_ip),\n shell.MAGENTA\n )\n\n with shell.Wrapper(72):\n print\n print \"Alternatively, you can use the commands already provided by \" \\\n \"the generated fabfile. To rapidly obtain some help about them,\"\\\n \" execute the following command in the directory where the \" \\\n \"fabfile is located (make sure you have a recent fabric \" \\\n \"installation):\\n\"\n print shell.hilite(\" fab --list\", shell.MAGENTA)",
"def test_001_create_empty(self):\n ret = svcmgr.main(argv=[\"create\", \"-s\", SVCNAME])\n assert ret == 0"
] | [
"0.6277258",
"0.5818628",
"0.58078766",
"0.5735925",
"0.5690465",
"0.5561859",
"0.55411196",
"0.55319256",
"0.55319256",
"0.54515827",
"0.5434961",
"0.54282016",
"0.54041386",
"0.5379939",
"0.535935",
"0.5331922",
"0.53081214",
"0.5296964",
"0.5261467",
"0.525956",
"0.52542055",
"0.5245969",
"0.5240818",
"0.52344924",
"0.5230545",
"0.5204428",
"0.5204378",
"0.51982594",
"0.51969016",
"0.5196871"
] | 0.7108754 | 0 |
r""" Dialog is method to send messges to users. | def Dialog(self, usrId: int, message: str, keybaord=None):
self.session.method("messages.send",
{
"user_id": usrId,
"keyboard": keybaord,
"message": message
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def alert(self, msg):\r\n messagedialog = Gtk.MessageDialog(self, type=1, buttons=1, message_format=msg)\r\n messagedialog.run()\r\n messagedialog.destroy()",
"def dialog(message, timeout=0, buttons=DIALOGBUTTON_OK):\n warnings.warn(\"This Method moved uwstyle.dialog()\", DeprecationWarning)\n return uwstyle.dialog(message, timeout, buttons)",
"def notifyUser(self, message):\n\n\n msg = QtWidgets.QMessageBox(self)\n msg.setText(message)\n msg.exec_()",
"def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")",
"def ask_dialog(self, title=\"\", vars=[], help=\"\"):\n\t\tpass",
"def show_message_dialog(text):\n\n dialog = QDialog()\n interface = messageGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n dialog.exec_()",
"def show_dialog_ync(self, title: str, message: str) -> Gtk.ResponseType:\n dialog = self.ui.get_object(\"ync_dialog\")\n dialog.set_transient_for(self.ui.get_object(\"mainWindow\"))\n dialog.set_title(title)\n self.ui.get_object(\"ync_label\").set_markup(message)\n response = dialog.run()\n dialog.hide()\n return response",
"def _show_dialog(self, content: dict):\n raise NotImplementedError",
"def openDialog(self):\n\n dialog = OlaInterfaceDialog(self.iface)\n #activityRequestManager.setLogger(dialog.getLogger())\n #stakeholderRequestManager.setLogger(dialog.getLogger())\n dialog.show()\n dialog.exec_()",
"def safe_message_dialog(self, markup, msgtype=gtk.MESSAGE_ERROR):\n gtk.gdk.threads_enter()\n mbox = gtk.MessageDialog(type=msgtype, buttons=gtk.BUTTONS_OK)\n mbox.set_markup(markup)\n mbox.run()\n mbox.destroy()\n gtk.gdk.threads_leave()",
"def _onUsers(self, event):\n dialog = sc.UsersDialog(self)\n dialog.ShowModal()\n dialog.Destroy()\n self.updateUsers()",
"def on_cancel(self):\n if self.dialog_list.entry_widget and self.dialog_list.entry_widget.value:\n selected_index = self.dialog_list.entry_widget.value[0]\n dialog_name = self.dialog_list.values[selected_index]\n text = self.chat_box.entry_widget.value.strip()\n if text:\n send_status = TG.sender.send_msg(dialog_name, text)\n if send_status:\n self.chat_box.entry_widget.value = \"\"\n self.load_history()\n self.dialog_list.entry_widget.value = self.dialog_list.values.index(self.current_peer.print_name)\n self.editw = self._widgets__.index(self.chat_box)\n else:\n npyscreen.notify_ok_cancel('Please select receiver first.')",
"def showMessage(self):",
"def __init__(self, dialog):\n\t\tself.dialog = dialog",
"def message(self,message,style=wx.OK | wx.ICON_INFORMATION):\n dlg = wx.MessageDialog(self, message, self.app.title, style)\n answer = dlg.ShowModal()\n dlg.Destroy()\n return answer",
"def message(text, title, msg_type, msg_buttons, window=None):\n msg = gtk.MessageDialog(window, gtk.DIALOG_MODAL, msg_type, msg_buttons,\\\n text)\n msg.set_title(title)\n msg.set_markup(text) # Format Message text with pango\n msg.set_position(gtk.WIN_POS_CENTER)\n #msg.set_modal(True) #should already be a modal window\n #msg.set_keep_above(True)\n response = msg.run()\n msg.destroy()\n return response",
"def MessageDialog( message, caption, style ):\n dlg = wx.MessageDialog( wx.GetApp().GetTopWindow(), message, caption, style )\n result = dlg.ShowModal()\n dlg.Destroy()\n \n return result",
"def open(self, message: str):\n self.dlg_title.setText(message)\n self.progress_dialog.show()",
"def doMessageWindow(msg):\n _loadMsgSettings()\n if settings.has_key(msg):\n return\n global dialog\n dialog = QtGui.QDialog()\n msgDialog = ui.message.Ui_Dialog()\n msgDialog.setupUi(dialog)\n msgDialog.messageLabel.setText(msg)\n dialog.exec_()\n if msgDialog.showAgainCheckBox.isChecked():\n settings[msg] = True\n _saveMsgSettings()",
"def show_dialog(self):\n self.ui = Ui_unordered_list_dialog()\n self.ui.setupUi(self)\n self.ui.group_box_types.setStyleSheet(const.QGROUPBOX_STYLE)\n ok_button = self.ui.button_box.button(QtGui.QDialogButtonBox.Ok)\n ok_button.setDefault(True)\n ok_button.setAutoDefault(True)\n ok_button.setFocus(True)\n cancel_button = self.ui.button_box.button(QtGui.QDialogButtonBox.Cancel)\n cancel_button.setDefault(False)\n cancel_button.setAutoDefault(False)\n self.exec_()",
"def show_dialog(self, widget, data):\n\t\twidget.show()",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def show_dialog_yn(self, title: str, message: str) -> Gtk.ResponseType:\n dialog = self.ui.get_object(\"yn_dialog\")\n dialog.set_transient_for(self.ui.get_object(\"mainWindow\"))\n dialog.set_title(title)\n self.ui.get_object(\"yn_label\").set_markup(message)\n response = dialog.run()\n dialog.hide()\n return response",
"def send_messages(self):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username,\r\n font=self.title_font, bg=self.bg_color, height=2)\r\n user_label.pack(pady=10, padx=50)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=10)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n write_message = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n write_message.pack()\r\n scrollbar_msg.config(command=write_message.yview)\r\n button_speech_rec = Button(self.root, text=\"listen\\nto speech\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.create_speech_thread(write_message))\r\n button_speech_rec.pack(pady=10)\r\n button_send = Button(self.root, text=\"send\", font=self.text_font,\r\n height=2, width=20, command=lambda: self.send(write_message))\r\n button_send.pack(pady=10)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.choose_path)\r\n button_send.pack(pady=10)",
"def process_dialog(self, dialog):\n\n\t\tfirst \t= dialog.firstName.text()\n\t\tlast \t= dialog.lastName.text()\n\t\temail \t= dialog.email.text()\t\n\t\tage \t= dialog.age.value()\n\n\t\tplural \t= (age > 1) and \"s\" or \"\"\n\t\t\n\t\tmsg = \"\"\"\n\t\t\tHi, %(first)s %(last)s! <br>\n\t\t\tYour E-Mail Address is %(email)s <br>\n\t\t\tAnd you are %(age)d year%(plural)s old! \n\t\t\"\"\" % locals()\n\n\t\tself.infoLabel.setText(msg)",
"def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass",
"def onAccepted():\n dialog.done(1)",
"def __init__(self, dialog):\r\n \r\n self.d = dialog"
] | [
"0.671091",
"0.65158796",
"0.63043237",
"0.62783515",
"0.6270365",
"0.62470025",
"0.6136271",
"0.6118671",
"0.6094713",
"0.60610235",
"0.5989061",
"0.5960819",
"0.59547096",
"0.5923431",
"0.59099215",
"0.5885811",
"0.58625066",
"0.58583194",
"0.5829125",
"0.5825792",
"0.579828",
"0.577753",
"0.577753",
"0.577753",
"0.5758891",
"0.5738714",
"0.573796",
"0.57278323",
"0.56738764",
"0.56688786"
] | 0.7148442 | 0 |
Take a raw tweet received from the stream into a dataframe. | def tweet_to_df(tweet):
count = helper("./data")
dict_ = {}
dict_["text"] = tweet.text
dict_["user"] = tweet.user.description
dict_["user_location"] = tweet.user.location
dict_["screem_name"] = tweet.user.screen_name
dict_["account_date_cr"] = tweet.user.created_at
dict_["nb_followers"] = tweet.user.followers_count
dict_["profile_color"] = tweet.user.profile_background_color
dict_["tweet_id"] = tweet.id_str
dict_["tweet_date"] = tweet.created_at
dict_["nb_retweeted"] = tweet.retweet_count
dict_["tweet coordinates"] = tweet.coordinates
tweet_data = pd.DataFrame(dict_, index=[0])
return tweet_data.to_csv(f"C:/Users/info/Desktop/projects/tweetanalyser/data/{count+1}.csv") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_tweets(event):\r\n return pd.read_csv(\"data/tweets/%s.csv\" % event, \r\n dtype={\r\n 'tweet_id': str,\r\n 'in_reply_tweet': str,\r\n 'thread': str,\r\n 'user_id': str,\r\n 'in_reply_user': str\r\n },\r\n engine=\"python\")",
"def _process_stream(self):\n input_df = self._read_stream()\n\n clean_df = input_df \\\n .select(from_json(input_df.value.cast('string'),\n self.schema).alias('value'))\n explode_df = clean_df.select('value.*')\n\n enrich_df = explode_df \\\n .withColumn('event_time', explode_df.event_time.cast(TimestampType())) \\\n .withColumn('process_time', current_timestamp())\n\n return enrich_df",
"def get_tweets(api, query):\n \n results = []\n for tweet in tweepy.Cursor(api.search, q=query).items(1000):\n results.append(tweet)\n \n id_list = [tweet.id for tweet in results]\n #unpack into dataframe\n data = pd.DataFrame(id_list,columns=['id'])\n \n data[\"text\"]= [tweet.text.encode('utf-8') for tweet in results]\n data[\"datetime\"]=[tweet.created_at for tweet in results]\n data[\"Location\"]=[tweet.place for tweet in results]\n \n return data",
"def get_tweets_data(self):\n query = \"select * from tweets;\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n proc_data = conn.execute(query)\n data = proc_data.fetchall()\n\n cols = [\"id\", \"tweet_id\", \"insert_date\", \"created_at\", \"hashtag\"]\n tweets = pd.DataFrame.from_records(data=data, columns=cols)\n\n return tweets",
"def getretweetdf(tweetdf):\r\n retweeteddf = tweetdf[tweetdf['retweeted'] == 1]\r\n retweeteddf.reset_index(inplace=True)\r\n \r\n return retweeteddf",
"def load_tweets(fp):\r\n ans = pd.read_csv(fp, sep='\\t')\r\n return ans",
"def get_stream_id(self) -> str:",
"def write_tweet(tweet):\n try:\n tweet_data = [tweet.date, tweet.content.encode('utf-8'), tweet.id, tweet.likeCount,\n tweet.replyCount,\n tweet.retweetCount, tweet.quoteCount,\n tweet.user.username, tweet.user.id, tweet.user.followersCount,\n tweet.user.friendsCount,\n tweet.user.statusesCount, tweet.user.verified, tweet.user.url, tweet.url]\n if tweet.mentionedUsers is not None:\n tweet_data.append([tweet.mentionedUsers])\n else:\n tweet_data.append(None)\n if tweet.quotedTweet is not None:\n tweet_data.append(tweet.quotedTweet.id)\n tweet_data.append(tweet.quotedTweet.content.encode('utf-8'))\n tweet_data.append(tweet.quotedTweet.user.username)\n tweet_data.append(tweet.quotedTweet.user.id)\n if tweet.quotedTweet.mentionedUsers is not None:\n tweet_data.append([tweet.quotedTweet.mentionedUsers])\n else:\n tweet_data.append(None)\n else:\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n tweet_data.append(None)\n return tweet_data\n except UnicodeEncodeError:\n pass",
"def tweet_to_salmon_vars(self, tweet):\n # there might be more than one URL in the tweet. find the one on our domain.\n # https://dev.twitter.com/docs/tweet-entities\n link = None\n for url_data in tweet.get('entities', {}).get('urls', []):\n # expanded_url isn't always provided\n url = url_data.get('expanded_url') or url_data.get('url')\n if url and urlparse.urlparse(url).netloc == self.key().name():\n link = url\n\n # parse the timestamp, formatted e.g. 'Sun, 01 Jan 2012 11:44:57 +0000'\n created_at = tweet.get('created_at')\n if created_at:\n created_at = re.sub(' \\+[0-9]{4}$', '', created_at)\n updated = datetime.datetime.strptime(created_at,\n '%a, %d %b %Y %H:%M:%S')\n updated = updated.isoformat()\n else:\n updated = ''\n\n return {\n 'id': util.tag_uri(self.DOMAIN, str(tweet.get('id'))),\n 'author_name': tweet.get('from_user_name'),\n 'author_uri': 'acct:%[email protected]' % tweet.get('from_user'),\n 'in_reply_to': link,\n 'content': tweet.get('text'),\n 'title': tweet.get('text'),\n 'updated': updated,\n }",
"def get_subscriber_row(self, stream_session_info):\n\n uptime = \"unknown\"\n last_msg_time = \"unknown\"\n if (\n stream_session_info.uptime is not None\n and stream_session_info.last_msg_sent_time is not None\n ):\n uptime_str = str(\n datetime.timedelta(milliseconds=stream_session_info.uptime)\n )\n last_msg_time_str = convertTime(stream_session_info.last_msg_sent_time)\n uptime = uptime_str.split(\".\")[0]\n last_msg_time = last_msg_time_str\n\n row = [\n stream_session_info.subscriber_id,\n uptime,\n stream_session_info.total_streamed_msgs,\n last_msg_time,\n ]\n return row",
"def pandaData():\r\n tweets = pd.DataFrame()\r\n return tweets",
"def process_statuses(statuses):\n\n status_list = []\n for status in statuses:\n\n # There is no indicator for whether a tweet is\n # a retweet. We just have to check whether\n # 'retweeted_status' is in the object\n try:\n status.retweeted_status\n is_retweet = True\n except:\n is_retweet = False\n\n status_list.append([me.screen_name, follower.screen_name,\n status.text, is_retweet, status.created_at])\n\n return pd.DataFrame(status_list)",
"def load_raw_data(path: str) -> pd.DataFrame:\n data = []\n with open(path) as file:\n for line in file:\n data.append(line)\n data_df = pd.DataFrame(data, columns = {'tweet'})\n return data_df",
"def process_tweet(tweet):\n d = {}\n d['hastags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]\n d['text'] = tweet['text']\n d['user'] = tweet['user']['screen_name']\n d['user_loc'] = tweet['user']['location']\n return d",
"def map_tweepy_array (self, tweet):\n new_tweet = [tweet.created_at,\n tweet.id,\n tweet.id_str,\n tweet.truncated,\n tweet.text,\n str(constants.TRACKS),\n tweet.source,\n tweet.source_url,\n tweet.in_reply_to_status_id,\n tweet.in_reply_to_status_id_str,\n tweet.in_reply_to_user_id,\n tweet.in_reply_to_user_id_str,\n tweet.in_reply_to_screen_name,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.geo,\n tweet.coordinates,\n tweet.place,\n tweet.contributors,\n tweet.is_quote_status,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.favorited,\n tweet.retweeted,\n tweet.lang ]\n\n return new_tweet",
"def join(tw_df, rtt_df):\n original_tw_id = []\n author_ids = []\n rtt_dates = []\n groups = rtt_df.groupby('original_tweet_id').groups\n for k in groups.keys():\n l_a = []\n l_r = []\n original_tw_id.append(k)\n for index in groups[k]:\n line = rtt_df.iloc[[index]]\n l_a.append(int(line['author_id']))\n l_r.append(str(line['retweet_date']))\n author_ids.append(l_a)\n rtt_dates.append(l_r)\n \n df_temp = pd.DataFrame()\n df_temp['natural_key'] = original_tw_id\n df_temp['rtt_author_ids'] = author_ids\n df_temp['retweet_dates'] = rtt_dates\n df_temp = df_temp.set_index('natural_key')\n tw_df = tw_df.set_index('natural_key')\n return tw_df.join(df_temp)",
"def recall_tweet(tweet_id):\n \n # Reference CFs\n pool = get_cass_pool()\n tweet_delivery_cf = column_family(pool, \"TweetDelivery\")\n user_timeline_cf = column_family(pool, \"UserTimeline\")\n \n with pycassa.batch.Mutator(pool, queue_size=50) as batch:\n batch.write_consistency_level = cass_types.ConsistencyLevel.QUORUM\n \n # Read who the tweet was delivered to\n # Would make many calls in real system, see above.\n row_key = int(tweet_id)\n try:\n delivery_cols = tweet_delivery_cf.get(row_key)\n except (pycassa.NotFoundException):\n delivery_cols = {}\n # Have {user_name : None}\n \n for col_name in delivery_cols.keys():\n delivered_user_name = col_name\n \n # Delete from the UserTimeline CF\n row_key = delivered_user_name\n columns = [\n int(tweet_id)\n ]\n batch.remove(user_timeline_cf, row_key, columns=columns)\n \n # Delete from TweetDelivery CF\n row_key = int(tweet_id)\n columns = [\n delivered_user_name\n ]\n batch.remove(tweet_delivery_cf, row_key, columns=columns)\n # Exit Batch\n return",
"def get_data(data):\n data = data.iloc[1:]\n data['text'] = data['text'].values.astype('unicode')\n data['date'] = data['date'].values.astype('str')\n data['time'] = data['time'].values.astype('unicode')\n # remove rows with mixed sentiment\n data = data[data['sentiment'] < 2]\n data.index = range(len(data))\n \n return data",
"def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)",
"def find_transcript_data(row):\n #Calls create_transcript_df which creates a df for each transcript.\n trans_df = create_transcript_df(\n row, row['teacher_handle'].strip(), \n row['student_handle'].strip(), row['transcript'].strip())\n #Finds the first response time and defines it as the First Response Time (FRT)\n rt, frt = rt_data(trans_df)\n student_response, teacher_response = response_lengths(trans_df)\n #vocab = the total number of vocab words used in the transcript.\n vocab_list = np.asarray([item for sublist in trans_df.vocab.values for item in sublist])\n session_length_secs = (trans_df.Time_Stamps.iloc[-1] - trans_df.Time_Stamps.iloc[0]).seconds\n \n #Finding student to teacher ratio, round to nearest hundreth.\n exchange_ratio = round(trans_df.Student_Bool.sum()/float((trans_df['Student_Bool']==False).sum()),2)\n #returns all of the data found above, place in new columns under plain df.\n return trans_df.to_dict(), frt, rt, trans_df.vocab_count.sum(), vocab_list, trans_df.approp_count.sum(), session_length_secs, student_response, teacher_response, exchange_ratio,trans_df.has_drag_drop.sum()",
"def process_message_row(self, message_row):\n msg_received_ts = message_row[0]\n msg_raw_data = json.loads(message_row[1])\n msg_data = {\n \"ts\": msg_received_ts,\n \"origin_ts\": msg_raw_data[\"origin_server_ts\"],\n \"origin\": msg_raw_data[\"origin\"],\n \"sender\": msg_raw_data[\"sender\"],\n \"event_id\": msg_raw_data[\"event_id\"],\n \"room_id\": msg_raw_data[\"room_id\"],\n \"message\": msg_raw_data[\"content\"][\"body\"],\n \"url\": msg_raw_data[\"content\"].get(\"url\", None),\n \"chat_type\": \"matrix\",\n \"nick\": self.sender_to_nick(msg_raw_data[\"sender\"]),\n }\n return msg_data",
"def getDataset(api, dataset, headsize, itemsize):\n userlist = dataset['user'].head(headsize).tolist()\n\n res = []\n [res.append(x) for x in userlist if x not in res]\n\n data = []\n for x in res:\n val = []\n h = []\n r = 0\n f = 0\n try:\n for tweet in tweepy.Cursor(api.user_timeline, id=x).items(itemsize):\n h.extend(func.extractHashtag(tweet.text))\n t = func.clean_tweets(tweet.text)\n k = func.sentiment_analyzer_scores(t)\n val.append(k)\n r = r + tweet.retweet_count\n f = f + tweet.favorite_count\n hl = []\n [hl.append(x) for x in h if x not in hl]\n data.append([str(x), mean(val), hl, r, f])\n except tweepy.TweepError: # Caused by inexistance of user x\n pass\n\n return pd.DataFrame(data, columns=['user', 'sent', 'hashtags', 'rt', 'fav'])",
"def tweetSent(df_key):\n print('1/3 sent_transformer, initializing.')\n\n df_sent = df_key.copy()\n root = 'proc_data'\n year = df_sent['date'][0].year\n month = df_sent['date'][0].month\n pos_list = []\n neu_list = []\n neg_list = []\n cmp_list = []\n analyzer = SentimentIntensityAnalyzer()\n\n print('2/3 sent_transformer, transforming tweets to sentiments.')\n\n for _,nrows in tqdm_notebook(df_sent.iterrows()):\n vs = analyzer.polarity_scores(nrows['cleantext'])\n pos_list.append(vs['pos'])\n neu_list.append(vs['neu'])\n neg_list.append(vs['neg'])\n cmp_list.append(vs['compound'])\n\n df_sent['vad_positive'] = pos_list\n df_sent['vad_neutral'] = neu_list\n df_sent['vad_negative'] = neg_list\n df_sent['vad_compound'] = cmp_list\n\n filename = 'proc_sent_{}_{}.pkl'.format(year,month)\n filestring = os.path.join(root, filename)\n\n # output to pickle\n with open(filestring, 'wb') as filehandle: \n # store the data\n pickle.dump(df_sent, filehandle)\n\n print('3/3 sent_transformer, process complete.')\n return df_sent",
"def readFromStream(cls, stream):\n list_ = []\n event_id_list = []\n for tr in stream:\n if tr.stats.event.id not in event_id_list:\n list_.append(tr.stats.event)\n event_id_list.append(tr.stats.event.id)\n\n log.info('Read event information of %d events from stream %s' % (len(list_), stream.hash))\n return cls(list_)",
"def getbasics(tfinal):\n tfinal[\"screen_name\"] = df[\"user\"].apply(lambda x: x[\"screen_name\"])\n tfinal[\"user_id\"] = df[\"user\"].apply(lambda x: x[\"id\"])\n tfinal[\"followers_count\"] = df[\"user\"].apply(lambda x: x[\"followers_count\"])\n return tfinal",
"def process_tweets(s3_working_bucket: str, date: tuple) -> DataFrame:\n\n logging.debug(\"Start reading tweets csv.\")\n df_tweets = stdm.read_csv(spark, s3_working_bucket, date, \"twitter\", \"tweets\")\n\n logging.debug(\"Calling extract_tweet_source function.\")\n df_tweets = stp.extract_tweet_source(df_tweets)\n\n logging.debug(\"Calling col_to_datetime function with df_tweets data.\")\n df_tweets = stp.col_to_datetime(df_tweets, \"tweet_created_at\")\n\n logging.debug(\"Calling merge_texts function.\")\n df_tweets = stp.merge_texts(df_tweets)\n\n logging.debug(\"Calling get_tickers function.\")\n df_tweets = stp.get_tickers(df_tweets)\n\n # In case json files are loaded not in order.\n logging.debug(\"Calling order_by_col function with df_tweets data.\")\n df_tweets = stp.order_by_col(df_tweets, \"tweet_created_at\")\n\n logging.debug(\"Calling drop_outofrange function with df_tweets data.\")\n df_tweets = stp.drop_outofrange(df_tweets, \"tweet_created_at\", date)\n\n return df_tweets",
"def run(self):\n new_tweets = pd.DataFrame(\n columns=[\"tweet_id\", \"congress\", \"session\", \"date\", \"vote\"],\n dtype=str,\n )\n for item in self.senate_data[\"vote_summary\"][\"votes\"][\"vote\"]:\n query = (\n \"congress == @self.congress \"\n \"and session == @self.session \"\n \"and date == @item['vote_date'] \"\n \"and vote == @item['vote_number']\"\n )\n\n # If the current vote isn't already processed, then process it\n if self.tweets.query(query).empty:\n try:\n text, party_data, vote_data = self.senate_obj.process_vote(\n item\n )\n status = self.twitter_api.update_status(text)\n # Keep track of new tweets to be reconciled with old\n # tweets later\n new_tweets = new_tweets.append(\n {\n \"tweet_id\": status.id_str,\n \"congress\": self.congress,\n \"session\": self.session,\n \"date\": item[\"vote_date\"],\n \"vote\": item[\"vote_number\"],\n **party_data,\n **vote_data,\n },\n ignore_index=True,\n )\n except Exception as e:\n # Tweet failed for some reason\n logging.error(\"Tweet failed\")\n logging.error(item)\n logging.error(e)\n\n # Only process a limited number of tweets in a single run\n if len(new_tweets) == self.MAX_TWEETS:\n break\n\n if not new_tweets.empty:\n logging.info(f\"Tweeted {len(new_tweets)} new votes\")\n self.__save(self.tweets.append(new_tweets))\n # Function needs to return something to work as a Cloud Function\n return new_tweets[\"tweet_id\"].to_json()\n else:\n return \"{}\" # Empty JSON object",
"def getNewsFeed(self, userId):\r\n tweets = []\r\n \r\n tweets += self.tweets_by_user[userId]\r\n for other in self.follows[userId]:\r\n tweets += self.tweets_by_user[other]\r\n \r\n last_10_tweets = sorted(tweets)[-10:]\r\n return [tweetId for _, tweetId in last_10_tweets][::-1]",
"def processTweet(title, tweet, remove_title=False):\n\n # create a title regex and initialize a dictionary to hold results\n\n texp = r\"#?\" + r\" ?\".join(processTitle(title).split(\" \"))\n results = {}\n\n # retrieve author metadata\n\n results['author_id'] = tweet.author.id\n results['author_name'] = tweet.author.name\n results['author_verified'] = tweet.author.verified\n results['author_followers'] = tweet.author.followers_count\n results['author_friends'] = tweet.author.friends_count\n results['author_favorites'] = tweet.author.favourites_count\n results['author_statuses'] = tweet.author.statuses_count\n\n # retrieve tweet metadata\n\n results['tweet_id'] = tweet.id\n results['tweet_datetime'] = tweet.created_at.strftime('%Y-%m-%d %H:%m:%S')\n results['tweet_favorites'] = tweet.favorite_count\n results['tweet_retweets'] = tweet.retweet_count\n\n retweet = re.search('^RT @\\w+:', tweet.text)\n results['tweet_retweet'] = True if retweet else False\n\n mention = re.search('@\\w+', tweet.text)\n results['tweet_mention'] = True if mention and not retweet else False\n\n # retrieve raw tweet text and clean it up\n\n text = tweet.text.replace('\\n', '').replace(\"'\", \"\").replace('\"', '')\n text = re.sub(r'(RT )?@\\w+:?', '', text)\n text = re.sub(texp, '', text, flags=re.IGNORECASE) if remove_title else text\n text = re.sub(r' {2,}', ' ', text).strip()\n\n results['tweet_text'] = text\n return results",
"def format_tweet(tweet):\n user = tweet['user']\n return {\n 'tweet_id': tweet['id'],\n 'hashtag': HASHTAG,\n 'text': tweet['text'],\n 'created_at': tweet['created_at'],\n 'user': {\n 'user_id': user['id'],\n 'name': user['name'],\n 'handle': user['screen_name'],\n 'profile_image_url': user['profile_image_url'],\n 'profile_url': f\"https://twitter.com/{user['screen_name']}\"\n }\n }"
] | [
"0.6158354",
"0.5887331",
"0.5759622",
"0.57456875",
"0.526741",
"0.5223173",
"0.52187014",
"0.51563835",
"0.51196",
"0.51054513",
"0.51028097",
"0.5037918",
"0.5022799",
"0.50189",
"0.50117546",
"0.5011563",
"0.5006884",
"0.49508464",
"0.49424398",
"0.4938977",
"0.49200106",
"0.49180028",
"0.4902092",
"0.48943594",
"0.4887909",
"0.48780778",
"0.48674878",
"0.4858924",
"0.4839496",
"0.47926775"
] | 0.5895293 | 1 |
Explicitly sets the shapes of the lightcurve and label tensors; otherwise, TensorFlow can't infer them. | def set_shape_function(self, lightcurve: tf.Tensor, label: tf.Tensor):
lightcurve.set_shape([self.time_steps_per_example, 1])
label.set_shape([1])
return lightcurve, label | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_shapes(self, batch_size, features_in, labels_in):\n features_in['mcts_features'] = tf.reshape(\n features_in['mcts_features'], [batch_size, self._env_state_space],\n name='mcts_feature_reshape')\n\n features_in['policy_features'] = tf.reshape(\n features_in['policy_features'], [batch_size, self._env_state_space],\n name='policy_feature_reshape')\n\n labels_in['action_tensor'] = tf.reshape(\n labels_in['action_tensor'], [batch_size, self._env_action_space],\n name='action_reshape')\n\n labels_in['mean_tensor'] = tf.reshape(\n labels_in['mean_tensor'], [batch_size, self._env_action_space],\n name='mean_reshape')\n\n labels_in['logstd_tensor'] = tf.reshape(\n labels_in['logstd_tensor'], [batch_size, self._env_action_space],\n name='logstd_reshape')\n\n labels_in['value_tensor'] = tf.reshape(\n labels_in['value_tensor'], [batch_size], name='value_reshape')\n\n labels_in['return_tensor'] = tf.reshape(\n labels_in['return_tensor'], [batch_size], name='return_reshape')\n\n labels_in['old_neg_logprob_tensor'] = tf.reshape(\n labels_in['old_neg_logprob_tensor'], [batch_size], name='log_reshape')\n\n labels_in['mcts_enable_tensor'] = tf.reshape(\n labels_in['mcts_enable_tensor'], [batch_size], name='mcts_reshape')\n\n labels_in['policy_action_tensor'] = tf.reshape(\n labels_in['policy_action_tensor'], [batch_size, self._env_action_space],\n name='policy_action_reshape')\n\n labels_in['policy_value_tensor'] = tf.reshape(\n labels_in['policy_value_tensor'], [batch_size],\n name='policy_value_reshape')\n\n labels_in['policy_return_tensor'] = tf.reshape(\n labels_in['policy_return_tensor'], [batch_size],\n name='policy_return_reshape')\n\n labels_in['policy_old_neg_logprob_tensor'] = tf.reshape(\n labels_in['policy_old_neg_logprob_tensor'], [batch_size],\n name='log_reshape')\n\n return features_in, labels_in",
"def set_label_shape(label):\n label.set_shape([1])\n return label",
"def set_shapes(images, labels):\n images.set_shape(images.get_shape().merge_with(\n tf.TensorShape([batch_size, None, None, None])))\n labels.set_shape(labels.get_shape().merge_with(\n tf.TensorShape([batch_size])))\n return images, labels",
"def set_shapes(transpose_input, batch_size, images, labels):\n if transpose_input:\n images.set_shape(images.get_shape().merge_with(\n tf.TensorShape([None, None, None, batch_size])))\n labels.set_shape(\n labels.get_shape().merge_with(tf.TensorShape([batch_size])))\n else:\n images.set_shape(images.get_shape().merge_with(\n tf.TensorShape([batch_size, None, None, None])))\n labels.set_shape(\n labels.get_shape().merge_with(tf.TensorShape([batch_size])))\n\n return images, labels",
"def set_shape(self, shape):\n self._shape = self._shape.merge_with(shape)",
"def build(self, input_shape):\n if self.use_scale:\n self.scale = self.add_weight(\n name='scale',\n shape=(),\n initializer='ones',\n dtype=self.dtype,\n trainable=True)\n else:\n self.scale = None\n super(HypLuongAttention, self).build(input_shape)",
"async def infer_shape_scalar(track, *args):\n return NOSHAPE",
"def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))",
"def __init__(self, shape, dtype=tf.float32, name=None):\n super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))",
"def shape(tensor):\n raise NotImplementedError",
"def test_shape_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert atom.lr.shape == atom.shape",
"def with_same_shape(old, new):\n if isinstance(old, tf.Tensor) and isinstance(new, tf.Tensor):\n return tensor_util.with_same_shape(old, new)\n return new",
"def set_shape_predict(im, im_path, height, width):\n im = tf.reshape(im, tf.convert_to_tensor([height, width, 3]))\n\n im_path.set_shape([])\n return im, im_path",
"def reset_shapes():\n if bpy.context.object.modeling_cloth:\n ob = bpy.context.object\n else: \n ob = extra_data['last_object']\n\n if ob.data.shape_keys == None:\n ob.shape_key_add('Basis') \n if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:\n ob.shape_key_add('modeling cloth source key') \n if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:\n ob.shape_key_add('modeling cloth key') \n ob.data.shape_keys.key_blocks['modeling cloth key'].value=1\n \n keys = ob.data.shape_keys.key_blocks\n count = len(ob.data.vertices)\n co = np.zeros(count * 3, dtype=np.float32)\n keys['modeling cloth source key'].data.foreach_get('co', co)\n keys['modeling cloth key'].data.foreach_set('co', co)\n\n data[ob.name].vel *= 0\n \n ob.data.shape_keys.key_blocks['modeling cloth key'].mute = True\n ob.data.shape_keys.key_blocks['modeling cloth key'].mute = False",
"def reshape(self, *shape):\n newTensor = super(MKLTensor, self).reshape(*shape)\n newTensor.set_mkl(self)\n return newTensor",
"def _setup_type_shapes(self, named_ops, extra_type_shapes):\n type_shape_set = set()\n for op in six.itervalues(named_ops):\n type_shape_set.update(op.input_type_shapes)\n type_shape_set.update(op.output_type_shapes)\n if extra_type_shapes is not None:\n type_shape_set.update(extra_type_shapes)\n\n # _type_shapes: a list of all the typeshapes this loom object supports.\n self._type_shapes = sorted(type_shape_set)\n\n # Enforce uniqueness for non-empty TypeShape tags.\n non_empty_tags = set()\n for ts in self._type_shapes:\n if ts.tag:\n if ts.tag in non_empty_tags:\n raise TypeError('Tags on tagged TypeShapes must be unique; '\n '%s occured more than once.' % (ts.tag,))\n else:\n non_empty_tags.add(ts.tag)\n\n # _type_shape_to_idx: a dict mapping TypeShape objects to their indices in\n # '_type_shapes'.\n self._type_shape_to_idx = {ts: idx for idx, ts in\n enumerate(self._type_shapes)}",
"async def infer_shape_embed(track, x):\n return NOSHAPE",
"def set_shape(self, connection_shape):\n self.shape = connection_shape",
"def LSTMBiasInit(shape, dtype):\n shape = np.array(shape)\n\n # Check internal consistencies.\n assert shape.shape == (1,), shape\n assert shape[0] % 4 == 0, shape\n\n n = shape[0] // 4\n ones = tf.fill([n], tf.constant(1, dtype=dtype))\n zeros = tf.fill([3 * n], tf.constant(0, dtype=dtype))\n return tf.concat([ones, zeros], 0)",
"def _build(self, shape):\n if self.multi_label:\n if shape.ndims != 2:\n raise ValueError('`y_true` must have rank=2 when `multi_label` is '\n 'True. Found rank %s.' % shape.ndims)\n self._num_labels = shape[1]\n variable_shape = tensor_shape.TensorShape(\n [tensor_shape.Dimension(self.num_thresholds), self._num_labels])\n\n else:\n variable_shape = tensor_shape.TensorShape(\n [tensor_shape.Dimension(self.num_thresholds)])\n self._build_input_shape = shape\n # Create metric variables\n self.true_positives = self.add_weight(\n 'true_positives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n self.true_negatives = self.add_weight(\n 'true_negatives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n self.false_positives = self.add_weight(\n 'false_positives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n self.false_negatives = self.add_weight(\n 'false_negatives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n\n if self.multi_label:\n with ops.init_scope():\n # This should only be necessary for handling v1 behavior. In v2, AUC\n # should be initialized outside of any tf.functions, and therefore in\n # eager mode.\n if not context.executing_eagerly():\n K._initialize_variables(K._get_session()) # pylint: disable=protected-access\n\n self._built = True",
"def check_shape_equal(pred, labels):\n if pred.shape != labels.shape:\n raise ValueError('Prediction and labels shapes must be equal:'\n f'{pred.shape} vs {labels.shape}.')",
"def changeInputShape(self,shape):\n self.input_shape = shape",
"def set_shape(self):\n\t\theigh = 150.\n\t\tself.own_shape_matrix = np.array([[50., 0., 0., 0.],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 50., 0., 0.],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 0., heigh, heigh/2],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 0., 0., 1.]])\n\t\t\n\t\t\n\t\tpass",
"def __init__(self, shape, dtype='float32'):\n if not isinstance(shape, (tuple, list)):\n raise TypeError('shape must be a tuple or list: %s' % str(shape))\n self._type_shape = loom.TypeShape(dtype, shape)",
"def __init__(self, shape, name=None):\r\n\r\n super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))\r\n self._shape = shape\r\n self._output = tf.cast(super().get(), tf.float32) / 255.0",
"def random_shape(gts, reference_shape, pca_model):\n\n def synthesize(lms):\n return detect.synthesize_detection(pca_model, menpo.shape.PointCloud(\n lms).bounding_box()).points.astype(np.float32)\n\n bb, = tf.py_func(synthesize, [gts], [tf.float32])\n shape = align_reference_shape(reference_shape, bb)\n shape.set_shape(reference_shape.get_shape())\n\n return shape",
"def __init__(self, shape, name=None):\n\n super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))\n self._shape = shape\n self._output = tf.cast(super().get(), tf.float32) / 255.0",
"def check_label_shapes(labels, preds, shape=0):\n\n if shape == 0:\n label_shape, pred_shape = len(labels), len(preds)\n else:\n label_shape, pred_shape = labels.shape, preds.shape\n\n if label_shape != pred_shape:\n raise ValueError(\"Shape of labels {} does not match shape of \"\n \"predictions {}\".format(label_shape, pred_shape))",
"def load_tf_llr(filepath: Union[str, os.PathLike], dist_s: Union[Distribution, PixelCNN] = None,\n dist_b: Union[Distribution, PixelCNN] = None, input_shape: tuple = None):\n model_dir = Path(filepath).joinpath('model')\n h5files = [f.name for f in model_dir.glob('[!.]*.h5')]\n if 'model_s.h5' in h5files and 'model_b.h5' in h5files:\n model_s, dist_s = build_model(dist_s, input_shape, str(model_dir.joinpath('model_s.h5').resolve()))\n model_b, dist_b = build_model(dist_b, input_shape, str(model_dir.joinpath('model_b.h5').resolve()))\n return dist_s, dist_b, model_s, model_b\n else:\n dist_s = tf.keras.models.load_model(model_dir.joinpath('model.h5'), compile=False)\n if 'model_background.h5' in h5files:\n dist_b = tf.keras.models.load_model(model_dir.joinpath('model_background.h5'), compile=False)\n else:\n dist_b = None\n return dist_s, dist_b, None, None",
"async def infer_shape_env_add(track, env1, env2):\n return NOSHAPE"
] | [
"0.6137845",
"0.60065645",
"0.5872062",
"0.57126576",
"0.5393481",
"0.53474146",
"0.51422065",
"0.51182985",
"0.5107572",
"0.5032816",
"0.50240093",
"0.5020178",
"0.49794286",
"0.4963954",
"0.49477613",
"0.49311468",
"0.4915292",
"0.4882078",
"0.48768413",
"0.4874847",
"0.4861257",
"0.48603436",
"0.4851606",
"0.4851366",
"0.48478976",
"0.48215923",
"0.48168626",
"0.48073372",
"0.48024184",
"0.47746164"
] | 0.768438 | 0 |
Prepares the lightcurves for training with several preprocessing and augmentation steps. | def preprocess_and_augment_lightcurve(self, lightcurve: np.ndarray) -> np.ndarray:
lightcurve = self.remove_random_values(lightcurve) # Helps prevent overfitting.
lightcurve = self.roll_lightcurve(lightcurve) # Helps prevent overfitting.
# Current network expects a fixed length.
lightcurve = self.make_uniform_length(lightcurve, self.time_steps_per_example)
lightcurve = self.normalize(lightcurve)
lightcurve = np.expand_dims(lightcurve, axis=-1) # Network uses a "channel" dimension.
return lightcurve | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_preprocessing(self):\n\n # For now, do nothing\n pass",
"def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return",
"def __init__(self, features_number, surfaces_dimensions, taus, first_layer_polarities,\n delay_coeff, net_seed = 0, verbose=False):\n self.basis = []\n self.activations = []\n self.taus = taus\n self.layers = len(features_number)\n self.surfaces_dimensions = surfaces_dimensions\n self.features_number = features_number\n self.delay_coeff = delay_coeff\n self.verbose = verbose\n self.polarities = []\n self.polarities.append(first_layer_polarities)\n # attribute containing all surfaces computed in each layer and sublayer\n self.surfaces = []\n # attribute containing all optimization errors computed in each layer \n # and sublayer\n self.errors = []\n #setting the seed\n rng = np.random.RandomState()\n if (net_seed!=0):\n rng.seed(net_seed)\n # In the first layer I am going to process only 2 polarities corresponging\n # to on off events\n num_polarities = 1 \n for layer, nfeatures in enumerate(features_number):\n #basis and activations of a single sublayer\n sublayers_basis = []\n sublayers_activations = []\n self.polarities.append(nfeatures)\n for sublayer in range(2**layer):\n #basis and activations of a single layer\n basis_set = []\n activations_set = []\n for j in range(nfeatures):\n basis_set.append(rng.rand(surfaces_dimensions[layer][1], surfaces_dimensions[layer][0]*num_polarities))\n basis_set[j][surfaces_dimensions[layer][1]//2, [surfaces_dimensions[layer][0]//2 + surfaces_dimensions[layer][0]*a for a in range(num_polarities)]] = 1\n #activations, or aj (as in the paper) are set randomly between -1 and 1\n activations_set.append((rng.rand()-0.5)*2)\n sublayers_basis.append(np.array(basis_set))\n sublayers_activations.append(np.array(activations_set))\n self.basis.append(sublayers_basis)\n self.activations.append(sublayers_activations)\n num_polarities = nfeatures",
"def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)",
"def prepare_learning(self):\n print 'Separating inputs and outputs...'\n self.inputs, self.outputs = extract_samples(self.matches,\n self.input_features,\n self.output_feature)\n\n print 'Normalizing data...'\n self.normalizer, self.inputs = normalize(self.inputs)\n\n print 'Separating train and test sets...'\n self.train_inputs, self.train_outputs, self.test_inputs, self.test_outputs = split_samples(self.inputs, self.outputs)\n\n print 'Building neural network...'\n self.network = buildNetwork(len(self.input_features),\n 2 * len(self.input_features),\n 1,\n outclass=SigmoidLayer,\n bias=True)\n\n print 'Building and filling pybrain train set object...'\n self.train_set = ClassificationDataSet(len(self.input_features))\n\n for i, input_line in enumerate(self.train_inputs):\n self.train_set.addSample(self.train_inputs[i],\n [self.train_outputs[i] - 1])\n\n self.trainer = BackpropTrainer(self.network, dataset=self.train_set,\n momentum=0.5, weightdecay=0.0)\n\n self.train_set.assignClasses()",
"def preprocess(self):\n\n if self.x_range == None:\n x_min = min(np.min(self.fx), np.min(self.gx))\n x_max = max(np.max(self.fx), np.max(self.gx))\n self.x_range = [x_min,x_max]\n\n f_inter = interpolate.interp1d(self.fx, self.fy, 'cubic', fill_value = 'extrapolate')\n g_inter = interpolate.interp1d(self.gx, self.gy, 'cubic', fill_value = 'extrapolate')\n fgx_new = np.linspace(self.x_range[0], self.x_range[1], self.N)\n fy_new = f_inter(fgx_new)\n gy_new = g_inter(fgx_new)\n\n self.fx, self.fy = fgx_new, fy_new\n self.gx, self.gy = fgx_new, gy_new",
"def setup_training(self):\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.total_rewards = []\n self.rewards = []\n self.steps = []\n self.average_rewards = []\n self.average_steps = []\n self.model = initialize_model()\n self.invalid_actions = 0\n self.average_invalid_actions = []\n self.total_invalid_actions = []",
"def preprocess(self):",
"def E_step_precompute(self, model_params, my_suff_stat, my_data):",
"def prep_base(self):\n\n self.config.logger.info(\"Preparing base layer land use data...\")\n\n # set start time\n t0 = time.time()\n\n # extract and process base layer land cover data\n base_data = rdr.read_base(self.config, self.observed_landclasses, self.sequence_metric_dict,\n metric_seq=self.metric_sequence_list, region_seq=self.region_sequence_list)\n\n # unpack variables\n self.spat_ludata, self.spat_water, self.spat_coords, self.spat_aez_region, self.spat_grid_id, self.spat_aez, \\\n self.spat_region, self.ngrids, self.cellarea, self.celltrunk, self.sequence_metric_dict = base_data\n\n self.config.logger.info('PERFORMANCE: Base spatial landuse data prepared in {0} seconds'.format(time.time() - t0))",
"def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()",
"def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()",
"def test_learning_curves():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n data = p.learning_curves()",
"def stage(self):\n\n # prepare projected land allocation data\n self.prep_projected()\n\n # prepare base land use data\n self.prep_base()\n\n # harmonize grid area between projected and base layer land allocation\n self.harmony()\n\n # apply constraints\n self.set_constraints()\n\n # create kernel density filter if not running multiple jobs\n self.kernel_filter()\n\n # set data for step zero\n self.set_step_zero()",
"def _initial_setup(self, **train_kwargs):\n self._update(time_step=0., **train_kwargs)",
"def train_loop_pre(self, current_step):\r\n pass",
"def _pre_draw_bge(self):\r\n self._pre_draw_common()\r\n # draw rays\r\n self._drawRays()",
"def _prepare_image(self, image, initial_shape, gt_shape=None):\n image.landmarks['initial_shape'] = initial_shape\n image = image.rescale_to_reference_shape(\n self.reference_shape, group='initial_shape',\n interpolator=self.interpolator)\n\n if gt_shape:\n image.landmarks['gt_shape'] = initial_shape\n\n if self.n_levels > 1:\n if self.scaled_levels:\n pyramid = image.gaussian_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n else:\n pyramid = image.smoothing_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n images = [compute_features(i, self.feature_type)\n for i in pyramid]\n images.reverse()\n else:\n images = [compute_features(image, self.feature_type)]\n\n return images",
"def _training_before_hook(self):\n pass",
"def before_fit(self):\n self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, \"gather_preds\")\n if not self.run:\n return\n\n # Prepare ground truth container, set here as y_true's always stay the same\n self.y_true = []",
"def __init__(self, *args):\n _hypre.HypreSmoother_swiginit(self, _hypre.new_HypreSmoother(*args))",
"def start(self):\n self.model.stateVector = self.model.initialStateVector\n self.transformPriorErrorCovariance()\n self.transformMeasurementError()",
"def CreateLoader(self,block=0):\n if block==0: \n train_data = MyTrainset_RGB(folder=self.path_trainsets)\n val_data = MyTrainset_RGB(folder=self.path_valsets)\n else:\n if self.mode == 'lpp':\n # the trueimage and image should be RGB images\n folder_RGB_train = os.path.join('Datasets','Trainsets_RGB')\n #else, creates a loader loading output of the previous layer\n folder_temp = os.path.join(self.path_save,'ImagesLastBlock')\n train_data = MyDataset_OneBlock_RGB(\n folder_trueimage = os.path.join(folder_RGB_train),\n folder_blurredimage = os.path.join(folder_RGB_train),\n folder_trueblur = os.path.join(folder_temp,'train','block_'+str(block-1),'trueblur'),\n folder_noise_std = os.path.join(folder_temp,'train','block_'+str(block-1),'noise_std'),\n folder_mk = os.path.join(folder_temp,'train','block_'+str(block-1),'mk_vec'),\n folder_diagSigma = os.path.join(folder_temp,'train','block_'+str(block-1),'diagSigma_vec'),\n folder_newmh = os.path.join(folder_temp,'train','block_'+str(block-1),'newmh_vec'),\n folder_newSigmah = os.path.join(folder_temp,'train','block_'+str(block-1),'newSigmah_vec'),\n folder_Gammap = os.path.join(folder_temp,'train','block_'+str(block-1),'Gammap_vec'),\n folder_LAMBDAk = os.path.join(folder_temp,'train','block_'+str(block-1),'LAMBDAk_vec'),dtype = self.dtype)\n folder_RGB_val = os.path.join('Datasets','Valsets_RGB')\n val_data = MyDataset_OneBlock_RGB(\n folder_trueimage = os.path.join(folder_RGB_val),\n folder_blurredimage = os.path.join(folder_RGB_val),\n folder_trueblur = os.path.join(folder_temp,'val','block_'+str(block-1),'trueblur'),\n folder_noise_std = os.path.join(folder_temp,'val','block_'+str(block-1),'noise_std'),\n folder_mk = os.path.join(folder_temp,'val','block_'+str(block-1),'mk_vec'),\n folder_diagSigma = os.path.join(folder_temp,'val','block_'+str(block-1),'diagSigma_vec'),\n folder_newmh = os.path.join(folder_temp,'val','block_'+str(block-1),'newmh_vec'),\n folder_newSigmah = os.path.join(folder_temp,'val','block_'+str(block-1),'newSigmah_vec'),\n folder_Gammap = os.path.join(folder_temp,'val','block_'+str(block-1),'Gammap_vec'),\n folder_LAMBDAk = os.path.join(folder_temp,'val','block_'+str(block-1),'LAMBDAk_vec'),dtype = self.dtype)\n else:\n #else, creates a loader loading output of the previous layer\n folder_temp = os.path.join(self.path_save,'ImagesLastBlock')\n train_data = MyDataset_OneBlock(\n folder_trueimage = os.path.join(folder_temp,'train','block_'+str(block-1),'trueimage'),\n folder_blurredimage = os.path.join(folder_temp,'train','block_'+str(block-1),'blurredimage'),\n folder_trueblur = os.path.join(folder_temp,'train','block_'+str(block-1),'trueblur'),\n folder_noise_std = os.path.join(folder_temp,'train','block_'+str(block-1),'noise_std'),\n folder_mk = os.path.join(folder_temp,'train','block_'+str(block-1),'mk_vec'),\n folder_diagSigma = os.path.join(folder_temp,'train','block_'+str(block-1),'diagSigma_vec'),\n folder_newmh = os.path.join(folder_temp,'train','block_'+str(block-1),'newmh_vec'),\n folder_newSigmah = os.path.join(folder_temp,'train','block_'+str(block-1),'newSigmah_vec'),\n folder_Gammap = os.path.join(folder_temp,'train','block_'+str(block-1),'Gammap_vec'),\n folder_LAMBDAk = os.path.join(folder_temp,'train','block_'+str(block-1),'LAMBDAk_vec'), dtype = self.dtype)\n val_data = MyDataset_OneBlock(\n folder_trueimage = os.path.join(folder_temp,'val','block_'+str(block-1),'trueimage'),\n folder_blurredimage = os.path.join(folder_temp,'val','block_'+str(block-1),'blurredimage'),\n folder_trueblur = 
os.path.join(folder_temp,'val','block_'+str(block-1),'trueblur'),\n folder_noise_std = os.path.join(folder_temp,'val','block_'+str(block-1),'noise_std'),\n folder_mk = os.path.join(folder_temp,'val','block_'+str(block-1),'mk_vec'),\n folder_diagSigma = os.path.join(folder_temp,'val','block_'+str(block-1),'diagSigma_vec'),\n folder_newmh = os.path.join(folder_temp,'val','block_'+str(block-1),'newmh_vec'),\n folder_newSigmah = os.path.join(folder_temp,'val','block_'+str(block-1),'newSigmah_vec'),\n folder_Gammap = os.path.join(folder_temp,'val','block_'+str(block-1),'Gammap_vec'),\n folder_LAMBDAk = os.path.join(folder_temp,'val','block_'+str(block-1),'LAMBDAk_vec'), dtype = self.dtype)\n self.train_loader = DataLoader(train_data, batch_size=self.batch_size[0], shuffle=True)\n self.val_loader = DataLoader(val_data, batch_size=self.batch_size[1], shuffle=False)",
"def setup(self):\n self.ca_lines = []\n self.ca_lines.append(self.build_initial_line())\n self.set_display_from_lines()",
"def preparePipelines(self):\n\n # Construct the differnent states making up the pipeline\n\n # Input assembly state describes how primitives are assembled\n # This pipeline will assemble vertex data as a triangle lists (though we only use one triangle)\n inputAssemblyState = vk.VkPipelineInputAssemblyStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,\n topology = vk.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST\n )\n # Rasterization state\n rasterizationState = vk.VkPipelineRasterizationStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,\n polygonMode = vk.VK_POLYGON_MODE_FILL,\n cullMode = vk.VK_CULL_MODE_NONE,\n frontFace = vk.VK_FRONT_FACE_COUNTER_CLOCKWISE,\n depthClampEnable = vk.VK_FALSE,\n rasterizerDiscardEnable = vk.VK_FALSE,\n depthBiasEnable = vk.VK_FALSE,\n lineWidth = 1.0\n )\n # Color blend state describes how blend factors are calculated (if used)\n # We need one blend attachment state per color attachment (even if blending is not used\n blendAttachmentState = vk.VkPipelineColorBlendAttachmentState(\n colorWriteMask = 0xf,\n blendEnable = vk.VK_FALSE\n )\n colorBlendState = vk.VkPipelineColorBlendStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,\n attachmentCount = 1,\n pAttachments = [blendAttachmentState]\n )\n # Viewport state sets the number of viewports and scissor used in this pipeline\n # Note: This is actually overriden by the dynamic states (see below)\n viewportState = vk.VkPipelineViewportStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,\n viewportCount = 1,\n scissorCount = 1\n )\n # Enable dynamic states\n # Most states are baked into the pipeline, but there are still a few dynamic states that can be changed within a command buffer\n #To be able to change these we need do specify which dynamic states will be changed using this pipeline. 
Their actual states are set later on in the command buffer.\n # For this example we will set the viewport and scissor using dynamic states\n dynamicStateEnables = [vk.VK_DYNAMIC_STATE_VIEWPORT, vk.VK_DYNAMIC_STATE_SCISSOR]\n dynamicState = vk.VkPipelineDynamicStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,\n dynamicStateCount = len(dynamicStateEnables),\n pDynamicStates = dynamicStateEnables\n )\n\n # Depth and stencil state containing depth and stencil compare and test operations\n # We only use depth tests and want depth tests and writes to be enabled and compare with less or equal\n opState = vk.VkStencilOpState(\n failOp = vk.VK_STENCIL_OP_KEEP,\n passOp = vk.VK_STENCIL_OP_KEEP,\n compareOp = vk.VK_COMPARE_OP_ALWAYS\n )\n depthStencilState = vk.VkPipelineDepthStencilStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,\n depthTestEnable = vk.VK_TRUE,\n depthWriteEnable = vk.VK_TRUE,\n depthCompareOp = vk.VK_COMPARE_OP_LESS_OR_EQUAL,\n depthBoundsTestEnable = vk.VK_FALSE,\n stencilTestEnable = vk.VK_FALSE,\n front = opState,\n back = opState\n )\n # Multi sampling state\n # This example does not make use fo multi sampling (for anti-aliasing), the state must still be set and passed to the pipeline\n multisampleState = vk.VkPipelineMultisampleStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,\n rasterizationSamples = vk.VK_SAMPLE_COUNT_1_BIT,\n pSampleMask = None\n )\n # Vertex input descriptions\n # Specifies the vertex input parameters for a pipeline\n #Vertex input binding\n # This example uses a single vertex input binding at binding point 0 (see vkCmdBindVertexBuffers)\n vertexInputBinding = vk.VkVertexInputBindingDescription(\n binding = 0,\n stride = self.vertexShape.size * self.vertexShape.itemsize,\n inputRate = vk.VK_VERTEX_INPUT_RATE_VERTEX\n )\n # Input attribute bindings describe shader attribute locations and memory layouts\n vertexInputAttributs = []\n # These match the following shader layout (see triangle.vert):\n # layout (location = 0) in vec3 inPos;\n # layout (location = 1) in vec3 inColor;\n # Attribute location 0: Position\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 0,\n # Position attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = 0 # offsetof(vertexShape, position)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 1,\n # Color attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = self.vertexShape[0].size * self.vertexShape.itemsize # offsetof(vertexShape, color)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n\n # Vertex input state used for pipeline creation\n vertexInputState = vk.VkPipelineVertexInputStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,\n vertexBindingDescriptionCount = 1,\n pVertexBindingDescriptions = [vertexInputBinding],\n vertexAttributeDescriptionCount = len(vertexInputAttributs),\n pVertexAttributeDescriptions = vertexInputAttributs\n )\n # Shaders\n shaderStages = []\n # Vertex shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_VERTEX_BIT,\n # Load binary SPIR-V shader\n module = 
vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.vert.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n # Fragment shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_FRAGMENT_BIT,\n # Load binary SPIR-V shader\n module = vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.frag.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n\n # Assign the pipeline states to the pipeline creation info structure\n pipelineCreateInfo = vk.VkGraphicsPipelineCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,\n # The layout used for this pipeline (can be shared among multiple pipelines using the same layout)\n layout = self.pipelineLayout,\n # Renderpass this pipeline is attached to\n renderPass = self.renderPass,\n pVertexInputState = vertexInputState,\n pInputAssemblyState = inputAssemblyState,\n pRasterizationState = rasterizationState,\n pColorBlendState = colorBlendState,\n pMultisampleState = multisampleState,\n pViewportState = viewportState,\n pDepthStencilState = depthStencilState,\n pDynamicState = dynamicState,\n stageCount = len(shaderStages),\n pStages = shaderStages\n )\n # Create rendering pipeline using the specified states\n self.pipelines = vk.vkCreateGraphicsPipelines(self.device, self.pipelineCache, 1, [pipelineCreateInfo], None)\n try:\n self.pipeline = self.pipelines[0]\n except TypeError:\n self.pipeline = self.pipelines\n # Shader modules are no longer needed once the graphics pipeline has been created\n vk.vkDestroyShaderModule(self.device, shaderStages[0].module, None)\n vk.vkDestroyShaderModule(self.device, shaderStages[1].module, None)",
"def __init__(self, factory, radii, heights, layers_lcs, transform_data,\n layers_physical_names, transfinite_r_data, transfinite_h_data,\n transfinite_phi_data, straight_boundary=None,\n layers_surfaces_names=None, surfaces_names=None,\n volumes_names=None):\n primitives = []\n k = 1 / 3.0 # inner quadrangle part of the first layer radius\n transfinite_types = [0, 0, 0, 1, 3]\n h_cnt = 0.0 # height counter\n if layers_lcs is None:\n layers_lcs = [[1 for _ in radii] for _ in heights]\n if surfaces_names is None:\n surfaces_names = [['NX', 'X', 'NY', 'Y', 'NZ', 'Z']]\n if layers_surfaces_names is None:\n layers_surfaces_names = [[0 for _ in radii] for _ in heights]\n if volumes_names is not None:\n new_layers_physical_names = [[volumes_names[x] for x in y]\n for y in layers_physical_names]\n layers_physical_names = new_layers_physical_names\n for i, h in enumerate(heights):\n c = radii[0] / math.sqrt(2.0)\n kc = k * radii[0] / math.sqrt(2.0)\n bottom_h = h_cnt # primitive bottom h\n top_h = h_cnt + h # primitive top h\n h_cnt += h\n if straight_boundary is None:\n # Core center\n primitives.append(Primitive(\n factory,\n [\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [[], [], [], [], [], [], [], [], [], [], [], []],\n [\n transfinite_phi_data,\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[0],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core X\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]], [], [], [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[0],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core Y\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]], [[0, 0, top_h, 1]], [], [],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[0],\n transfinite_h_data[i],\n ],\n transfinite_types[2],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NX\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, 
layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n [\n [], [], [], [],\n [], [[0, 0, bottom_h, 1]], [[0, 0, top_h, 1]], [],\n [], [], [], []\n ],\n [\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NY\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [], [], [[0, 0, top_h, 1]], [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_h_data[i],\n ],\n transfinite_types[4],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Layers\n for j in range(1, len(radii)):\n c1 = radii[j - 1] / math.sqrt(2.0)\n c2 = radii[j] / math.sqrt(2.0)\n # Layer X\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[j],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer Y\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[j],\n transfinite_h_data[i]\n ],\n transfinite_types[2],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NX\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, 
layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[j][0], transfinite_r_data[j][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NY\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[j][0], transfinite_r_data[j][1],\n rc],\n transfinite_h_data[i]\n ],\n transfinite_types[4],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n else:\n if straight_boundary[0] == 0:\n curve_types = {\n 'C': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[0] == 1:\n curve_types = {\n 'C': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[0] == 2:\n curve_types = {\n 'C': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n else:\n curve_types = {\n 'C': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n # Core center\n primitives.append(Primitive(\n factory,\n [\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['C'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []],\n [\n transfinite_phi_data,\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[0],\n layers_physical_names[i][0],\n 
surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core X\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['X'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[0],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][0]]\n ))\n # Core Y\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['Y'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[0],\n transfinite_h_data[i],\n ],\n transfinite_types[2],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NX\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['NX'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NY\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['NY'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_h_data[i],\n ],\n transfinite_types[4],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Layers\n for j in range(1, len(radii)):\n if straight_boundary[j] == 0:\n 
curve_types = {\n 'X': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[j] == 1:\n curve_types = {\n 'X': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[j] == 2:\n curve_types = {\n 'X': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n else:\n curve_types = {\n 'X': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n c1 = radii[j - 1] / math.sqrt(2.0)\n c2 = radii[j] / math.sqrt(2.0)\n # Layer X\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['X'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[j],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer Y\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['Y'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[j],\n transfinite_h_data[i]\n ],\n transfinite_types[2],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NX\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['NX'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[j][0],\n transfinite_r_data[j][1], rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NY\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / 
transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['NY'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[j][0],\n transfinite_r_data[j][1], rc],\n transfinite_h_data[i]\n ],\n transfinite_types[4],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n Complex.__init__(self, factory, primitives)",
"def prepare(params, samples):\r\n return",
"def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)",
"def __loadPreProcessedData(self):\n le = joblib.load(self.le_filename)\n X = np.loadtxt(self.X_filename, delimiter=',').astype(int)\n raw_y = np.loadtxt(self.y_filename, delimiter=',').astype(int)\n y = le.inverse_transform(raw_y)\n ##Initialize atrtribute for this class\n self.le, self.X, self.y = le, X, y",
"def train_loop_begin(self):\r\n for _, train_loss_metric in self.training_losses.items():\r\n train_loss_metric.reset_states()\r\n\r\n for _, metrics in self.training_metrics.items():\r\n for metric in metrics:\r\n metric.reset_states()"
] | [
"0.5853681",
"0.5652716",
"0.5586441",
"0.55639637",
"0.5559372",
"0.55316097",
"0.54928076",
"0.5492174",
"0.5487747",
"0.5486643",
"0.54820913",
"0.5477242",
"0.54748636",
"0.546489",
"0.54609436",
"0.54541045",
"0.5415051",
"0.54064196",
"0.5394433",
"0.5393204",
"0.53924644",
"0.5387611",
"0.537597",
"0.5372213",
"0.5370301",
"0.5352626",
"0.5344943",
"0.53378093",
"0.53361976",
"0.53274214"
] | 0.6309759 | 0 |
Randomly rolls the lightcurve, moving starting elements to the end. | def roll_lightcurve(lightcurve: np.ndarray) -> np.ndarray:
shift = np.random.randint(0, len(lightcurve))
return np.roll(lightcurve, shift) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def roll(self):\n return random.randrange(1, sides + 1)",
"def roll(self):\n\t\treturn randint(1, self.num_sides)",
"def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1",
"def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()",
"def roll(self):\n return randint(1, self.num_sides)",
"def roll(self):\n return randint(1, self.num_sides)",
"def roll(self):\n return randint(1, self.num_sides)",
"def roll(self):\n return randint(1, self.num_sides)",
"def roll(self):\n return randint(1, self.sides)",
"def roll(self):\n return randint(1, self.sides)",
"def roll(self):\n self.current_roll = random.randint(self.min, self.max)\n return self.current_roll",
"def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)",
"def shuffle(self): \n for x in range(12):\n self.right(primary=-60, counter=0)\n time.sleep(.1)\n self.left(primary=-60, counter=0)\n time.sleep(.1)\n self.stop()",
"def totem_random():\n random_head()\n random_head()\n random_head()",
"def rseq(start=0.0, stop=1.0, N=10, randomness=0.5):\n\n return (randomness * sort(start + (stop - start) * rand(N))\n + (1 - randomness) * frange(start, stop, npts=N))",
"def _move_range_shuffle(self, range_len):\n start = randint(0, len(self.state) - range_len)\n end = start + range_len\n\n # print(\"start: \" + str(start))\n # print(\"end: \" + str(end))\n # print(\"range_len: \" + str(range_len))\n # print(\"prior state: \", self.state)\n # print(\"prior dict: \", self.wiz_to_pos)\n\n copy_state = self.state[start:end]\n\n #for wizard in copy_state:\n # print(wizard)\n\n random.shuffle(copy_state)\n\n for i, wizard in enumerate(copy_state):\n #print(\"wiz1_loop: \" + wizard)\n self.state[start + i] = wizard\n self.wiz_to_pos[wizard] = start + i\n\n # print(\"post state: \", self.state)\n # print(\"post dict: \", self.wiz_to_pos)\n # print('\\n Error:', self.dict_check())\n # print(\"end\\n \\n\")",
"def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1",
"def update(self):\n if self.iteration > self.rate:\n self.iteration = 0\n heading = (random.random() * 180) - 90\n self.speed = 0.1\n if heading >= 0:\n self.heading = heading\n else:\n self.heading = 360 + heading\n self.iteration += 1\n self.setVector(self.speed, self.heading)",
"def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,distance)\n turtle.left(angle)\n turtle.forward(d)",
"def roll(self):\n return random.randint(1,self.sides)\n #return int(self.sides*random.random() + 1.0)",
"def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5",
"def roll(self):\n return randint(1,6)",
"def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100",
"def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)",
"def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)",
"def _sample_lam(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_lam = self.lam\n \n # modify the feature ownership matrix\n self.lam = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.lam = old_lam",
"def pull_arm(self):\n return np.random.normal(loc = 0, scale = 1)+self.mean",
"def _move_range_mirror(self, range_len):\n #start1 = randint(range_len, len(self.state) - range_len)\n start = randint(0, len(self.state) - range_len)\n #range_list = choice([[start1, start1 - range_len], [start2, start2 + range_len]])\n end = start + range_len\n\n copy_state = self.state[start:end]\n copy_state.reverse()\n self.state[start:end] = copy_state\n\n for wizard in self.state[start:end]:\n self.wiz_to_pos[wizard] = self.state.index(wizard)",
"def roll(self):\n return random.choice(self.sides)",
"def roll(d=20):\n\treturn random.randint(1, d)"
] | [
"0.6308937",
"0.5868974",
"0.5851354",
"0.58155835",
"0.5804617",
"0.5804617",
"0.5804617",
"0.5804617",
"0.57860935",
"0.57860935",
"0.5687163",
"0.568123",
"0.5665804",
"0.5656522",
"0.5652201",
"0.5631834",
"0.56227976",
"0.5609408",
"0.5596563",
"0.5579034",
"0.55781525",
"0.55763113",
"0.5573231",
"0.55564713",
"0.55564713",
"0.55556726",
"0.55492157",
"0.554218",
"0.5534474",
"0.54965645"
] | 0.72958153 | 0 |
Builds a list of possible criteria for a given tweet image url | def build_criteria_for_image(image_url):
return CriteriaBuilder.image_to_search_criteria_candidates(image_url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_image_link():\n image_links = set()\n supplemented_keyword = urllib.parse.quote(\n supplemented_keywords[random.randint(0,\n len(supplemented_keywords) - 1)],\n safe='')\n main_keyword = urllib.parse.quote(\n main_keywords[random.randint(0,\n len(main_keywords) - 1)], safe='')\n\n # print('the theme of cats: ' + supplemented_keyword)\n\n search_query = (main_keyword + ' ' + supplemented_keyword).replace(\n ' ', '%20')\n url = 'https://www.google.com/search?q=' + \\\n search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n while 'https://' not in image_link or r'\\\\u' in image_link or '.jpg' not in image_link:\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n return image_link",
"def _find_urls(photo):\n url = None\n for t in urltypes:\n if t in photo:\n if photo[t] in urls:\n return None, None\n url = photo[t]\n break\n thumb_urls = []\n for t in thumb_urltypes:\n if t in photo:\n thumb_urls.append(photo[t])\n return thumb_urls, url",
"def analyze_twitter_user_images(twitter_id, twitter_screen_name, twitter_image_url_https, kind_of_image_twitter_profile,\n kind_of_image_twitter_background, kind_of_image_twitter_banner):\n twitter_image_type = None\n twitter_profile_image_url_https = None\n twitter_profile_background_image_url_https = None\n twitter_profile_banner_url_https = None\n analyze_image_url_results = {\n 'image_url_valid': False,\n 'image_width': None,\n 'image_height': None,\n 'image_format': None\n }\n\n if kind_of_image_twitter_profile:\n twitter_profile_image_url_https = twitter_image_url_https\n twitter_image_type = TWITTER_PROFILE_IMAGE_NAME\n analyze_image_url_results = analyze_remote_url(twitter_profile_image_url_https)\n if not analyze_image_url_results['image_url_valid']:\n # image url is broken so reaching out to Twitter and getting new image\n twitter_profile_image_url_https = retrieve_latest_twitter_image_url(\n twitter_id, twitter_screen_name, kind_of_image_twitter_profile=True)\n if not twitter_profile_image_url_https:\n # new twitter image url not found\n error_results = {\n 'twitter_id': twitter_id,\n 'twitter_screen_name': twitter_screen_name,\n 'twitter_profile_image_url_https': twitter_profile_image_url_https,\n 'twitter_profile_background_image_url_https': twitter_profile_background_image_url_https,\n 'twitter_profile_banner_url_https': twitter_profile_banner_url_https,\n 'twitter_image_type': twitter_image_type,\n 'analyze_image_url_results': analyze_image_url_results\n }\n return error_results\n\n # new twitter image url found\n analyze_image_url_results = analyze_remote_url(twitter_profile_image_url_https)\n\n elif kind_of_image_twitter_background:\n twitter_profile_background_image_url_https = twitter_image_url_https\n twitter_image_type = TWITTER_BACKGROUND_IMAGE_NAME\n analyze_image_url_results = analyze_remote_url(twitter_profile_background_image_url_https)\n if not analyze_image_url_results['image_url_valid']:\n # image url is broken so reaching out to Twitter and getting new image\n twitter_profile_background_image_url_https = retrieve_latest_twitter_image_url(\n twitter_id, twitter_screen_name, kind_of_image_twitter_background=True)\n if not twitter_profile_background_image_url_https:\n # new twitter image url not found\n error_results = {\n 'twitter_id': twitter_id,\n 'twitter_screen_name': twitter_screen_name,\n 'twitter_profile_image_url_https': twitter_profile_image_url_https,\n 'twitter_profile_background_image_url_https': twitter_profile_background_image_url_https,\n 'twitter_profile_banner_url_https': twitter_profile_banner_url_https,\n 'twitter_image_type': twitter_image_type,\n 'analyze_image_url_results': analyze_image_url_results\n }\n return error_results\n\n # new twitter image url found\n analyze_image_url_results = analyze_remote_url(twitter_profile_background_image_url_https)\n\n elif kind_of_image_twitter_banner:\n twitter_profile_banner_url_https = twitter_image_url_https\n twitter_image_type = TWITTER_BANNER_IMAGE_NAME\n analyze_image_url_results = analyze_remote_url(twitter_profile_banner_url_https)\n if not analyze_image_url_results['image_url_valid']:\n # image url is broken so reaching out to Twitter and getting new image\n twitter_profile_banner_url_https = retrieve_latest_twitter_image_url(\n twitter_id, twitter_screen_name, kind_of_image_twitter_background=True)\n if not twitter_profile_banner_url_https:\n # new twitter image url not found\n error_results = {\n 'twitter_id': twitter_id,\n 'twitter_screen_name': twitter_screen_name,\n 
'twitter_profile_image_url_https': twitter_profile_image_url_https,\n 'twitter_profile_background_image_url_https': twitter_profile_background_image_url_https,\n 'twitter_profile_banner_url_https': twitter_profile_banner_url_https,\n 'twitter_image_type': twitter_image_type,\n 'analyze_image_url_results': analyze_image_url_results\n }\n return error_results\n\n # new twitter image url found\n analyze_image_url_results = analyze_remote_url(twitter_profile_banner_url_https)\n\n results = {\n 'twitter_id': twitter_id,\n 'twitter_screen_name': twitter_screen_name,\n 'twitter_profile_image_url_https': twitter_profile_image_url_https,\n 'twitter_profile_background_image_url_https': twitter_profile_background_image_url_https,\n 'twitter_profile_banner_url_https': twitter_profile_banner_url_https,\n 'twitter_image_type': twitter_image_type,\n 'analyze_image_url_results': analyze_image_url_results\n }\n return results",
"def collect_urls_by_keyword(keyword: str, page_num: int, n_max: int, scenery: bool = False):\n if scenery:\n keyword += '-scenery'\n r = requests.get(f'https://api.unsplash.com/search/collections?query={keyword}&page={page_num}&per_page={n_max}&client_id=1H5zbugfvk1UOnX60y2yiMyjNMrE-vdpThxGkP1y9_E')\n data = r.json()\n results = data['results']\n urls = [res['cover_photo']['urls']['raw'] for res in results]\n return urls",
"def get_image_links(queries):\n images = []\n\n for query in queries:\n url = 'http://www.bing.com/images/search?q=' + urllib.quote_plus(query) + '&FORM=HDRSC2'\n soup = BeautifulSoup(requests.get(url).text, 'lxml')\n links = [a['src'] for a in soup.find_all('img', {'src': re.compile('mm.bing.net')})]\n images.extend(links)\n time.sleep(5) # wait 5 seconds before next scrape\n\n return images",
"def _get_values(self, inputs:list) ->list:\n result = inputs[0].str.contains('https://pbs.twimg.com/|.png|.jpg').astype(int).values\n result = result.reshape(-1, 1)\n return result",
"def get_possible_thumbnail(self):\n meta = self.get_meta_data()\n print meta\n if \"og:image\" in meta:\n return meta[\"og:image\"]\n elif \"twitter:image:src\" in meta:\n return meta[\"twitter:image:src\"]\n else:\n images = self.get_image_data()\n temp_url = \"\"\n temp_width = 0\n for img in images:\n if img[\"image_width\"] > temp_width:\n temp_url = img[\"image_url\"]\n temp_width = img[\"image_width\"]\n\n return temp_url",
"def build_end_url_list(url):\n http_types = [\"http://\", \"https://\"]\n dub_types = [\"www.\", \"\"] # this order needs to preserved for testing at www.hgdatascience.com\n http_dub_urls = [\"{}{}{}\".format(h_type, dub_type, url) for dub_type in dub_types for h_type in http_types]\n return http_dub_urls",
"def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n # Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets",
"def construct_url(screen_name):\n number_of_tweets = \"200\"\n urls = []\n for x in xrange(1, 6):\n urls.append('https://api.twitter.com/1.1/statuses/user_timeline.json?'\n 'screen_name=' + screen_name + '&count=' +\n number_of_tweets + '&page=' + str(x))\n return urls",
"def find_target_urls(url_list):\n candidate_urls = []\n \n #iterate through urls\n for url in get_urls(get_clean_text(message_list)):\n #skip any urls from our 33mail mask domain\n if re.findall('33mail', url):\n pass\n #return everything else\n else:\n candidate_urls.append(url)\n return candidate_urls",
"def check_source_image_active(twitter_user_images_dict, kind_of_image_twitter_profile,\n kind_of_image_twitter_background, kind_of_image_twitter_banner):\n is_active_version = False\n if kind_of_image_twitter_profile:\n latest_twitter_profile_image_url_https = retrieve_latest_twitter_image_url(\n twitter_user_images_dict['twitter_id'], twitter_user_images_dict['twitter_screen_name'],\n kind_of_image_twitter_profile=True)\n if latest_twitter_profile_image_url_https == \\\n twitter_user_images_dict['twitter_profile_image_url_https']:\n is_active_version = True\n\n elif kind_of_image_twitter_background:\n latest_twitter_profile_background_image_url_https = retrieve_latest_twitter_image_url(\n twitter_user_images_dict['twitter_id'], twitter_user_images_dict['twitter_screen_name'],\n kind_of_image_twitter_background=True)\n if latest_twitter_profile_background_image_url_https == \\\n twitter_user_images_dict['twitter_profile_background_image_url_https']:\n is_active_version = True\n\n elif kind_of_image_twitter_banner:\n latest_twitter_profile_banner_url_https = retrieve_latest_twitter_image_url(\n twitter_user_images_dict['twitter_id'], twitter_user_images_dict['twitter_screen_name'],\n kind_of_image_twitter_banner=True)\n if latest_twitter_profile_banner_url_https == \\\n twitter_user_images_dict['twitter_profile_banner_url_https']:\n is_active_version = True\n\n return is_active_version",
"def FindImages(text, widgetName):\n\n winAppearance = {}\n start = text.find(\"class='appearance'\")\n\n if start < 0:\n return winAppearance\n\n imagesDir = GetDocImagesDir()\n\n end = start + text.find(\"</table>\")\n text = text[start:end]\n split = text.split()\n\n for indx, items in enumerate(split):\n\n if \"src=\" in items:\n possibleImage = items.replace(\"src=\", \"\").strip()\n possibleImage = possibleImage.replace('\"', \"\")\n f = urllib.request.urlopen(_trunkURL + possibleImage)\n stream = f.read()\n elif \"alt=\" in items:\n plat = items.replace(\"alt=\", \"\").replace(\"'\", \"\").strip()\n path = os.path.join(imagesDir, plat, widgetName + \".png\")\n if not os.path.isfile(path):\n image = wx.ImageFromStream(BytesIO(stream))\n image.SaveFile(path, wx.BITMAP_TYPE_PNG)\n\n winAppearance[plat] = path\n\n return winAppearance",
"def make_image_data_list(image_filenames):\n img_requests = []\n for imgname in image_filenames:\n try:\n with open(imgname, 'rb') as f:\n ctxt = b64encode(f.read()).decode()\n img_requests.append({\n 'image': {'content': ctxt},\n 'features': [{\n 'type': 'TEXT_DETECTION',\n 'maxResults': 1\n }]\n })\n\n except:\n print(\"Image not found\")\n\n\n return img_requests",
"def topictweets(url):\n article = get_article(url)\n keywords = get_keywords(article['text'])\n entities = get_entities(article['text'])\n q = twitter_query(keywords, entities)\n result = search({'q': q, 'count': 100, 'result_type': 'mixed'})\n tweets = screen_name_filter(result.statuses, 'media')\n return tweets",
"def _buildImageParams(self, items):\n params = {}\n # Empty items returns empty params\n if not items:\n return params\n\n for item in items:\n if item.find('=') != -1:\n param, value = item.split('=', 1)\n params[param] = value\n else:\n params[item] = True\n\n if 'page' in params and params['page'] is not True:\n params['link'] = self._getWikiLink(params['page'])\n\n # Validate params with limited # of values\n for param_allowed in IMAGE_PARAMS:\n if (param_allowed in params and\n not (params[param_allowed] in IMAGE_PARAMS[param_allowed])):\n del params[param_allowed]\n\n return params",
"def photo_dict(phrase):\n switcher = {\n '병원 위치': 'https://maps.googleapis.com/maps/api/staticmap?center=37.507144,127.063737&zoom=16&size=640x480&markers=color:blue%7Clabel:S%7C37.507144,127.063737&key=AIzaSyCF-XXYf7IW1mkUZFeZF84BCcZdtC-z1M0',\n '병원 운영시간': 'http://gunn.pausd.org/sites/default/files/16-17-Bell-Schedule-Color---Compatible-Font.png',\n '프로모션 A': 'http://media.dontpayfull.com/media/deals/eurostar-promo-code.jpg',\n '프로모션 B': 'http://media.dontpayfull.com/media/deals/namebubbles-com-coupon-code.jpg',\n '프로모션 C': 'https://s-media-cache-ak0.pinimg.com/originals/79/79/31/79793174d230a27e9168bbccb33df62f.jpg',\n '의료진': 'https://s-media-cache-ak0.pinimg.com/736x/f4/89/ef/f489ef22363cf1e4c2a4fb5b1cd8aec5.jpg',\n '병원 사진': 'https://www.hpcimedia.com/images/website/ManChemNews/DIR_30/F_28071.jpg',\n '병원 진료과목': 'https://s-media-cache-ak0.pinimg.com/originals/d5/05/09/d505091a57d42d3ed1de8b6f9d906fdb.jpg'\n }\n default_url = 'http://autopartstoys.com/images/M127205243.jpg'\n return switcher.get(phrase, default_url)",
"def parse(self):\n imset = []\n imdir = remkdir(os.path.join(self._datadir, 'images'))\n csv_actors = readcsv(os.path.join(self._datadir, 'facescrub_actors.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actors[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'male'}))\n\n csv_actresses = readcsv(os.path.join(self._datadir, 'facescrub_actresses.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actresses[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'female'}))\n\n return imset",
"def get_images_by_vulnerability(self, **kwargs):\n ...",
"def extract_images_url(url, source):\n if source == \"mangaseeonline\":\n r = s.post(\n \"http://playwright:5000/scrape\",\n json={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@id=\"TopPage\"]/descendant::img/@src')\n if source == \"nettruyen\":\n r = s.get(\n settings.SPLASH_URL, params={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@class=\"reading-detail box_doc\"]/div/img/@src')\n if source == \"doctruyen3q\":\n r = s.get(\n settings.SPLASH_URL, params={\"url\": url, \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[contains(@id, \"page_\")]/img/@src')\n if source == \"truyenkinhdien\":\n r = s.get(\n settings.SPLASH_URL.replace(\"render.html\", \"execute\"),\n params={\"url\": url, \"lua_source\": lua_script, \"wait\": 1},\n )\n tree = html.fromstring(r.json()[\"html\"])\n return tree.xpath(\n '//*[@class=\"sgdg-gallery\"]/a[not(contains(@style,\"display:none\"))]/img/@src'\n )",
"async def get_url_images(session, url):\n content = await get_page(session, url)\n if not content:\n return []\n soup = BeautifulSoup(content, features=\"html.parser\")\n image_sources = [img['src'] for img in soup.find_all('img')]\n image_sources_fixed = [f'https:{source}' if 'https:' not in source else source for source in image_sources]\n images = []\n for source in image_sources_fixed:\n image = await get_image(session, source)\n if image:\n images.append((source, image))\n\n return images",
"def filter_thumbnail_only(_list):\n result = list()\n for count, href in enumerate(_list):\n if count > 15:\n break\n if get_verified_response(get_thumbnail(href)).status == 200:\n result.append(href)\n return result",
"def getTagsFromUrlList(self,ImageUrlList):\n dict={}\n #print ImageUrlList\n #print ('printing each url')\n for imageUrl in ImageUrlList :\n #print (imageUrl[0])\n tagsJson,status = self.getTagsUsingImageUrl(imageUrl[0])\n tagList,jsonParseStatus=self.parseJson(tagsJson)\n dict[imageUrl[0]]=tagList\n #print (dict)\n return dict,max(status,jsonParseStatus)",
"def crawling_images_url(self, data_dict, output_choice, commentors = False, tagged = True):\n\n content = []\n usernamelist = []\n\n for key in data_dict:\n data = self.json_url(data_dict[key]['url'])\n tempdict = data['entry_data']['PostPage'][0]['graphql']['shortcode_media']\n \n if commentors:\n for comment in tempdict['edge_media_to_comment']['edges']:\n newuser = comment['node']['owner']['username']\n if newuser not in usernamelist and newuser != self.username:\n usernamelist.append(newuser)\n data1 = self.rootuser_info(self.userpage_scraper(newuser))\n if output_choice[0]:\n print(data1)\n elif output_choice[1]:\n content.append(data1)\n\n if tagged:\n for tag in tempdict['edge_media_to_tagged_user']['edges']:\n newuser = tag['node']['user']['username']\n if newuser not in usernamelist and newuser != self.username:\n usernamelist.append(newuser)\n data1 = self.rootuser_info(self.userpage_scraper(newuser))\n if output_choice[0]:\n print(data1)\n elif output_choice[1]:\n content.append(data1)\n\n if output_choice[1]:\n self.pretty_print({'content': content}, False)",
"def scrape(self):\n reg = re.compile(self.regex)\n images = self.soup.findAll('img')\n results = []\n for img in images:\n try:\n url = dict(img.attrs)['src']\n url = self._make_url_path(url)\n if reg.match(url):\n results.append(url)\n\n except:\n pass\n\n print 'Img tag scraping OK'\n return results",
"async def getImageURLS(self, tags, fuzzy=False, singlePage=False):\n if fuzzy:\n tags = tags.split(\" \")\n for tag in tags:\n tag = tag + \"~\"\n temp = \" \"\n tags = temp.join(tags)\n print(tags)\n num = await self.totalImages(tags)\n if num != 0:\n PID = 0\n imgList = []\n XML = None\n t = True\n tempURL = self.urlGen(tags=tags, PID=PID)\n while t:\n with async_timeout.timeout(10):\n async with self.session.get(url=tempURL) as XML:\n XML = await XML.read()\n XML = ET.XML(XML)\n XML = self.ParseXML(XML)\n if XML is None:\n return None\n if len(imgList) >= int(XML['posts']['@count']): # \"if we're out of images to process\"\n t = False # \"end the loop\"\n else:\n for data in XML['posts']['post']:\n imgList.append(str(data['@file_url']))\n if singlePage:\n return imgList\n PID += 1\n return imgList\n else:\n return None",
"def img_urls(self, media, type = \"low_resolution\"):\n\n imgs = {}\n\n for item in media:\n if item[\"type\"] != \"image\":\n continue\n\n imgs[item[\"id\"]] = item[\"images\"][type][\"url\"]\n\n return imgs",
"def get_image_vulnerabilities(self, **kwargs) -> ImageVulnerabilitiesReport:\n ...",
"def get_google_image_urls(query_str, n_imgs=3):\n base = 'http://www.google.com/search?'\n query_vars= {'q': query_str, 'tbm': 'isch'}\n google_resp = requests.get(base, params=query_vars)\n\n if google_resp.status_code != 200:\n print(google_resp.status_code)\n raise requests.exceptions.RequestException('Google Image GET request failed!')\n\n google_soup = BeautifulSoup(google_resp.text\n , 'html.parser')\n img_tags = google_soup.find_all('img')\n\n if len(img_tags) == 0:\n raise UserWarning('No images were found with query string %s' % query_str)\n\n img_urls = list(map(lambda x: x['src'], img_tags))[:min(n_imgs, len(img_tags)-1)]\n return(img_urls)",
"def get_images(self,soup,Images):\n \n img=soup.find_all('a',href=re.compile(\"/photo.php?fbid=\"))\n img1=soup.find_all('a',href=re.compile(\"/photo\"))\n m=' '\n if img !=[]:\n img_href='https://www.facebook.com'+img[0]['href']\n m+=img_href+'\\n'\n \n elif img1 !=[]:\n img_href='https://www.facebook.com'+img1[0]['href']\n m+=img_href+'\\n'\n \n else:\n img=soup.find_all('a',href=re.compile(\"pcb\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n' \n \n \n else:\n img=soup.find_all('a',href=re.compile(\"photos\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n'\n \n Images.append(m)\n \n return Images"
] | [
"0.5719972",
"0.5613022",
"0.55762494",
"0.5220945",
"0.5181238",
"0.517683",
"0.51250625",
"0.50868136",
"0.5066444",
"0.5054839",
"0.50462866",
"0.50453806",
"0.50050765",
"0.50005877",
"0.4949409",
"0.49419117",
"0.49135473",
"0.49121162",
"0.49065983",
"0.48964915",
"0.48854837",
"0.4884732",
"0.48822063",
"0.48725492",
"0.4863637",
"0.48619068",
"0.48512608",
"0.48334712",
"0.4831197",
"0.48192474"
] | 0.7036948 | 0 |
Returns a list of twittersearch.result with the given search results for each candidate | def find_tweets(criteria_candidates):
results = TweetFinder.__get_results(criteria_candidates)
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_search_results(twitter_dict, search_dict):\r\n\r\n search_list = [search_dict['username']] \r\n search_specified_list = []\r\n\r\n for user in search_list:\r\n search_users_list = [user]\r\n \r\n for operation in search_dict['operations']:\r\n search_users_list = search_helper(search_users_list, operation,\\\r\n twitter_dict)\r\n \r\n search_specified_list += search_users_list\r\n \r\n return search_specified_list",
"def getResults():",
"def get_candidates(results, num_results=None):\n candidates = OrderedDict()\n\n for result in results.order_by('candidate__race_type', 'candidate__order',\n 'entry_version'):\n candidate = result.candidate\n\n if candidates.get(candidate):\n candidates[candidate].append(result)\n else:\n candidates.update({candidate: [result]})\n\n return [[c] + r[0:num_results] if num_results else r\n for c, r in candidates.items()]",
"def get_results_for(t_client, search_q):\n results = t_client.search(q=\"#\"+search_q)\n\n # This can be refactored\n return [\n {\n \"author\": \"@%s\" % t.from_user,\n \"text\": t.text,\n \"id\": t.id,\n \"date_h\": t.created_at.strftime(\"%H:%M:%S %d/%m/%Y\"),\n \"date\": time.mktime(t.created_at.timetuple()),\n } for t in results\n ]",
"def parse_search_results(fields, results):\n my_results = []\n for result in results:\n my_results.append(SearchAnimeResult(fields, result))\n return my_results",
"def get_all_candidates(self) -> list:",
"def get_list_of_results(self):\n return self.__result_list",
"def get_pokemon_results(results):\n response = []\n for result in results:\n hashtags = result.hashtags\n text = result.text\n if is_likely_pokemon_related(text, hashtags):\n try:\n response.append(format_content(text))\n except Exception as error:\n logger.error(error)\n return response",
"def searchresults_entries(searchresults_soup):\n for elem in searchresults_soup.find_all(\"div\", class_=\"patternSearchResult\"):\n href = _getAttr(elem.select(\"div.twikiTopRow > a\"), \"href\")\n web, topic = tuple(href.split(\"/\")[-2:])\n rev = _getStr(elem.select(\"span.twikiSRRev > a\"))\n author = _getStr(elem.select(\"span.twikiSRAuthor > a\"))\n summary = _getStr(elem.select(\"div.twikiSummary\"))\n yield (\"{web}.{topic}\".format(web=web, topic=topic), rev, author, summary)",
"def available_results(self):\n out = []\n for i in range(len(self)):\n out.append(self._get_result(i))\n return out",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def results(self):\n\n return self._search_resut",
"def parse_wiki_search_candidates(search_result: str, base_url: str, cache: Cache = None) -> list:\n hyperlinks_list = []\n\n results_html = BeautifulSoup(search_result, \"html.parser\")\n html_headings = results_html.findAll(\"div\", class_=\"mw-search-result-heading\")\n\n for heading in html_headings:\n heading_a = heading.find(\"a\")\n heading_link_end = heading_a[\"href\"]\n heading_title = heading_a[\"title\"]\n heading_link = f\"{base_url}{heading_link_end}\"\n\n if cache is not None:\n cache.add(heading_link)\n\n if heading_link[-1] == \")\":\n heading_link = list(heading_link)\n heading_link[-1] = \"\\\\)\"\n heading_link = \"\".join(heading_link)\n\n hyperlink = f\"[{heading_title}]({heading_link})\"\n hyperlinks_list.append(hyperlink)\n\n return hyperlinks_list",
"def fetch_search_results (self, search_str, list_from=0, list_to=10):\n # properly encode the search string\n encoded_search_string = quote(search_str)\n\n paths = [\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'titles', ['id', 'length', 'name', 'trackIds', 'requestId']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', ['id', 'length', 'name', 'trackIds', 'requestId']]\n ]\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Search results')",
"def show_search_results():\n\n #Get values from search-box via AJAX\n current_keyword = request.form.get('search').lower()\n print \"**********************\"\n print current_keyword\n print \"**********************\"\n tweets = get_tweets_by_api(term=current_keyword)\n\n result = []\n\n for tweet in tweets:\n # Exclude retweets since they appear as duplicatses to endu ser\n if tweet.retweeted_status is None:\n # Convert tweet text from unicode to text\n tweet_id = tweet.id\n text = unicodedata.normalize('NFKD', tweet.text).encode('ascii', 'ignore')\n # Find URL in text and bind to url\n # url = re.search('((?:http|https)(?::\\\\/{2}[\\\\w]+)(?:[\\\\/|\\\\.]?)(?:[^\\\\s\"]*))', text)\n url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)\n # Remove URL from text\n text_wo_url = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', text, flags=re.MULTILINE)\n # Handle / Name\n user = unicodedata.normalize('NFKD', tweet.user.screen_name).encode('ascii', 'ignore')\n # Count of favorites\n favorite_count = tweet.favorite_count\n #Return dictionary of hashtags with hashtag as key and number of occurances as value\n if tweet.hashtags:\n # Convert hashtags from unicode to string\n ht_list = []\n for hashtag in tweet.hashtags:\n ht_str = unicodedata.normalize('NFKD', hashtag.text).encode('ascii', 'ignore')\n ht_list.append(ht_str.lower())\n hashtags = Counter(ht_list)\n else:\n hashtags = tweet.hashtags\n # Convert tweet from unicode to datetime\n created_at = tweet.created_at\n # format created_at string to ISO 8610\n created_at_str = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y'))\n # create a moment from the string\n created_at = moment.date(created_at_str, 'YYYY-MM-DD HH:mm:ss')\n result.append({'created_at': created_at_str, 'tweet_text': text_wo_url, 'user': user,\n 'favorite_count': favorite_count, 'hashtags': hashtags,\n 'url': url, 'tweet_id': tweet_id})\n\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n print result\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n\n return jsonify(result=result) #, tweets",
"def results(self) -> list:\n return self.__results",
"def results(self):\n return self._result_list",
"def list(self):\n return self.results_list",
"def get_search_results(results):\n if len(results) == 0:\n return []\n if type(results) == tuple and len(results) == 2:\n (code, arr) = results\n elif type(results) == list:\n arr = results\n\n res = []\n for item in arr:\n res.append(LDAPSearchResult(item))\n\n return res",
"def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return",
"def search(search, candidates):\n choicer = choices.Choice()\n for candidate in candidates:\n choicer.add(candidate)\n return choicer.search(search)",
"def _search_torrents(ctx, client, query):\n\n result_count = ctx.params.get('results', 25)\n (discovered, merged,) = (set(), set(),)\n\n def _torrent_callback(item, **kwargs):\n discovered.add(item)\n\n with _build_spinner(ctx, (\n '{style.BOLD} searching for '\n '{fore.GREEN}{query}{style.RESET} ...'\n ).format(**COLORED, **locals())):\n # perform the actual search\n client.search(query, _torrent_callback, results=result_count)\n\n with _build_spinner(ctx, (\n '{style.BOLD} merging trackers for {fore.GREEN}{discovered_count}'\n '{style.RESET} {style.BOLD}results{style.RESET} ...'\n ).format(discovered_count=len(discovered), **COLORED, **locals())):\n # create mappings dictionary of hashes and trackers\n mappings = {}\n for torrent in discovered:\n if torrent['hash'] not in mappings:\n mappings[torrent['hash']] = set()\n for tracker in furl.furl(torrent['magnet']).args.getlist('tr'):\n mappings[torrent['hash']].add(tracker)\n\n # update torrent item magnets with full mappings dictionary\n for torrent in discovered:\n if torrent['hash'] in mappings:\n merged_trackers = furl.furl(torrent['magnet'])\\\n .remove('tr')\\\n .add({'tr': list(mappings[torrent['hash']])}).url\n torrent['magnet'] = (\n 'magnet:{merged_trackers}'\n ).format(**locals())\n merged.add(torrent)\n\n for torrent in _sort_torrents(\n ctx, list(merged),\n ctx.params.get('sort', 'seeders')\n ):\n yield torrent",
"def get_candidates(self, searchspace):\n candidates_list = list()\n candidates = [x for x in product(*searchspace[1])]\n for c in candidates:\n params = {}\n for name, value in zip(searchspace[0], c):\n params[name] = value\n candidates_list.append(CandidateDescriptor(**params))\n\n return candidates_list",
"def search_results(self):\r\n route_name = self.request.matched_route.name\r\n mdict = self.matchdict\r\n rdict = self.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n # Always search the fulltext content\r\n with_content = True\r\n\r\n conn_str = self.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n params = self.params\r\n page = params.get('page', 0)\r\n count = params.get('count', 50)\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif self.request.user and self.request.user.username:\r\n username = self.request.user.username\r\n\r\n res_list = searcher.search(\r\n phrase,\r\n content=with_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page,\r\n )\r\n\r\n # if the route name is search_ajax we want a json response\r\n # else we just want to return the payload data to the mako template\r\n if 'ajax' in route_name or 'api' in route_name:\r\n return {\r\n 'success': True,\r\n 'message': \"\",\r\n 'payload': {\r\n 'search_results': [dict(res) for res in res_list],\r\n 'result_count': len(res_list),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }\r\n }\r\n else:\r\n return {\r\n 'search_results': res_list,\r\n 'count': len(res_list),\r\n 'max_count': 50,\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }",
"def extractSearchResults(self, html):\n results = list()\n soup = BeautifulSoup(html, 'html.parser')\n div = soup.find('div', id='main')\n if (type(div) == types.NoneType):\n div = soup.find('div', id='center_col')\n if (type(div) == types.NoneType):\n div = soup.find('body')\n if (type(div) != types.NoneType):\n lis = div.findAll('a')\n if(len(lis) > 0):\n for link in lis:\n if (type(link) == types.NoneType):\n continue\n \n url = link['href']\n if url.find(\".google\") > 6:\n continue\n \n url = self.extractUrl(url)\n if(cmp(url, '') == 0):\n continue\n title = link.renderContents()\n title = re.sub(r'<.+?>', '', title)\n result = SearchResult()\n result.setURL(url)\n print '### URL: ' + url\n result.setTitle(title)\n span = link.find('div')\n if (type(span) != types.NoneType):\n content = span.renderContents()\n content = re.sub(r'<.+?>', '', content)\n result.setContent(content)\n results.append(result)\n return results",
"def get_results(self):\n return self.results",
"def get_results(self):\n return self.results",
"def __find_correlations(self, results):\n\n for result in results[:self.__result_limit]:\n\n # pub without venue\n if len(result['ven']) == 0:\n result['alternative'] = []\n\n with self.vix.searcher(weighting=Frequency) as vs:\n vq_parse = QueryParser('key', self.vix.schema).parse(result['pub']['crossref'])\n tresult = vs.search(vq_parse, limit=None, )\n if len(tresult) != 0:\n result['ven'] = {}\n result['added'] = 1\n for attr in tresult[0].items():\n result['ven'][attr[0]] = attr[1]\n\n self.__output.append(result)\n\n # venue without pub or venue with a list of pubs\n elif len(result['pub']) == 0 or (\n isinstance(result['pub'], list) and len(result['pub']) > 1):\n result['alternative'] = []\n\n with self.pix.searcher(weighting=Frequency) as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(result['ven']['key'])\n tresult = ps.search(pq_parse, limit=None, )\n\n if len(tresult):\n plist = []\n tmp = dict()\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in result['pub']]:\n plist.append(attr[1])\n break\n\n result['alternative'] = plist\n self.__output.append(result)\n\n # mixed case\n elif len(self.__output) == 0 or not result['ven']['key'] in [x['key'] for x in self.__output]:\n lis = [x for x in results if len(x['ven']) and x['ven']['key'] == result['ven']['key']]\n tmp = {}\n if len(lis) <= 1:\n tmp = {'key': result['pub']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n else:\n tmp = {'key': result['ven']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n plist = []\n with self.pix.searcher() as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(tmp['key'])\n tresult = ps.search(pq_parse, limit=None, )\n if len(tresult):\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in tmp['pub']]:\n plist.append(attr[1])\n break\n\n tmp['alternative'] = plist\n self.__output.append(tmp)",
"def get_all_mkresults(self):\n\n all_results = ()\n self._logger.debug(\"Getting all mk results\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT result_id, mk_ind_first, mk_ind_second, mk_ind_third, \\\nmk_ind_fourth, course, time FROM mk_ind_result ORDER BY time DESC\")\n results = cursor.fetchall()\n\n for result_id, first_id, second_id, third_id, fourth_id, course, timestamp in results:\n intermediate_results = ()\n\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\nplayer WHERE player_id = {0}\".format(first_id))\n first = cursor.fetchall()\n first_name_first, last_name_first, \\\n nickname_first = first[0]\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\nplayer WHERE player_id = {0}\".format(second_id))\n second = cursor.fetchall()\n first_name_second, last_name_second, \\\n nickname_second = second[0]\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(third_id))\n third = cursor.fetchall()\n first_name_third, last_name_third, \\\n nickname_third = third[0]\n except MySQLdb.OperationalError:\n first_name_third = ''\n last_name_third = ''\n nickname_third = ''\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(fourth_id))\n fourth = cursor.fetchall()\n first_name_fourth, last_name_fourth, \\\n nickname_fourth = fourth[0]\n except MySQLdb.OperationalError:\n first_name_fourth = ''\n last_name_fourth = ''\n nickname_fourth = ''\n\n intermediate_results = intermediate_results + \\\n (result_id, first_name_first, last_name_first,\n nickname_first, first_name_second, last_name_second,\n nickname_second, first_name_third,\n last_name_third, nickname_third, first_name_fourth,\n last_name_fourth, nickname_fourth, course,\n timestamp.strftime('%Y-%m-%d'))\n\n all_results = all_results + (intermediate_results,)\n del intermediate_results\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return all_results",
"def get_tweets(candidate):\n statuses = api.GetUserTimeline(screen_name=candidate[\"twitter\"])\n # Use NLP to try to figure out where the tweets are\n for tweet in statuses:\n if tweet.user.screen_name == candidate[\"twitter\"]:\n parsed_location = nlp(tweet.text)\n for ent in parsed_location.ents:\n if ent.label_ == \"GPE\":\n location = geocode(ent.text)\n if \"results\" in location.keys():\n if len(location[\"results\"]) >= 1:\n # Look for a country\n for address_component in location[\"results\"][0][\"address_components\"]:\n if \"country\" in address_component[\"types\"] and address_component[\"short_name\"] == \"US\":\n print(tweet)\n return {\n \"tweet\": f\"https://twitter.com/{candidate['twitter']}/status/{tweet.id}\",\n \"user_image\": tweet.user.profile_image_url_https,\n \"lat\": location[\"results\"][0][\"geometry\"][\"location\"][\"lat\"],\n \"lon\": location[\"results\"][0][\"geometry\"][\"location\"][\"lng\"],\n \"location\": ent.text\n }\n return None"
] | [
"0.6786443",
"0.6461131",
"0.6449749",
"0.6402167",
"0.6381735",
"0.6232362",
"0.619198",
"0.6130979",
"0.6125187",
"0.60322547",
"0.60284257",
"0.6014197",
"0.60123754",
"0.59707797",
"0.5924647",
"0.59239054",
"0.5910617",
"0.58979976",
"0.5829122",
"0.5818287",
"0.5793622",
"0.57875395",
"0.57839537",
"0.5778277",
"0.5760375",
"0.5757023",
"0.5757023",
"0.57568014",
"0.57428133",
"0.5728072"
] | 0.6826167 | 0 |
Obtain random samples from pred_df and call them samples that do not have neolithic sites in them | def randomSamplingFromDF(site_df, pred_df):
# sample len(site_df) number of points from pred_df
rand_sites = random.sample(range(len(pred_df)), len(site_df))
rand_sites_sorted = list(reversed(sorted(rand_sites)))
nonsite_df = pred_df.iloc[rand_sites_sorted]
less_pred_df = pred_df.drop(rand_sites_sorted, axis=0)
return (nonsite_df, less_pred_df) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def negative_sampling(data: pd.DataFrame,\n vocab: np.ndarray,\n noise_distribution: list,\n neg_sample_size: int\n ) -> pd.DataFrame:\n \n def samples_generator(word: str\n ) -> List[str]:\n while True:\n samples = np.random.choice(\n vocab, neg_sample_size, p=noise_distribution\n )\n if word not in samples:\n return samples\n \n data['negative_samples'] = data['centre_word'].apply(samples_generator)\n return data",
"def bias_population(df, ratio, seed=0):\n\n true_selector = df[PREDICTION_COLNAME].values == True\n false_selector = df[PREDICTION_COLNAME].values == False\n\n df_true = df[true_selector]\n df_false = df[false_selector]\n\n n_rows_false = int(round(ratio * len(df_true)))\n\n ##check the number of selected rows for false\n if n_rows_false > len(df_false): \n raise Warning(\"Ratio is not respected. Return max size of False instead.\")\n n_rows_selected = len(df_false)\n else:\n n_rows_selected = n_rows_false\n\n df_false = utils.sample_random(df_false, n_rows_false, seed=seed)\n\n return pd.concat([df_true, df_false])",
"def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df",
"def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)",
"def sample_negative(ratings):\r\n ## user_pool = set(ratings['userId'].unique())\r\n item_pool = set(ratings['itemId'].unique())\r\n\r\n interact_status = ratings.groupby('userId')['itemId'].apply(set).reset_index().rename(\r\n columns={'itemId': 'interacted_items'})\r\n interact_status['negative_items'] = interact_status['interacted_items'].apply(lambda x: item_pool - x)\r\n interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, 99))\r\n return interact_status[['userId', 'negative_samples']]",
"def negative_sampling(word_input, target, unigrams_table, neg_examples_size=5):\n negative_examples = []\n while len(negative_examples) is not neg_examples_size:\n neg_sample = np.random.choice(unigrams_table)\n # Make sure that the negative example is not the same as the training or as the target.\n # This will block if there only is one value within the unigram table\n if (neg_sample != word_input) and (neg_sample != target):negative_examples.append(neg_sample)\n else:pass\n return negative_examples",
"def negative_sampling(self):\n \n self.train_arr = []\n sample_list = np.random.choice(list(range(self.item_count)), size = 10 * len(self.interactions) * self.num_ns)\n \n sample_idx = 0\n for user, pos_item, _ in self.interactions:\n ns_count = 0\n \n while True:\n neg_item = sample_list[sample_idx]\n if not is_visited(self.rating_mat, user, neg_item):\n self.train_arr.append((user, pos_item, neg_item))\n sample_idx += 1\n ns_count += 1\n if ns_count == self.num_ns:\n break\n \n sample_idx += 1",
"def add_uniform_random_negatives(\n ds,\n num_nodes,\n num_negs_per_pos,\n):\n negative_sampler = RandomUniformNegativeSampler(num_nodes, num_negs_per_pos)\n return ds.map(\n negative_sampler, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE\n )",
"def get_random_predictions(shp):\n return np.random.rand(*shp)",
"def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n repeat_ = num_expected // neg_inds.numel()\n return torch.cat((neg_inds.repeat(repeat_), self.random_choice(neg_inds, num_expected % neg_inds.numel())))\n else:\n return self.random_choice(neg_inds, num_expected)",
"def get_noised_result(self, sample_state, global_state, noise_flag=True):\n #print(\"SUM Query get_noised_result\")\n def add_noise(v):\n if noise_flag:\n return v + tf.random_normal(tf.shape(v), stddev=global_state.stddev)\n else:\n return v\n\n\n return nest.map_structure(add_noise, sample_state), global_state",
"def sample_train_data(dataset ,target,data_len, resp = True ):\r\n np.random.seed(222)\r\n ixes = np.random.choice(dataset.index, data_len, replace = False)\r\n print(ixes)\r\n under_df = dataset.iloc[ixes]\r\n if resp==True:\r\n under_target = target.iloc[ixes]\r\n return under_df, under_target\r\n else:\r\n return under_df",
"def generate_non_sensitive_feat():\n def gen_gaussian(mean_in, cov_in, class_label):\n nv = multivariate_normal(mean = mean_in, cov = cov_in)\n X = nv.rvs(n_samples)\n y = np.ones(n_samples, dtype=float) * class_label\n return nv,X,y\n \n # generate 1000 data points per class label\n n_samples = 1000\n \n # draw non-sensitive features for class label 1 (positive class)\n mu1, sigma1 = [2, 2], [[5, 1], [1, 5]]\n pos_dist, X_non_sensitive_positive, y_positive = gen_gaussian(mu1, sigma1, 1)\n \n # draw non-sensitive features for class label -1 (negative class)\n mu2, sigma2 = [-2,-2], [[10, 1], [1, 3]]\n neg_dist, X_non_sensitive_negative, y_negative = gen_gaussian(mu2, sigma2, -1)\n\n # combine the positive and negative class\n X_non_sensitive = np.vstack((X_non_sensitive_positive, X_non_sensitive_negative))\n y = np.hstack((y_positive, y_negative))\n\n # randomly shuffle the data\n idx = np.arange(0, n_samples * 2)\n np.random.shuffle(idx)\n X_non_sensitive = X_non_sensitive[idx]\n y = y[idx]\n \n return X_non_sensitive, y, pos_dist, neg_dist",
"def filter_samples(df, normal_samples, damaged_samples, assembly_samples, missing_samples, damaged_thread_samples,\n loosening_samples, move_samples):\n # Count the sample types\n count_df = df.groupby(['sample_nr'])['label'].median()\n unique, counts = np.unique(count_df, return_counts=True)\n labels_count_dict = {A: B for A, B in zip(unique, counts)}\n\n # Take only the amount of samples that's needed to fill the requirement\n sampled_list = []\n for label in labels_count_dict:\n subindex = list(np.unique(df.loc[df['label'] == label].index.get_level_values(0)))\n\n if label == 0:\n to_take = normal_samples * labels_count_dict[0]\n elif label == 1:\n to_take = damaged_samples * labels_count_dict[1]\n elif label == 2:\n to_take = assembly_samples * labels_count_dict[2]\n elif label == 3:\n to_take = missing_samples * labels_count_dict[3]\n elif label == 4:\n to_take = damaged_thread_samples * labels_count_dict[4]\n elif label == 5:\n to_take = loosening_samples * labels_count_dict[5]\n elif label == 6:\n to_take = move_samples * labels_count_dict[6]\n\n sample_ids = np.random.choice(subindex, int(to_take), replace=False)\n sampled_df = df[df.index.get_level_values(0).isin(sample_ids)]\n sampled_list.append(sampled_df)\n\n taken_data = pd.concat(sampled_list, ignore_index=False).sort_values(['sample_nr', 'event'])\n\n # Reset the sample numbers\n taken_data = taken_data.reset_index()\n taken_data['sample_nr'] = (taken_data['sample_nr'] != taken_data['sample_nr'].shift(1)).astype(int).cumsum()\n taken_data['event'] = taken_data.index\n taken_data = taken_data.set_index(['sample_nr', 'event'])\n taken_data = taken_data.reset_index('event', drop=True)\n taken_data = taken_data.set_index(taken_data.groupby(level=0).cumcount().rename('event'), append=True)\n taken_data = taken_data.sort_index()\n\n return taken_data",
"def random_df(request):\n old_state = np.random.get_state()\n\n def fin():\n # tear down: reset the prng after the test to the pre-test state\n np.random.set_state(old_state)\n\n request.addfinalizer(fin)\n np.random.seed(1)\n return pd.DataFrame(\n {'some_count': np.random.randint(1, 8, 20)},\n index=range(0, 20))",
"def apply_randomization(features, label, randomize_prob):\n rnd_tok = lambda: tf.as_string(tf.random.uniform([], 0, 99999999, tf.int32))\n\n for idx in CAT_FEATURE_INDICES:\n key = feature_name(idx)\n # Ignore lint since tf.cond should evaluate lambda immediately.\n features[key] = tf.cond(tf.random.uniform([]) < randomize_prob,\n rnd_tok,\n lambda: features[key]) # pylint: disable=cell-var-from-loop\n return features, label",
"def sample_bernoulli(self, probabilities):\n return tf.nn.relu(tf.sign(probabilities - tf.random.uniform(probabilities.shape)))",
"def get_unlabelled_tweets_reannotation():\n conn = get_connection()\n c = conn.cursor()\n #res = c.execute('SELECT * FROM tweets WHERE tweets.is_about_depression is null AND tweets.username IN (SELECT username FROM users WHERE mentions_depr=1)').fetchall()\n res = c.execute('SELECT * FROM tweets WHERE tweets.is_about_depression IN (0, 1, 2) AND tweets.is_about_depression2 IS NULL ORDER BY random()').fetchall()\n conn.close()\n return np.array(res)",
"def sample_negative_answers(self, answer_list, batch_size):\n return np.random.choice(answer_list, batch_size)",
"def get_noisy_samples(X, std):\n # std = STDEVS[subset][FLAGS.dataset][FLAGS.attack]\n X_noisy = np.clip(X + rand_gen.normal(loc=0.0, scale=std, size=X.shape), 0, 1)\n return X_noisy",
"def test_no_duplicates_and_positives_in_negative_sample(self):\n model = PoincareModel(self.data_large, negative=3)\n positive_nodes = model.node_relations[0] # Positive nodes for node 0\n num_samples = 100 # Repeat experiment multiple times\n for i in range(num_samples):\n negatives = model._sample_negatives(0)\n self.assertFalse(positive_nodes & set(negatives))\n self.assertEqual(len(negatives), len(set(negatives)))",
"def reset_rf_samples():\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n_samples))",
"def das(input_df, counts_var, noise_parameter):\n df = input_df.copy(deep=True)\n \n n = df.shape[0]\n \n # add laplace noise \n noise = np.random.laplace(loc=0, scale=noise_parameter, size=n)\n df['noisy_counts'] = df[counts_var] + noise\n \n # post processing\n df['nonneg_counts'] = post_proc(df.noisy_counts, df.pop_count.sum())\n df.nonneg_counts = np.round(df.nonneg_counts)\n \n return df",
"def generate_fake_samples(generator, latent_dim, n):\n\n # Sample points from latent space\n x_input = generate_latent_points(latent_dim, n)\n\n # Forward pass\n X = generator.predict(x_input)\n\n # Generate labels for the discriminator\n y = np.zeros((n, 1))\n\n return X, y",
"def prune_train_dataset(self, all_labels, train_idxs):\n\n # -- prune samples if necessary to have equal sized splits\n neg_idxs = [idx for idx in train_idxs if all_labels[idx] == self.NEG_LABEL]\n pos_idxs = [idx for idx in train_idxs if all_labels[idx] == self.POS_LABEL]\n n_samples = min(len(neg_idxs), len(pos_idxs))\n\n rstate = np.random.RandomState(7)\n rand_idxs_neg = rstate.permutation(neg_idxs)\n rand_idxs_pos = rstate.permutation(pos_idxs)\n\n neg_idxs = rand_idxs_neg[:n_samples]\n pos_idxs = rand_idxs_pos[:n_samples]\n train_idxs = np.concatenate((pos_idxs, neg_idxs))\n\n return train_idxs",
"def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set",
"def get_untrained_rand_for(bool_var, ml_models, training_feature, test_feature):\n if bool_var:\n rand_for_name = 'random_forest' + get_suffix_ml_model()\n rand_for = RandomForestRegressor()\n multi_rand_for = MultiOutputRegressor(rand_for)\n rand_for_train_feat = list(training_feature)\n rand_for_test_feat = list(test_feature)\n ml_models.append([rand_for_name, multi_rand_for, rand_for_train_feat,\n rand_for_test_feat])",
"def sample_masks(num_samples: int,\n num_features: int,\n seed: Optional[int] = None):\n rng = np.random.RandomState(seed)\n positions = np.tile(np.arange(num_features), (num_samples, 1))\n permutation_fn = np.vectorize(rng.permutation, signature='(n)->(n)')\n permutations = permutation_fn(positions) # A shuffled range of positions.\n num_disabled_features = rng.randint(1, num_features + 1, (num_samples, 1))\n # For num_disabled_features[i] == 2, this will set indices 0 and 1 to False.\n return permutations >= num_disabled_features",
"def find_nonexceed(trainy, train_tree_node_ID, pred_tree_node_ID, thres):\n \n npred = pred_tree_node_ID.shape[0]\n out = np.zeros((npred, thres.shape[0]))*np.nan\n for i in prange(pred_tree_node_ID.shape[0]):\n for j in prange(thres.shape[0]):\n idxs = np.where(train_tree_node_ID == pred_tree_node_ID[i, :])[0]\n sample = trainy[idxs]\n out[i, j] = (sample < thres[j]).sum() / float(sample.shape[0])\n return out",
"def train_test_split_drifters():\n df = process_raw_df()\n ids = np.unique(df.index.get_level_values(level=0))\n rng = np.random.default_rng(seed=1)\n train_ids = np.sort(rng.choice(ids, size=len(ids)//2, replace=False))\n test_ids = np.sort(np.setdiff1d(ids, train_ids))\n train_df = df[df.index.get_level_values(level=0).isin(train_ids)].copy()\n test_df = df[df.index.get_level_values(level=0).isin(test_ids)].copy()\n return train_df, test_df"
] | [
"0.5981191",
"0.5967193",
"0.58507997",
"0.58230144",
"0.57899874",
"0.57562643",
"0.5723299",
"0.5722238",
"0.5689162",
"0.5658051",
"0.5632118",
"0.56263244",
"0.5613833",
"0.56116945",
"0.5548289",
"0.5542219",
"0.5518756",
"0.55116874",
"0.5510275",
"0.55085206",
"0.54868424",
"0.5478621",
"0.5466586",
"0.54659516",
"0.5457779",
"0.5456892",
"0.5455284",
"0.5425222",
"0.5420852",
"0.54069585"
] | 0.7193351 | 0 |
Raise the quantitative term's values to an integer power, i.e. polynomial. | def __pow__(self, power):
    try:
        power = float(power)
    except:
        raise ValueError('expecting a float')
    if power == int(power):
        name = '%s^%d' % (self.name, int(power))
    else:
        name = '%s^%0.2f' % (self.name, power)
    value = Quantitative(name, func=self, transform=lambda x: np.power(x, power))
    value.power = power
    value.namespace = self.namespace
    return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)",
"def __pow__(self, exponent):\n return Quantity(pow(self._value, exponent), pow(self.unit, exponent))",
"def __pow__(self, power):\n\n try:\n power = float(power)\n except:\n raise ValueError, 'expecting a float'\n\n if power == int(power):\n name = '%s^%d' % (self.name, int(power))\n else:\n name = '%s^%0.2f' % (self.name, power)\n\n value = quantitative(name, func=self, transform=lambda x: N.power(x, power))\n value.power = power\n value.namespace = self.namespace\n return value",
"def __pow__(self, power):\n if power == 1:\n return self\n elif power == 0:\n return Polynomial(1)\n\n self.polynomials = {key: val for key, val in self.polynomials.items() if val != 0}\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n\n attributes = {}\n\n # Using Binomial theorem\n n = 0\n m = power\n use_n = True\n\n for k in range(0, power + 1):\n result = self.calculate_combinatorial_number(power, k)\n\n for index, polynomial in self.polynomials.items():\n if use_n:\n result *= pow(polynomial, (power - n))\n n += 1\n use_n = False\n else:\n result *= pow(polynomial, (power + m))\n m -= 1\n use_n = True\n\n attributes[\"x\" + str(n - 1)] = result\n\n return Polynomial(**attributes)",
"def power(base, exponent):\n return base ** exponent",
"def __pow__( self, power ):\r\n\t\tif ( power > 0 ):\r\n\t\t\treturn fraction( self.numerator ** power, self.denominator ** power )\r\n\t\tif ( power < 0 ):\r\n\t\t\treturn fraction( self.denominator ** abs( power ), self.numerator ** abs( power ) )\r\n\t\treturn 1",
"def power(x): \r\n return x(1)",
"def __pow__(self, power):\n value = power * (self.val) ** (power - 1)\n der = {k: value * v for k, v in self.der.items()}\n return AutoDiffReverse(self.val ** power, None, der)",
"def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p",
"def __pow__(self, ???):",
"def __rpow__(self, ???):",
"def power(num, exponent):\n return num ** exponent",
"def __pow__(self, exponent):\n return type(self)(self.parent(),\n self._simplify(pow(self._express, exponent)))",
"def signedpower(x: pd.Series, a) -> pd.Series:\n return x.pow(a)",
"def power(x, y):\n return x ** y",
"def __rpow__(self, power):\n value = power ** self.val\n der = {k: value * v * np.log(power) for k, v in self.der.items()}\n return AutoDiffReverse(value, None, der)",
"def _pow_(self, n):\n assert n > 0\n return generic_power(self, n)",
"def __rpow__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Pow.apply(other, self)",
"def convert_pow(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = block.var(op.output(\"Out\")[0]).dtype\n dtype = _convert_dtype_value(dtype)\n factor = op.attr(\"factor\")\n factor = _expr.const(factor, dtype=dtype)\n out = _op.power(x, factor)\n g.add_node(op.output(\"Out\")[0], out)",
"def easy_eval(self, x):\n answer = 0\n for i in range(len(self.coefficients)):\n coef = self.coefficients[i]\n degree = len(self.coefficients) - (i+1)\n answer += x ** degree * coef\n return answer",
"def power(self, a):\n if self.getDegree() == -1:\n return Polynomial([])\n \n if a == 0:\n return Polynomial([1])\n \n root = self.power(a/2)\n if a % 2 == 0:\n return root.multiply(root)\n \n else:\n return root.multiply(root).multiply(self)",
"def __pow__(self, exp):\n # We have (p o Q)^e = p^e o Q\n coeff = (self._unit_simplex_polynomial**exp).coeff\n if isinstance(exp, numbers.Integral):\n r = self.degree() * exp\n else:\n r = 0\n for i in range(len(exp)):\n r += self[i].degree() * exp[i]\n return PolynomialBernsteinSimplex(coeff, self.vertices, r)",
"def __pow__(self, other):\n return MyCustomNumber(self.value ** other.value)",
"def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )",
"def power(self, value: int):\n self._power = value",
"def __pow__(self, n):\n if not isinstance(n, Integer):\n try:\n n = Integer(n)\n except TypeError:\n raise TypeError(\"Exponent n (= %s) must be an integer.\" % n)\n if n == 1:\n return self\n if n == 0:\n return Factorization([])\n if self.is_commutative():\n return Factorization([(p, n*e) for p, e in self], unit=self.unit()**n, cr=self.__cr, sort=False, simplify=False)\n from sage.groups.generic import power\n return power(self, n, Factorization([]))",
"def power(x, y):\n return x^y",
"def __pow__(self,*args):\r\n pass",
"def add_polynomial_features(x, power):\n if type(power) is int and type(x) is np.ndarray:\n return np.concatenate([x**i for i in range(1, power+1)], axis=1)\n return None",
"def Incrpower(self, increment):\n self.power += increment"
] | [
"0.6837971",
"0.67961013",
"0.6763757",
"0.66937846",
"0.6623749",
"0.66045123",
"0.660015",
"0.65881026",
"0.65583694",
"0.65102786",
"0.6501719",
"0.64707184",
"0.6433635",
"0.64150035",
"0.6349218",
"0.63410777",
"0.63376534",
"0.6324819",
"0.63147056",
"0.6307297",
"0.629967",
"0.6297986",
"0.627157",
"0.62696695",
"0.62677884",
"0.62429214",
"0.6232209",
"0.6224958",
"0.62199056",
"0.6218941"
] | 0.68291706 | 1 |
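A minimal standalone sketch of the element-wise power transform used in the __pow__ record above; the sample array, exponent, and base name are illustrative assumptions, not part of any dataset row:

import numpy as np

# Mirrors the record's transform: raise the term's values to a fixed power
# and derive the new label from whether the exponent is an integer.
x = np.array([1.0, 2.0, 3.0])   # made-up term values
power = 2.0
if power == int(power):
    name = 'x^%d' % int(power)
else:
    name = 'x^%0.2f' % power
print(name, np.power(x, power))   # x^2 [1. 4. 9.]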
Factor is initialized with keys, representing all valid levels of the factor. | def __init__(self, termname, keys, ordinal=False):
    if not ordinal:
        self.keys = list(set(keys))
        self.keys.sort()
    else:
        self.keys = keys
        if len(set(keys)) != len(list(keys)):
            raise ValueError('keys for ordinal Factor should be unique, in increasing order')
    self._name = termname
    self.termname = termname
    self.ordinal = ordinal
    if self.ordinal:
        name = self.termname
    else:
        name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]
    Term.__init__(self, name, termname=self.termname, func=self.get_columns) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, poss_keys, poss_vals):\n self.Poss_Tree = {x: list(POSS_DIGITS) for x in poss_keys}\n self.place = len(str(poss_keys[0]))",
"def factors(self, name):\n val_loc = self._get_value_loc(name)\n factors = OrderedDict()\n for val in val_loc:\n f = val.get('factor', None)\n if f: factors[val['value']] = f\n return factors",
"def form_factors(self, scores, keys):\n for key in keys:\n grams = key.split('-')\n if np.all([ len(scores[grams[j]]) > 0 for j in range(len(grams)) ]):\n scores[key] = scores[grams[0]].index_select(1, self.idx_sro_to[grams[0]])\n for j in range(1, len(grams)):\n scores[key] = scores[key] * scores[grams[j]].index_select(1, self.idx_sro_to[grams[j]])\n\n return scores",
"def sorted_factors(self):\n if not hasattr(self, \"_sorted_factors\"):\n self._sorted_factors = {}\n for expr in self.graded_dict:\n for order in self.graded_dict[expr]:\n for factors in self.graded_dict[expr][order]:\n for factor in factors:\n if is_factor(factor) and factor.name not in self._sorted_factors:\n self._sorted_factors[factor.name] = Factor(factor.name, sorted(factor.levels))\n return self._sorted_factors",
"def get_all_factors(self, verbose = 1):\n logDict = self.cal_all_factors(verbose)\n return((self.generatedFactorDict, logDict))",
"def factor_select_initial(self):\n indicators = set()\n factor_levels = self.words.keys()\n for level, words in self.words.items():\n other_levels = set(factor_levels) - {level}\n w1 = set(words) # words associated with this factor level\n w2 = set() # words associated with the other factor levels\n for other_level in other_levels:\n w2.update(self.words.get(other_level))\n specific_words = w1 - w2\n indicators.update(specific_words)\n self.indicator_words = list(indicators)",
"def __init__(self, key):\n self.key = [int_mapping(k) for k in key]",
"def init(cls, levels: List[str]) -> List[Level]:\n return [cls(lvl, val) for val, lvl in enumerate(levels)]",
"def get_factors():",
"def return_factorized_dict(ls):\r\n factos = pd.unique(pd.factorize(ls)[0])\r\n categs = pd.unique(pd.factorize(ls)[1])\r\n if -1 in factos:\r\n categs = np.insert(categs,np.where(factos==-1)[0][0],np.nan)\r\n return dict(zip(categs,factos))",
"def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))",
"def factors(self):\n self.assert_sampled()\n return self._factors",
"def __init__(self, mapping, items=None):\n self.items = items if items else []\n self.mapping = mapping\n self.keys = []\n for key in mapping.keys():\n self.keys.append(key)",
"def fullfact(levels):\n n = len(levels) # number of factors\n nb_lines = np.prod(levels) # number of trial conditions\n H = np.zeros((nb_lines, n))\n \n level_repeat = 1\n range_repeat = np.prod(levels)\n for i in range(n):\n range_repeat //= levels[i]\n lvl = []\n for j in range(levels[i]):\n lvl += [j]*level_repeat\n rng = lvl*range_repeat\n level_repeat *= levels[i]\n H[:, i] = rng\n \n return H",
"def factors(self):\n X = [Var(i,2) for i in range(self.nvar)]\n factors = [Factor([],np.exp(self.c))] \n # TODO: exclude if zero? or exclude if inf/-inf, or if in \"assigned\", or?\n factors = factors + [Factor([X[i]],[-th,th]).exp() for i,th in enumerate(self.h) if self.dims[i]>1]\n L = coo(self.L)\n factors = factors + [Factor([X[i],X[j]],[[th,-th],[-th,th]]).exp() for i,j,th in zip(L.row,L.col,L.data) if i<j]\n return factors\n # TODO: should we exponentiate if isLog not True? ",
"def set_keys(cls, val):\n keys = []\n check = lambda dict, key, default_val: dict[key] if key in dict.keys() else default_val\n for i in range(val['row_max']):\n keys.append([check(val, '{}_{}'.format(x, y), cls.NOTAVAILABLE) \\\n for x, y in list(product([i], list(range(val['col_max']))))])\n if not hasattr(cls, '_keys'):\n cls._keys = {val['name'] : keys}\n else:\n cls._keys[val['name']] = keys",
"def __init__(self, level):\n self.level = level\n self.my_map = {}\n self.my_level = []\n self.my_grid = []",
"def get_factors(self, triples):\n pass",
"def __init__(self, factor=None, op=None, factors=None):\n if factors is not None:\n self.factors = factors\n else:\n self.factors = factor\n self.op = op",
"def levels(self):\n raise NotImplementedError(\"Subclasses sohuld implement levels\")",
"def get_factors(self, factor_encoding=\"one-hot\"):\r\n\r\n if not self.levels:\r\n df = pd.DataFrame(0, index=range(self.number_elements), columns=[self.type_value])\r\n df.loc[list(self.direct_indices.keys()), [self.type_value]] = 1\r\n return df\r\n\r\n levels = list(self.levels.keys())\r\n levels_list = [f\"{self.type_value}.{level}\" for level in levels]\r\n factors = pd.DataFrame(0, index=range(self.number_elements), columns=levels_list)\r\n for index, level in enumerate(levels):\r\n index_keys = list(self.levels[level].keys())\r\n factors.loc[index_keys, [levels_list[index]]] = 1\r\n if factor_encoding == \"one-hot\":\r\n return factors\r\n sum_factors = factors.sum(axis=1)\r\n if factor_encoding == \"categorical\" and sum_factors.max() > 1:\r\n raise HedFileError(\"MultipleFactorSameEvent\",\r\n f\"{self.type_value} has multiple occurrences at index {sum_factors.idxmax()}\", \"\")\r\n elif factor_encoding == \"categorical\":\r\n return self._one_hot_to_categorical(factors, levels)\r\n else:\r\n raise ValueError(\"BadFactorEncoding\",\r\n f\"{factor_encoding} is not in the allowed encodings: {str(self.ALLOWED_ENCODINGS)}\")",
"def __init__(self, keys, values):\n self.keys = keys\n self.values = values",
"def __init__(self):\n # map of (key, index in list)\n self.map = dict()\n \n # list of keys for random selection\n self.keys = []",
"def factor_list(f):\n coeff, factors = dmp_factor_list(f.rep, f.lev, f.dom)\n return coeff, [ (f.per(g), k) for g, k in factors ]",
"def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord",
"def set_factors(self, name, factormap, safe=False):\n e = False\n if name in self.masks():\n if self._get_subtype(name) != 'single':\n e = True\n else:\n if self._get_type(name) != 'single':\n e = True\n if e:\n if safe:\n err = \"Can only set factors to 'single' type categorical variables!\"\n raise TypeError(err)\n else:\n return None\n vals = self.codes(name)\n facts = list(factormap.keys())\n val_loc = self._get_value_loc(name)\n if not all(f in vals for f in facts):\n err = 'At least one factor is mapped to a code that does not exist '\n err += 'in the values object of \"{}\"!'\n raise ValueError(err.format(name))\n for value in val_loc:\n if value['value'] in factormap:\n value['factor'] = factormap[value['value']]\n else:\n value['factor'] = None\n return None",
"def __init__(self, degree):\r\n self.root = Node([], [])\r\n self.min_num_keys = degree - 1 \r\n self.max_num_keys = 2*degree - 1",
"def __load_factors(self):\n\t\tin_path = self.dir_base / self[\"files\"][\"factors\"]\n\t\tlog.info(\"Loading factors from %s\" % in_path)\n\t\t(W,H,doc_ids,terms) = load_nmf_factors(in_path)\n\t\tcolumns = np.arange(1, self[\"k\"]+1, dtype=int)\n\t\tself.document_associations = pd.DataFrame(W, index = doc_ids, columns = columns)\n\t\tself.term_associations = pd.DataFrame(np.transpose(H), index = terms, columns = columns)",
"def __init__(self):\n super().__init__()\n self.mapping = {}\n self.values = set()\n self.type = 'Categorical'\n self.dimensionality = 1\n self.distType = 'Discrete'\n self.isFloat = False",
"def __init__(self):\n self.d = dict()\n self.arr = [set() for i in range(0,26)]"
] | [
"0.5926843",
"0.5815458",
"0.57676935",
"0.57458484",
"0.5718854",
"0.5558079",
"0.5531594",
"0.55071265",
"0.5450327",
"0.54118156",
"0.5399657",
"0.5393091",
"0.53577244",
"0.5311729",
"0.5306964",
"0.5304899",
"0.52516556",
"0.524584",
"0.52402365",
"0.5225359",
"0.5224217",
"0.5186623",
"0.5177691",
"0.5161182",
"0.51357293",
"0.5109698",
"0.51013625",
"0.5099268",
"0.5096278",
"0.50877255"
] | 0.5946812 | 0 |
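A small sketch of how the Factor record above derives its per-level indicator names; the term name 'group' and the keys are made-up values for illustration only:

# Non-ordinal case: de-duplicate and sort the keys, then build one
# '(termname==key)' column name per level.
keys = sorted(set(['b', 'a', 'c', 'b']))
names = ['(%s==%s)' % ('group', str(key)) for key in keys]
print(names)   # ['(group==a)', '(group==b)', '(group==c)']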
Verify that all values correspond to valid keys in self. | def verify(self, values):
    s = set(values)
    if not s.issubset(self.keys):
        raise ValueError('unknown keys in values') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError, 'unknown keys in values'",
"def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. \n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid",
"def validate(self):\n if not self.keys:\n raise ValueError(\"Virtual host missing keys\")\n for i in self.keys:\n i.validate()",
"def check_keys(self):",
"def assert_keys_have_values(self, caller, *keys):\n for key in keys:\n self.assert_key_has_value(key, caller)",
"def check_keys(set_name, keys, value, expect_key):\n\trecords = lib.read_all_records(set_name)\n\n\tfor key in keys:\n\t\tdigest = lib.get_key_digest(set_name, key)\n\t\tmeta_key, meta_ttl, record = records[str(digest).encode().hex()]\n\t\tlib.validate_record(key, record, [\"value\"], [value])\n\t\tlib.validate_meta(key, meta_key, meta_ttl, expect_key)",
"def has_valid_values(self):\n for element, value in self.items():\n if not (0 <= value <= 1):\n return False\n return True",
"def invalid(values):\n # for box in values.keys():\n # if len(values[box]) == 0:\n # return True\n # return False\n return len([box for box in values.keys() if len(values[box]) == 0]) != 0",
"def _has_all_keys_from(d, valid_d):\n\n for k, v in valid_d.items():\n if k not in d:\n return False\n return True",
"def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")",
"def check_invalid_items(**kwargs: Tuple[T, Iterable[T]]):\n for key, (value, possible) in kwargs.items():\n possible = set(possible)\n if value not in possible:\n raise ValueError(f\"{key}={value} is not in: {possible}\")",
"def checkKeysCorrection(self, input, valid_keys):\n for key in input.keys():\n if key not in valid_keys:\n print(\"[ERROR] Key '%s' does not exist.\" % key)\n return False\n # check whether all result keys are included in valid keys\n if key == \"result\" and not self.checkResultsCorrection(result=input[\"result\"], valid_keys=valid_keys):\n return False\n return True",
"def validate(self, key, val):\n return True",
"def validate(self, key, val):\n return True",
"def _check_dict_validity(self, incoming_dict: dict):\n # check key error\n # check value error\n\n for key in incoming_dict.keys():\n # check invalid key.\n if key not in self.all_valid_keys:\n raise IncomingRowerDictInvalidKeyError(\"Incoming rower data dict has unknown key, data rejected. \"\n + key)\n\n # check value if key is valid.\n value = incoming_dict.get(key, None)\n if value is None:\n if key in self.mandatory_keys:\n # Mandatory keys should have value.\n raise IncomingRowerDictInvalidKeyError(\"Incoming rower data dict has wrong key, data rejected. \"\n + key)\n else:\n # Got the value, check the value.\n if key in self.integer_keys:\n # integer keys should be integer\n if int(value) != value:\n raise IncomingRowerDictInvalidValueError(\"Incoming rower data dict has wrong key, \"\n \"data rejected. \" + key + \":\" + str(value))\n if key not in self.negative_keys:\n # non-negative keys should be non-negative\n if value < 0:\n raise IncomingRowerDictInvalidValueError(\"Incoming rower data dict has wrong key, \"\n \"data rejected. \" + key + \":\" + str(value))\n\n # make sure mandatory keys exists.\n for m_key in self.mandatory_keys:\n if m_key not in incoming_dict.keys():\n raise IncomingRowerDictMissingKeyError('Incoming rower data dict has insufficient keys, '\n 'mandatory keys not found. '+m_key)",
"def check_solved(self, values):\n if values == None: #Forward_checking determines that values state is invalid -> set false, check if false here.\n return False\n\n for box in values.keys():\n if len(values[box]) != 1:\n return False\n return True",
"def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)",
"def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)",
"def check_expected_values(self, expected_values, scraped_values):\n\n\t\tfor key in expected_values:\n\t\t\tself.assertIn(key, scraped_values)\n\t\t\tself.assertEqual(expected_values[key], scraped_values[key])",
"def check_all_have_keys(dict_list, keys, name):\n if len(dict_list) == 0:\n return\n keys = set(keys)\n for dct in dict_list:\n if not keys.issubset(dct.keys()):\n raise DGLError('Expect all {} to include keys {}, but got {}.'.format(\n name, keys, dct.keys()))",
"def validate(self):\n for key, value in self._configurations.items():\n value.validate()",
"def solved(values):\n # for box in values.keys():\n # if len(values[box]) != 1:\n # return False\n # return True\n return len([box for box in values.keys() if len(values[box]) != 1]) == 0",
"def _validate(self):\n for p in self.parameters:\n #Check for missing required parameters:\n if p.is_required and not(p.is_set):\n raise ValueError(\"Parameter %s is not set.\" \\\n % p.names[-1])\n #Also repeat the parameter validation here, just in case?",
"def _validate(self):\n for name, prop in self._properties.iteritems():\n value = getattr(self, name, None)\n prop._do_validate(value)",
"def __check_key_validity(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple\")\n if len(key) != 2:\n raise ValueError(\"key must be of length two\")\n if not (isinstance(key[0], int) and isinstance(key[1], int)):\n raise TypeError(\"elements of key must be integers\")\n if not ((0 <= key[0] < self.m) and (0 <= key[1] < self.n)):\n raise exc.OutOfBoundsError(\"key is out of bounds\")",
"def validate(self, data: Dict):\n for key in self.__dict__.keys():\n if not key.startswith('__') and key != 'id':\n if data[key] == '' or data[key] is None:\n raise ValidationError(\n message=f'{key} should not be \"{data[key]}\"'\n )",
"def test_validation(self):\n\n with self.assertRaises(ValidationError):\n Dictionary().validate([1,2,3])\n\n with self.assertRaises(ValidationError):\n Dictionary().validate('stringy')\n\n with self.assertRaises(ValidationError):\n Dictionary().validate(1)",
"def test_validation(self):\r\n\r\n with self.assertRaises(ValidationError):\r\n Dictionary().validate([1,2,3])\r\n\r\n with self.assertRaises(ValidationError):\r\n Dictionary().validate('stringy')\r\n\r\n with self.assertRaises(ValidationError):\r\n Dictionary().validate(1)",
"def validate(self):\n if not self.key or not self.certificates:\n raise ValueError(\"Key or certificate missing in Keypair\")",
"def validate(self, keypoints):\n for k in keypoints:\n self.validate_keypoints(k)"
] | [
"0.79676634",
"0.7523778",
"0.7267099",
"0.70457435",
"0.70228183",
"0.68855286",
"0.68023276",
"0.6781746",
"0.6705627",
"0.65737057",
"0.6545705",
"0.6543366",
"0.6543026",
"0.6543026",
"0.65038353",
"0.6500311",
"0.64711374",
"0.64711374",
"0.64206946",
"0.6416924",
"0.63834405",
"0.6353777",
"0.63085717",
"0.62931544",
"0.6279882",
"0.6271905",
"0.6259496",
"0.6254058",
"0.6245448",
"0.6236198"
] | 0.79266214 | 1 |
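A standalone illustration of the subset check performed by verify() in the record above, using made-up keys and values rather than a Factor instance:

# verify() rejects any value that is not one of the factor's known levels.
keys = ['a', 'b', 'c']
values = ['a', 'c', 'a', 'b']
if not set(values).issubset(keys):
    raise ValueError('unknown keys in values')
print('all values map to known levels')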
Formula(self) + Formula(other). When adding 'intercept' to a factor, this just returns Formula(self, namespace=self.namespace). | def __add__(self, other):
    if isinstance(other, Term) and other.name is 'intercept':
        return Formula(self, namespace=self.namespace)
    else:
        return Term.__add__(self, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add__(self, other):\n \n if other.name is 'intercept':\n return formula(self, namespace=self.namespace)\n else:\n return term.__add__(self, other)",
"def __add__(self, other):\n\n other = Formula(other)\n terms = self.terms + other.terms\n pieces = sorted([(term.name, term) for term in terms])\n terms = [piece[1] for piece in pieces]\n f = Formula(terms)\n if _namespace_equal(self.namespace, other.namespace):\n f.namespace = self.namespace\n return f",
"def __add__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)",
"def _add(self, other):\n if isinstance(other, SeqFormula):\n form1, v1 = self.formula, self.variables[0]\n form2, v2 = other.formula, other.variables[0]\n formula = form1 + form2.subs(v2, v1)\n start, stop = self._intersect_interval(other)\n return SeqFormula(formula, (v1, start, stop))",
"def __add__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() + other",
"def __add__(self, other):\n attributes = {}\n\n for index, polynomial in self.polynomials.items():\n attributes[\"x\" + str(index)] = polynomial\n\n for index, polynomial in other.polynomials.items():\n if index in self.polynomials:\n attributes[\"x\" + str(index)] = self.polynomials[index] + polynomial\n else:\n attributes[\"x\" + str(index)] = polynomial\n\n return Polynomial(**attributes)",
"def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)",
"def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)",
"def __add__(self, other):\n result = self.__class__()\n result._terms.extend(self)\n\n if isinstance(other, self._term_class):\n if any(\n isinstance(other, term.__class__) and other.name == term.name\n for term in self._terms\n ):\n msg = (\n f\"There is already a term of type {other.__class__} with name \"\n f\"'{other.name}' in {self.__class__}. Please provide a different \"\n f\"name for {other}.\"\n )\n raise ValueError(msg)\n else:\n result._terms.append(other)\n elif isinstance(other, self.__class__):\n for term in other:\n result += term\n else:\n msg = f\"Unsupported operand type(s) for +: {type(self)} and {type(other)}.\"\n raise TypeError(msg)\n\n return result",
"def __add__(self, other):\n if (len(self.arg) < len(other.arg)):\n summ = Polynomial(other.arg)\n i = len(self.arg) - 1\n for x in self.arg:\n summ.arg[i] = self.arg[i] + summ.arg[i]\n i = i - 1\n else:\n summ = Polynomial(self.arg)\n i = len(other.arg) - 1\n for x in other.arg:\n summ.arg[i] = other.arg[i] + summ.arg[i]\n i = i - 1\n return summ",
"def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})",
"def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)",
"def __iadd__(self, other):\n self.MergeWith(other)\n return self",
"def __iadd__(self, other):\n self.x += other.x\n self.y += other.y\n return self",
"def __add__(self, other):\n base = deepcopy(self)\n base += other # (+=) == __iadd__\n return base",
"def __radd__(self, other):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__add__(other)",
"def __iadd__(self, other):\n\n return self + other",
"def __iadd__(self, other):\n self.center += other.center\n self.radius += other.radius\n return self",
"def __radd__(self, other):\n\n return self.__add__(other)",
"def __add__(self, other):\n return self.add(other)",
"def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def __radd__(self, other):\n return self.runtime.add(self, other)",
"def __radd__(self, other):\n return self + other",
"def __radd__(self, other):\n return self + other",
"def __radd__(self, other):\n return self.__add__(other)",
"def __radd__(self, other):\n return self.__add__(other)",
"def __radd__(self, other):\n return self.__add__(other)",
"def __radd__(self, other):\n return self.__add__(other)",
"def __radd__(self, other):\n return self.__add__(other)",
"def __radd__(self, other):\n return self.__add__(other)"
] | [
"0.892706",
"0.7850612",
"0.7582543",
"0.7247086",
"0.6729214",
"0.6699146",
"0.6654158",
"0.6565891",
"0.6484925",
"0.64614576",
"0.6342464",
"0.63215107",
"0.6316699",
"0.62675995",
"0.62595725",
"0.62517965",
"0.6236327",
"0.6232033",
"0.62143403",
"0.61953527",
"0.6192967",
"0.6191404",
"0.6180888",
"0.6180888",
"0.61804146",
"0.61804146",
"0.61804146",
"0.61804146",
"0.61804146",
"0.61804146"
] | 0.90075254 | 0 |
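A brief numeric sketch of why the __add__ record above can return Formula(self) unchanged when 'intercept' is added to a factor: a full set of level indicators already spans the constant column, so appending a column of ones adds no rank. The toy matrix below is an assumption for illustration:

import numpy as np

dummies = np.array([[1., 0.], [0., 1.], [1., 0.], [0., 1.]])   # (g==a), (g==b)
with_intercept = np.column_stack([np.ones(4), dummies])
print(np.linalg.matrix_rank(dummies), np.linalg.matrix_rank(with_intercept))   # 2 2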
Return the 'main effect' columns of a factor, choosing an optional reference key. The reference key can be one of the keys of the Factor, or an integer, representing which column to remove. It defaults to 0. | def main_effect(self, reference=None):
    names = self.names()
    if reference is None:
        reference = 0
    else:
        try:
            reference = self.keys.index(reference)
        except ValueError:
            reference = int(reference)
    def maineffect_func(value, reference=reference):
        rvalue = []
        keep = lrange(value.shape[0])
        keep.pop(reference)
        for i in range(len(keep)):
            rvalue.append(value[keep[i]] - value[reference])
        return np.array(rvalue)
    keep = lrange(len(self.names()))
    keep.pop(reference)
    __names = self.names()
    _names = ['%s-%s' % (__names[keep[i]], __names[reference]) for i in range(len(keep))]
    value = Quantitative(_names, func=self,
                         termname='%s:maineffect' % self.termname,
                         transform=maineffect_func)
    value.namespace = self.namespace
    return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main_effect(self, reference=None):\n\n if reference is None:\n reference = 0\n\n names = self.names()\n\n def maineffect_func(value, reference=reference):\n rvalue = []\n keep = range(value.shape[0])\n keep.pop(reference)\n for i in range(len(keep)):\n rvalue.append(value[keep[i]] - value[reference])\n return N.array(rvalue)\n \n keep = range(len(self.names()))\n keep.pop(reference)\n __names = self.names()\n _names = ['%s-%s' % (__names[keep[i]], __names[reference]) for i in range(len(keep))]\n value = quantitative(_names, func=self, \n termname='%s:maineffect' % self.termname,\n transform=maineffect_func)\n value.namespace = self.namespace\n return value",
"def removeFixedEffect(self, index=None):\n if self._n_terms==0:\n pass\n if index is None or index==(self._n_terms-1):\n\n self._n_terms-=1\n F = self._F.pop() #= self.F[:-1]\n A = self._A.pop() #= self.A[:-1]\n self._A_identity.pop() #= self.A_identity[:-1]\n REML_term = self._REML_term.pop()# = self.REML_term[:-1]\n self._B.pop()# = self.B[:-1]\n self._n_fixed_effs-=F.shape[1]*A.shape[0]\n if REML_term:\n self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]\n\n pass\n elif index >= self.n_terms:\n raise Exception(\"index exceeds max index of terms\")\n else:\n raise NotImplementedError(\"currently only last term can be removed\")\n pass\n self._rebuild_indicator()\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')",
"def column_fast(self, key):\n return self._matrix[:, self.dataframe.columns.get_loc(key)]",
"def get_cols_drop():",
"def _full_ablated(tt_factors, fac_num_to_remove):\r\n\r\n # turn factors into tuple, then remove factor from each mode's matrix\r\n factors = tuple(tt_factors)\r\n factors = tuple([np.delete(f, fac_num_to_remove, axis=1) for f in factors])\r\n\r\n # create a KTensor from tensortools to speed up some math\r\n kt = KTensor(factors)\r\n\r\n # create full tensor\r\n return kt.full()",
"def _drop_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n res = res.select(op.columns_produced())\n return res",
"def clearFixedEffect(self):\n self._A = []\n self._F = []\n self._B = []\n self._A_identity = []\n self._REML_term = []\n self._n_terms = 0\n self._n_fixed_effs = 0\n self._n_fixed_effs_REML = 0\n self.indicator = {'term':np.array([]),\n 'row':np.array([]),\n 'col':np.array([])}\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')",
"def dependent_cols():\n\n return ...",
"def remove_extra_index_from_context_actions(context_action_dict):\n keys_to_keep = {'initial_value', 'replacement_value'}\n for question in context_action_dict:\n for obj_dct in context_action_dict[question]:\n total_keys = set(obj_dct.keys())\n keys_to_remove = total_keys - keys_to_keep\n for key in keys_to_remove:\n obj_dct.pop(key)\n return context_action_dict",
"def get_cols_dummy():",
"def clear_factors(self, name):\n val_loc = self._get_value_loc(name)\n for value in val_loc:\n value['factor'] = None\n return None",
"def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)",
"def prune_(self):\n idx = self.factor_lams() > 0\n self.factors = [f[:, idx] for f in self.factors]\n self.rank = np.sum(idx)",
"def select_columns(data):\n\n #Channels to be excluded\n features_delete = np.arange(46, 50)\n features_delete = np.concatenate([features_delete, np.arange(59, 63)])\n features_delete = np.concatenate([features_delete, np.arange(72, 76)])\n features_delete = np.concatenate([features_delete, np.arange(85, 89)])\n features_delete = np.concatenate([features_delete, np.arange(98, 102)])\n features_delete = np.concatenate([features_delete, np.arange(134, 243)])\n features_delete = np.concatenate([features_delete, np.arange(244, 249)])\n return np.delete(data, features_delete, 1)",
"def inv_mix_columns(state):\n mix_columns(state, m=inv_ax)",
"def _remove_from_index_operations(self, which, transforms):\n if len(transforms) == 0:\n transforms = which.properties()\n removed = np.empty((0,), dtype=int)\n for t in list(transforms):\n unconstrained = which.remove(t, self._raveled_index())\n removed = np.union1d(removed, unconstrained)\n if t is __fixed__:\n self._highest_parent_._set_unfixed(self, unconstrained)\n\n return removed",
"def _remove_from_index_operations(self, which, transforms):\n if len(transforms) == 0:\n transforms = which.properties()\n removed = np.empty((0,), dtype=int)\n for t in list(transforms):\n unconstrained = which.remove(t, self._raveled_index())\n removed = np.union1d(removed, unconstrained)\n if t is __fixed__:\n self._highest_parent_._set_unfixed(self, unconstrained)\n\n return removed",
"def preferred_rep(self):\n # reducing coefficients mod torsion\n if self.torsion != 'free':\n for key, value in self.items():\n self[key] = value % self.torsion\n\n # removing key:value pairs with value = 0\n zeros = [k for k, v in self.items() if not v]\n for key in zeros:\n del self[key]",
"def sub_columns(df):\n torque = [col for col in df.columns.tolist() if 'Torque' in col]\n return torque",
"def test_explicit_fixed_effects_without_mask(tmp_path):\n shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3\n _, fmri_data, design_matrices =\\\n write_fake_fmri_data_and_design(shapes, rk, file_path=tmp_path)\n contrast = np.eye(rk)[1]\n\n # session 1\n multi_session_model = FirstLevelModel().fit(\n fmri_data[0], design_matrices=design_matrices[:1])\n dic1 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # session 2\n multi_session_model.fit(\n fmri_data[1], design_matrices=design_matrices[1:])\n dic2 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # fixed effects model\n multi_session_model.fit(\n fmri_data, design_matrices=design_matrices)\n fixed_fx_dic = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n contrasts = [dic1['effect_size'], dic2['effect_size']]\n variance = [dic1['effect_variance'], dic2['effect_variance']]\n\n # test without mask variable\n (\n fixed_fx_contrast,\n fixed_fx_variance,\n fixed_fx_stat,\n ) = compute_fixed_effects(contrasts, variance)\n assert_almost_equal(\n get_data(fixed_fx_contrast),\n get_data(fixed_fx_dic['effect_size']))\n assert_almost_equal(\n get_data(fixed_fx_variance),\n get_data(fixed_fx_dic['effect_variance']))\n assert_almost_equal(\n get_data(fixed_fx_stat), get_data(fixed_fx_dic['stat']))",
"def remove_cofactors_from_Sij(Sij_df, cofactors):\n if len(cofactors) == 0:\n return Sij_df\n\n # Get a list of cofactors in the model\n cofactors = list(set(cofactors) & set(Sij_df.index.tolist()))\n\n # Remove row of cofactors\n nSij_df = Sij_df.drop(cofactors)\n\n allRxns = nSij_df.columns.tolist()\n\n # Get all columns (j) with all zero entries\n rxns_involving_cofactors_only = nSij_df.columns[(\n nSij_df == 0).all()].tolist()\n\n remainRxns = list(set(allRxns) - set(rxns_involving_cofactors_only))\n\n # Drop all columns with zero entries\n nSij_df2 = nSij_df[sorted(remainRxns)]\n\n return nSij_df2",
"def remove_columns(data, col_ids):\n return np.delete(data, col_ids, axis=1)",
"def declutter(vector):\n for key in vector:\n clutter_values = [value for value in vector[key] if vector[key][value]<2] # gather everything with a value less than two and save it in a list\n for feature in clutter_values: # remove everything in the clutter values from a dictionary\n vector[key].pop(feature,None)\n return vector",
"def remove_fact(self, category, line_number):\n raise NotImplementedError()",
"def transform(self, x):\n found_columns = x.columns.intersection(self.columns)\n if len(found_columns) != len(self.columns) and not self.ignore_absent:\n raise KeyError(\n f\"{set(self.columns).difference(x.columns)} not found in the DataFrame\"\n )\n else:\n return x.loc[:, found_columns]",
"def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def maybe_droplevels(index: Index, key) -> Index:\n # drop levels\n original_index = index\n if isinstance(key, tuple):\n # Caller is responsible for ensuring the key is not an entry in the first\n # level of the MultiIndex.\n for _ in key:\n try:\n index = index._drop_level_numbers([0])\n except ValueError:\n # we have dropped too much, so back out\n return original_index\n else:\n try:\n index = index._drop_level_numbers([0])\n except ValueError:\n pass\n\n return index",
"def RemoveZeroVar(chain):\n return chain[:, np.invert((np.sum(np.var(chain, axis=0), axis=1)<1e-10)), :]",
"def RemoveZeroVar(chain):\n\treturn chain[:, np.invert((np.sum(np.var(chain, axis=0), axis=1)<1e-10)), :]"
] | [
"0.4996892",
"0.4893916",
"0.48386535",
"0.4811764",
"0.4706963",
"0.46767664",
"0.46290508",
"0.46205783",
"0.44753227",
"0.44314414",
"0.44152927",
"0.4411382",
"0.43374035",
"0.43291432",
"0.43158427",
"0.43082914",
"0.43082914",
"0.42838398",
"0.42731515",
"0.42687482",
"0.4253091",
"0.4252874",
"0.42440832",
"0.4242624",
"0.42392364",
"0.42276177",
"0.42213324",
"0.42027488",
"0.4191514",
"0.41860825"
] | 0.5115011 | 0 |
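A toy version of the maineffect_func transform from the main_effect record above, applied to a made-up indicator matrix with level 0 as the reference:

import numpy as np

value = np.array([[1., 0., 0., 1.],    # level 0 (reference)
                  [0., 1., 0., 0.],    # level 1
                  [0., 0., 1., 0.]])   # level 2
reference = 0
keep = [i for i in range(value.shape[0]) if i != reference]
rvalue = np.array([value[i] - value[reference] for i in keep])
print(rvalue)   # rows are (level1 - level0) and (level2 - level0)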
String representation of list of termnames of a formula. | def __str__(self):
    value = []
    for term in self.terms:
        value += [term.termname]
    return '<formula: %s>' % ' + '.join(value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_formula_in_list(self):\n return tree_to_string(self.expression)",
"def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names",
"def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names",
"def __str__(self):\n termStrings = []\n for term in self.LHS:\n coefficient = term[0]\n unknownSet = term[1]\n\n termString = str(coefficient) + ' * '\n unknownStrings = []\n for unknown in unknownSet:\n unknownString = unknown[0].__class__.__name__ + '@' + str(id(unknown[0]))[-4:] + '.' + unknown[1] # last 4 digits of variable ID . attribute name\n unknownStrings.append(unknownString)\n termString += str.join(' * ', unknownStrings)\n termStrings.append(termString)\n\n termStrings = str.join(' + ', termStrings)\n return termStrings + ' = ' + str(self.RHS)",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def list_formulae():\n return _list_tindyb_unique_values(\"formula\", dbpath=__dbpath__)",
"def terms(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"terms\")",
"def __str__( self ) :\n\n return( ' '.join( [ \"%g\" % c_l for c_l in self.coefficients ] ) )",
"def formula(self):\n terms = []\n for ff in self.formulae:\n terms += list(ff.terms)\n return Formula(terms)",
"def Rstr(self):\n\n results = []\n for expr in self.graded_dict:\n _expr = str(expr).replace(\"*\", \":\")\n if _expr != '1':\n for order in self.graded_dict[expr]:\n for factors in self.graded_dict[expr][order]:\n results.append(':'.join([_expr] + [f.name for f in factors]))\n else:\n for order in self.graded_dict[expr]:\n for factors in self.graded_dict[expr][order]:\n if factors:\n results.append(':'.join([f.name for f in factors]))\n else:\n results.append('1')\n return \"+\".join(results)",
"def to_string(self):\n\n # Get the header of the formula and the body separately\n header = f\"p cnf {self.nvars} {self.nclauses}\"\n body = \"\\n\".join(map(clause_to_string,\n self.base_clauses + self.aux_clauses))\n\n # Return the header and the body combined\n return header + \"\\n\" + body",
"def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string",
"def _repr_latex_(self):\n reprlatex = \"\"\n if not self._terms:\n reprlatex += \"0\"\n else:\n for term in self:\n termlatex = term._reprlatex\n if not reprlatex:\n # Adding the first term. No leading +.\n reprlatex += termlatex\n else:\n if not termlatex.startswith(\"-\"):\n # Is it the first term added to the sum? No leading +.\n reprlatex += f\"+ {termlatex}\"\n else:\n reprlatex += termlatex\n\n return f\"${reprlatex}$\"",
"def __str__(self):\n\n return \"%s:\\n%s\" % (self.name, self.top_terms_tuples(25))",
"def formulaToString(*args):\n return _libsbml.formulaToString(*args)",
"def __repr__(self):\n\n return \"<Terms term=%s>\" % (self.word)",
"def list_repr(self):\n pretty = []\n pretty.append(self.name)\n for node in self.evaluation_sequence:\n pretty.append(node.list_repr())\n return '\\n '.join(pretty)",
"def __str__(self):\n ingredient_names = [str(ingredient) for ingredient in self.ingredients]\n return ', '.join(ingredient_names)",
"def toString(self):\n \n if not self.coeff_map:\n raise Exception('no coeffs in constrain %s'%self.name)\n \n if self.result is None:\n raise Exception('result of this constrain is unknown!')\n \n if self.name is None:\n res=\"\"\n else:\n res=self.name+\": \"\n \n res+=coeff_sum(self.coeff_map) \n \n res+=self.op\n res+=\" \"+str(self.result)\n \n return res;",
"def getElementName(self):\n return _libsbml.ListOfFunctionTerms_getElementName(self)",
"def terms(self):\n return self._terms",
"def __repr__(self):\n st = '\\nProof(syntax=\\\"' + self.syntax + '\\\", formula_list=[\\n'\n for l in self.proof[:-1]:\n st += str(l) + ',\\n'\n return st + str(self.proof[-1]) + '])'",
"def print_formulas(self):\n for formula in self.formulas:\n if formula:\n print(formula)",
"def list_tokens(self) -> str:\n\n return self._format_symbol_table_content(\"Tokens\", self._symbols.keys())",
"def __str__(self):\n output = \"\"\n for i in self.values:\n st = []\n output += \"[\"\n for j in i:\n st.append(str(j))\n output += \",\".join(st)+\"]\"\n return str(self.m)+\"x\"+str(self.n)+\" [\" + output + \"]\"",
"def _flow_terms(flow):\n return flow.synonyms",
"def _atoms_string(self):\n return_str = 'Atoms\\n\\n'\n for atom in self.atoms:\n return_str += '{}\\n'.format(atom.input_string())\n return_str += '\\n'\n return return_str",
"def __str__(self):\n s = \"\"\n for v in self.vectors:\n s += str(v) + \"\\n\"\n return s",
"def toStringList(self):\n totStringList = []\n \n if whichKernel_ == 3:\n # structure is\n # dim\n # nr of kernels\n # ##for each kernel\n # direction\n # dim\n # center\n # translation\n # base\n # evalue\n # ##d value is useless here\n totStringList.append( int2Str(self._centers.shape[0]))\n totStringList.append( int2Str(self._centers.shape[1]))\n for k in range(self._centers.shape[1]):\n #dim\n totStringList.append(int2Str(self._centers.shape[0]))\n #center\n totStringList += vec2List(self._centers[:,k])\n #translation\n totStringList += vec2List(self._translations[:,k])\n #base\n totStringList.append(double2Str(self._bases[k]))\n #evalue\n totStringList.append(double2Str(self._eValues[k]))\n else:\n assert False, \"todo\" # todo\n \n return totStringList"
] | [
"0.69266677",
"0.68499625",
"0.68499625",
"0.64276206",
"0.6309525",
"0.6309525",
"0.6289808",
"0.60963464",
"0.58912927",
"0.58158964",
"0.58133405",
"0.579924",
"0.5791099",
"0.57738787",
"0.57626975",
"0.57447326",
"0.5739617",
"0.57184756",
"0.5633028",
"0.5621945",
"0.55728614",
"0.555047",
"0.5524148",
"0.5474634",
"0.54460526",
"0.5443871",
"0.5424984",
"0.5414442",
"0.54130685",
"0.5401423"
] | 0.7957115 | 0 |
Create (transpose) of the design matrix of the formula within namespace. Extra arguments are passed to each term instance. If the formula just contains an intercept, then the keyword argument 'nrow' indicates the number of rows (observations). | def __call__(self, *args, **kw):
if 'namespace' in kw:
namespace = kw['namespace']
else:
namespace = self.namespace
allvals = []
intercept = False
iindex = 0
for t in self.terms:
t = copy.copy(t)
t.namespace = namespace
val = t(*args, **kw)
isintercept = False
if hasattr(t, "termname"):
if t.termname == 'intercept':
intercept = True
isintercept = True
interceptindex = iindex
allvals.append(None)
if val.ndim == 1 and not isintercept:
val.shape = (1, val.shape[0])
allvals.append(val)
elif not isintercept:
allvals.append(val)
iindex += 1
if not intercept:
try:
allvals = np.concatenate(allvals)
except:
pass
else:
nrow = kw.get('nrow', -1)
if allvals != []:
if interceptindex > 0:
n = allvals[0].shape[1]
else:
n = allvals[1].shape[1]
allvals[interceptindex] = np.ones((1,n), np.float64)
allvals = np.concatenate(allvals)
elif nrow <= 1:
raise ValueError('with only intercept in formula, keyword \'nrow\' argument needed')
else:
allvals = I(nrow=nrow)
allvals.shape = (1,) + allvals.shape
return np.squeeze(allvals) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, *args, **kw):\n\n if 'namespace' in kw:\n namespace = kw['namespace']\n else:\n namespace = self.namespace\n \n\n allvals = []\n intercept = False\n iindex = 0\n for t in self.terms:\n\n t.namespace = namespace\n val = t(*args, **kw)\n\n isintercept = False\n if hasattr(t, \"termname\"):\n if t.termname == 'intercept': \n intercept = True\n isintercept = True\n interceptindex = iindex\n allvals.append(None)\n\n if val.ndim == 1 and not isintercept:\n val.shape = (1, val.shape[0])\n allvals.append(val)\n elif not isintercept:\n allvals.append(val)\n iindex += 1\n\n if not intercept:\n try:\n allvals = N.concatenate(allvals)\n except:\n pass\n else:\n nrow = kw.get('nrow', -1)\n if allvals != []:\n if interceptindex > 0:\n n = allvals[0].shape[1]\n else:\n n = allvals[1].shape[1]\n allvals[interceptindex] = N.ones((1,n), N.float64) \n allvals = N.concatenate(allvals)\n elif nrow <= 1: \n raise ValueError, 'with only intercept in formula, keyword \\'nrow\\' argument needed'\n else:\n allvals = I(nrow=nrow)\n allvals.shape = (1,) + allvals.shape\n return allvals",
"def _eval_transpose(self):\n coeff, matrices = self.as_coeff_matrices()\n return MatMul(\n coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()",
"def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals([email protected]_matrix)",
"def make_matrix(self):\n self.leftmost_element()\n self.rightmost_element()\n self.interior_element()\n\n #Transforms all sympy symbolic expressions for the lagrange polynomials into callable functions.\n self.psi_funcs = [sym.lambdify([self.x], self.psi[i], modules = \"numpy\") for i in range(3*self.Ne)]",
"def make_design_matrix(array):\n return sm.add_constant(make_col_vector(array), prepend=False)",
"def T(self):\n # TODO - your code here\n matrix_transpose = [];\n \n for j in range(self.w):\n matrix_transpose.append(self.get_column(j));\n \n return Matrix(matrix_transpose);",
"def CreateMatrix(self) -> BaseMatrix:",
"def CreateMatrix(self) -> BaseMatrix:",
"def CreateDesignMatrix_X(z, x, y, n ):\n if len(x.shape) > 1:\n x = np.ravel(x)\n y = np.ravel(y)\n\n N = len(x)\n l = int((n+1)*(n+2)/2) \n X = np.ones((N,l))\n\n for i in range(1,n+1):\n q = int((i)*(i+1)/2)\n for k in range(i+1):\n X[:,q+k] = x**(i-k) * y**k\n \n X, z_, indicies = shuffle(X, z)\n X_train, X_test, z_train, z_test = train_test_split(X, z_, test_size=split_train_test, random_state=seed, shuffle=False)\n X_test, X_val, z_test, z_val = train_test_split(X_test, z_test, test_size=split_test_val, random_state=seed, shuffle=False)\n\n return X, X_train, X_test, X_val, z_train, z_test, z_val, indicies",
"def build_matrix(\n self,\n term_index: typing.Union[\n dict, mz.preprocessors.units.Vocabulary.TermIndex],\n initializer=lambda: np.random.uniform(-0.2, 0.2)\n ) -> np.ndarray:\n input_dim = len(term_index)\n matrix = np.empty((input_dim, self._output_dim))\n # Starting the smallest index to the largest to ensure reproducibility\n for term, index in sorted(term_index.items(), key = lambda x: x[1]):\n matrix[index] = initializer()\n return matrix",
"def _create_transpose(cls, onnx_node, inputs, opset_version):\n shape = inputs[0].shape\n perm = onnx_node.getattr(\"perm\", list(range(len(shape) - 1, -1, -1)))\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(perm)",
"def create_correlationMatrix(obj):\n pass",
"def design_matrix(nonlinear_p, data, prior):\n P, ecc, omega, M0 = nonlinear_p[:4] # we don't need the jitter here\n\n t = data._t_bmjd\n t0 = data._t_ref_bmjd\n zdot = cy_rv_from_elements(t, P, 1., ecc, omega, M0, t0, 1e-8, 128)\n\n M1 = np.vander(t - t0, N=prior.poly_trend, increasing=True)\n M = np.hstack((zdot[:, None], M1))\n\n return M",
"def T(self):\n return Op('transpose', self)",
"def make_matrix(num_rows, num_cols, entry_fn):\n return [[entry_fn(i, j)\n for j in range(num_cols)]\n for i in range(num_rows)]",
"def get_design_matrix(x):\n\tF = np.ones((10, 1))\n\tF = np.hstack((F, x))\n\n\treturn F",
"def make_tag_matrix(self):\n pass",
"def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here",
"def _make_random_matrix(self, n_components, n_features):",
"def make_matrix(num_rows, num_cols, entry_fn):\n return [[entry_fn(i, j) for j in list(range(num_cols))]\n for i in list(range(num_rows))]",
"def build_Xij_inv_matrix(self,Nmesh=64):\n H0, F = self.cosmo.H0, self.cosmo.F\n Lbox = self.attrs['Lbox']\n kgrid = initialize_kgrid(Nmesh,Lbox)\n kmag_grid = np.linalg.norm(kgrid,axis=3)\n w_grid = self.cosmo.Pk_lin(kmag_grid)*(1/Lbox**3)*np.exp(-kmag_grid*kmag_grid*self.RG*self.RG)\n k2 = kmag_grid**2\n k2[0,0,0] = 1 \n #----------------------------------------------------\n cspace = np.arange(0,18)\n \n xij_tensor = [[np.sum(np.conj(Hhats[i](kgrid,k2,H0,F))*Hhats[j](kgrid,k2,H0,F)*w_grid)\n for j in cspace[self.cmask]] for i in cspace[self.cmask]]\n \n xij_tensor = np.array(xij_tensor)\n self.xij_tensor_inv = np.linalg.inv(xij_tensor.real)",
"def influence_matrix(self, span: typing.Sequence[int], grid_spacing: typing.Sequence[float],\n components: typing.Sequence[str]):\n pass",
"def ion_matrix(ion_coefficients, atomic_number, ion_number):\n offdiag = np.zeros(atomic_number)\n index = ion_coefficients.index\n for i in index:\n offdiag[i] = ion_coefficients.loc[i]\n diag = np.hstack([-offdiag, np.zeros(1)])\n return (np.diag(diag) + np.diag(offdiag, k=-1))[ion_number, :]",
"def __init__(self,n_terms=3):\r\n self.n_terms = n_terms\r\n self.num_parameters = 3 * self.n_terms",
"def _relax_matrix(self, n=1):\n\n for i in range(n):\n self.level.mid.reshape(-1)[:] = self.R_w.dot(self.level.mid.reshape(-1)) \\\n + self.omega * self.level.rhs / self.D",
"def make_matrix(num_rows, num_cols, entry_fn):\r\n return [[entry_fn(i,j) # given i, create a list\r\n for j in range(num_cols)] # [entry_fn(i,0),....]\r\n for i in range(num_rows)] # create one list for each i\r",
"def make_matrix(num_rows: int, num_cols: int, entry_fn: Callable[[int, int], float]) -> Matrix:\n return [[entry_fn(i, j)\n for j in range(num_cols)]\n for i in range(num_rows)]",
"def CreateMatrix(*args, **kwargs):\n return _gdi_.GraphicsRenderer_CreateMatrix(*args, **kwargs)",
"def make_matrix():\n\n # this imports category, category, data, text, pptx_data\n\n if use_test_data:\n # make a test matrix using create_test_matrix\n m = matrixfuncs.create_test_matrix()\n\n else:\n # make a matrix by connecting to Slides! and connecting to a data\n # table.\n import transformations.utils.slidesconf as slidesconf\n from Forgetdata.Matrix import ConnectionDefinition\n conn = ConnectionDefinition()\n conn.ConnectionString = mtd_filepath # set at top of file\n conn.Name = \"Test\"\n conn.Provider = \"SPSS MTD File\"\n liveConnection = slidesconf.connect(conn.ConnectionString,\n name=conn.Name,\n provider_name=conn.Provider)\n\n m = liveConnection[table_selected]\n\n x = tr.MatrixManipulator(m)\n matrixfuncs.printMatrix(m)\n\n for c in m[0]:\n c.TopMember.Label = c.TopMember.Label.encode('ascii', 'ignore')\n\n return m, x",
"def CreateAttributeMatrix(dims, name, type):\n am = simpl.AttributeMatrix.Create(dims, name, type)\n return am"
] | [
"0.61720544",
"0.5275372",
"0.5216206",
"0.51585174",
"0.5141125",
"0.50233305",
"0.49952924",
"0.49952924",
"0.49816874",
"0.4925911",
"0.48947468",
"0.48677468",
"0.48216242",
"0.48089668",
"0.4760504",
"0.47341752",
"0.47339922",
"0.47330117",
"0.46915984",
"0.46374306",
"0.46152654",
"0.4607989",
"0.4604817",
"0.459873",
"0.45959923",
"0.45888126",
"0.45841593",
"0.45839822",
"0.45827338",
"0.45789725"
] | 0.6225268 | 0 |
Determine whether a given term is in a formula. | def hasterm(self, query_term):
if not isinstance(query_term, Formula):
if isinstance(query_term, string_types):
try:
query = self[query_term]
return query.termname in self.termnames()
except:
return False
elif isinstance(query_term, Term):
return query_term.termname in self.termnames()
elif len(query_term.terms) == 1:
query_term = query_term.terms[0]
return query_term.termname in self.termnames()
else:
raise ValueError('more than one term passed to hasterm') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hasterm(self, query_term):\n\n if not isinstance(query_term, formula):\n if type(query_term) == type(\"name\"):\n try: query = self[query_term]\n except: return False\n elif isinstance(query_term, term):\n return query_term.termname in self.termnames()\n elif len(query_term.terms) == 1:\n query_term = query_term.terms[0]\n return query_term.termname in self.termnames()\n else:\n raise ValueError, 'more than one term passed to hasterm'",
"def isLeFormula(formula):\r\n return '->' in formula",
"def isFormula(string):\r\n string = string.replace(' ', '')\r\n if string == '':\r\n return True\r\n elif re.sub(r\"\\w|\\d|->|_|\\(|\\)|~\", '', string):\r\n return False\r\n elif re.findall(r\"(?<!\\w_)\\d+|(?<!\\w)\\d+|->->\", string):\r\n return False\r\n else:\r\n string1 = string.replace('~', '').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string1)\r\n for part in info:\r\n string1 = string1.replace(part, '(-1)')\r\n try:\r\n eval(string1)\r\n except:\r\n return False\r\n string2 = string.replace('~', '-').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string2)\r\n for part in info:\r\n string2 = string2.replace(part, '(-1)')\r\n try:\r\n eval(string2)\r\n except:\r\n return False\r\n return True",
"def is_atomic(formula):\n return isinstance(formula, Symbol) or isinstance(formula, Predicate)",
"def _is_term_exist(self, term):\n return term in self.postingDict",
"def isSetFormula(self):\n return _libsbml.Rule_isSetFormula(self)",
"def has_formula(self, check_formula):\n for formula in self.formulas:\n if formula.formula == check_formula:\n return True\n if self.parent is not None:\n return self.parent.has_formula(check_formula)\n return False",
"def isSetFormula(self):\n return _libsbml.KineticLaw_isSetFormula(self)",
"def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False",
"def term_restrictions(data):\n\n term = [\"1st\", \"2nd\", \"3rd\", \"1ST\", \"2ND\", \"3RD\"]\n if data not in term:\n return False\n return True",
"def is_pos(self, term):\n return term in self.pos",
"def is_operator(formula):\n return is_binary_operator(formula) or isinstance(formula, Not)",
"def contains(self, term):\n\t\tif term in self.textFile:\n\t\t\treturn True\n\t\t\n\t\treturn False",
"def _CheckExceptionTerm(self, term, rules):\n flag = False\n for keyword in rules:\n if rules[keyword] == 'starts':\n flag = flag or term.startswith(keyword)\n if rules[keyword] == 'ends':\n flag = flag or term.endswith(keyword)\n if rules[keyword] == 'contains':\n flag = flag or (keyword in term)\n return flag",
"def check_cons(term, cons):\n res = True\n for constr in cons:\n if constr.issubset(term):\n res = False\n break\n return res",
"def contains(self, symbol):\n return symbol in self.table",
"def is_satisfiable(formula: Formula) -> bool:\n # Task 2.5c\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n for val in truth_values(formula, assignment_dict):\n if val:\n return True\n return False",
"def contains(self, symbol):\r\n return symbol in self.s_table",
"def is_prefix(self, term: str, labels: istr = None) -> bool:",
"def is_quantifier_free(formula):\n assert type(formula) is Formula\n # Task 11.3.1\n if is_constant(formula.root) or is_variable(formula.root) or is_relation(formula.root) or is_equality(formula.root):\n return True\n\n if is_quantifier(formula.root):\n return False\n\n is_first = is_quantifier_free(formula.first)\n if is_binary(formula.root):\n return is_first and is_quantifier_free(formula.second)\n\n return is_first",
"def evaluate(formula: Formula, model: Model) -> bool:\r\n assert is_model(model)\r\n assert formula.variables().issubset(variables(model))\r\n # Task 2.1\r\n if is_unary(formula.root):\r\n return not evaluate(formula.first, model)\r\n elif is_constant(formula.root):\r\n if formula.root == 'T':\r\n return True\r\n return False # if its not 'T' than it must be 'F'\r\n elif is_variable(formula.root):\r\n return model.get(formula.root)\r\n else:\r\n # if we got here, than it must be binary operation\r\n assert (is_binary(formula.root))\r\n return evaluate_binary_operation_handler(formula, model)",
"def contain_op(self, expr):\n return expr in self.table.inv",
"def has_u(term):\n return term.amp.has(u)",
"def check(self, formula):\n return formula.semantic(self, self.worlds[point])",
"def get_is_term(self):\n \n approx_Z = self.add_is_approximation()\n \n return T.sum(T.exp(approx_Z))",
"def check_for_term(cls, word):\n\n # TO DO: allow for some kind of broader search so that:\n #\n # (1) If a word is really a two-word phrase, check if\n # either of those words is in the Term table.\n #\n # (2) If the stemmed word is in stemmed terms, add it as a match\n\n if cls.query.filter(cls.word == word).first() is None:\n return False\n else:\n return True",
"def is_in_prenex_normal_form(formula):\n assert type(formula) is Formula\n # Task 11.3.2\n if is_constant(formula.root) or is_variable(formula.root) or is_relation(formula.root) or is_equality(formula.root):\n return True\n\n if is_quantifier(formula.root):\n return is_in_prenex_normal_form(formula.predicate)\n\n is_first = is_quantifier_free(formula.first)\n if is_binary(formula.root):\n return is_first and is_quantifier_free(formula.second)\n\n return is_first",
"def evaluate(formula: Formula, model: Model) -> bool:\n assert is_model(model)\n assert formula.variables().issubset(variables(model))\n # Task 2.1\n if is_constant(formula.root):\n return True if formula.root == 'T' else False\n elif is_variable(formula.root):\n return model[formula.root]\n elif is_unary(formula.root):\n return not evaluate(formula.first, model)\n elif is_binary(formula.root):\n return eval_binary(evaluate(formula.first, model), evaluate(formula.second, model), formula.root)",
"def is_binary_operator(formula):\n return isinstance(formula, And) or isinstance(formula, Or) \\\n or isinstance(formula, If) or isinstance(formula, Iff)",
"def __contains__(self, item):\n return item in self._terms"
] | [
"0.7291431",
"0.6615992",
"0.63654065",
"0.6328823",
"0.6289445",
"0.62756157",
"0.62110174",
"0.60039693",
"0.5977211",
"0.5941926",
"0.5867839",
"0.586434",
"0.5817701",
"0.5814318",
"0.5751372",
"0.57293576",
"0.5700423",
"0.568234",
"0.5663981",
"0.5624077",
"0.56082124",
"0.56007904",
"0.5585034",
"0.55766237",
"0.55601317",
"0.5550635",
"0.55402815",
"0.55285436",
"0.5516049",
"0.5490912"
] | 0.7191981 | 1 |
Return a list of the indices of all columns associated to a given term. | def termcolumns(self, query_term, dict=False):
if self.hasterm(query_term):
names = query_term.names()
value = {}
for name in names:
value[name] = self._names.index(name)
else:
raise ValueError('term not in formula')
if dict:
return value
else:
return list(itervalues(value)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def indices_for_column(self, colname):\n rv = []\n\n fieldid = self.attrs[colname]\n\n for index in self.indices:\n if index.columns[0] == fieldid:\n rv.append(index)\n\n # TODO: New indices\n\n return rv",
"def termcolumns(self, query_term, dict=False):\n\n if self.hasterm(query_term):\n names = query_term.names()\n value = {}\n for name in names:\n value[name] = self._names.index(name)\n else:\n raise ValueError, 'term not in formula'\n if dict:\n return value\n else:\n return value.values()",
"def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]",
"def get_column_indices(self, tag_patterns, columns):\n patterns = TagPattern.parse_list(tag_patterns)\n indices = []\n for i, column in enumerate(columns):\n for pattern in patterns:\n if pattern.match(column):\n indices.push(i)\n return indices",
"def get_column_indices(tag_patterns, columns):\n tag_patterns = TagPattern.parse_list(tag_patterns)\n columns = [Column.parse(column) for column in columns]\n indices = []\n for i, column in enumerate(columns):\n for pattern in tag_patterns:\n if pattern.match(column):\n indices.append(i)\n return indices",
"def columnIndexes(a):\n nrows = (a.size-2)+1\n return a[1*np.arange(nrows)[:,None] + np.arange(2)]",
"def order_column_indices(self):\n return self._order_column_indices()",
"def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])",
"def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))",
"def get_label_indices(df: DataFrame, labels: list):\n return [idx for idx, name in enumerate(df.columns) if name in labels]",
"def _get_index_lists(self, mat):\n n_row, n_col = mat.shape\n \n col_ind_at_row, row_ind_at_col = [],[]\n for i in range(n_row):\n aux_ind = _np.where(mat[i]>0)[0]\n if len(aux_ind) == 0:\n raise Exception('Row {} is composed of zeros'.format(i))\n col_ind_at_row.append(aux_ind)\n \n for j in range(n_col):\n aux_ind = _np.where(mat[:,j]>0)[0]\n if len(aux_ind) == 0:\n raise Exception('Column {} is composed of zeros'.format(j))\n \n row_ind_at_col.append(aux_ind)\n \n return col_ind_at_row, row_ind_at_col",
"def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices",
"def get_indexes_of_features_to_vary(self, features_to_vary='all'):\n if features_to_vary == \"all\":\n return [i for i in range(len(self.encoded_feature_names))]\n else:\n ixs = []\n encoded_cats_ixs = self.get_encoded_categorical_feature_indexes()\n encoded_cats_ixs = [item for sublist in encoded_cats_ixs for item in sublist]\n for colidx, col in enumerate(self.encoded_feature_names):\n if colidx in encoded_cats_ixs and col.startswith(tuple(features_to_vary)):\n ixs.append(colidx)\n elif colidx not in encoded_cats_ixs and col in features_to_vary:\n ixs.append(colidx)\n return ixs",
"def indices(self):\n return self.index.indices",
"def get_columns(self, *args, **kw):\n\n v = self.namespace[self._name]\n while True:\n if callable(v):\n if isinstance(v, (Term, Formula)):\n v = copy.copy(v)\n v.namespace = self.namespace\n v = v(*args, **kw)\n else: break\n\n n = len(v)\n\n if self.ordinal:\n col = [float(self.keys.index(v[i])) for i in range(n)]\n return np.array(col)\n\n else:\n value = []\n for key in self.keys:\n col = [float((v[i] == key)) for i in range(n)]\n value.append(col)\n return np.array(value)",
"def get_undef_cols_idx(x, undef_val):\n undef_col_idx = []\n for col_idx in range(x.shape[1]):\n column = x[:, col_idx]\n if((column == undef_val).all()):\n undef_col_idx.append(col_idx)\n\n return undef_col_idx",
"def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol",
"def get_column_indices(indices, inputs, multiple):\n if multiple:\n res = OrderedDict()\n for p in indices:\n ov, onnx_i = get_column_index(p, inputs)\n if ov not in res:\n res[ov] = []\n res[ov].append(onnx_i)\n return res\n\n onnx_var = None\n onnx_is = []\n for p in indices:\n ov, onnx_i = get_column_index(p, inputs)\n onnx_is.append(onnx_i)\n if onnx_var is None:\n onnx_var = ov\n elif onnx_var != ov:\n cols = [onnx_var, ov]\n raise NotImplementedError( # pragma: no cover\n \"sklearn-onnx is not able to merge multiple columns from \"\n \"multiple variables ({0}). You should think about merging \"\n \"initial types.\".format(cols))\n return onnx_var, onnx_is",
"def term_columns(self):\n\n return [\n cast(self.model_class.id, String),\n self.model_class.title,\n self.model_class.text,\n self.model_class.author_name,\n self.model_class.author_place,\n self.model_class.category,\n self.model_class.organization,\n self.model_class.note,\n UserGroup.name,\n User.realname,\n User.username\n ]",
"def indices_of(self, col_name, value):\n return list(self._obj[self._obj[col_name] == value].index\n ) if col_name in self._obj.columns else None",
"def getIndices(schema_attr_levels):\n return np.array(list(schema_attr_levels.values())).flatten().tolist()",
"def GetColumnsOption(self, data) :\n indices = [ int(x.replace(self.label, '')) for x in data.columns if self.label in x and x.replace(self.label, '')!='' ]\n return indices",
"def getColumnIndices(*args, filepath=\"CO2.tab\"):\n # idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"SEG\": 0}\n idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"VISG\": 0, \"VISHL\": 0, \"ROG\": 0, \"ROHL\": 0}\n if filepath:\n cols = tabLineToList(readFullLine(filepath, 52))\n for key in idxDict:\n idxDict[key] = cols.index(key)\n return idxDict",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def get_encoded_categorical_feature_indexes(self):\n cols = []\n for col_parent in self.categorical_feature_names:\n temp = [self.encoded_feature_names.index(\n col) for col in self.encoded_feature_names if col.startswith(col_parent) and\n col not in self.continuous_feature_names]\n cols.append(temp)\n return cols",
"def get_indices(self):\r\n return self._indices",
"def get_pd_row_column_idx(df, queries, type=\"column\"):\n\n names = df.columns.values if type == \"column\" else df.index.values if type == \"row\" else None\n sidx = np.argsort(names)\n Indices = sidx[np.searchsorted(names, queries, sorter=sidx)]\n\n return Indices",
"def getIndices(self):\r\n return self._indices",
"def col_to_indices(col):\r\n return [(row, col) for row in range(0, 9)]",
"def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs"
] | [
"0.6974512",
"0.6489588",
"0.634191",
"0.6214408",
"0.61922437",
"0.61877096",
"0.610803",
"0.59453714",
"0.5941658",
"0.58720946",
"0.5815702",
"0.5770179",
"0.576646",
"0.57438403",
"0.5735558",
"0.57219255",
"0.5706351",
"0.57011116",
"0.56981784",
"0.5687769",
"0.56733966",
"0.56524795",
"0.56186",
"0.5608526",
"0.5556215",
"0.55416393",
"0.55273294",
"0.5496907",
"0.54950845",
"0.5476047"
] | 0.6924449 | 1 |
Return a list of the names in the formula. The order of the names corresponds to the order of the columns when self is evaluated. | def names(self):
allnames = []
for term in self.terms:
allnames += term.names()
return allnames | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_column_names(self):\n names = []\n names.append(self.question_column + \"_agree_lot\")\n names.append(self.question_column + \"_agree_little\")\n names.append(self.question_column + \"_neither\")\n names.append(self.question_column + \"_dis_little\")\n names.append(self.question_column + \"_dis_lot\")\n return names",
"def list_formulae():\n return _list_tindyb_unique_values(\"formula\", dbpath=__dbpath__)",
"def getColumnsNames(self):\r\n ColsName = []\r\n for i in range(len(self.columns)):\r\n ColsName.append(self.columns[i].getColName())\r\n return ColsName",
"def get_formula_in_list(self):\n return tree_to_string(self.expression)",
"def _generate_column_names(self):\n names = []\n # Assuming same number of compounds for every reaction!\n\n names = ['compound_{}'.format(i) for i in range(self.total_compounds)]\n names += ['compound_{}_amount'.format(i)\n for i in range(self.total_compounds)]\n for grid_param in self.params_grid_data.keys():\n names.append(grid_param)\n\n return names",
"def return_names(self):\n return self.__name_list",
"def names(self):\n \n return self.column_names.copy()",
"def names(self):\n return [da.name for da in self]",
"def names(self) -> list[str]:",
"def getBindedNames(self):\n names = []\n for function in self.functions:\n names.append(function.__name__)\n return \", \".join(names)",
"def getNames(self) -> List[unicode]:\n ...",
"def names(self):\r\n return self.get_field(self.name_field)",
"def names(self):\n if self.dtype.fields:\n return list(self.dtype.names)\n elif getattr(self, \"_coldefs\", None) is not None:\n return self._coldefs.names\n else:\n return None",
"def get_re_analysis_grid_column_names_by_order(self):\n self.column_name_list = self.get_grid_column_names_by_order(self.re_analysis_grid_div_id)\n return self.column_name_list",
"def compound_names(self) -> List[str]:\n return None",
"def names(self) -> List:\n ...",
"def getColumnNames(self):\n return self.columnNames",
"def get_rates_grid_column_names_by_order(self):\n self.column_name_list = self.get_grid_column_names_by_order(self.rates_grid_div_id)\n return self.column_name_list",
"def names(self):\n return self._names_to_cols.keys()",
"def get_names(self):\n return self.__names",
"def getColumnNames(self):\n return self.colnames",
"def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]",
"def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names",
"def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names",
"def name(self):\n return [o.name for o in self.obs]",
"def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp",
"def get_names(self):\n return self.names",
"def get_variable_names(self):\n return [var[1] for var in self.variables]",
"def exog_names(self):\n return self.data.xnames",
"def column_names(self):\n return self.data.columns.values"
] | [
"0.6978195",
"0.6668148",
"0.6560185",
"0.65295213",
"0.6467635",
"0.64270437",
"0.6383126",
"0.6363696",
"0.6346294",
"0.633791",
"0.6307332",
"0.6303675",
"0.62803394",
"0.62780005",
"0.62708455",
"0.62452453",
"0.6240283",
"0.6224373",
"0.6216947",
"0.6202079",
"0.61605483",
"0.61537635",
"0.6131869",
"0.6131869",
"0.61029345",
"0.60988635",
"0.6097984",
"0.60893065",
"0.6075147",
"0.6064416"
] | 0.66823804 | 1 |
Return a list of the term names in the formula. These are the names of each term instance in self. | def termnames(self):
names = []
for term in self.terms:
names += [term.termname]
return names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames",
"def terms(self):\n return self._terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def get_names(self):\n return [doc['name'] for doc in self.vocab]",
"def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)",
"def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)",
"def variables(self):\n return [term.variable for term in self.terms]",
"def get_all_terms(self):\n return self.term.all()",
"def list_formulae():\n return _list_tindyb_unique_values(\"formula\", dbpath=__dbpath__)",
"def formula(self):\n terms = []\n for ff in self.formulae:\n terms += list(ff.terms)\n return Formula(terms)",
"def terms(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"terms\")",
"def terms(self) -> Tuple[Term, ...]:\n ...",
"def name(self):\n return [o.name for o in self.obs]",
"def legend_names(self):\n return [leg.label for leg in self.legends]",
"def names(self):\r\n return self.get_field(self.name_field)",
"def get_formula_in_list(self):\n return tree_to_string(self.expression)",
"def names(self):\n return self.__names",
"def get_names(self):\n return self.__names",
"def getNames(self) -> List[unicode]:\n ...",
"def names(self):\n return [da.name for da in self]",
"def getElementName(self):\n return _libsbml.ListOfFunctionTerms_getElementName(self)"
] | [
"0.8022005",
"0.8022005",
"0.6990429",
"0.6694842",
"0.6694842",
"0.6694842",
"0.6694842",
"0.6694842",
"0.6694842",
"0.6694842",
"0.6694842",
"0.6694842",
"0.66725796",
"0.6669108",
"0.6669108",
"0.66505086",
"0.661695",
"0.6545174",
"0.6503503",
"0.6435962",
"0.6298151",
"0.62815154",
"0.62602836",
"0.6252528",
"0.6215876",
"0.62152296",
"0.620644",
"0.6196373",
"0.61649495",
"0.61619556"
] | 0.8274434 | 0 |
This returns a formula whose columns are the pairwise product of the columns of self and other. | def __mul__(self, other, nested=False):
other = Formula(other)
selftermnames = self.termnames()
othertermnames = other.termnames()
I = len(selftermnames)
J = len(othertermnames)
terms = []
termnames = []
for i in range(I):
for j in range(J):
termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))
pieces = sorted(termname.split('*'))
termname = '*'.join(pieces)
termnames.append(termname)
selfnames = self.terms[i].names()
othernames = other.terms[j].names()
if self.terms[i].name is 'intercept':
_term = other.terms[j]
_term.namespace = other.namespace
elif other.terms[j].name is 'intercept':
_term = self.terms[i]
_term.namespace = self.namespace
else:
names = []
d1 = len(selfnames)
d2 = len(othernames)
for r in range(d1):
for s in range(d2):
name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))
pieces = sorted(name.split('*'))
name = '*'.join(pieces)
names.append(name)
def product_func(value, d1=d1, d2=d2):
out = []
for r in range(d1):
for s in range(d2):
out.append(value[r] * value[d1+s])
return np.array(out)
cself = copy.copy(self.terms[i])
cother = copy.copy(other.terms[j])
sumterms = cself + cother
sumterms.terms = [cself, cother] # enforce the order we want
_term = Quantitative(names, func=sumterms,
termname=termname,
transform=product_func)
if _namespace_equal(self.namespace, other.namespace):
_term.namespace = self.namespace
terms.append(_term)
return Formula(terms) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __mul__(self, othertr):\n res = self.dot(othertr)\n return res",
"def __mul__(self, other, nested=False):\n\n other = formula(other, namespace=self.namespace)\n\n selftermnames = self.termnames()\n othertermnames = other.termnames()\n\n I = len(selftermnames)\n J = len(othertermnames)\n\n terms = []\n termnames = []\n\n for i in range(I):\n for j in range(J):\n termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))\n pieces = termname.split('*')\n pieces.sort()\n termname = '*'.join(pieces)\n termnames.append(termname)\n\n selfnames = self.terms[i].names()\n othernames = other.terms[j].names()\n\n if self.terms[i].name is 'intercept':\n _term = other.terms[j]\n _term.namespace = other.namespace\n\n elif other.terms[j].name is 'intercept':\n _term = self.terms[i]\n _term.namespace = self.namespace\n else:\n names = []\n\n d1 = len(selfnames) \n d2 = len(othernames)\n\n for r in range(d1):\n for s in range(d2):\n name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))\n pieces = name.split('*')\n pieces.sort()\n name = '*'.join(pieces)\n names.append(name)\n\n def product_func(value, d1=d1, d2=d2):\n\n out = []\n for r in range(d1):\n for s in range(d2):\n out.append(value[r] * value[d1+s])\n return N.array(out)\n\n sumterms = self + other\n sumterms.terms = [self, other] # enforce the order we want\n sumterms.namespace = self.namespace\n\n _term = quantitative(names, func=sumterms, termname=termname,\n transform=product_func)\n _term.namespace = self.namespace\n\n\n terms.append(_term)\n\n return formula(terms, namespace=self.namespace)",
"def __mul__(self, other: Any) -> ColumnOperators:\n return self.operate(mul, other)",
"def __mul__(self, other):\n\n newlist = [v for v in self.args]\n for i, v in enumerate(newlist):\n newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1],\n newlist[i][2])\n return Dyadic(newlist)",
"def pd(self, other):\n return Matriz([self]).T() * Matriz([other])",
"def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here",
"def __mul__(self, other):\n if hasattr(other, 'as_homogenous_transformation'):\n return basis(homogenous_transformation = self.as_homogenous_transformation() * other.as_homogenous_transformation())\n elif hasattr(other, 'n'):\n if other.n == (3,1):\n b = matrix.col((other[0], other[1], other[2], 1))\n elif other.n == (4,1):\n b = other\n else:\n raise TypeError(b, \"Incompatible matrices\")\n p = self.as_homogenous_transformation() * b\n if other.n == (3,1):\n return matrix.col(p[0:3])\n else:\n return p\n else:\n raise TypeError(b)",
"def prod_mat(self,other):\n [rs,cs],[ro,co] = self.D,other.D\n assert cs == ro, \"tailles incompatibles\"\n return Mat([rs,co], lambda i,j : prod_scal(self.ligne(i),other.col(j)))",
"def __mul__(self, other):\n # \n # TODO - your code here\n #\n \n result = [];\n row_result = [];\n product = 0;\n \n if(self.w != other.h):\n raise(ValueError, \"Matrices can not multiply for their dimesion doesn't match\"); \n \n for row in self.g:\n row_result = [];\n for j in range(other.w):\n product = dot_product(row,other.get_column(j));\n row_result.append(product);\n result.append(row_result);\n \n return Matrix(result);",
"def transpose_dot(self, other):\n from divisi2 import operators\n return operators.transpose_dot(self, other)",
"def __mul__(self, other):\n\n newlist = [v for v in self.args]\n for i, v in enumerate(newlist):\n newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1])\n return Vector(newlist)",
"def _mul(self, other):\n if isinstance(other, SeqFormula):\n form1, v1 = self.formula, self.variables[0]\n form2, v2 = other.formula, other.variables[0]\n formula = form1 * form2.subs(v2, v1)\n start, stop = self._intersect_interval(other)\n return SeqFormula(formula, (v1, start, stop))",
"def dot(self,other):\n if len(self) == len(other):\n res = 0\n for a,b in zip(self,other):\n res += a*b\n return res\n else: \n raise ValueError(\"The length is not matched\")",
"def __matmul__(self, other):\n return F.MatMul.apply(self, other)",
"def __mul__(self, other):\n return Trits(self.trits * other)",
"def mul_elementwise(self, other):\n # XXX: flint matrices do not support elementwise multiplication\n return self.to_ddm().mul_elementwise(other.to_ddm()).to_dfm()",
"def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.",
"def __rmul__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mul, other)",
"def __mul__(self, other):\n try:\n total = {self.var: other.val, other.var: self.val}\n return AutoDiffReverse(self.val * other.val, None, total)\n except AttributeError:\n return AutoDiffReverse(self.val * other, None, {self.var: other})",
"def __pow__(self, other):\n n = len(self)\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i]**other\n\n return v",
"def __mul__(self, other):\n if self.n != other.m:\n raise TypeError(\"Illegal dimensions for mul operator\")\n tmp = [[0 for _ in xrange(self.n)] for _ in xrange(other.m)]\n for i in xrange(self.n):\n for j in xrange(other.m):\n for k in xrange(other.n):\n tmp[i][j] += self.values[i][k] * other.values[k][j]\n res = []\n for i in tmp:\n res += i\n return simplematrix(self.n, other.m, res)",
"def product(self, x, y):\n return self( x.lift() * y.lift() )",
"def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod",
"def __mul__(self, other):\r\n return self.prod(other)",
"def __truediv__(self, other: Any) -> ColumnOperators:\n return self.operate(truediv, other)",
"def product_on_basis(self, g1, g2):\n return self.monomial(g1 * g2)",
"def dot_product(row, column):\n return reduce(lambda x, y: x + y, [x * y for x, y in zip(row, column)])",
"def __mul__(self, other):\n if isinstance(other, (int, float)):\n return Matrix([[self.values[row][index] * other\n for index in range(len(self.values[0]))]\n for row in range(len(self.values))])\n\n elif isinstance(other, Vector):\n return Vector([other.dot(Vector(row)) for row in self.values])\n\n elif isinstance(other, Matrix):\n return Matrix([(other.transpose() * Vector(row)).values\n for row in self.values])",
"def dot_product(a,b):\n return sum(pairwise_mult(a,b))",
"def geometric_product(b1, b2):\n if MV.is_orthogonal:\n return MV.product_orthogonal_blades(b1, b2)\n else:\n result = MV.base_mul_table[(b1, b2)]\n return result"
] | [
"0.64140505",
"0.63255924",
"0.6279798",
"0.6277444",
"0.6248262",
"0.62374085",
"0.6095252",
"0.6049075",
"0.6034012",
"0.6012328",
"0.59264356",
"0.5915682",
"0.5843071",
"0.5842043",
"0.5837421",
"0.5814968",
"0.5803651",
"0.57777655",
"0.5762164",
"0.57367915",
"0.5723196",
"0.5689638",
"0.56696737",
"0.5663304",
"0.56447804",
"0.56443644",
"0.5643554",
"0.56402904",
"0.5636508",
"0.56306666"
] | 0.6386615 | 1 |
Return a formula with all terms in other removed from self. If other contains term instances not in formula, this function does not raise an exception. | def __sub__(self, other):
other = Formula(other)
terms = copy.copy(self.terms)
for term in other.terms:
for i in range(len(terms)):
if terms[i].termname == term.termname:
terms.pop(i)
break
f = Formula(terms)
f.namespace = self.namespace
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __sub__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = copy.copy(self.terms)\n\n for term in other.terms:\n for i in range(len(terms)):\n if terms[i].termname == term.termname:\n terms.pop(i)\n break \n return formula(terms, namespace=self.namespace)",
"def __sub__(self, other):\n result = self.__class__()\n result._terms.extend(self)\n\n if isinstance(other, self._term_class):\n if other not in result:\n msg = f\"Term {other} not in {self.__class__}.\"\n raise ValueError(msg)\n else:\n result._terms.remove(other)\n elif isinstance(other, self.__class__):\n for term in other:\n result -= term\n else:\n msg = f\"Unsupported operand type(s) for +: {type(self)} and {type(other)}.\"\n raise TypeError(msg)\n\n return result",
"def difference(self, other): # type: (Term) -> Term\n return self.intersect(other.inverse)",
"def __add__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)",
"def __add__(self, other):\n\n other = Formula(other)\n terms = self.terms + other.terms\n pieces = sorted([(term.name, term) for term in terms])\n terms = [piece[1] for piece in pieces]\n f = Formula(terms)\n if _namespace_equal(self.namespace, other.namespace):\n f.namespace = self.namespace\n return f",
"def disjunction(self, second_formula):\n new_robustness = lambda s, t : max( self.robustness(s,t),\n second_formula.robustness(s,t) )\n new_formula = STLFormula(new_robustness)\n\n return new_formula",
"def __mul__(self, other, nested=False):\n\n other = Formula(other)\n\n selftermnames = self.termnames()\n othertermnames = other.termnames()\n\n I = len(selftermnames)\n J = len(othertermnames)\n\n terms = []\n termnames = []\n\n for i in range(I):\n for j in range(J):\n termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))\n pieces = sorted(termname.split('*'))\n termname = '*'.join(pieces)\n termnames.append(termname)\n\n selfnames = self.terms[i].names()\n othernames = other.terms[j].names()\n\n if self.terms[i].name is 'intercept':\n _term = other.terms[j]\n _term.namespace = other.namespace\n elif other.terms[j].name is 'intercept':\n _term = self.terms[i]\n _term.namespace = self.namespace\n else:\n names = []\n\n d1 = len(selfnames)\n d2 = len(othernames)\n\n for r in range(d1):\n for s in range(d2):\n name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))\n pieces = sorted(name.split('*'))\n name = '*'.join(pieces)\n names.append(name)\n\n def product_func(value, d1=d1, d2=d2):\n\n out = []\n for r in range(d1):\n for s in range(d2):\n out.append(value[r] * value[d1+s])\n return np.array(out)\n\n cself = copy.copy(self.terms[i])\n cother = copy.copy(other.terms[j])\n sumterms = cself + cother\n sumterms.terms = [cself, cother] # enforce the order we want\n\n _term = Quantitative(names, func=sumterms,\n termname=termname,\n transform=product_func)\n\n if _namespace_equal(self.namespace, other.namespace):\n _term.namespace = self.namespace\n\n terms.append(_term)\n\n return Formula(terms)",
"def __rsub__(self, other):\n\n return self.__sub__(other)",
"def __mul__(self, other, nested=False):\n\n other = formula(other, namespace=self.namespace)\n\n selftermnames = self.termnames()\n othertermnames = other.termnames()\n\n I = len(selftermnames)\n J = len(othertermnames)\n\n terms = []\n termnames = []\n\n for i in range(I):\n for j in range(J):\n termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))\n pieces = termname.split('*')\n pieces.sort()\n termname = '*'.join(pieces)\n termnames.append(termname)\n\n selfnames = self.terms[i].names()\n othernames = other.terms[j].names()\n\n if self.terms[i].name is 'intercept':\n _term = other.terms[j]\n _term.namespace = other.namespace\n\n elif other.terms[j].name is 'intercept':\n _term = self.terms[i]\n _term.namespace = self.namespace\n else:\n names = []\n\n d1 = len(selfnames) \n d2 = len(othernames)\n\n for r in range(d1):\n for s in range(d2):\n name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))\n pieces = name.split('*')\n pieces.sort()\n name = '*'.join(pieces)\n names.append(name)\n\n def product_func(value, d1=d1, d2=d2):\n\n out = []\n for r in range(d1):\n for s in range(d2):\n out.append(value[r] * value[d1+s])\n return N.array(out)\n\n sumterms = self + other\n sumterms.terms = [self, other] # enforce the order we want\n sumterms.namespace = self.namespace\n\n _term = quantitative(names, func=sumterms, termname=termname,\n transform=product_func)\n _term.namespace = self.namespace\n\n\n terms.append(_term)\n\n return formula(terms, namespace=self.namespace)",
"def __rsub__(self, other):\n return self._operation_sub(other, self)",
"def delete_terms(self, *terms):\n result = self.sequence\n for term in ANCOVA(*terms).sequence:\n result.remove(term)\n return ANCOVA(*result)",
"def __rsub__(self, other):\n if not isinstance(other, UniSet):\n other = self.fam.c_uniset(other)\n return other.fam.c_sub(other, self)",
"def __rsub__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during substraction of {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Minus(other, self)",
"def __sub__(self, this):\n return self.rm(this)",
"def __sub__(self, other):\n if type(other) == list:\n for element in other:\n if element in self.__dict__:\n del self.__dict__[element]\n return self\n else:\n raise PJFInvalidType(other, list)",
"def __rsub__(self, other):\n return self.runtime.sub(other, self)",
"def __sub__(self, other):\n if not isinstance(other, (list, Set)):\n raise TypeError(\"only sets can be removed from sets\")\n\n new_set = self._clone()\n\n for element in other:\n new_set.delete(element)\n\n return new_set",
"def __xor__(self, other):\n if type(other) == Form:\n other = [other]\n return Form.union([other, self])",
"def absorb(self):\n if not self.is_nf():\n raise TypeError(\"expression is not in normal form\")\n\n temps, args = list(self._args), list()\n\n # Drop all terms that are a subset of other terms\n while temps:\n fst, rst, temps = temps[0], temps[1:], list()\n drop_fst = False\n for term in rst:\n drop_term = False\n if fst.arg_set <= term.arg_set:\n drop_term = True\n elif fst.arg_set > term.arg_set:\n drop_fst = True\n if not drop_term:\n temps.append(term)\n if not drop_fst:\n args.append(fst)\n\n return self.__class__(*args)",
"def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def __rmul__(self, other):\n\n return self * other",
"def delete_formula(self, formula):\n\n # Looking for TreeFormula to remove from node\n f_list = self.node_memory[formula.node.unique_id].formulas\n index = -1\n # Searching for a match\n for i in range(len(f_list)):\n if f_list[i].formula_id == formula.formula_id:\n index = i\n break\n f_list.pop(index)\n\n # Removing the formula from it's parent and children\n if formula.parent and formula.parent.formula != \"PREMISE\":\n parent = formula.parent\n formula.remove_parent()\n formula.parent = parent\n for f in formula.children:\n f.parent = None\n f.parent_checkmark = False\n if formula.parent_checkmark:\n formula.parent.checkmarked = False\n formula.mark_child_not_valid()\n\n # Remove formula from the tree\n self.formulas.pop(formula.formula_id)\n self.readjust_formula_id(formula.formula_id)",
"def remove(self, *args):\n return _libsbml.ListOfFunctionTerms_remove(self, *args)",
"def skolemize_non_quantifier(formula):\n return copy.deepcopy(formula)",
"def __rmul__(self, other):\n return self * other",
"def __rmul__(self, other):\n return self * other",
"def __rmul__(self, other):\n\n return self.__mul__(other)",
"def formula(self):\n terms = []\n for ff in self.formulae:\n terms += list(ff.terms)\n return Formula(terms)",
"def reject(self, other):\n return self - self.project(other)",
"def __rmul__(self, other):\n return self.__mul__(other)"
] | [
"0.8369117",
"0.7235107",
"0.6456491",
"0.6246613",
"0.59090024",
"0.59075487",
"0.583818",
"0.5830051",
"0.5789227",
"0.57837075",
"0.5763275",
"0.57111454",
"0.5702665",
"0.57007825",
"0.5649963",
"0.5625294",
"0.56151897",
"0.5607506",
"0.5577634",
"0.5573287",
"0.5572609",
"0.55588543",
"0.55584157",
"0.5552114",
"0.5537296",
"0.5537296",
"0.5522576",
"0.55171627",
"0.55111825",
"0.549985"
] | 0.8258985 | 1 |
Output all pairwise interactions of given order of a sequence of terms. The argument order is a sequence specifying which order of interactions should be generated the default creates main effects and twoway interactions. If order is an integer, it is changed to range(1,order+1), so order=3 is equivalent to order=[1,2,3], generating all one, two and threeway interactions. If any entry of order is greater than len(terms), it is effectively treated as len(terms). >>> print interactions([Term(l) for l in ['a', 'b', 'c']]) | def interactions(terms, order=[1,2]):
l = len(terms)
values = {}
if np.asarray(order).shape == ():
order = lrange(1, int(order)+1)
# First order
for o in order:
I = np.indices((l,)*(o))
I.shape = (I.shape[0], np.product(I.shape[1:]))
for m in range(I.shape[1]):
# only keep combinations that have unique entries
if (np.unique(I[:,m]).shape == I[:,m].shape and
np.alltrue(np.equal(np.sort(I[:,m]), I[:,m]))):
ll = [terms[j] for j in I[:,m]]
v = ll[0]
for ii in range(len(ll)-1):
v *= ll[ii+1]
values[tuple(I[:,m])] = v
key = list(iterkeys(values))[0]
value = values[key]; del(values[key])
for v in itervalues(values):
value += v
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def diff(self, order=1):\n order = int(order)\n if order < 0:\n raise ValueError(\"order must be >= 0, got %d\" % order)\n\n if order == 0:\n return self.__call__\n\n if order > self.p: # identically zero, but force the same output format as in the general case\n dummy = self.__call__(0.) # get number of basis functions and output dtype\n nbasis = dummy.shape[0]\n return lambda x: np.zeros((nbasis,), dtype=dummy.dtype) # accept but ignore input x\n\n # At each differentiation, each term maps into two new terms.\n # The number of terms in the result will be 2**order.\n #\n # This will cause an exponential explosion in the number of terms for high derivative orders,\n # but for the first few orders (practical usage; >3 is rarely needed) the approach works.\n #\n terms = [(1., self)]\n for k in range(order):\n tmp = []\n for Ci, Bi in terms:\n tmp.extend((Ci * cn, Bn) for cn, Bn in Bi.__diff_internal()) # NOTE: also propagate Ci\n terms = tmp\n\n # perform final summation at call time\n return lambda x: sum(ci * Bi(x) for ci, bi in terms)",
"def AddOrderByTerms(self, order_by_pairs):\n for term, args in order_by_pairs:\n assert _IsValidOrderByTerm(term), term\n assert term.count('%s') == len(args), term\n self.order_by_terms.append(term)\n self.order_by_args.extend(args)",
"def execute_order(self, order):\n with self._lock:\n if not self._robot:\n raise Exception(\"No robot control service available\")\n\n # Get the target\n target = order['target']\n handlers = self._target_handlers.get(target, None)\n if handlers is None:\n # No handler for this target\n raise Exception(\"No handler for target {0}\".format(target))\n\n # Get the command\n command = order['cmd']\n\n # Get the arguments, i.e. what's left\n arguments = order.copy()\n del arguments['target']\n del arguments['cmd']\n\n results = []\n for handler in handlers:\n # Call handlers\n try:\n result = handler.handle_order(target, command, arguments)\n if result is not None:\n # Ignore None results\n results.append(result)\n\n except Exception as ex:\n # Just log it\n _logger.exception(\"Error calling order handler: %s\", ex)\n\n if len(results) == 0:\n return json.dumps({'message': 'No result'})\n\n if len(results) == 1:\n return json.dumps(results[0])\n\n else:\n return json.dumps({'results': results})",
"def get_pairs(terms):\n return itertools.combinations(terms, 2)",
"def print_options(order_list, option_list):\n menu = ''\n for order, text in zip(order_list, option_list):\n menu += (str(order) + ' - ' + text + '\\n')\n return menu",
"def all_orders(self, symbol, **kwargs):\n pass",
"def CRT_automorphisms(automorphisms, order_elts, degree, moduli):\n # restrict to automorphisms of degree `degree`\n degree_d_autos = []\n for j in range(len(automorphisms)):\n L = automorphisms[j]\n degree_d_autos.append(\n [L[i] for i in range(len(L)) if order_elts[j][i] == degree])\n\n # get list of CRT'ed automorphisms\n return CRT_helper(degree_d_autos, moduli)",
"def get_orders(self, *orders):\n return Orders(self, orders)",
"def with_terms(model: Model, terms: Iterable[Term]):\n program: SWIProgram = model.solver.program\n if isinstance(program, SWIProgram):\n # cdb = ClauseDB(builtins={})\n # for c in terms:\n # cdb.add_statement(c)\n identifiers = list(x[0:2] for x in program.add_program(terms))\n model.solver.cache.invalidate()\n try:\n yield\n finally:\n for type_, idx in identifiers:\n if type_ == \"cl\":\n program.retract_clause(idx)\n elif type_ == \"fa\":\n program.retract_fact(idx)\n else:\n raise NotImplementedError(\n \"with_terms is currently only implemented for ApproximateEngine\"\n )",
"def add_order(self, orders):\n if isinstance(orders, list):\n for order in orders:\n self._add_order(order)\n else:\n self._add_order(orders)",
"def pair_transition_matrix(order, a):\n size = len(order)\n assert a.shape == (size, size)\n directions = {\"W\": (0, 0), \"X\": (1, 0), \"Y\": (0, 1), \"M\": (1, 1)}\n emits = [directions[state.upper()] for state in order]\n return TransitionMatrix(a, emits).withoutSilentStates()",
"def generate_operations(self):\n combinations = self.COMBINATIONS.items()[:self.limit]\n for (term1, term2), type in combinations:\n yield (term1, term2, type)",
"def for_order(self, order, direction=None):\n if direction is not None:\n descending = direction == 'desc'\n elif self.order != order:\n descending = False\n else:\n descending = self.direction != 'desc'\n\n return self.__class__(\n self.session,\n state=self.state,\n term=self.term,\n order=order,\n direction='desc' if descending else 'asc',\n issues=self.issues,\n categories=self.categories,\n organizations=self.organizations,\n user_ids=self.user_ids,\n group_ids=self.group_ids\n )",
"def interactions(self) -> Sequence[Interaction[_C_out, _A_out]]:\n ...",
"def orders(self, orders):\n\n self._orders = orders",
"def orders(self, orders):\n\n self._orders = orders",
"def interactions(self) -> Sequence[Interaction[_C_out, Tuple[int,...]]]:\n return self._simulation.interactions",
"def monomials(order):\n def flip_row(rows, i):\n \"\"\"Changes the sign of a row.\n\n Args:\n rows (Tuple[Tuple[int]]): a matrix\n i (int): the index into `rows` of the row to flip\n\n \"\"\"\n for j in range(order):\n rows[i][j] = -rows[i][j]\n possible_rows = tuple(tuple(1 if i == n else 0 for i in range(order))\n for n in range(order))\n \n for rows in permutations(possible_rows):\n for n in range(2**order):\n rows_ = list(list(row) for row in rows)\n for i in range(order):\n if n == 0: break\n if (n >> i) & 1: flip_row(rows_, i)\n yield rows_",
"def hadamard_candidates_by_perms_combs(order):\n if order > 2 and order % 4 != 0:\n raise ValueError(\"order must be < 2 or divisible by 4\")\n\n seed = tuple(repeat(-1, order // 2)) + tuple(repeat(1, order // 2 - 1))\n possible_rows = tuple((1,) + perm for perm in unique_permutations(seed))\n\n first_row = tuple(repeat(1, order))\n for rows in combinations(possible_rows, order - 1):\n m = (first_row,) + rows\n if is_hadamard(m):\n yield(m)",
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._simulation.interactions",
"def _iterate_basis_order_(reference_determinant, order):\n occupied_indices = numpy.where(reference_determinant)[0]\n unoccupied_indices = numpy.where(numpy.invert(reference_determinant))[0]\n\n for occ_ind, unocc_ind in itertools.product(\n itertools.combinations(occupied_indices, order),\n itertools.combinations(unoccupied_indices, order)):\n basis_state = reference_determinant.copy()\n\n occ_ind = list(occ_ind)\n unocc_ind = list(unocc_ind)\n\n basis_state[occ_ind] = False\n basis_state[unocc_ind] = True\n\n yield basis_state",
"def genotype_coeffs(genotype, order=None):\n if order is None:\n order = len(genotype)\n length = len(genotype)\n mutations = [i + 1 for i in range(length) if genotype[i] == \"1\"]\n params = [[0]]\n for o in range(1, order + 1):\n params += [list(z) for z in it.combinations(mutations, o)]\n return params",
"def process(self, order):\r\n self._elements.append(order)",
"def order(self, order):\n self._order = order",
"def set_coefs_order(self, order):\n # Attach an epistasis model.\n self.order = order\n self.add_epistasis()\n self.epistasis.data.values = np.zeros(self.epistasis.n)\n self.epistasis.data.values[0] = 1\n return self",
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._interactions",
"def order(self, order):\n\n self._order = order",
"def order(self, order):\n\n self._order = order",
"def order(self, order):\n\n self._order = order",
"def set_order(self, order):\n self.order = order"
] | [
"0.50208193",
"0.49593326",
"0.48951837",
"0.48186675",
"0.47466713",
"0.46223867",
"0.45471212",
"0.4546781",
"0.45429128",
"0.45283663",
"0.44752872",
"0.44683182",
"0.4408208",
"0.4404306",
"0.43998128",
"0.43998128",
"0.43941587",
"0.43856522",
"0.43695247",
"0.43439233",
"0.4330875",
"0.43304577",
"0.4305404",
"0.42956007",
"0.42920563",
"0.42808756",
"0.4279295",
"0.4279295",
"0.4279295",
"0.42457843"
] | 0.69528484 | 0 |
Displays all process along with Ports and Process IDS | def display_port(self):
ports=os.popen("sudo netstat -ntlp").read().strip().splitlines()[2:]
for port in ports:
split=re.split('[\s]+',port)
self.portDic["Protcol"]=split[0]
self.portDic["Receive Q"]=split[1]
self.portDic["Send Q"]=split[2]
split_port=split[3].split(":")
if split_port[1]=="":
self.portDic["port"]="No Port"
else:
self.portDic["port"]=split_port[1]
self.portDic["Foreign Address"]=split[4]
self.portDic["State"]=split[5]
split_ID=split[6].split("/")
self.portDic["PID"]=split_ID[0]
self.portDic["Programme Name"]=split_ID[1]
self.portList.append(self.portDic.copy())
return self.portList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _showProcessList(self, procs):\n device_name = self._devId\n proc_list = ['%s %s %s' % (pid, name, args) for pid, (name, args) in sorted(procs)]\n proc_list.append('')\n log.info(\"#===== Processes on %s:\\n%s\", device_name, '\\n'.join(proc_list))",
"def print_process_list(self) -> None:\n\n print(f\"Process List: {self.process_list}\")",
"def ps():\n for p in psutil.process_iter():\n try:\n pid = p.pid\n name = p.name()\n cmdline = p.cmdline()\n except psutil.AccessDenied:\n continue\n\n print(\"%5d %10s %s\" % (pid, name, cmdline))",
"def ShowAllIPC(cmd_args=None):\n for t in kern.tasks:\n print GetTaskSummary.header + \" \" + GetProcSummary.header\n pval = Cast(t.bsd_info, 'proc *')\n print GetTaskSummary(t) + \" \" + GetProcSummary(pval)\n print PrintIPCInformation.header\n PrintIPCInformation(t.itk_space, False, False) + \"\\n\\n\"",
"def process_info(process):\n\thelp(process)",
"def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()",
"def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))",
"def display_nbr(ctx, as_port=False):\n ctl = ctx.ctl\n\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n if len(jobs) == 0:\n click.echo('No jobs running', err=True)\n sys.exit(1)\n\n for job in jobs:\n info = collect_vnc_info(ctl, job['id'], ctx.ssh_cfg)\n\n if as_port:\n click.echo('%d' % info['port'])\n else:\n click.echo(':%d' % info['display'])",
"def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]",
"def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res",
"def procinfo() -> None:\n if pwndbg.gdblib.qemu.is_qemu():\n print(\n message.error(\n \"QEMU target detected: showing result for the qemu process\"\n \" - so it will be a bit inaccurate (excessive for the parts\"\n \" used directly by the qemu process)\"\n )\n )\n exe = pwndbg.auxv.get()[\"AT_EXECFN\"]\n print(\"%-10s %r\" % (\"exe\", exe))\n\n proc = Process()\n\n # qemu-usermode fail!\n if not proc.status:\n return\n\n print(\"%-10s %s\" % (\"cmdline\", proc.cmdline))\n\n print(\"%-10s %s\" % (\"cwd\", proc.cwd))\n\n files = dict(proc.open_files)\n\n for c in proc.connections:\n files[c.fd] = str(c)\n\n print(\"%-10s %s\" % (\"pid\", proc.pid))\n print(\"%-10s %s\" % (\"tid\", proc.tid))\n\n if proc.selinux != \"unconfined\":\n print(\"%-10s %s\" % (\"selinux\", proc.selinux))\n\n print(\"%-10s %s\" % (\"ppid\", proc.ppid))\n\n if not pwndbg.gdblib.android.is_android():\n print(\"%-10s %s\" % (\"uid\", proc.uid))\n print(\"%-10s %s\" % (\"gid\", proc.gid))\n print(\"%-10s %s\" % (\"groups\", proc.groups))\n else:\n print(\"%-10s %s\" % (\"uid\", list(map(pwndbg.lib.android.aid_name, proc.uid))))\n print(\"%-10s %s\" % (\"gid\", list(map(pwndbg.lib.android.aid_name, proc.gid))))\n print(\"%-10s %s\" % (\"groups\", list(map(pwndbg.lib.android.aid_name, proc.groups))))\n\n for fd, path in files.items():\n if not set(path) < set(string.printable):\n path = repr(path)\n\n print(\"%-10s %s\" % (\"fd[%i]\" % fd, path))\n\n return",
"def getAllProcessInfo(self):\r\n self._update('getAllProcessInfo')\r\n\r\n all_processes = self._getAllProcesses(lexical=True)\r\n\r\n output = []\r\n for group, process in all_processes:\r\n name = make_namespec(group.config.name, process.config.name)\r\n output.append(self.getProcessInfo(name))\r\n return output",
"def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs",
"def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}",
"def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info",
"def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs",
"def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections",
"def process_infos(str=\"???\"):\n # stdin/stdout not always connected to a controlling terminal\n try:\n term_owner0 = os.tcgetpgrp(0)\n except OSError:\n term_owner0 = 0\n try:\n term_owner1 = os.tcgetpgrp(1)\n except OSError:\n term_owner1 = 0\n return \"processus %s: pid=%d, pere=%d, groupe=%d, term owner:%d/%d, sid=%d\"%(str,os.getpid(),os.getppid(),os.getpgid(0),term_owner0,term_owner1, os.getsid(0))",
"def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst",
"def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)",
"def list_ports():\n print '\\nHere is the list of available ports on this machine:'\n # lp.comports returns a list of (port, description, hardware ID) tuples\n iterator = sorted(lp.comports())\n for port, desc, hwid in iterator:\n print port\n exit()",
"def get_all_port(self, conf, dpid):\n\t\tpass",
"def list_ports(state):\n\tstate.report()",
"def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]",
"def test_process_list_with_all_users(self):\n\n class MyResponder(MockGDBServerResponder):\n def qfProcessInfo(self, packet):\n if \"all_users:1\" in packet:\n return \"pid:10;ppid:1;uid:1;gid:1;euid:1;egid:1;name:\" + binascii.hexlify(\"/a/test_process\".encode()).decode() + \";\"\n else:\n return \"E04\"\n\n self.server.responder = MyResponder()\n\n self.runCmd(\"platform select remote-linux\")\n\n try:\n self.runCmd(\"platform connect connect://localhost:%d\" %\n self.server.port)\n self.assertTrue(self.dbg.GetSelectedPlatform().IsConnected())\n self.expect(\"platform process list -x\",\n substrs=[\"1 matching process was found\", \"test_process\"])\n self.expect(\"platform process list\",\n error=True,\n substrs=[\"error: no processes were found on the \\\"remote-linux\\\" platform\"])\n finally:\n self.dbg.GetSelectedPlatform().DisconnectRemote()",
"def GetPublishedProcesses():\r\n pass",
"async def find_processes(self, msg):\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in [\"java.exe\", \"javaw.exe\"] and proc.cwd() in PROCESSES.keys():\n running_processes.append(proc.cwd())\n\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Online <:GreenTick:592083498534174721>\", inline=self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Offline <:RedCross:592082557961633877>\", inline=self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Admin Required <:OrangeUnknown:592082676891123722>\", inline=self.inline)\n await msg.edit(content=\"\", embed=new_embed)",
"def print_ports(self, file=None):\n lines = []\n for name, port in self.servers.items():\n lines.append(f\"{name}:{port}\")\n\n if file is not None:\n print(\"\\n\".join(lines), file=file)\n\n return \"\\n\".join(lines)",
"def do_list(self,line):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n # sys.exit(1)\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n print('##############################################')\n print('PID #'+' Processor #'+' Status')\n print('##############################################')\n spark_ls = []\n for processor in dict_processor:\n if processor.get('processor') == 'spark<spark_worker>' or processor.get('processor') == 'spark<spark_master>':\n spark_ls.append(processor)\n del dict_processor[dict_processor.index(processor)]\n # print dict_processor\n for processor in dict_processor:\n space_pid = 7 - len(processor.get('PID'))\n space_name = 30 - len(processor.get('processor'))\n if processor.get('status') == 'Running':\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[32m' +processor.get('status')+ '\\33[0m'\n else:\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[33m' +processor.get('status')+ '\\33[0m'\n # space_num = 30 - len(k)\n # print k + space_num*' '+v\n print 7*' '+'spark'\n for item in spark_ls:\n space_pid = 8 - len(item.get('PID'))\n space_name = 29 - len(item.get('processor').split('<')[1].split('>')[0])\n if item.get('status')=='Running':\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[32m'+item.get('status')+'\\33[0m'\n else:\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[33m'+item.get('status')+'\\33[0m'\n print('##############################################')\n else:\n print(\"cmd is not support from this host\")",
"def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess"
] | [
"0.725191",
"0.6919943",
"0.6882458",
"0.6662438",
"0.6527916",
"0.6403569",
"0.6377524",
"0.6372277",
"0.6365713",
"0.62965727",
"0.6290927",
"0.61608934",
"0.6155275",
"0.61273324",
"0.6119863",
"0.6109947",
"0.60304934",
"0.6015953",
"0.60154057",
"0.6005746",
"0.59994674",
"0.59775597",
"0.5960456",
"0.5930859",
"0.5906557",
"0.5885389",
"0.5878405",
"0.5862541",
"0.58481437",
"0.58152103"
] | 0.6956632 | 1 |
Kill Process with given process ID `PID` Process ID | def kill_process(self,PID):
os.system("sudo kill {}".format(PID))
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass",
"def processKill(uPid):\n return processTerminate(uPid);",
"def _KillProcess(self, pid):\n if sys.platform.startswith('win'):\n process_terminate = 1\n handle = ctypes.windll.kernel32.OpenProcess(\n process_terminate, False, pid)\n ctypes.windll.kernel32.TerminateProcess(handle, -1)\n ctypes.windll.kernel32.CloseHandle(handle)\n\n else:\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError as exception:\n logger.error('Unable to kill process {0:d} with error: {1!s}'.format(\n pid, exception))",
"def kill(pid):\n # If the process doesn't exist, it raises an exception that we can ignore.\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError:\n pass",
"def kill(proc_pid: int) -> None:\n\n if not psutil.pid_exists(proc_pid):\n return\n\n process = psutil.Process(proc_pid)\n\n for proc in process.children(recursive=True):\n proc.kill()\n\n process.kill()",
"def kill_pid(pid):\n try:\n # Unable to import 'module'\n # pylint: disable=no-member,F0401\n import signal\n return os.kill(pid, signal.SIGTERM)\n except ImportError:\n pass",
"def Kill(cls, pid, children=False):\n\t\tif pid is not None:\n\t\t\tif children:\n\t\t\t\tfor cpid, _, cmd in cls.Children(pid):\n\t\t\t\t\t# We need to recursively kill the childrens\n\t\t\t\t\tcls.Kill(cpid, children=True)\n\t\t\tLogger.Info(\"Killing process: \" + repr(pid))\n\t\t\treturn popen(\"kill -9 %s\" % (pid))\n\t\telse:\n\t\t\treturn None",
"def kill_pid(self, pid):\n # Ensure it still exists before continuing\n if not psutil.pid_exists(pid):\n return\n\n # If it did not die nicely, get stronger about killing it\n p = psutil.Process(pid)\n\n # Try terminating, wait 3 seconds to see if it dies\n p.terminate() # SIGTERM\n psutil.wait_procs([p], timeout=3)\n\n # Ensure it still exists before continuing\n if not psutil.pid_exists(pid):\n self.logger.debug(\n \"PID \" + str(pid) + \" was killed with SIGTERM successfully.\"\n )\n return\n\n # Try hard killing, wait 3 seconds to see if it dies\n p.kill() # SIGKILL\n psutil.wait_procs([p], timeout=3)\n\n self.logger.info(\n \"PID \" + str(pid) + \" could not be killed with SIGTERM, and \" +\n \"was killed with SIGKILL.\"\n )\n\n return",
"def _TerminateProcessByPid(self, pid):\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n self._TerminateProcess(process)\n self._StopMonitoringProcess(process)",
"def kill(self, pid, returncode):\r\n kernel32 = ctypes.windll.kernel32\r\n handle = kernel32.OpenProcess(1, 1, pid)\r\n ret = kernel32.TerminateProcess(handle, returncode)\r\n kernel32.CloseHandle(handle)\r\n return (0 != ret)",
"def kill_process(process):\n \n if process == None:\n print(\"No process to kill.\")\n pass\n else:\n os.killpg(os.getpgid(process.pid), signal.SIGTERM)\n process = None\n print(\"Process killed.\")\n return None",
"def kill_child(self, pid):\n # try communicate\n try:\n self._child_pids[pid].communicate()\n except Exception:\n print('Could not communicate to child')\n try:\n self.execute_command(\"kill -9 \"+str(pid))\n except Exception as e:\n print(e)",
"def kill_process(proc):\r\n p1_group = psutil.Process(proc.pid)\r\n\r\n child_pids = p1_group.get_children(recursive=True)\r\n\r\n for child_pid in child_pids:\r\n os.kill(child_pid.pid, signal.SIGKILL)",
"def kill_process_by_pid(duthost, container_name, program_name, program_pid):\n kill_cmd_result = duthost.shell(\"docker exec {} kill -SIGKILL {}\".format(container_name, program_pid))\n\n # Get the exit code of 'kill' command\n exit_code = kill_cmd_result[\"rc\"]\n pytest_assert(exit_code == 0, \"Failed to stop program '{}' before test\".format(program_name))\n\n logger.info(\"Program '{}' in container '{}' was stopped successfully\"\n .format(program_name, container_name))",
"def kill_process(pid, exit_code=None):\n\n if exit_code is None:\n exit_code = DEFAULT_TERMINATION_EXIT_CODE\n\n try:\n handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)\n except pywintypes.error:\n return False # \"The parameter is incorrect.\"\n\n if not handle:\n return False\n\n try:\n win32api.TerminateProcess(handle, exit_code)\n return True\n except pywintypes.error:\n return False # \"Access is denied.\"\n finally:\n win32api.CloseHandle(handle)",
"def delete_process(self, pid):\n del self.processes[pid]",
"def stop_process(check_id, storage, processes):\n\n processes[storage[check_id]['pid']].terminate()\n os.wait()\n del processes[storage[check_id]['pid']]",
"def try_kill_process(proc):\n pid = proc.pid\n LOG.info(\"Killing process %s\" % pid)\n try:\n os.kill(pid, signal.SIGKILL)\n except Exception:\n LOG.exception(\"Failed to kill %s\" % pid)",
"def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False",
"def safe_kill(pid):\n try:\n return os.kill(pid, signal.SIGKILL)\n except OSError as e:\n if e.errno == errno.ESRCH:\n # Raced with process termination\n pass\n else:\n raise",
"def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed",
"def stop(pid_file):\n # Get the running process pid, if any\n pid = status(pid_file)\n if not pid:\n return\n\n try:\n os.kill(pid, signal.SIGTERM)\n # Give the process some time to exit nicely\n time.sleep(0.1)\n # Send a SIGKILL signal but ignore errors\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError:\n pass\n except OSError as e:\n # Catch the error in case the process exited between our check and our\n # attempt to stop it.\n logger.warning(\n \"Unable to stop process %d, assuming it is already stopped: %s\" % (pid, e)\n )\n logger.debug(str(e), exc_info=True)\n # Clean up\n if os.path.exists(pid_file):\n os.remove(pid_file)",
"def kill(pids):\n for pid in pids:\n process = psutil.Process(pid)\n for proc in process.children(recursive=True):\n proc.kill()\n process.kill()\n return",
"def killProcess(self):\n if self._processEnded:\n return defer.succeed(None)\n self.onProcessEnd = defer.Deferred()\n self.transport.signalProcess('KILL')\n return self.onProcessEnd",
"def _kill_process(self, box_config):\n try:\n self.logger.info(f'kill: {box_config.process_name} {{')\n self.logger.info(f'target process pid={box_config.pid}')\n if box_config.pid and psutil.pid_exists(box_config.pid):\n p = psutil.Process(box_config.pid)\n p.kill()\n p.wait()\n box_config.pid = None\n self.bc_dao.update(box_config)\n remove_pid_file(box_config.process_name)\n except Exception:\n self.logger.error(f'Exception on killing: {box_config.process_name}', exc_info=True)\n finally:\n self.logger.info('}')",
"def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass",
"def kill(self):\n if self.process is not None:\n LOGGER.info('Killing command...')\n self.process.kill()\n self.process = None",
"def kill(pid, sig=signal.SIGTERM.value):\n pid = int(pid)\n sig = int(sig)\n proc = psutil.Process(pid)\n try:\n proc.send_signal(sig)\n return True\n except Exception as e:\n raise j.exceptions.RuntimeError(\"Could not kill process with id %s.\\n%s\" % (pid, e))",
"def kill(self):\n\n self.proc.kill()",
"def kill_process_by_port(port):\n port = int(port)\n pid = get_pid_by_port(port)\n if pid:\n return kill(pid)"
] | [
"0.7816297",
"0.773752",
"0.77095354",
"0.7672007",
"0.76110274",
"0.7358674",
"0.73388183",
"0.7314189",
"0.72366554",
"0.71744215",
"0.703986",
"0.6969621",
"0.6960862",
"0.6879827",
"0.6843893",
"0.67791253",
"0.670096",
"0.6696248",
"0.6672676",
"0.66505",
"0.6650049",
"0.6591754",
"0.65836066",
"0.65728426",
"0.6540489",
"0.65278906",
"0.6526621",
"0.6470518",
"0.6437773",
"0.6426052"
] | 0.7942027 | 0 |
Returns the first found packet with the given reportID while keeping the rest of the packets on the queue in the correct order. | def get_packet_withreportID(self, reportID, timeout=0.01):
# check if we've got a correct packet in the queue
incorrect_packets = []
correct_pkt = None
start_time = time.time()
while time.time() < start_time + timeout:
pkt = self.get_packet()
if pkt:
report, retval, payload = pkt
# check if it's the correct report
if reportID == report:
correct_pkt = pkt
break
else:
# self.log("Incorrect packet type: {}".format(report))
incorrect_packets.append(pkt)
else:
time.sleep(0.001)
# put back incorrect packets onto the queue
for pkt in incorrect_packets:
self.queue.put(pkt)
return correct_pkt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_report(self, report_ID, payload=None):\n if report_ID < 0 or report_ID > 127:\n raise ValueError(\"Report ID {} is out of the valid range!\".format(report_ID))\n\n self._serial_write(self.MAGIC_NUM_0)\n self._serial_write(self.MAGIC_NUM_1)\n self._serial_write(self.MAGIC_NUM_2)\n self._serial_write(self.MAGIC_NUM_3)\n self._serial_write(report_ID)\n _bytes = [self.MAGIC_NUM_0, self.MAGIC_NUM_1, self.MAGIC_NUM_2, self.MAGIC_NUM_3, report_ID]\n if payload is None:\n _bytes.append(0)\n self._serial_write(0)\n else:\n _bytes.append(len(payload))\n self._serial_write(len(payload))\n for b in payload:\n if b < 0 or b > 255:\n raise ValueError(\"Value in payload out of valid range!\")\n _bytes.append(b)\n self._serial_write(b)\n # Checksum time!\n self._serial_write(self.generate_checksum(_bytes))\n\n # Try to get the response\n retval = None\n payload = None\n start_time = time.time()\n while time.time() - start_time < self.TIMEOUT:\n pkt = self.get_packet_withreportID(report_ID)\n if pkt:\n report, retval, payload = pkt\n break\n else:\n pass\n # self.log(\"Failed to get report with ID {}\".format(report_ID))\n else:\n # check for timeout\n self.log(\"WARNING: Timed out waiting for response\")\n\n return retval, payload",
"def storage_find_report_file(self, report_id, filename):\n return self._get_queryset(report_id=report_id, filename=filename).get()",
"def fetch_packet_from_analyzer(self):\n\n try:\n # Read a packet from the backend, and add it to our analysis queue.\n return self.read_packet(timeout=self.PACKET_READ_TIMEOUT, blocking=False)\n\n except queue.Empty:\n # If no packets were available, return without error; we'll wait again next time.\n return None",
"def get_packet(self):\n if len(self._read_queue) == 0:\n raise NoPacketException()\n\n return self._read_queue.popleft()",
"def pop(self, pid):\n for p in self._queue:\n if p.id == pid:\n return self._queue.pop(self._queue.index(p)).id\n return 0",
"def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0",
"def get(self, str_report_id=None):\n rid = str_report_id\n nmapreport = None\n if str_report_id is not None and isinstance(str_report_id, str):\n rid = ObjectId(str_report_id)\n\n if isinstance(rid, ObjectId):\n # get a specific report by mongo's id\n resultset = self.collection.find({'_id': rid})\n if resultset.count() == 1:\n # search by id means only one in the iterator\n record = resultset[0]\n # remove mongo's id to recreate the NmapReport Obj\n del record['_id']\n nmapreport = NmapParser.parse_fromdict(record)\n return nmapreport",
"def _get_data(self):\n while True:\n # self.logger.debug(\"data queue size is: {}\".format(len(self._dataqueue)))\n ans = self._parser.find_first_packet(self._dataqueue[:])\n if ans:\n self._dataqueue = ans[1]\n # self.logger.debug(\"found packet of size {}\".format(len(ans[0])))\n return ans[0]\n else:\n # self.logger.debug(\"Could not find packet in received data\")\n tmp = self.conn.recv(1024)\n self._dataqueue += tmp",
"def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))",
"def dequeue(self, server_id):\n srv = self.get_server_dict(server_id)\n if len(srv['queue']) <= 0:\n return None\n return srv['queue'].popleft()",
"def getPacket(self, index):\n\t\treturn self.packets[index.row()]",
"def storage_get_report_file(self, report_pk):\n return self._get_queryset(pk=report_pk).get()",
"def send_report(self, report_ID, payload=None):\n raise NotImplementedError",
"def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))",
"def receive_packet(self, time=0):\n if time == 0:\n try:\n return self.in_queue.get(False)\n except queue.Empty:\n return None\n elif time < 0:\n try:\n return self.in_queue.get(True)\n except queue.Empty:\n return None\n else:\n try:\n return self.in_queue.get(True, time)\n except queue.Empty:\n return None",
"def get_next_output_packet(self):\n if self.num_packets != 0:\n return self.packet_buffer.pop(0)",
"def get_queue_number(self):\n outstring = self.dut.send_expect(\"stop\", \"testpmd> \")\n time.sleep(2)\n result_scanner = r\"Forward Stats for RX Port= %s/Queue=\\s?([0-9]+)\" % self.dut_ports[0]\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(outstring)\n queue_id = m.group(1)\n print \"queue is %s\" % queue_id\n self.dut.send_expect(\"start\", \"testpmd> \")\n return queue_id",
"def latest_report(report_uuid: str, database):\n return database.reports.find_one(filter={\"report_uuid\": report_uuid}, sort=[(\"timestamp\", pymongo.DESCENDING)])",
"def get_scan_by_id(self, scan_id):\n try:\n return self._scan_cache[scan_id]\n except KeyError:\n packed = self._make_scan(self._get_scan_by_id_raw(scan_id))\n self._scan_cache[packed.id] = packed\n return packed",
"def report_driver_command(self, driver_command_report: DriverCommandReport):\n\n queue_item = QueueItem(\n report_as_json=driver_command_report.to_json(),\n url=urljoin(self._remote_address, Endpoint.ReportDriverCommand.value),\n token=self._token,\n )\n\n self._queue.put(queue_item, block=False)",
"def getitem(self, index):\n #FIXME: A better way to get item without removing it.\n priority,size,trace=self.queues[index].get()\n self.enqueue(index,trace,priority)\n return trace",
"def first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._head._element",
"def get_report(self, report_id: str):\n self.logger.info(f'[INFO] Retrieving OpenVAS report {report_id}...')\n params = {\n 'cmd': 'get_report',\n 'token': self.token,\n 'report_id': report_id,\n 'filter': 'apply_overrides=0 min_qod=70 autofp=0 levels=hml first=1 rows=0 sort-reverse=severity',\n 'ignore_pagination': 1,\n 'report_format_id': self.csv_report_id,\n 'submit': 'Download',\n }",
"def find(self, task_id):\n _structs = [\n self.stack,\n self.backlog,\n self.blocked,\n self.sleeping,\n ]\n for struct in _structs:\n try:\n task_obj = struct.find(task_id)\n return task_obj\n except LookupError:\n # not found; try next structure\n continue\n\n # the graveyard is just a list; search it\n for task_obj in self.graveyard:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task: '{}'\".format(task_id))",
"def getItemsById(self,trackId):\n trackId = str(trackId) # all keys are strings, allow for integers to be passed in\n return next((i for i in self.items if i.getItunesAttribute('Track ID') == trackId),None)",
"def _get_existing_report(self, mask, report):\n for existing_report in self._reports:\n if existing_report['namespace'] == report['namespace']:\n if mask == existing_report['queryMask']:\n return existing_report\n return None",
"def getNextCard(deckId):\n deckOfCards = getCardsForDeck(deckId)\n card = deckOfCards.order_by('?')[0]\n return card",
"def first(self) -> Optional[T]:\n if len(self.entry_finder) == 0:\n return None\n for (_, _, (item,)) in self.priority_queue:\n if item is not None:\n return cast(T, item)\n return None",
"def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item",
"def _pop_received_packet(self):\n fragments = self._receive_heap.pop_min_and_all_fragments()\n if fragments is None:\n self._attempt_disabling_looping_receive()\n else:\n last_seqnum = fragments[-1].sequence_number\n self._update_next_expected_seqnum(last_seqnum)\n self._update_next_delivered_seqnum(last_seqnum)\n payload = b''.join(f.payload for f in fragments)\n self.handler.receive_message(payload)\n\n if self._next_delivered_seqnum not in self._receive_heap:\n self._attempt_disabling_looping_receive()"
] | [
"0.5876091",
"0.55140936",
"0.545259",
"0.5442561",
"0.5426714",
"0.52965784",
"0.52956307",
"0.52622616",
"0.5228871",
"0.5184876",
"0.51326233",
"0.50348186",
"0.4981323",
"0.49604148",
"0.49549568",
"0.4922734",
"0.49177262",
"0.4852774",
"0.483061",
"0.48110393",
"0.4796516",
"0.4756463",
"0.4724739",
"0.47156668",
"0.47035205",
"0.46931943",
"0.4688769",
"0.46874234",
"0.46854988",
"0.46830386"
] | 0.7943378 | 0 |
Writes `values_to_write` to the serial port. | def _serial_write(self, values_to_write):
if self.verbose:
self.log("Writing 0x{:x} to serial port...".format(values_to_write))
if type(values_to_write) is not list:
self.serial.write(bytearray([values_to_write]))
else:
self.serial.write(bytearray(values_to_write)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, values, file_obj, format=None):\n pass",
"def _spi_write(self, dc, values):\n self._gpio.output(self.cs_pin, 0)\n self._gpio.output(self.dc_pin, dc)\n\n if type(values) is str:\n values = [ord(c) for c in values]\n\n for byte_value in values:\n self._spi_bus.xfer([byte_value])\n\n self._gpio.output(self.cs_pin, 1)",
"def write(l, values):\n l.get_device().write(1, values)\n\n # Sometimes the flag simply ignores the command. Unknown if this\n # is an issue with PyUSB or the flag itself. But sending the\n # command again works a treat.\n l.get_device().write(1, values)",
"def write_registers(self, registeraddress, values):\n if not isinstance(values, list):\n raise TypeError('The \"values parameter\" must be a list. Given: {0!r}'.format(values))\n _checkInt(len(values), minvalue=1, description='length of input list')\n # Note: The content of the list is checked at content conversion.\n\n self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers')",
"def _WriteFieldValues(self, output_mediator, field_values):\n return",
"def writePoints(self, pointsvalues):\n raise NotImplementedError()",
"def write_value(self, value):\n raise NotImplementedError",
"def _WriteFieldValues(self, output_mediator, field_values):",
"def _write_values(self, app_name, chart_dir, values):\n\n data = self._get_values(app_name, chart_dir)\n new_data = {**data, **values}\n new_raw = yaml.dump(new_data)\n\n values_path = \"%s/%s/values.yaml\" % (chart_dir, app_name)\n with open(values_path, mode=\"w\") as values_file:\n values_file.write(new_raw)",
"def WriteValuesToJSONFile(self, state, values):\n value_counters = {}\n max_post_size = config.CONFIG[\"BigQuery.max_file_post_size\"]\n for value in values:\n class_name = value.__class__.__name__\n output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n # If our output stream is getting huge we should flush everything now and\n # set up new output files. Only start checking when we are getting within\n # range of the limit because we need to flush the stream to check the\n # size. Start counting at 0 so we check each file the first time.\n value_counters[class_name] = value_counters.get(class_name, -1) + 1\n if not value_counters[class_name] % max_post_size // 1000:\n\n # Flush our temp gzip handle so we can stat it to see how big it is.\n output_tracker.gzip_filehandle.flush()\n if os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size:\n # Flush what we have and get new temp output handles.\n self.Flush(state)\n value_counters[class_name] = 0\n output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n if not output_tracker.schema:\n output_tracker.schema = self.RDFValueToBigQuerySchema(value)\n\n if created:\n # Omit the leading newline for the first entry in the file.\n self._WriteJSONValue(output_tracker.gzip_filehandle, value)\n else:\n self._WriteJSONValue(\n output_tracker.gzip_filehandle, value, delimiter=\"\\n\")\n\n for output_tracker in self.temp_output_trackers.values():\n output_tracker.gzip_filehandle.flush()",
"def write_values_to_tsv(self, values, out_file):\n with open(out_file, \"w\") as f:\n for row in values:\n f.write(\"\\t\".join([cell.strip().replace(\"\\n\", \"|\").replace(\"\\r\", \"\")\n for cell in row]))\n f.write(os.linesep)",
"def write(self, value: int, /) -> None:",
"def write(self, value):\r\n self.__output__.write(value)",
"def write(self, service, values):\n with open(os.path.join(self.directory, service), \"w\") as f:\n values = [d.copy() for d in values]\n for d in values:\n # There can be other values in the JSON:\n d[\"extra\"] = 123\n f.write(dumps(values))",
"def register_write_multiple(self, register_indices, values):\n # TODO: rename 'register_indices' to 'registers'\n register_indices = register_indices[:]\n if len(register_indices) != len(values):\n raise ValueError('Must be an equal number of registers and values')\n\n num_regs = len(register_indices)\n for idx, indice in enumerate(register_indices):\n if isinstance(indice, six.string_types):\n register_indices[idx] = self._get_register_index_from_name(indice)\n buf = (ctypes.c_uint32 * num_regs)(*register_indices)\n data = (ctypes.c_uint32 * num_regs)(*values)\n\n # TODO: For some reason, these statuses are wonky, not sure why, might\n # be bad documentation, but they cannot be trusted at all.\n statuses = (ctypes.c_uint8 * num_regs)(0)\n\n res = self._dll.JLINKARM_WriteRegs(buf, data, statuses, num_regs)\n if res != 0:\n raise errors.JLinkException(res)\n\n return None",
"def callback_serial_write(data):\n serial_write(data.data)",
"def _WriteFieldValues(self, output_mediator, field_values):\n if self._event_counter != 0:\n self.WriteText(', ')\n\n json_string = json.dumps(field_values, sort_keys=True)\n output_text = '\"event_{0:d}\": {1:s}\\n'.format(\n self._event_counter, json_string)\n self.WriteText(output_text)\n\n self._event_counter += 1",
"def write(self, parameter_values: Mapping[Union[str, ParameterAref], ParameterValue]) -> \"Memory\":\n for parameter, parameter_value in parameter_values.items():\n self._write_value(parameter=parameter, value=parameter_value)\n return self",
"def write(self, tags, values, step):\n if not isinstance(tags, list):\n tags = list(tags)\n if not isinstance(values, list):\n values = list(values)\n\n for i, (tag, value) in enumerate(zip(tags,values)):\n self.writer.add_scalar(tag, value, step)",
"def write(self, value):\n if self.mode is UNAVAILABLE:\n raise IOError, \"%s can not be used through Firmata\" % self\n if self.mode is INPUT:\n raise IOError, \"%s is set up as an INPUT and can therefore not be written to\" % self\n if value is not self.value:\n self.value = value\n if self.mode is OUTPUT:\n if self.port:\n self.port.write()\n else:\n msg = chr(DIGITAL_MESSAGE)\n msg += chr(self.pin_number)\n msg += chr(value)\n self.board.sp.write(msg)\n elif self.mode is PWM:\n value = int(round(value * 255))\n msg = chr(ANALOG_MESSAGE + self.pin_number)\n# print(value)\n msg += chr(value % 128)\n msg += chr(value >> 7)\n self.board.sp.write(msg)\n elif self.mode is SERVO:\n value = int(value)\n msg = chr(ANALOG_MESSAGE + self.pin_number)\n msg += chr(value % 128)\n msg += chr(value >> 7)\n self.board.sp.write(msg)",
"def writeToSerial( self, channel, value ):\n self.checkConnection()\n toSend = self.mapMessage( channel, value )\n self.ser.write( toSend )\n resp = yield self.ser.read( len( RESP_STRING ) )\n if RESP_STRING != resp:\n# Since we didn't get the the correct reponse,\n# place the value back in the front of the queue\n# and wait for a specified ERROR_TIME before\n# checking the queue again.\n self.queue.insert( 0, ( channel, value ) )\n reactor.callLater( ERROR_TIME, self.checkQueue )\n raise DCBoxError(5)\n else:\n# Since we got the correct reponse,\n# update the value entry for this channel\n# and check the queue.\n dev, devChannel = self.getChannelInfo( channel )\n self.dcDict[dev]['devChannels'][devChannel]['value'] = value\n self.checkQueue()",
"def write( self, value ): # uint_8\n\t\tif (type(value) is bytearray) or (type(value) is bytes):\n\t\t\tfor data in value:\n\t\t\t\tself.send( data, LCD_RS )\n\t\telse:\n\t\t\tself.send(value, LCD_RS )",
"def __writeValue(self, valFormat, val):\n self._messageBuf.extend(pack(valFormat, val))",
"def w(self, value):\n self.oFile.write(value)",
"def write(self, filename):\n f = open(filename, 'w')\n f.write(str(self.m) + \"\\n\")\n f.write(str(self.n) + \"\\n\")\n for i in self.values:\n for j in i:\n f.write(str(j)+\"\\n\")\n f.closed",
"def write(self, new_value):\n self.write_value = new_value",
"def SetAllOutputs(self,value=0x00):\n self.Bus.Write_uInt8(self.Address,0x10,value)",
"def write_value(self, **kw):\n for reg, val in kw.iteritems():\n #print \"{} = {}\".format(reg, val)\n reg_n, reg_def = self.find_reg(reg)\n if reg_n == None:\n raise ValueError(\"Register {} not found\".format(reg))\n reg_start = reg_def[0]\n reg_len = reg_def[1]\n if val > 2**reg_len or val < 0:\n raise ValueError(\"Invalid value, got: {}, maximum {}\".format(val, reg_len))\n #Clear previous value\n self.registers[reg_n] &= (~((((2**reg_len-1))&0xFFFFFFFF) << reg_start) & 0xFFFFFFFF)\n self.registers[reg_n] |= (val) << reg_start\n self.modified[reg_n] = True\n return",
"def write_int_array(f, path, values, dtype='<i4'):\n dset = f.create_dataset(path, (len(values),), dtype=dtype)\n dset[:] = values\n f.flush()",
"def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)"
] | [
"0.66824037",
"0.6468341",
"0.6362898",
"0.62047505",
"0.6188161",
"0.6069787",
"0.60293794",
"0.6015024",
"0.5997377",
"0.5970137",
"0.5953444",
"0.5944348",
"0.5928449",
"0.58901906",
"0.58859426",
"0.5849831",
"0.5816865",
"0.5813451",
"0.5778106",
"0.57472885",
"0.5656866",
"0.56264037",
"0.5550511",
"0.55412966",
"0.55381155",
"0.5493028",
"0.5488082",
"0.5482567",
"0.5464402",
"0.5463435"
] | 0.86318475 | 0 |
reads `num_bytes` from the serial port. | def _serial_read(self, num_bytes):
out = self.serial.read(num_bytes)
if len(out) != num_bytes:
self.log("WARNING: Didn't get the expected number of bytes")
self.log(" Received {}, expected {}. Serial port dead?".format(len(out), num_bytes))
out_list = [int(v) for v in bytearray(out)]
if self.verbose:
self.log("Read in: {}".format(" ".join(["{:0>2X}".format(b) for b in out_list])))
return out_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, num_bytes_to_read):\n pass",
"def read_bytes(self, number_of_bytes):\n\n self.index = -1\n data = self.buf[self.offset:self.offset + number_of_bytes]\n self.offset += number_of_bytes\n\n return data",
"async def read(self, num_bytes=0) -> bytes:\n if num_bytes < 1:\n num_bytes = self.in_waiting or 1\n\n return await self._read(num_bytes)",
"def _read_num_bytes(self, num):\r\n buf = \"\"\r\n while len(buf) < num:\r\n chunk = self.sock.recv(num - len(buf))\r\n if not chunk:\r\n raise SocketClosedException\r\n buf += chunk\r\n return buf",
"def Read_Bytes(self, size = 0):\r\n if size == 0: size = self.Port.inWaiting()\r\n data = self.Port.read(size)\r\n return data",
"def read(self, n):\n logger.debug(\"Reading {} bytes...\".format(n))\n bytes_ = self.impl.read(n)\n logger.debug(\"Received: {} bytes\".format(len(bytes_)))\n return bytes_",
"def _read_bytes(self, start, num_bytes):\n with self._fp_lock:\n self._fp.seek(start)\n return self._fp.read(num_bytes)",
"def unpack_bytes(self, num_bytes):\n buf = self.view[0:num_bytes]\n\n self.num_bytes_consumed += num_bytes\n self.view = self.view[num_bytes:]\n\n return num_bytes, buf",
"def _readBytes(self, len):\n return self.socket.recv(len)",
"def _serial_read(self, size):\n self.write([self.SERIAL_IO])\n resp = self.read(size)\n data = self.decode(resp)\n return data",
"def _read_bytes(self, start, count): # type: (int) -> bytes\n bytes_data = self._buffer[start:start + count]\n\n if len(bytes_data) != count:\n raise ASN1WantMore('Premature end of input.')\n\n return bytes_data",
"def _readBytes(self, len):\n return self.stream.read(len)",
"def read(self, nbytes: int, /) -> bytes | None:",
"def read(self, num_of_byte: int=1) -> bytes:\n if num_of_byte < 0:\n return self.stream.read()\n if self._is_buffer_full():\n return self.stream.read(num_of_byte)\n\n buffer_len = len(self._buffer)\n if buffer_len == self._buffer_pointer + 1: # all real read\n data = self.stream.read(num_of_byte)\n self._buffer += data\n self._buffer_pointer = self._buffer_pointer + num_of_byte\n return data\n elif buffer_len - (self._buffer_pointer + 1) >= num_of_byte: # all from buffer\n self._buffer_pointer += num_of_byte\n return bytes(self._buffer[self._buffer_pointer - num_of_byte + 1: self._buffer_pointer + 1])\n elif buffer_len - (self._buffer_pointer + 1) < num_of_byte: # one part from buffer and the other real read\n data_buffer_part = self._buffer[self._buffer_pointer + 1:]\n remained_not_read_num = num_of_byte - (buffer_len - (self._buffer_pointer + 1))\n data_read_part = self.stream.read(remained_not_read_num)\n self._buffer += data_read_part\n self._buffer_pointer += num_of_byte\n return bytes(data_buffer_part + data_read_part)",
"def read(self, nbytes: int, /) -> Optional[bytes]:",
"def read(self, nbytes: Optional[int] = None) -> Optional[bytes]:\n ...",
"def read(self, num=1):\n contents = self.stream.read(num)\n self.bitsRead += len(contents)\n self.observerRead(contents)\n return contents",
"def readBytes(self, size=1):\n return self.bytes",
"def read(self, nbytes, /) -> bytes | None:",
"def _read_bytes(self, byte_stream, num_byte):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(byte_stream.read(num_byte), dtype=dt)[0]",
"def read_bytes(self, size):\n return self.read('bytes:'+str(size))",
"def _read_nowait(self, n: int) -> bytes:\n ...",
"def read(self, size: int=-1) -> bytes:\n ...",
"def read(self, size: int=-1) -> bytes:\n ...",
"def read(self, bytecount: int = -1) -> bytes:\n if not bytecount or bytecount < 0:\n return self.read1()\n bfr = bytearray(bytecount)\n offset = 0\n while offset < bytecount:\n tmp = self.read1(bytecount - offset)\n if not tmp:\n del bfr[offset:]\n break\n end = offset + len(tmp)\n bfr[offset:end] = tmp\n offset = end\n return bytes(bfr)",
"def readinto(self, buf: bytes, nbytes: int, /) -> Optional[int]:",
"def _read_length(self):\n msg_length = struct.unpack('!I', self.received_data[0])[0]\n self.l.debug('msg_length = %d', msg_length)\n self.set_terminator(msg_length)\n self.process_data = self._read_message\n self.received_data = []",
"def read_num_lines(data_socket):\r\n size_bytes = b''\r\n for i in range(0, 4):\r\n size_bytes += next_byte(data_socket)\r\n return int.from_bytes(size_bytes, 'big')",
"def read(self, nbytes: int, write: int = 0x00, /) -> bytes:",
"def read(self, nbytes: int, write: int = 0x00, /) -> bytes:"
] | [
"0.75836605",
"0.7325011",
"0.7147769",
"0.7057854",
"0.7018971",
"0.68422747",
"0.6747512",
"0.64437133",
"0.6420467",
"0.6404139",
"0.64017504",
"0.63769394",
"0.6248493",
"0.6245583",
"0.62369466",
"0.62223905",
"0.6211439",
"0.62023777",
"0.61807936",
"0.6122951",
"0.6094644",
"0.60905075",
"0.6029885",
"0.6029885",
"0.59899133",
"0.5988045",
"0.59876776",
"0.5987442",
"0.59728634",
"0.59728634"
] | 0.7343166 | 1 |
Clears the serial buffer of anything received. | def _serial_clear(self):
self.serial.reset_input_buffer() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_in_serial_buffer(self):\n if self.serial.inWaiting():\n resp = self.serial.readall()\n log.warn(\"Unknown/unparsed serial response: %s\", resp)",
"def clearBuffer(self):\n self.buffer = b''\n self._linesReceived = []\n self._lineBuffer = b''",
"def _clear_read_buffer(self):\n # self.ser.read(self.ser.in_waiting)\n self.ser.reset_input_buffer()",
"def clear(self):\n self.ser.read(1000000)",
"def clear_buffer(self):\n for i, value in enumerate(self.buffer):\n self.buffer[i] = 0",
"def clear(self):\n self.append_send_buffer = bytes()\n self.append_connect = True",
"def reset(self):\n self._buffer.clear()",
"def clear(self):\r\n self.buffer = ''\r\n self.connected = False\r\n self.listening = False\r\n del self.transaction_id\r\n try:\r\n self.socket.close()\r\n except:\r\n pass\r\n self.socket = None",
"def emptyBuffer(self):\n msg = True\n while msg:\n msg = self.receive()",
"def clear(self):\n self.molo_tcp_pack.clear()\n self.append_recv_buffer = bytes()\n self.append_send_buffer = bytes()\n self.append_connect = True\n self.client_status = None",
"def clear_input_buffer(ser):\n sys.stderr.write(\"\\n\")\n LOGGER.warning(\"***** Unprocessed input buffer content *****\")\n sys.stderr.write(\"\\n\")\n capture = \"\"\n rx = 1\n while rx:\n rx = ser.read(ser.in_waiting or 1)\n if rx:\n capture += rx.decode(errors=\"replace\")\n if capture != \"\":\n LOGGER.info(capture.strip())\n sys.stderr.write(\"\\n\")\n LOGGER.warning(\"*\" * 44)\n sys.stderr.write(\"\\n\")\n ser.reset_input_buffer()",
"def clearleds(self):\n self.buffer = self.emptybuffer[:]",
"def reset(self):\n self._buffer.fill(0)",
"def _clear_buffer(self, data_socket):\n\n # attempt to read a 1 byte length messages without blocking.\n # recv throws an exception as it fails to receive data from the cleared buffer\n data_socket.setblocking(False)\n while True:\n try:\n data_socket.recv(1)\n except IOError:\n break\n data_socket.setblocking(True)",
"def _clear_buffer(self, data_socket):\n\n # attempt to read a 1 byte length messages without blocking.\n # recv throws an exception as it fails to receive data from the cleared buffer\n data_socket.setblocking(False)\n while True:\n try:\n data_socket.recv(1)\n except IOError:\n break\n data_socket.setblocking(True)",
"def clear_in_buff(self):\n\t\tself._server_in_buf.clear()",
"def clear_in_buff(self):\n self._server_in_buf.clear()",
"def clear(self):\n self.molo_tcp_pack.clear()\n self.tranparency = False\n self.append_recv_buffer = bytes()\n self.append_send_buffer = bytes()\n self.append_connect = True",
"def sendClear(self):\n self.broadcast(MessageType.CLEAR,\"clear\")",
"def purge_buffer(self):\n self._buf = b''",
"def reset(self):\n\t\tself.buf = []",
"def clear(self):\n wait(self.proto.vanish())",
"def empty(self):\n with self._lock:\n store = self._buffer.flush()\n self._empty_action(store)",
"def flush(self):\n if self.serial:\n self.serial.flush()",
"def clear(self):\n self.buf = np.zeros((self.length, self.dim))\n self.ptr = 0\n self._size = 0",
"def flush(self):\n timeout = self.serial.timeout\n try:\n self.serial.timeout = 0.1\n while True:\n c = self.serial.read()\n if not c:\n return\n finally:\n self.serial.timeout = timeout",
"def clear(self):\n self._buffer = [0]*(self.width*self._pages)",
"def reset_buffers(self) -> None:\n # Clear input buffer, discarding all that is in the buffer.\n logger.debug(\"reset_input_buffer\")\n self._con.reset_input_buffer()\n # Clear output buffer, aborting the current output and discarding all that is in the buffer.\n logger.debug(\"reset_output_buffer\")\n self._con.reset_output_buffer()",
"def reset_io(self) -> None:\n\n self.serial.write(b\"z!\")\n self.__read_response(0)",
"def clearBuffer(self):\r\n self.__buffer =[[Pixel() for i in range(self.__width)] for i in range(self.__height)]"
] | [
"0.84202254",
"0.81880486",
"0.8017563",
"0.77988154",
"0.77487266",
"0.7468556",
"0.74253863",
"0.73461974",
"0.7287638",
"0.7238226",
"0.7233992",
"0.72171247",
"0.7203294",
"0.7098728",
"0.7098728",
"0.70947367",
"0.7071002",
"0.706498",
"0.7038408",
"0.6951526",
"0.69379854",
"0.68876153",
"0.68575865",
"0.6770879",
"0.6743528",
"0.6683494",
"0.6647678",
"0.6611972",
"0.6601169",
"0.65757614"
] | 0.84938765 | 0 |
The threaded listener that looks for packets from Pensel. | def _listener(self):
while self.thread_run.is_set():
if self._serial_bytes_available() >= len(self.MAGIC_HEADER) and \
self._check_for_start():
report, retval, payload = self._receive_packet()
if report >= 0:
self.queue.put((report, retval, payload))
if self.verbose:
self.log("Put report {} on queue".format(report)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listener(localSearchForFile):\n for con in listenForConnection(GLOBALS.LOCAL_PORT):\n if con == -1: return -1\n\n if GLOBALS.DEBUG_MODE: print(\"[info] Linear lib: received connection\")\n\n # spawn connection handler thread\n superPeer = Thread(target = superPeerConnection, args=[con, localSearchForFile])\n superPeer.daemon = True\n superPeer.start()",
"def __listener__(self):\n frame_interval = 0.1\n str_list = []\n c = ''\n while True:\n with Timeout(frame_interval, False):\n while True:\n try:\n c = self.ser.read()\n except:\n self.ser.close()\n self.make_connection.go()\n self.connection_made.wait()\n str_list.append(c)\n if c == \"\\n\" or c == '':\n break\n received = ''.join(str_list)\n str_list = []\n if received:\n for i in self.read_handlers:\n gevent.spawn(i, received)\n sleep(0.001)",
"def _listener(self):\n with open(self.playback_file, \"rb\") as f:\n while self.thread_run.is_set():\n length = f.read(1)\n if len(length) == 0:\n # out of data\n break\n length = length[0]\n data = f.read(length)\n if len(data) != length:\n raise RuntimeError(\"Didn't receive the expected amount of bytes!\")\n\n # itterating over bytes gives us ints\n report = data[0]\n retval = data[1]\n payload = [d for d in data[2:]]\n if report >= 0:\n self.queue.put((report, retval, payload))\n if self.verbose:\n self.log(\"Put report {} on queue\".format(report))\n\n if self.verbose:\n self.log(\"Waiting for queue to empty...\")\n\n while self.packets_available():\n time.sleep(0.01)",
"def run(self):\n log.info(\"Starting thread\")\n if self.open_listener():\n\n # This feels so dirty, but we need to make sure the thread isn't always blocking so we\n # can safely shutdown the thread. Given that the Listener address is always an IP\n # it should be safe. Should be, famous last words of course...\n conn = self.listener._listener._socket\n\n while self.running:\n r_list, w_list, e_list = select.select([conn, ], [conn, ], [conn, ], 0.01)\n\n if conn in r_list:\n connection = None\n try:\n connection = self.listener.accept()\n log.info(\"Connection opened by %s\", self.listener.last_accepted)\n\n while self.running:\n if connection.poll():\n msg = connection.recv()\n globals.strip_data.spi_recv(msg)\n except (IOError, EOFError):\n if connection:\n connection.close()\n log.info(\"Connection closed %s\", self.listener.last_accepted)\n\n log.info(\"Exiting thread\")",
"def listen(self):\n pass",
"def run(self):\n self.listen(self.input_topics.filter_by(transmission='tcp'))\n\n logging.info('Getting into the listening loop')\n self.running = True\n while self.running:\n self.loop()",
"def start_listener(self):\n if not self.listener:\n #self.listener = threading.Thread(target=self.tn.listener)\n self.listener = threading.Thread(target=self.listener_handler)\n self.listener.start()",
"def _listen(self):\n if not self.is_connected:\n self.connect()\n\n while True:\n data = self.recv()\n ping = PING_RE.match(data)\n if ping:\n self.handle_ping(ping.group(1))\n else:\n result = self.handle_message(data)\n\n if result:\n print(result)\n\n time.sleep(1)",
"def run(self):\n t = Thread(target=self._listen)\n t.start()",
"def listen(self):\n self.processor_thread = Thread(target = self.event_loop, name=\"InputThread-\"+str(self.thread_index), args=(self.thread_index, ))\n self.thread_index += 1\n self.processor_thread.daemon = True\n self.processor_thread.start()",
"def run(self):\n\n udp_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n # Allow multiple receiving endpoints.\n if sys.platform in ['darwin']:\n udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n elif sys.platform in ['linux', 'win32']:\n udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Accept UDP packets from any host.\n address = ('', self._udp_port)\n udp_socket.bind(address)\n\n selector = selectors.DefaultSelector()\n\n key_udp_socket = selector.register(udp_socket, selectors.EVENT_READ)\n key_socketpair = selector.register(self._socketpair[0], selectors.EVENT_READ)\n\n logging.info(\"Receiver thread started, reading UDP packets from port {}.\".format(self._udp_port))\n\n quitflag = False\n while not quitflag:\n for (key, events) in selector.select():\n timestamp = time.time()\n if key == key_udp_socket:\n # All telemetry UDP packets fit in 2048 bytes with room to spare.\n packet = udp_socket.recv(2048)\n timestamped_packet = TimestampedPacket(timestamp, packet)\n self._recorder_thread.record_packet(timestamped_packet)\n elif key == key_socketpair:\n quitflag = True\n\n selector.close()\n udp_socket.close()\n for sock in self._socketpair:\n sock.close()\n\n logging.info(\"Receiver thread stopped.\")",
"def run(self):\n print \"Starting Packet Sniffer on [ %s ]:[ %s ]...\" % (self.ifname, self.packet_filter_string)\n self.socket = conf.L2listen(\n type=ETH_P_ALL,\n iface=self.ifname,\n filter=self.packet_filter_string\n )\n\n sniff(\n opened_socket=self.socket,\n #filter=self.packet_filter_string,\n lfilter=self.is_not_outgoing,\n # prn=self.print_packet,\n prn=self.sniffer_callback,\n stop_filter=self.should_stop_sniffer\n )",
"def run(self):\n print('starting up on {} port {}'.format(*self.listener_address))\n self.selector.register(self.listener, selectors.EVENT_READ)\n\n # Serialize our listener's host and port\n serializedAdd = fxp_bytes_subscriber.serialize_address(\n self.listener_address[0], self.listener_address[1])\n\n # Contact with Publisher\n self.listener.sendto(serializedAdd, self.gcd_address)\n\n while True:\n events = self.selector.select(CHECK_INTERVAL)\n for key, mask in events:\n data = self.receive_message()\n self.removeOldQuote()\n self.createGraph(data)\n self.arbitrage()\n self.checkTimeout()",
"def udp_listener(self):\n while self.running:\n m = self.sock_udp.recvfrom(1024)\n self.sock_udp.sendto(ANS_HELLO, m[1])",
"def run(self):\n logger.info(\"Started listening from incoming ICMP packets...\")\n self.sockets = [self.icmp_server_socket]\n while True:\n sread, _, _ = select.select(self.sockets, [], [])\n for sock in sread:\n if sock.proto == socket.IPPROTO_ICMP:\n self.client_to_target()\n else:\n self.target_to_client(sock)",
"def listen(self):\n\n # It's ideal to start listening before the game starts, but the\n # down-side\n # is that object construction may not be done yet. Here we pause\n # shortly\n # to let initialization finish, so all functionality (e.g. self.log)\n # is\n # available.\n time.sleep(0.1)\n\n for st in self.sentences():\n if st:\n self.onMessage(source=None, message=st)",
"def listen(self):\n\n\t\twhile self.running:\n\t\t\t#Wait for server to inform you there is data\n\t\t\tself.rxEvt.wait()\n\t\t\t\n\t\t\ttry:\n\t\t\t\t#See if recieved packet is actually latest from client\n\t\t\t\tif self.rxData[len(self.rxData)-1][0] >= self.rxLatest:\n\n\t\t\t\t\t#Update latest and pass data to data handler\n\t\t\t\t\tself.rxLatest = self.rxData[len(self.rxData)-1][0]\n\t\t\t\t\tself.handleRecvData(self.rxData[len(self.rxData)-1][1])\n\t\t\n\t\t\t\t\t#Clear event object so other clientHandlers begin waiting again\n\t\t\t\t\tself.rxEvt.clear()\n\n\t\t\texcept IndexError, e:\n\t\t\t\tprint(\"Index error on ServerClient listen\\nCarrying on Regardless\")",
"def start(self):\n self.bind()\n logging.info(\"Statring UDP server\")\n self.lthread = Thread(target=self.listen, args=())\n self.lthread.name = \"UDP listening thread\"\n self.lthread.start()",
"def listen(self):\n self.can_listen = True\n threading.Thread(target=self._listen).start()",
"def create_listen_thread(self):\n self.listen_thread = threading.Thread(target=self.listen, daemon=True)\n self.listen_thread.start()\n print('Started listener thread')",
"def listen(self):\n raise NotImplementedError()",
"def _run(self):\n\n while self._thread_alive_event.is_set():\n reported_events = self._poll.poll(self.POLL_TIMEOUT)\n\n for fd_event_pair in reported_events:\n fd, event = fd_event_pair\n\n if event & select.POLLIN or event & select.POLLPRI:\n self._recv(fd)\n\n elif event & select.POLLERR:\n self.logger.error(\"Error condition of some sort\")\n self._thread_alive_event.clear()\n break\n\n elif event & select.POLLNVAL:\n self.logger.error(\"Invalid request: descriptor not open\")\n self._thread_alive_event.clear()\n break",
"def packet_in_handler(self, ev):\n msg = ev.msg\n try:\n src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data)\n dpid = msg.datapath.id\n if self.sw_module is None:\n self.sw_module = lookup_service_brick('switches')\n\n for port in self.sw_module.ports.keys():\n if src_dpid == port.dpid and src_port_no == port.port_no:\n delay = self.sw_module.ports[port].delay\n self._save_lldp_delay(src=src_dpid, dst=dpid,\n lldpdelay=delay)\n except LLDPPacket.LLDPUnknownFormat as e:\n return",
"def _pkt_handle(self, pkt):\n\n # snag any left over data from last read()\n # Parse the header to get type\n offset, payload_len, subtype, nxp_sniffer = lowpan.message.parse_header(pkt[0])\n\n\n # Extract the raw message bytes\n rawmsg = pkt[0][offset : offset + payload_len]\n if self.debug:\n print(pkt[1])\n print(util.hex_dump_buffer(rawmsg))\n\n\n\n # Now check for message handlers; preference is given to\n # handlers for a specific packet\n handled = False\n # Send to bridge socket\n if self.bdg_unix_addr:\n self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)\n handled = True\n\n if subtype in self.handlers.keys():\n handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)\n if not handled and (\"all\" in self.handlers.keys()):\n handled = self.handlers[\"all\"](self, nxp_sniffer, rawmsg)\n\n if not handled: # Not handled, enqueue\n with self.packets_cv:\n if len(self.packets) >= self.max_pkts:\n self.packets.pop(0)\n self.packets_expired += 1\n self.packets.append((nxp_sniffer, rawmsg))\n self.packets_cv.notify_all()\n self.packets_total += 1\n else:\n self.packets_handled += 1\n self.logger.debug(\"Message handled by callback\")",
"def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=next, args=(p,)).start(), iface=IFACE)",
"def poll_data(self):\n with s.socket(s.AF_INET, s.SOCK_DGRAM) as sock:\n sock.bind(('', self.__port))\n while True:\n message, address = sock.recvfrom(1024)\n self.__address = address\n logging.debug('Received: {}'.format(message))\n self.process_data(message)",
"def onPing(self, payload):",
"def listen(self) -> None:\n raise NotImplementedError",
"def listentcp(self, site):\n logger.warning('Setting TCP listener on port %d for HTTP requests',\n self.bind_port)\n self.reactor.listenTCP(self.bind_port, site)",
"def peer_server_listener(self):\n try:\n peer_server_socket = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n peer_server_socket.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n peer_server_socket.setsockopt(\n socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n peer_server_host = self.peer.peer_hostname\n peer_server_port = self.peer.peer_port\n peer_server_socket.bind(\n (peer_server_host, peer_server_port))\n peer_server_socket.listen(10)\n while True:\n conn, addr = peer_server_socket.accept()\n conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n #print \"Got connection from %s on port %s\" \\\n # % (addr[0], addr[1])\n self.peer_server_listener_queue.put((conn,addr))\n except Exception as e:\n print \"Peer Server Listener on port Failed: %s\" % e\n sys.exit(1)"
] | [
"0.6188723",
"0.6181703",
"0.6155212",
"0.61097896",
"0.60706186",
"0.60596853",
"0.60125685",
"0.6005501",
"0.592774",
"0.5900257",
"0.5859468",
"0.577193",
"0.57254094",
"0.56806195",
"0.56681275",
"0.56416494",
"0.56133914",
"0.560342",
"0.5548568",
"0.5541672",
"0.5541235",
"0.551153",
"0.5508939",
"0.550226",
"0.55004656",
"0.54702204",
"0.5459353",
"0.54586345",
"0.5446866",
"0.54232347"
] | 0.62927926 | 0 |
Serializes session to buffer | def vscr_ratchet_group_session_serialize(self, ctx):
vscr_ratchet_group_session_serialize = self._lib.vscr_ratchet_group_session_serialize
vscr_ratchet_group_session_serialize.argtypes = [POINTER(vscr_ratchet_group_session_t)]
vscr_ratchet_group_session_serialize.restype = POINTER(vsc_buffer_t)
return vscr_ratchet_group_session_serialize(ctx) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialize(self) -> str:\n return json.dumps([session.to_dict() for session in self.sessions])",
"def save_session(session, path):\n with open(path, 'w', encoding='utf-8') as f:\n json.dump(session, f, ensure_ascii=False, indent=4, cls=classes.CustomEncoder)",
"def save_to_session(self, serializer: URLSafeSerializer, session):\n session[SESSION_STORE_KEY] = self.store(serializer)\n return session",
"def to_session_message(self, flags=0):\n id_list = []\n name_list = []\n file_list = []\n node_count_list = []\n date_list = []\n thumb_list = []\n num_sessions = 0\n\n with self._sessions_lock:\n for session_id in self.sessions:\n session = self.sessions[session_id]\n # debug: session.dumpsession()\n num_sessions += 1\n id_list.append(str(session_id))\n\n name = session.name\n if not name:\n name = \"\"\n name_list.append(name)\n\n file = session.file_name\n if not file:\n file = \"\"\n file_list.append(file)\n\n node_count_list.append(str(session.get_node_count()))\n\n date_list.append(time.ctime(session._state_time))\n\n thumb = session.thumbnail\n if not thumb:\n thumb = \"\"\n thumb_list.append(thumb)\n\n session_ids = \"|\".join(id_list)\n names = \"|\".join(name_list)\n files = \"|\".join(file_list)\n node_counts = \"|\".join(node_count_list)\n dates = \"|\".join(date_list)\n thumbs = \"|\".join(thumb_list)\n\n if num_sessions > 0:\n tlv_data = \"\"\n if len(session_ids) > 0:\n tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session_ids)\n if len(names) > 0:\n tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NAME.value, names)\n if len(files) > 0:\n tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.FILE.value, files)\n if len(node_counts) > 0:\n tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NODE_COUNT.value, node_counts)\n if len(dates) > 0:\n tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.DATE.value, dates)\n if len(thumbs) > 0:\n tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.THUMB.value, thumbs)\n message = coreapi.CoreSessionMessage.pack(flags, tlv_data)\n else:\n message = None\n\n return message",
"def testReadAndWriteSerializedSession(self):\n expected_session = sessions.Session()\n expected_session.product_name = 'plaso'\n expected_session.product_version = plaso.__version__\n\n json_string = (\n json_serializer.JSONAttributeContainerSerializer.WriteSerialized(\n expected_session))\n\n self.assertIsNotNone(json_string)\n\n session = (\n json_serializer.JSONAttributeContainerSerializer.ReadSerialized(\n json_string))\n\n self.assertIsNotNone(session)\n self.assertIsInstance(session, sessions.Session)\n\n expected_session_dict = {\n 'aborted': False,\n 'debug_mode': False,\n 'identifier': session.identifier,\n 'preferred_encoding': 'utf-8',\n 'preferred_time_zone': 'UTC',\n 'product_name': 'plaso',\n 'product_version': plaso.__version__,\n 'start_time': session.start_time}\n\n session_dict = session.CopyToDict()\n self.assertEqual(\n sorted(session_dict.items()), sorted(expected_session_dict.items()))",
"def session(self):",
"def serialize(self):\n return {\n 'user_id' : self.user_id,\n 'session_id' : self.session_id,\n }",
"def save_session(self, session):\n db = self.open()\n db[session.id] = session",
"def _save_to_buffer(self):\n self._save_to_resource()",
"def cache_session(self):\n # always save (to update timeout)\n self.i('Cache Session')\n with open(self.cache_file_path, \"wb\") as file:\n pickle.dump(self, file)",
"def to_unicode(session):\n return six.text_type(session.data)",
"def save_session_data(self, session_id, data):\n raise NotImplementedError()",
"def dump_sessions(self, data):\n try:\n with open(\"sessions.json\", \"w\") as file:\n json.dump(data, file)\n except:\n print(\"Can not save active sessions list to disk. Check permissions.\")",
"def get_session_data(self):\n return dict(self._get_session())",
"def to_session_message(self, flags=0):\n return self.mainserver.to_session_message(flags)",
"def export_file(self):\n\n if not self.session_filename:\n return\n\n data = {\n \"session_filename\": self.session_filename,\n \"index_start\": self.total_mutant_index,\n \"sleep_time\": self.sleep_time,\n \"restart_sleep_time\": self.restart_sleep_time,\n \"restart_interval\": self.restart_interval,\n \"web_port\": self.web_port,\n \"crash_threshold\": self._crash_threshold_node,\n \"total_num_mutations\": self.total_num_mutations,\n \"total_mutant_index\": self.total_mutant_index,\n \"netmon_results\": self.netmon_results,\n \"procmon_results\": self.procmon_results,\n \"is_paused\": self.is_paused\n }\n\n fh = open(self.session_filename, \"wb+\")\n fh.write(zlib.compress(cPickle.dumps(data, protocol=2)))\n fh.close()",
"def _deserialize_session_stored(self, session, deserialize=pickle.loads):\n _session_id = session.session_id\n _session_data = session.redis.store[_session_id]\n _session_deserialized = deserialize(_session_data)\n return _session_deserialized",
"def serialize_timingsession(session_pk, pk_offset=0):\n session = TimingSession.objects.get(pk=session_pk)\n data_to_dump = [\n [session],\n [session.coach],\n [session.coach.user],\n Split.objects.filter(timingsession=session_pk),\n SplitFilter.objects.filter(timingsession=session.pk),\n Reader.objects.filter(timingsession=session_pk),\n Athlete.objects.filter(split__timingsession=session_pk),\n User.objects.filter(athlete__split__timingsession=session_pk),\n Team.objects.filter(athlete__split__timingsession=session_pk),\n Tag.objects.filter(split__timingsession=session_pk),\n Checkpoint.objects.filter(session=session_pk)\n ]\n\n data = []\n for item in data_to_dump:\n data.extend(json.loads(serializers.serialize('json', item)))\n\n for i in range(len(data)):\n data[i]['pk'] += pk_offset\n for key in data[i]['fields'].keys():\n if key in _models:\n if isinstance(data[i]['fields'][key], list):\n data[i]['fields'][key] = [\n (pk + pk_offset) for pk in data[i]['fields'][key]\n ]\n else:\n data[i]['fields'][key] += pk_offset\n elif key in _exclude_fields:\n data[i]['fields'][key] = ''\n elif key in _make_unique_fields:\n data[i]['fields'][key] = _random_name()\n elif data[i]['model'] == 'trac.reader' and key == 'name':\n data[i]['fields'][key] = _random_name()\n\n return data",
"async def view_session(request: Request):\n return JSONResponse(request.session)",
"def save_data(self):\n data = self.data\n if data is not None:\n data = base64.encodestring(pickle.dumps(data))\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('UPDATE sessions SET data = ? WHERE id = ?;',\n (data, self.sid))\n cursor.close()\n connection.commit()\n connection.close()",
"def save_session():\n\n filename = request.json.get(\"path\")\n finished = request.json.get(\"finished\")\n config = request.json.get(\"config\")\n\n success = engine.io.save(filename, state.proc, state.corpus, state.test_corpus, state.classifier, state.last_result, finished, config)\n\n if success:\n return jsonify({\"saved\":True})\n else:\n return 'Could not save session file.', 428",
"def serialize(self) -> bytes:\n pass",
"def serialize(self) -> bytes:\n pass",
"def save_session(self):\n filename = os.path.join(self.result_path, 'LFPSession_{}.obj'.format(self.session_id))\n filehandler = open(filename, \"wb\")\n # Do not save the loaded LFP matrices since they are too big\n temp = self\n temp.probes = dict.fromkeys(temp.probes.keys())\n temp.loaded_cond = None\n temp.layer_selected = False\n cPickle.dump(temp.__dict__, filehandler)\n filehandler.close()\n return filename",
"def session_for_write():\n return enginefacade.writer.using(_CONTEXT)",
"def session_dump(self) -> json:\n resp = self.__get_updates()\n try:\n if not os.path.exists(self.dirDownloads):\n os.mkdir(self.dirDownloads)\n local_path = os.path.join(self.dirDownloads, 'session_dump.json')\n\n with open(local_path, 'w+', encoding='utf-8') as outfile:\n json.dump(resp, outfile)\n except IOError as io_err:\n print(io_err)\n sys.exit(1)",
"def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)",
"def temp_dump(self, session_id):\n f = open(pathlib.Path(basedir).joinpath('static', 'temp', session_id, 'hero_pickle_storage.json'), 'w')\n stored_info = jsonpickle.encode(self)\n f.write(stored_info)\n f.close()",
"def serialize(self):\n return {\n 'id' : self.id,\n 'session_id' : self.session_id,\n 'filename' : self.filename,\n 'filetype' : self.filetype\n }",
"def _WriteSessionStart(self, session_start):\n if self.storage_type != definitions.STORAGE_TYPE_SESSION:\n raise IOError('Session completion not supported by storage type.')\n\n stream_name = 'session_start.{0:06d}'.format(self._last_session)\n if self._HasStream(stream_name):\n raise IOError('Session start: {0:06d} already exists.'.format(\n self._last_session))\n\n session_start_data = self._SerializeAttributeContainer(session_start)\n\n data_stream = _SerializedDataStream(\n self._zipfile, self._temporary_path, stream_name)\n data_stream.WriteInitialize()\n data_stream.WriteEntry(session_start_data)\n data_stream.WriteFinalize()"
] | [
"0.65804774",
"0.6362454",
"0.61549973",
"0.61451703",
"0.5916309",
"0.5916149",
"0.5871953",
"0.5779407",
"0.57596093",
"0.57299167",
"0.5727005",
"0.5723597",
"0.57071483",
"0.56273746",
"0.56186277",
"0.5615529",
"0.56047714",
"0.55918837",
"0.5528991",
"0.5525956",
"0.55078685",
"0.5496122",
"0.5496122",
"0.54668576",
"0.54451793",
"0.5427536",
"0.54119086",
"0.54016286",
"0.5385616",
"0.53851694"
] | 0.64907396 | 1 |
This function performs a variety of slicing operations based on the given task parameter. | def Slice(seq, task):
if task == 'swapfirstlast' and len(seq) >= 2:
seq = seq[-1:] + seq[1:-1] + seq[:1]
elif task == 'removeeveryother':
seq = seq[::2]
elif task == 'reverse':
seq = seq[::-1]
elif task == 'reorder':
onethird = (len(seq) / 3)
remainder = (len(seq) % 3)
seq = seq[onethird:onethird*2] + seq[0:onethird] + seq[(onethird + remainder) * -1:]
elif task == 'removefirstlastfoureveryother':
seq = seq[4:-4:2]
return seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simple_slice():\n examples = [\n benchmark.Example(\n inputs=[\n [[12, 34, 56, 78], [-1, -2, -3, -4]],\n -1,\n ],\n output=[[34, 56], [-2, -3]],\n ),\n ]\n constants = []\n description = 'Slice a tensor'\n target_program = 'in1[:, 1:in2]'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_slice')",
"def _task_filter(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")",
"def slice_trajectory(**kwargs):\n\tglobal gmxpaths\n\tif gmxpaths==None: gmxpaths = get_gmx_paths()\n\tcall = bash\n\t#---process kwargs\n\tstart,end,skip,sequence = [kwargs[k] for k in 'start end skip sequence'.split()]\n\ttpr_keyfinder,traj_keyfinder = kwargs['tpr_keyfinder'],kwargs['traj_keyfinder']\n\toutkey = kwargs['outkey']\n\tpostdir = kwargs['postdir']\n\toutput_format = kwargs.get('output_format','xtc')\n\tpbc = kwargs.get('pbc',None)\n\tgroup_fn = kwargs.get('group_fn',None)\n\n\t#---commands to create sub-slices\n\tsources = infer_parts_to_slice(start,end,skip,sequence)\n\tsn = sources[0][0][0]\n\tif sn=='membrane-v563':\n\t\tsources = infer_parts_to_slice_legacy(start,end,skip,sequence)\n\tgroup_flag = '' if not group_fn else ' -n '+group_fn\n\tpbc_flag = '' if not pbc else ' -pbc %s'%pbc\n\tcmdlist = []\n\tfor num,source in enumerate(sources):\n\t\tkeys,t0 = source\n\t\tsn = keys[0]\n\t\t#---get tpr exist use the previous one (or fail on first source)\n\t\ttry: \n\t\t\ttpr = tpr_keyfinder(*keys,strict=False)\n\t\texcept: \n\t\t\timport ipdb;ipdb.set_trace()\n\t\t\traise Exception('development error. could not locate a TPR: %s'%kwargs)\n\t\t#---assume cursor points to the trajectory we want\n\t\ttry: \n\t\t\ttraj = traj_keyfinder(*keys)\n\t\texcept Exception as e: \n\t\t\traise Exception('could not locate trajectory for %s,%s,%s'%keys+': %s'%e)\n\t\toutfile = 'trjconv%d.%s'%(num,output_format)\n\t\t\"\"\"\n\t\tnote on timestamps: if you ask for time beyond the end of a simulation, the slicer will fail with\n\t\tblank outputs from `gmx trjconv`. in one misadventure, the author misattributed this to problems\n\t\twith the interval of the samples, since the dt flag causes trjconv to only save frames with times\n\t\twhich are zero modulo dt, and copied the begin flag to t0 to fail through the problem silently. \n\t\ta better alternative is to treat trjconv failures more seriously and check the time stamps with\n\t\t`make look times`. the slicer is designed to ignore problems of jitter. if a new XTC starts on\n\t\ta non-even or non-integer time, the slicer should continue as normal and rely on dt to find the next\n\t\tvalid time. ... ???\n\t\t\"\"\"\n\t\ttail = ' -b %d -e %d -dt %d -s %s -f %s -o %s%s%s'%(\n\t\t\tt0 if t0>start else start,end,skip,tpr,traj,\n\t\t\toutfile,group_flag,pbc_flag)\n\t\tcmdlist.append((outfile,gmxpaths['trjconv']+tail))\n\n\t#---make a GRO file of the first frame for reference\n\tkeys,t0 = sources[0]\n\tsn,sub,fn = keys\n\ttraj = traj_keyfinder(*keys)\n\ttail = ' -dump %d -s %s -f %s -o %s.gro%s'%(start,tpr,traj,outkey,group_flag)\n\tif pbc != None: tail = tail + ' -pbc %s'%pbc\n\tbash(gmxpaths['trjconv']+tail,cwd=postdir,inpipe='0\\n'.encode())\n\n\t#---convert relevant trajectories\n\tstart = time.time()\n\tfor ii,(outfile,cmd) in enumerate(cmdlist):\n\t\tstatus('slicing trajectory',i=ii,looplen=len(cmdlist),start=start,tag='SLICE')\n\t\tbash(cmd,cwd=postdir,inpipe='0\\n'.encode())\n\t\n\t#---concatenate remaining steps with no errors\n\tvalid_parts = range(len(cmdlist))\n\tbash(gmxpaths['trjcat']+' -o %s.%s -f '%(outkey,output_format)+\n\t\t' '.join(list(zip(*cmdlist))[0]),cwd=postdir)\n\n\t#---delete extraneous files\n\t#---! consider using a temporary directory although it's nice to have things onsite\n\tfor outfile in list(zip(*cmdlist))[0]:\n\t\tos.remove(postdir+'/%s'%outfile)",
"def alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: \", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)",
"def slice2(self, vs=None,xs=None):\n return self.condition2(vs,xs)",
"def __getitem__(self, index: int | slice) -> Task | TaskList:\n if isinstance(index, slice):\n return TaskList(self._tasks[index])\n else:\n return self._tasks[index]",
"def infer_parts_to_slice(start,end,skip,sequence):\n\ttry:\n\t\t# protect from ambiguous step names which occurs when the time stamp starts at zero on a new step \n\t\t# ... and the original method cannot tell which step to use. typically the last step is the only \n\t\t# ... relevant one since preceding steps are usualy preparatory e.g. with restraints. users who wish\n\t\t# ... to have more control are welcome to code up something more specific. the slicer is due for an\n\t\t# ... overhaul anyway. for now, we just try to get the right sequence by restricting attention to\n\t\t# ... the last step. since the toc is sorted this is easy.\n\t\t# all steps have the same sn and they should be ordered from the toc so we filter by the last one\n\t\t#! note that this breaks v563. fixed it by linking things in and moving s01\n\t\tlast_step = sequence[-1][0][1]\n\t\tsequence_alt = [s for s in sequence if s[0][1]==last_step]\n\t\tslice_target = infer_parts_to_slice_legacy(start,end,skip,sequence_alt)\n\t# fall back to the original method\n\texcept Exception as e: \n\t\traise \n\t\tslice_target = infer_parts_to_slice_legacy(start,end,skip,sequence)\n\treturn slice_target",
"def sub(slice_left, slice_right):\n start = 0\n stop = 0\n if slice_left.start == slice_right.start:\n start = min(slice_left.stop, slice_right.stop)\n stop = max(slice_left.stop, slice_right.stop)\n if slice_left.stop == slice_right.stop:\n start = min(slice_left.start, slice_right.start)\n stop = max(slice_left.start, slice_right.start)\n\n return slice(start, stop)",
"def slice(self, pred):\n return NotImplemented",
"def slicer(seq, start=None, stop=None, step=None):\n return seq[start:stop:step]",
"def task_stagnant(task):",
"def create_slice(self,**kwargs):\n\t\n\t\tsn = kwargs['sn']\n\t\tstart = kwargs['start']\n\t\tend = kwargs['end']\n\t\tskip = kwargs['skip']\n\t\tgroup = kwargs['group']\n\t\tslice_name = kwargs['slice_name']\n\t\tpbc = kwargs['pbc'] if 'pbc' in kwargs else None\n\t\tpbc_suffix = '' if not pbc else '.pbc%s'%pbc\n\t\toutkey = '%s.%d-%d-%d.%s%s'%(self.prefixer(sn),start,end,skip,group,pbc_suffix)\n\t\tgrofile,trajfile = outkey+'.gro',outkey+'.'+self.trajectory_format\n\t\t#---make the slice only if necessary\n\t\tboth_there = all([os.path.isfile(self.postdir+fn) for fn in [grofile,trajfile]])\n\t\tself.slice(sn,part_name=self.trajectory_format)\n\t\tif both_there and slice_name in self.slice(sn) and group in self.slice(sn)[slice_name]: return\n\t\tif not both_there or not all([self.confirm_file(self.postdir+fn) for fn in [grofile,trajfile]]):\n\t\t\tstatus('making slice: %s'%outkey,tag='status')\n\t\t\t#---slice is not there or not confirmed so we make a new one here\n\t\t\tsequence = self.get_timeseries(sn,strict=False)\n\t\t\ttraj_toc = self.toc[self.cursor]\n\t\t\t#---assume the tpr part exists\n\t\t\ttpr_toc = self.toc[(self.c,'tpr')]\n\t\t\ttry:\n\t\t\t\t#---! note that we force xtc below and this needs a solution ASAP!\n\t\t\t\tslice_trajectory(start,end,skip,sequence,outkey,self.postdir,\n\t\t\t\t\ttpr_keyfinder=self.keyfinder((self.c,'tpr')),\n\t\t\t\t\ttraj_keyfinder=self.keyfinder((self.c,self.trajectory_format)),\n group_fn=self.groups[sn][group]['fn'],pbc=pbc)\n\t\t\texcept KeyboardInterrupt: raise Exception('[ERROR] cancelled by user')\n\t\t\texcept Exception as e:\n\t\t\t\t#---the following exception handler allows the code to continue to slice in case\n\t\t\t\t#---...of faulty data but it produces a large quantity of output including a full \n\t\t\t\t#---...traceback to the original exception which also tells you which log files to read\n\t\t\t\t#---...to diagnose the error. tested on faulty data. 
note that the calculator continues\n\t\t\t\t#---...but every time you run \"make compute\" it will hit the error until you solve it\n\t\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\t\tfname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n\t\t\t\tstatus('%s in %s at line %d'%(str(exc_type),fname,exc_tb.tb_lineno),tag='error')\n\t\t\t\tstatus('%s'%e,tag='error')\n\t\t\t\timport traceback\n\t\t\t\tstatus(re.sub('\\n','\\n[TRACEBACK] ',traceback.format_exc()),tag='traceback')\n\t\t\t\tstatus('failed to make slice: '+outkey,tag='error')\n\t\t\t\tif slice_name not in self.slice(sn): self.slice(sn)[slice_name] = {}\n\t\t\t\tself.slice(sn)[slice_name][group] = {'start':start,'end':end,'skip':skip,\n\t\t\t\t\t'group':group,'pbc':pbc,'verified':False,'filekey':outkey,\n\t\t\t\t\t'gro':grofile,self.trajectory_format:trajfile,'missing_frame_percent':100.}\n\t\t\t\tstatus('returning from this function but otherwise passing',tag='error')\t\t\t\n\t\t\t\treturn\n\t\tprint '[STATUS] checking timestamps of slice: %s'%outkey\n\t\t#---slice is made or preexisting and now we validate\n\t\ttimeseries = self.slice_timeseries(self.postdir+grofile,self.postdir+trajfile)\n\t\timport numpy as np\n\t\tmissing_frame_percent = 1.-len(np.arange(start,end+skip,skip))/float(len(timeseries))\n\t\tif len(timeseries)!=len(np.arange(start,end+skip,skip)): verified = False\n\t\telse:\n\t\t\ttry: verified = all(np.array(timeseries).astype(float)==\n\t\t\t\tnp.arange(start,end+skip,skip).astype(float))\n\t\t\texcept: verified = False\n\t\tif not verified: status('frame problems in %s'%outkey,tag='warning')\n\t\tif slice_name not in self.slice(sn): self.slice(sn)[slice_name] = {}\n\t\tself.slice(sn)[slice_name][group] = {'start':start,'end':end,'skip':skip,\n\t\t\t'group':group,'pbc':pbc,'verified':verified,'timeseries':timeseries,'filekey':outkey,\n\t\t\t'gro':grofile,self.trajectory_format:trajfile,'missing_frame_percent':missing_frame_percent}",
"def _special_handle_slice(cls, op, X, W):\n tensor_list = []\n # slice add starts, ends, axes, steps\n append_inputs = {\n \"starts\": op.starts,\n \"ends\": op.ends,\n \"axes\": op.axes,\n \"steps\": op.steps,\n }\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n tensor_list.append(\n numpy_helper.from_array(np.array(append_input), node_name))\n return tensor_list",
"def __peek_task(self, task, what, offset, size, **extra_args):\n return task.peek(what, offset, size, **extra_args)",
"def _slice(tensor, size, i):\n return tensor[:, i * size : (i + 1) * size]",
"def __getslice__(self,i,j):\n return self.x[i:j]",
"def slice(tensor):\n out = tensor[:, 444:524, :]\n return out",
"def _read_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass",
"def _read_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass",
"def subset_of_list(alist, n, t):\n if t < 1 or t > n:\n raise Exception(f't={t} is not accept, must be 1-N (include)')\n\n if n > len(alist): # if n is bigger than all list, return only 1 for t<=len\n if t <= len(alist):\n return [alist[t - 1]]\n else:\n return None\n\n m = int(len(alist) / n) # each task of a section of list\n\n start_index = int((t - 1) * m)\n if t == n:\n sublist = alist[start_index:]\n else:\n sublist = alist[start_index:start_index + m]\n # logger.debug(f'n={n}, t={t}, section={m}, index={start_index}:{start_index + m}')\n return sublist",
"def __get_slice(islice, isize):\n if islice[0] is None:\n if islice[1] is None:\n return slice(isize)\n else:\n return slice(islice[1])\n else:\n if islice[1] is None:\n return slice(islice[0], isize)\n else:\n return slice(islice[0], islice[1])",
"def slice2(self, cvars=None,ctuple=None):\n return self.condition2(cvars,ctuple)",
"def apply_slice(*, value : Any, slice : slice) -> Any:\n return value[slice]",
"def test_simple_slicing(self):\n class only_slice(object):\n def __getitem__(self, index):\n self.res = 'get', index.start, index.stop\n def __setitem__(self, index, value):\n self.res = 'set', index.start, index.stop, value\n def __delitem__(self, index):\n self.res = 'del', index.start, index.stop\n\n class mixed_slice(object):\n def __getitem__(self, index):\n if isinstance(index, slice):\n self.res = 'get', index.start, index.stop\n else:\n raise Exception()\n def __setitem__(self, index, value):\n if isinstance(index, slice):\n self.res = 'set', index.start, index.stop, value\n else:\n raise Exception()\n def __delitem__(self, index):\n if isinstance(index, slice):\n self.res = 'del', index.start, index.stop\n else:\n raise Exception()\n\n for mytype in [only_slice, mixed_slice]:\n x = mytype()\n x[:]\n self.assertEqual(x.res, ('get', None, None))\n\n x[0:]\n self.assertEqual(x.res, ('get', 0, None))\n\n x[1:]\n self.assertEqual(x.res, ('get', 1, None))\n\n x[:100]\n self.assertEqual(x.res, ('get', None, 100))\n\n x[:] = 2\n self.assertEqual(x.res, ('set', None, None, 2))\n\n x[0:] = 2\n self.assertEqual(x.res, ('set', 0, None, 2))\n\n x[1:] = 2\n self.assertEqual(x.res, ('set', 1, None, 2))\n\n x[:100] = 2\n self.assertEqual(x.res, ('set', None, 100, 2))\n\n del x[:]\n self.assertEqual(x.res, ('del', None, None))\n\n del x[0:]\n self.assertEqual(x.res, ('del', 0, None))\n\n del x[1:]\n self.assertEqual(x.res, ('del', 1, None))\n\n del x[:100]\n self.assertEqual(x.res, ('del', None, 100))",
"def getSlice(properties=None, **kw):",
"def __getitem__(sliceOrIdentifier):",
"def take_slice(img_3D, view):\n input_type = isinstance(img_3D, np.ndarray)\n if input_type:\n img_3D = [img_3D]\n img_shape = img_3D[0].shape\n if view == \"sag\":\n slice_pos = np.random.randint(int(0.2 * img_shape[0]), int(0.8 * img_shape[0]))\n imgs_2D = [imgg_3D[slice_pos, :, :] for imgg_3D in img_3D]\n elif view == \"cor\":\n slice_pos = np.random.randint(int(0.2 * img_shape[1]), int(0.8 * img_shape[1]))\n imgs_2D = [imgg_3D[:, slice_pos, :] for imgg_3D in img_3D]\n else:\n slice_pos = np.random.randint(int(0.2 * img_shape[2]), int(0.8 * img_shape[2]))\n imgs_2D = [imgg_3D[:, :, slice_pos] for imgg_3D in img_3D]\n # img_2D = np.expand_dims(img_2D, 2)\n if input_type:\n return imgs_2D[0]\n return imgs_2D",
"def slice_tensors(data, tensor_slice):\n\n def _slice_tensor(tensor, tensor_slice):\n return tensor[tensor_slice]\n\n return recursively_apply(_slice_tensor, data, tensor_slice)",
"def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)",
"def slice(ds, timedelta_input, timedelta_output, to_predict, stepwidth, input_sampling, output_sampling):\n\n inputs = []\n outputs = []\n\n start_input_frame = ds.index[0]\n while start_input_frame + timedelta_input + timedelta_output <= ds.index[-1]:\n\n end_input_frame = start_input_frame + timedelta_input\n end_output_frame = end_input_frame+timedelta_output\n\n input_frame = ds[start_input_frame:end_input_frame]\n output_frame = ds[end_input_frame:end_output_frame]\n\n input_frame = input_frame.resample(input_sampling)\n output_frame = output_frame.resample(output_sampling)\n\n for k in output_frame.keys():\n if k not in to_predict:\n del output_frame[k]\n\n input_shape = input_frame.shape\n output_shape = output_frame.shape\n\n inputs.append(input_frame.as_matrix().flatten())\n outputs.append(output_frame.as_matrix().flatten())\n\n #Move forward\n start_input_frame = start_input_frame + stepwidth\n\n\n return (inputs, input_shape), (outputs, output_shape)"
] | [
"0.6612952",
"0.57252514",
"0.55938196",
"0.5583914",
"0.55479294",
"0.5539142",
"0.54901874",
"0.54826033",
"0.5461477",
"0.5425997",
"0.53798646",
"0.53485817",
"0.5332608",
"0.5315734",
"0.5297336",
"0.5265292",
"0.5263417",
"0.52609426",
"0.52609426",
"0.5229619",
"0.520239",
"0.5200387",
"0.5171798",
"0.5150323",
"0.51487",
"0.5132622",
"0.51268715",
"0.5110136",
"0.5085191",
"0.507679"
] | 0.7156067 | 0 |
CDF for the skew-Gaussian. Large arguments are clipped for additional numerical safety. | def single_peak_cdf(x, mean, sigma, alpha):
z = np.clip((x-mean)/sigma, -10, 10)
return skewnorm.cdf(z, alpha) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gaussian_cdf(x, _erf=erf):\n return (1 + _erf(x / math.sqrt(2))) / 2",
"def cdf(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n return norm.cdf(X,parameters['mu'],sigma)",
"def std_norm_cdf(x):\r\n #Generalize for many x\r\n x = np.asarray(x).copy()\r\n cdf_x = np.zeros_like(x)\r\n N = x.size\r\n support_code = \"#include <math.h>\"\r\n code = \"\"\"\r\n\r\n double sign, t, erf;\r\n for (int i=0; i<N; i++){\r\n sign = 1.0;\r\n if (x[i] < 0.0){\r\n sign = -1.0;\r\n x[i] = -x[i];\r\n }\r\n x[i] = x[i]/sqrt(2.0);\r\n\r\n t = 1.0/(1.0 + 0.3275911*x[i]);\r\n\r\n erf = 1. - exp(-x[i]*x[i])*t*(0.254829592 + t*(-0.284496736 + t*(1.421413741 + t*(-1.453152027 + t*(1.061405429)))));\r\n\r\n //return_val = 0.5*(1.0 + sign*erf);\r\n cdf_x[i] = 0.5*(1.0 + sign*erf);\r\n }\r\n \"\"\"\r\n weave.inline(code, arg_names=['x', 'cdf_x', 'N'], support_code=support_code)\r\n return cdf_x",
"def std_cdf(x):\n return 0.5 + 0.5 * tt.erf(x / tt.sqrt(2.))",
"def std_cdf(x):\n return 0.5 + 0.5 * pt.erf(x / pt.sqrt(2.0))",
"def norm_cdf(mu, sigma, x):\n return 0.5 * (1 + math.erf((x - mu) / (sigma * math.sqrt(2.0))))",
"def normal_cdf(x: torch.Tensor) -> torch.Tensor:\n return torch.distributions.Normal(0, 1.0).cdf(x)",
"def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf",
"def _normal_distribution_cdf(x, stddev):\n return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))",
"def normal_cdf(x: float, mu: float = 0, sigma: float = 1) -> float:\n return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2",
"def cdf(self,x):\n return self.categoricalDist.cdf(x)",
"def approx_standard_normal_cdf(x):\n return 0.5 * (1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3))))",
"def cdf(self, value):\n return self._normal.cdf(value)",
"def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf",
"def cdfFunction(f, x, N):\r\n return ssstats.binom.cdf(x, N, f)",
"def peak_comb_cdf(x, mean, sigma, alpha, dt, n_samples = 6):\n res = np.zeros_like(x)\n for i in range(n_samples):\n z = np.clip((x - mean - i*dt)/sigma, -10, 10)\n res += skewnorm.cdf(z, alpha)\n return 1.0/n_samples * res",
"def norm_cdf(self, x):\n k = 1.0/(1.0+0.2316419*x)\n k_sum = k * (0.319381530 + k * (-0.356563782 + \\\n k * (1.781477937 + k * (-1.821255978 + 1.330274429 * k))))\n \n if x >= 0.0:\n return (1.0 - (1.0 / ((2 * pi)**0.5)) * exp(-0.5 * x * x) * k_sum)\n else:\n return 1.0 - self.norm_cdf(-x)",
"def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)",
"def test_pnorm_cdf():\n mu = np.array([[1.], [2.]])\n sigma = np.array([[2., 1.], [1., 3.]])\n\n lowerbound = np.pi/4\n upperbound = np.pi/2\n cdf = pnorm.cdf(lowerbound, upperbound, mu, sigma)\n\n cdf_ans = np.array([0.5066762601816892])\n assert np.allclose(cdf, cdf_ans)",
"def cdf(s, x):\r\n x = Basic.sympify(x)\r\n return (1+erf((x-s.mu)/(s.sigma*sqrt(2))))/2",
"def cohensd2problarger(d):\n\n return stats.norm.cdf(d / np.sqrt(2))",
"def make_conditional_density(bgm_fit, threshold, sigma, width):\n pass",
"def cdf(self,x):\n if self.method == 'spline':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'cdf not yet implemented for ' + self.method + ' method')\n return cdfValue",
"def cdf_discretize(self,variables=[]):\n #the errors in the code are due to the deleted files that require packages to be installed on the computer\n for i in variables:\n x=unique(self.data[:,i])\n m=max(x)-min(x)\n f=lambda x0,y0: array([m*(x0+y0)/(1+m**2), (x0*m+y0)/(1+m**2)])\n cdf=array([np.sum(self.data[:,i]<=t) for t in x])\n d=array([norm(array([x0,cdf[k]])-f(x0,cdf[k])) for k,x0 in\\\n enumerate(x)])",
"def ksdensity(data, width=0.3):\r\n def ksd(x_axis):\r\n def n_pdf(x, mu=5., sigma=3.): # normal pdf\r\n u = (x - mu) / abs(sigma)\r\n y = (1 / (np.sqrt(2 * np.pi) * abs(sigma)))\r\n y *= np.exp(-u * u / 2)\r\n return y\r\n prob = [n_pdf(x_i, data, width) for x_i in x_axis]\r\n pdf = [np.average(pr) for pr in prob] # each row is one x value\r\n return np.array(pdf)\r\n return ksd",
"def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):\n # Note that scipy weights its bandwidth by the covariance of the\n # input data. To make the results comparable to the other methods,\n # we divide the bandwidth by the sample standard deviation here.\n #kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)\n kde = gaussian_kde(x)\n return kde.evaluate(x_grid)",
"def cdf(self, x):\n from scipy.special import betainc\n sq_x = x * x\n return np.where(\n sq_x < 1., betainc(self.m / 2.0, self.n / 2.0, sq_x),\n np.ones_like(x))"
] | [
"0.65131265",
"0.6186629",
"0.6161622",
"0.6146133",
"0.6136695",
"0.6125919",
"0.6050933",
"0.60233885",
"0.60069877",
"0.59550256",
"0.5927398",
"0.5910854",
"0.5899473",
"0.5862627",
"0.58616513",
"0.5841404",
"0.5840815",
"0.58182883",
"0.5798016",
"0.5798016",
"0.57889545",
"0.5784495",
"0.57075727",
"0.56971276",
"0.56558603",
"0.56493765",
"0.56475335",
"0.5642766",
"0.56264687",
"0.5619588"
] | 0.63566905 | 1 |
Function to fit v distributions in the partial copula by beta(a,1) cdfs. | def fu_fit(fu, v):
popt, pcov = curve_fit(
lambda v, a : beta.cdf(v, a, 1),
v, fu,
p0 = (1,)
)
chi2 = np.sum((beta.cdf(v, *popt, 1) - fu)**2 / (len(v)-2))
res = np.zeros(len(popt)+1)
res[0:1] = popt
res[1] = math.sqrt(chi2)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def V_fit(x, a, b, c, d, e, f):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (a * x1 ** 2 + b * x1 + c)\n b = (d * x1 ** 2 + e * x1 + f)\n return m * x2 + b",
"def fitfunc_Coulomb_wF(a_x, a_p1, a_p2):\n \n # 4.27014423267 = sqrt(18.2341317678 fm^-2) (= sqrt(0.71 GeV^2))\n # 1.43996507808 = 197.327053 / 137.035999 (= hbar c * alpha)\n \n sqrt_b_r = 4.27014423267 * a_x\n factor = (2.0 - exp(-sqrt_b_r) * (2.0 + sqrt_b_r)) / 2.0\n \n return a_p1 * a_p2 * 1.43996507808 / a_x * factor",
"def fit(self, f: callable, a: float, b: float, d: int, maxtime: float) -> callable:\r\n\r\n # replace these lines with your solution\r\n initial_T = time.time()\r\n\r\n def get_points(f, a, b, n):\r\n x = np.linspace(a, b, n)\r\n y_lst = f(x)\r\n y = np.array(y_lst)\r\n\r\n return x, y\r\n\r\n def build_vector(x, y, size, deg):\r\n if deg == size:\r\n y_n = y.sum()\r\n else:\r\n y_n = np.dot(y, (x ** (size - deg)))\r\n pol1 = np.zeros(size + 1)\r\n for i in range(size + 1):\r\n pol1[i] = (x ** (2 * size - deg - i)).sum()\r\n return pol1, y_n\r\n\r\n def build_coeff_matrix(f, a, b, n, d):\r\n x, y = get_points(f, a, b, n)\r\n deg = d\r\n b = np.array([])\r\n coeff_matrix = np.array([])\r\n for i in range(d + 1):\r\n x_n, y_n = build_vector(x, y, d, i)\r\n deg -= 1\r\n coeff_matrix = np.append(coeff_matrix, x_n)\r\n b = np.append(b, y_n)\r\n coeff_matrix = np.reshape(coeff_matrix, (d + 1, d + 1))\r\n return coeff_matrix, b\r\n\r\n def solve_coeff_matrix(coeff_matrix, b):\r\n A_inverse = np.linalg.inv(coeff_matrix)\r\n coeffs = A_inverse.dot(b)\r\n return coeffs\r\n\r\n def build_function_from_coeffs(coeffs):\r\n f = np.poly1d(coeffs)\r\n return f\r\n\r\n n = 100 # fisrt sample size\r\n while (time.time() - initial_T) + 0.2 < maxtime: # while i still have time loop again\r\n Ax, B = build_coeff_matrix(f, a, b, n, d=d)\r\n if time.time() - initial_T + 0.2 >= maxtime: # first break point\r\n break\r\n coeffs = solve_coeff_matrix(coeff_matrix=Ax, b=B)\r\n if time.time() - initial_T + 0.2 >= maxtime: # second break point\r\n break\r\n result = build_function_from_coeffs(coeffs)\r\n if time.time() - initial_T + 0.2 >= maxtime: # third break point\r\n break\r\n n += 200 # increasing sample size\r\n\r\n return result",
"def _abc_fit(z, d, lambda0):\n nlfit, _nlpcov = np.polyfit(z, d**2, 2, cov=True)\n\n # unpack fitting parameters\n c, b, a = nlfit\n\n\n z0 = -b/(2*c)\n Theta = np.sqrt(c)\n disc = np.sqrt(4*a*c-b*b)/2\n M2 = np.pi/4/lambda0*disc\n d0 = disc / np.sqrt(c)\n zR = disc/c\n params = [d0, z0, Theta, M2, zR]\n\n# unpack uncertainties in fitting parameters from diagonal of covariance matrix\n#c_std, b_std, a_std = [np.sqrt(_nlpcov[j, j]) for j in range(nlfit.size)]\n#z0_std = z0*np.sqrt(b_std**2/b**2 + c_std**2/c**2)\n#d0_std = np.sqrt((4*c**2*a_std)**2 + (2*b*c*b_std)**2 + (b**2*c_std)**2) / (8*c**2*d0)\n#Theta_std = c_std/2/np.sqrt(c)\n#zR_std = np.sqrt(4*c**4*a_std**2 + b**2*c**2*b_std**2 + (b**2-2*a*c)**2*c_std**2)/(4*c**3) / zR\n#M2_std = np.pi**2 * np.sqrt(4*c**2*a_std**2 + b**2*b_std**2 + 4*a**2*c_std**2)/(64*lambda0**2) / M2\n#errors = [d0_std, z0_std, M2_std, Theta_std, zR_std]\n return params",
"def fit_mle(data, copula, marginals, opti_method='SLSQP', known_parameters=False):\n\n if copula.type == \"mixture\":\n print(\"estimation of mixture only available with CMLE try fit mle\")\n raise error\n \n if known_parameters == True:\n\n marg_cdf1 = lambda i : marginals[0][\"distribution\"].cdf(data[0][i], marginals[0][\"loc\"], marginals[0][\"scale\"]) \n marg_pdf1 = lambda i : marginals[0][\"distribution\"].pdf(data[0][i], marginals[0][\"loc\"], marginals[0][\"scale\"])\n\n marg_cdf2 = lambda i : marginals[1][\"distribution\"].cdf(data[1][i], marginals[1][\"loc\"], marginals[1][\"scale\"]) \n marg_pdf2 = lambda i : marginals[1][\"distribution\"].pdf(data[1][i], marginals[1][\"loc\"], marginals[1][\"scale\"]) \n\n logi = lambda i, theta: np.log(copula.get_pdf(marg_cdf1(i),marg_cdf2(i),[theta]))+np.log(marg_pdf1(i)) +np.log(marg_pdf2(i))\n log_likelihood = lambda theta: -sum([logi(i, theta) for i in range(0,len(data[0]))])\n\n results = minimize(log_likelihood, copula.parameters_start, method=opti_method, )# options={'maxiter': 300})#.x[0]\n\n else:\n marg_cdf1 = lambda i, loc, scale : marginals[0][\"distribution\"].cdf(data[0][i], loc, scale) \n marg_pdf1 = lambda i, loc, scale : marginals[0][\"distribution\"].pdf(data[0][i], loc, scale)\n\n marg_cdf2 = lambda i, loc, scale : marginals[1][\"distribution\"].cdf(data[1][i], loc, scale) \n marg_pdf2 = lambda i, loc, scale : marginals[1][\"distribution\"].pdf(data[1][i], loc, scale) \n\n logi = lambda i, theta, loc1, scale1, loc2, scale2: \\\n np.log(copula.get_pdf(marg_cdf1(i, loc1, scale1),marg_cdf2(i, loc2, scale2),[theta])) \\\n + np.log(marg_pdf1(i, loc1, scale1)) +np.log(marg_pdf2(i, loc2, scale2))\n \n def log_likelihood(params):\n theta, loc1, scale1, loc2, scale2 = params\n return -sum([logi(i, theta, loc1, scale1, loc2, scale2) for i in range(0,len(data[0]))])\n\n results = minimize(log_likelihood, (copula.parameters_start, np.array(0), np.array(1), np.array(0), np.array(1)), method=opti_method, )# options={'maxiter': 300})#.x[0]\n\n print(\"method:\", opti_method, \"- success:\", results.success, \":\", results.message)\n if results.success == True:\n return results.x\n\n print(\"Optimization failed\")\n return None",
"def vWBRfit(field, percentage, DNAvals=np.linspace(100,50000,100),\n dataset = datasets['vertical'], mu_func = mu_funcs['vertical'],\n method = 'linear', replNANs = True, plot=True):\n mu = np.zeros(len(DNAvals))\n vWBR = lambda L, muS, muL, gamma: (1/muS+(1/muL-1/muS)*(1-np.exp(-L/gamma)))**-1\n for i, Li in enumerate(DNAvals):\n mu[i] = size_to_mobility(Li, field, percentage, mu_func,\n dataset, method, replNANs)\n def residuals(pars, L, mu):\n return mu - vWBR(L, *pars)\n muS0 = 3.5E-4 # cm^2/(V.sec) ############################################\n muL0 = 1.0E-4 # cm^2/(V.sec) ############################################\n gamma0 = 8000 # bp ############################################\n pars, cov, infodict, mesg, ier = leastsq(residuals, [muS0,muL0,gamma0],\n args=(DNAvals, mu),\n full_output=True)\n muS, muL, gamma = pars\n #print ('E=%.2f V/cm, T=%.1f %%, muS=%.3e, muL=%.3e cm^2/(V.s), gamma=%s bp'\n # %(field, percentage, muS, muL, gamma))\n if plot:\n DNAmin = min(DNAvals)\n DNAmax = max(DNAvals)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(DNAvals, mu*1E4, color='blue')\n ax.plot(DNAvals, vWBR(DNAvals, muS, muL, gamma)*1E4, label='fit',\n linestyle='--', color='red')\n ax.set_xlim(DNAmin-0.1*DNAmin, DNAmax+0.1*DNAmax)\n ax.set_ylim((min(mu)-0.1*min(mu))*1E4, (max(mu)+0.1*max(mu))*1E4)\n ax.set_xscale('log')\n ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))\n ax.tick_params(which='both', top='off', right='off')\n ax.set_xlabel('$\\mathrm{DNA\\,length\\,(bp)}$', fontsize=14)\n ax.set_ylabel(r'$\\mu\\times{10^{8}}\\,(\\mathrm{m^{2}/V\\cdot s})$',\n fontsize=14)\n ax.set_title('$\\mu_S=%.2e,\\,\\mu_L=%.2e\\,\\mathrm{cm^2/(V.s)},\\,\\gamma=%d \\mathrm{bp}$'\n %(muS, muL, gamma))\n ax.legend().draggable()\n plt.show()\n return pars, cov, infodict, mesg, ier",
"def _sedov_calc_beta(v, gamma, nu):\n\n beta = (nu + 2.0) * (gamma + 1.0) * np.array(\n (0.25, (gamma / (gamma - 1)) * 0.5, -(2.0 + nu * (gamma - 1.0)) / 2.0 /\n ((nu + 2.0) * (gamma + 1.0) - 2.0 *\n (2.0 + nu * (gamma - 1.0))), -0.5 / (gamma - 1.0)))\n\n beta = np.outer(beta, v)\n\n beta += (gamma + 1.0) * np.array(\n (0.0, -1.0 / (gamma - 1.0), (nu + 2.0) /\n ((nu + 2.0) * (gamma + 1.0) - 2.0 *\n (2.0 + nu * (gamma - 1.0))), 1.0 / (gamma - 1.0))).reshape((4, 1))\n\n return beta",
"def test_param_cov(self, fitter):\n fitter = fitter()\n\n a = 2\n b = 100\n\n with NumpyRNGContext(_RANDOM_SEED):\n x = np.linspace(0, 1, 100)\n # y scatter is amplitude ~1 to make sure covariance is\n # non-negligible\n y = x * a + b + np.random.randn(len(x))\n\n # first compute the ordinary least squares covariance matrix\n X = np.vstack([x, np.ones(len(x))]).T\n beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)\n s2 = np.sum((y - np.matmul(X, beta).ravel()) ** 2) / (len(y) - len(beta))\n olscov = np.linalg.inv(np.matmul(X.T, X)) * s2\n\n # now do the non-linear least squares fit\n mod = models.Linear1D(a, b)\n\n with pytest.warns(AstropyUserWarning, match=r\"Model is linear in parameters\"):\n fmod = fitter(mod, x, y)\n\n assert_allclose(fmod.parameters, beta.ravel())\n assert_allclose(olscov, fitter.fit_info[\"param_cov\"])",
"def _fv(self):\n return self.beta * (self.x ** self.c)",
"def svm_fit(self, c: float = 1.0):\r\n self.svmModel = SVC(C=c, gamma='auto').fit(self.x, self.y)",
"def test_param_cov_with_uncertainties(self, fitter):\n fitter = fitter()\n\n a = 2\n b = 100\n\n with NumpyRNGContext(_RANDOM_SEED):\n x = np.linspace(0, 1, 100)\n # y scatter is amplitude ~1 to make sure covariance is\n # non-negligible\n y = x * a + b + np.random.normal(size=len(x))\n sigma = np.random.normal(loc=1, scale=0.1, size=len(x))\n\n # compute the ordinary least squares covariance matrix\n # accounting for measurement uncertainties `sigma`\n X = np.vstack([x, np.ones(len(x))]).T\n inv_N = np.linalg.inv(np.diag(sigma) ** 2)\n cov = np.linalg.inv(X.T @ inv_N @ X)\n beta = cov @ X.T @ inv_N @ y.T\n\n # now do the non-linear least squares fit\n mod = models.Linear1D(a, b)\n\n with pytest.warns(AstropyUserWarning, match=r\"Model is linear in parameters\"):\n fmod = fitter(mod, x, y, weights=sigma**-1)\n\n assert_allclose(fmod.parameters, beta.ravel())\n assert_allclose(cov, fitter.fit_info[\"param_cov\"])",
"def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if (unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores",
"def gradient(init_par, alpha, delta, obs, sigma_obs, ccoef, N):\n\n\n\t## Initial parameters\n\n\tparallax, v, sigma_v = init_par[:-4], init_par[-4:-1], init_par[-1] \n\tplx_obs, mualpha_obs, mudelta_obs = obs[:, 0], obs[:, 1], obs[:, 2]\n\n\t### Define normal triad and proper motions\n\tp, q, r = normalTriad(alpha, delta)\n\tmualpha_mod = np.dot(np.transpose(p),v)*parallax/_A\n\tmudelta_mod = np.dot(np.transpose(q),v)*parallax/_A\n\t\n\tplx_mod, mualpha_mod, mudelta_mod = parallax, mualpha_mod, mudelta_mod\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\n\ta,like, expo, detD = np.ones(N),np.ones(N),np.ones(N), np.ones(N) \n\n\t### Eq. 8 in Lindegren+2000 (Covariance Matrix)\n\tC = np.zeros((3,3,N),dtype=np.float64)\n\tC[0,0,:],C[1,1,:],C[2,2,:] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\n\tcorr_coefficient_plx_mualpha, corr_coefficient_plx_mudelta, corr_coefficient_mualpha_mudelta = np.zeros(N), np.zeros(N), np.zeros(N)\n\tcorr_coefficient_plx_mualpha[:], corr_coefficient_plx_mudelta[:], corr_coefficient_mualpha_mudelta[:] = ccoef[:, 0], ccoef[:, 1], ccoef[:, 2] \n\t\n\tC[0,1,:], C[0,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta\n\tC[1,0,:], C[1,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tC[2,0,:], C[2,1,:] = corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\n\t### Eq. 16 in Lindegren+2000 (Definition of D matrix)\t\n\tE = np.zeros((3,3,N),dtype=np.float64)\n\tE[1,1,:],E[2,2,:] = (sigma_v*parallax[:]/_A)**2., (sigma_v*parallax[:]/_A)**2.\n\tD,invD = np.zeros((3,3,N),dtype=np.float64),np.zeros((3,3,N),dtype=np.float64)\n\tD = np.add(E,C)\n\tfor i in range(N):\n\t\tdetD[i] = matrix_det(D[:,:,i]) \n\t\tinvD[:,:,i] = matrix_inv(D[:,:,i])\n\t\t\n\t\n\ta_c = np.ones((3,N))\n\ta_c = [plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod]\n\t\n\t### First derivatives in Eq. A3 \n\tcprime_pi, cprime_vx, cprime_vy, cprime_vz, = np.ones((3,N)), np.ones((3,N)), \\\n\t\t\t\t\t\t\tnp.ones((3,N)), np.ones((3,N)), \n\tcprime_pi[0,:] = 1.\n\tcprime_pi[1,:] = np.dot(np.transpose(p),v)/_A\n\tcprime_pi[2,:] = np.dot(np.transpose(q),v)/_A\n\t\n\tcprime_vx[0,:] = 0.\n\tcprime_vx[1,:] = -np.sin(alpha)*plx_mod/_A \n\tcprime_vx[2,:] = -np.sin(delta)*np.cos(alpha)*plx_mod/_A\n\n\t\n\tcprime_vy[0,:] = 0.\n\tcprime_vy[1,:] = np.cos(alpha)*plx_mod/_A \n\tcprime_vy[2,:] = -np.sin(delta)*np.sin(alpha)*plx_mod/_A\n\n\tcprime_vz[0,:] = 0.\n\tcprime_vz[1,:] = 0. \n\tcprime_vz[2,:] = np.cos(delta)*plx_mod/_A\n\n\tdlnd_dpi, dlnd_dsigmav = np.zeros(N), np.zeros(N)\n\tde_dpi, de_dsigmav = np.zeros(N), np.zeros(N)\n\t\n\n\t### See Eq. A5 \n\tde_dpi[:] = ((sigma_v/_A)**2.)*2.*plx_mod[:]\n\tde_dsigmav[:] = ((plx_mod[:]/_A)**2.)*2.*sigma_v\n\t\n\tdlnd_dpi[:] = (invD[1,1,:] + invD[2,2,:])*de_dpi[:] \n\tdlnd_dsigmav[:] = (invD[1,1,:] + invD[2,2,:])*de_dsigmav[:]\n\t\n\t\n\t\n\t### See Eq. 
A6\n\tdG_dpi, dG_dsigmav = np.zeros((3,3,N)), np.zeros((3,3,N)) \n\t\n\tdG_dpi[0,0,:], dG_dpi[0,1,:], dG_dpi[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[1,0,:], dG_dpi[1,1,:], dG_dpi[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[2,0,:], dG_dpi[2,1,:], dG_dpi[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dpi[:]\n\t\n\n\tdG_dsigmav[0,0,:], dG_dsigmav[0,1,:], dG_dsigmav[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[1,0,:], dG_dsigmav[1,1,:], dG_dsigmav[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[2,0,:], dG_dsigmav[2,1,:], dG_dsigmav[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dsigmav[:]\n\n\tf_dpi = np.zeros((N), dtype=np.float64) \n\t\n\t\n\tfor i in range(N):\n\t\tf_dpi_1, f_dpi_3 = 0., 0.0 \n\t\tfor ia in range(3):\n\t\t\tfor ib in range(3):\n\t\t\t\tf_dpi_1 += invD[ia,ib,i]*cprime_pi[ia,i]*a_c[ib][i]\n\t\t\t\tf_dpi_3 += (-0.5)*(dG_dpi[ia,ib,i]*a_c[ia][i]*a_c[ib][i])\n\t\t\t\t\t\n\t\tf_dpi_2 = (-0.5)*dlnd_dpi[i]\n\t\tf_dpi[i] = f_dpi_1 + f_dpi_2 + f_dpi_3\n\t\t\n\n\tf_vx, f_vy, f_vz, f_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) \n\n\tf_vx = np.sum(invD[0,0,:]*cprime_vx[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vx[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vx[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vx[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vx[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vx[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vx[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vx[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vx[2,:]*a_c[2][:])\n\t\n\tf_vy = np.sum(invD[0,0,:]*cprime_vy[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vy[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vy[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vy[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vy[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vy[1][:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vy[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vy[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vy[2,:]*a_c[2][:])\n\n\tf_vz = np.sum(invD[0,0,:]*cprime_vz[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vz[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vz[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vz[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vz[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vz[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vz[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vz[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vz[2,:]*a_c[2][:])\n\t\n\tf_sigmav = 
np.sum(-0.5*(dG_dsigmav[0,0,:]*a_c[0][:]*a_c[0][:] + dG_dsigmav[0,1,:]*a_c[1][:]*a_c[0][:]+ dG_dsigmav[0,2,:]*a_c[2][:]*a_c[0][:] + \\\n\t\t dG_dsigmav[1,0,i]*a_c[1][:]*a_c[0][:] + dG_dsigmav[1,1,:]*a_c[1][:]*a_c[1][:]+ dG_dsigmav[1,2,:]*a_c[1][:]*a_c[2][:] + \t\n\t\t dG_dsigmav[2,0,i]*a_c[2][:]*a_c[0][:] + dG_dsigmav[2,1,:]*a_c[2][:]*a_c[1][:]+ dG_dsigmav[2,2,:]*a_c[2][:]*a_c[2][:]))\n\t\n\n\tf_sigmav = f_sigmav - 0.5*np.sum(dlnd_dsigmav)\t\n\tf = np.concatenate((f_dpi, np.array([f_vx, f_vy, f_vz, f_sigmav]))) ### Grad L(theta), see Eq. 17\n\treturn -2.*f \t\t\t\t\t\t ### Grad U(theta), see Eq. 18",
"def _dncb_pdf(x, a1, a2, mu1, mu2):\n out = st.beta.pdf(x, a1, a2, loc=0)\n out *= np.exp(-mu1-mu2)\n out *= hchg(x, a1, a2, mu1, mu2)\n return out",
"def init_fva_constraints(mod_, opt_frac=0.1, pfba_fact=1.5, verbose=True):\n if verbose==True:\n print(\"...constraining the base cobra model with FVA + pfba constraint\")\n mod = mod_.copy()\n fva_df = flux_variability_analysis(mod, fraction_of_optimum=opt_frac, pfba_factor=pfba_fact)\n for rxn, row in fva_df.iterrows():\n if abs(row[\"maximum\"] - row[\"minimum\"]) > 1e-09:\n mod.reactions.get_by_id(rxn).lower_bound = row[\"minimum\"]\n mod.reactions.get_by_id(rxn).upper_bound = row[\"maximum\"]\n return mod, fva_df",
"def brody_cdf(s: fArr, beta: float) -> fArr:\n b1 = beta + 1\n alpha = gamma((beta + 2) / b1) ** b1\n return 1 - np.exp(-alpha * s ** b1) # type: ignore",
"def our_own_bvp_solve(f, a, b, n, y0, dim, bc, tol=1e-2):\n\n # interpolate the initial guess function y0 on Chebyshev points of the first kind\n cf0 = []\n for y0_i in y0:\n for thing in np.polynomial.chebyshev.Chebyshev(np.zeros(n), (a, b)).interpolate(y0_i, n, (a, b)):\n cf0.append(thing)\n\n solution = root(lambda u: fun(u, a, b, dim, n, f, bc), cf0, method='lm', tol=tol)\n if not solution.success:\n print('root finding failed')\n\n cf = solution.x\n cf = cf.reshape((dim, cf.size // dim))\n\n return [np.polynomial.chebyshev.Chebyshev(cf[i], (a, b)) for i in range(dim)]",
"def partial_fit(self, X, y=...):\n ...",
"def partial_fit(self, X, y=...):\n ...",
"def fit(x, a, p, b):\n return a * (p ** x) + b",
"def _fit_svd(self, X, y, alpha=0.0):\n \n # Decompose X into U, s, and Vt\n U, s, Vt = np.linalg.svd(X, full_matrices=False)\n \n d = s / (s[:, np.newaxis].T ** 2 + alpha)\n # Calculate the coefficients minimizing the MSE with a penalty of\n # alpha on the l2 norm of the coefficients.\n self.beta_hat = np.dot(d * U.T.dot(y), Vt).T\n self.beta_hat = np.dot(d*np.dot(np.transpose(U), y), Vt).reshape(1, \n -1)[0]\n \n return",
"def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return",
"def biphasic_fit_function(x, a, b, c, d, e, f):\n term1 = 1 + (a + (1 - a)/(1 + (x * (10 ** b)) ** c))\n term2 = 1 + (d + (1 - d)/(1 + (x * (10 ** e)) ** f))\n\n biphasic_function = 2 ** (0.5 * (np.log2(term1) + np.log2(term2))) - 1\n return biphasic_function",
"def form(func, dist_list, init_search_point, alg):\n \n def SLSQP(func, dist_list, init_search_point):\n \n dim = len(dist_list)\n current_beta = 0\n new_beta = 1\n sig = np.empty((1, dim))\n mu = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n def f_l(x_l):\n return(func([x_l[i,:]*sig[0,i] + mu[0,i] for i in range(0, dim)]))\n \n while abs(current_beta-new_beta) > 0.001:\n current_search_point = new_search_point\n current_beta = new_beta\n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n mu[0,i], sig[0, i] = Rosenblatt_Transform(dist_list[i][0], current_search_point[0,i])\n else:\n mu[0,i], sig[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n dist_fun = lambda u: np.linalg.norm(u) \n \n alg = 'SLSQP'\n \n H = lambda u: f_l(u)\n cons = ({'type': 'eq', 'fun': lambda u: -(H(u.reshape(-1,1)))})\n \n result = scipy.optimize.minimize(dist_fun, x0 = current_search_point, constraints = cons, method=alg)\n \n new_beta = result.fun\n u = np.array(result.x).reshape((1,dim))\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = mu[0,i] + u[0,i]*sig[0,i]\n \n beta_value = new_beta \n p_f = sst.norm.cdf(-beta_value)\n iterations = result.nit\n u = result.x\n x = u[:]*sig[0,:] + mu[0,:]\n print(x)\n grad_val = scipy.optimize.approx_fprime(x, func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(sig[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*sig[0,i]/np.sqrt(sum1) \n \n print('------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: slsqp solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('------------------------')\n \n return(beta_value, p_f, x, u, mu, sig, cosines) \n \n def HL_R(func, dist_list, init_search_point):\n \n iterations = 0\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n while abs(cur_beta - new_beta) > 0.001:\n cur_beta = new_beta\n cur_cosines = np.zeros((1, dim))\n new_cosines = np.ones((1, dim))\n \n while max((abs(cur_cosines - new_cosines))[0]) > 0.005:\n \n cur_cosines = new_cosines\n \n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0, i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0, i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n \n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(global_std_arr[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*global_std_arr[0,i]/np.sqrt(sum1)\n \n new_cosines = cosines\n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = global_mean_arr[0,i] - new_cosines[0,i]*global_std_arr[0,i]*cur_beta\n \n iterations = iterations + 1\n \n \n B = Symbol('B')\n coordinates = []\n for i in range(0, dim):\n coordinates.append(global_mean_arr[0, i] - new_cosines[0,i]*global_std_arr[0, i]*B)\n new_beta = float(solve(func(coordinates), B)[0])\n \n cosines = new_cosines \n beta_value = new_beta\n p_f = sst.norm.cdf(-new_beta)\n x = 
new_search_point\n u = (x[0,:] - global_mean_arr[0,:])/global_std_arr\n \n print('-------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: HL-R solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('-------------------------')\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines)\n \n def HL_RF(func, dist_list, init_search_point):\n\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n\n new_search_point = np.array(init_search_point).reshape((1, dim))\n iterations = 0\n while abs(cur_beta - new_beta) > 0.001 and abs(func(new_search_point[0])) > 0.001:\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n cur_beta = new_beta\n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0,i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0,i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n f_val = func(cur_search_point[0])\n \n x_ast = np.empty((1, dim))\n for i in range(0, dim):\n x_ast[0,i] =(cur_search_point[0,i] - global_mean_arr[0,i])/global_std_arr[0,i]\n\n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.000001)\n grad_val = grad_val.reshape((1, dim)) \n \n grad_val_ast = np.empty(grad_val.shape)\n for i in range(0, dim):\n grad_val_ast[0,i] = grad_val[0,i]*global_std_arr[0,i]\n \n t1 = 1/np.sum(grad_val_ast[0,:]**2)\n\n t2 = sum(grad_val_ast[0,:]*x_ast[0,:]) - f_val\n \n t3 = t1*t2\n \n new_x_ast = np.empty(x_ast.shape)\n for i in range(0, dim):\n new_x_ast[0,i] = t3*grad_val_ast[0,i]\n u = new_x_ast\n new_beta = np.linalg.norm(new_x_ast)\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = new_x_ast[0,i]*global_std_arr[0,i] + global_mean_arr[0,i]\n iterations = iterations + 1\n \n grad_val_ast_sum = sum(grad_val_ast[0,:]**2)\n cosines = grad_val_ast/(grad_val_ast_sum**0.5)\n beta_value = new_beta\n x = new_search_point\n p_f = sst.norm.cdf(-beta_value)\n \n print('-------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: HL-RF solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('-------------------------')\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines)\n \n if alg == 'slsqp':\n return(SLSQP(func, dist_list, init_search_point))\n elif alg == 'HL-R':\n return(HL_R(func, dist_list, init_search_point))\n elif alg == 'HL-RF':\n return(HL_RF(func, dist_list, init_search_point))",
"def compute_bce(num_bidders, max_value, value_pdf, num_samples=100, max_trials=20, random_seed=1232, solver_str='COIN'):\r\n\r\n status = 0 # whether the LP was feasible\r\n trials = 0 # counter on sampling trials\r\n random.seed(random_seed) # seed the random generator\r\n while status != 1 and trials < max_trials: # while we have not found a feasible LP\r\n\r\n prob = plp.LpProblem(\"BCE\", plp.LpMinimize) # create an LP instance\r\n\r\n # Sample a set of bid vectors to try to create a BCE with just them\r\n bid_vectors = product(range(max_value + 1), repeat=num_bidders)\r\n pool = tuple(bid_vectors)\r\n indices = random.sample(range(len(pool)), min(num_samples, len(pool)))\r\n sampled_bid_vectors = set([pool[i] for i in indices])\r\n\r\n # Variables are of the form (v,b) where v is a value in {0,...,max_value}\r\n # and b is a bid vector in the sampled set\r\n lp_var_keys = product(*[range(max_value + 1), sampled_bid_vectors])\r\n\r\n # Create the psi variables which correspond to Pr[b | v] for each (v,b)\r\n # pair\r\n psi_vars = plp.LpVariable.dicts('psi', lp_var_keys, lowBound=0)\r\n\r\n # Creating the best response constraints\r\n devs = product(\r\n *[range(max_value + 1), range(max_value + 1), range(0, num_bidders)])\r\n for (cur_bid, dev_bid, bidder_id) in devs: # for all b_i*, b_i', i\r\n if cur_bid != dev_bid: # if b_i* \\neq b_i'\r\n # Create all the terms of the form Pr[b | v] * pi(v) * (U_i' - U_i)\r\n # for all b \\in S, such that b_i = b_i*\r\n dev_terms = [\r\n deviation_term(dev_bid, bidder_id, v, bids,\r\n psi_vars[(v, bids)], value_pdf[v])\r\n for (v, bids) in product(*[range(max_value + 1), sampled_bid_vectors])\r\n if bids[bidder_id] == cur_bid]\r\n # Add these terms to create the best response constraint\r\n prob += plp.lpSum(dev_terms) <= 0, \"Dev_{}_{}_{}\".format(\r\n cur_bid, dev_bid, bidder_id)\r\n\r\n # Constraint that Pr[b | v] is a distribution for each fixed v\r\n for value in range(max_value + 1):\r\n cond_vars = [psi_vars[(value, bids)]\r\n for bids in sampled_bid_vectors]\r\n prob += plp.lpSum(cond_vars) == 1, \"Density_Psi_{}\".format(value)\r\n\r\n # Objective coefficients are random numbers based on the seed\r\n np.random.seed(random_seed)\r\n prob += plp.lpSum([np.random.standard_normal(1) *\r\n var for var in psi_vars.values()])\r\n\r\n # Solve LP\r\n prob.solve(get_solver(solver_str))\r\n\r\n # Get the status returned by the solver. 1 means success\r\n status = int(prob.status)\r\n\r\n # Increase the trial counter\r\n trials += 1\r\n\r\n if status == 1:\r\n # Once we have found a BCE, compute the marginal bid vector\r\n # distribution\r\n bid_pdf = {}\r\n for bid_vector in sampled_bid_vectors:\r\n # Compute the probability of the bid vector: sum_{v} Pr[b | v] *\r\n # pi(v)\r\n prob_mass = sum([plp.value(psi_vars[(v, bid_vector)]) * value_pdf[v]\r\n for v in range(max_value + 1)])\r\n # If mass is positive add it to the bid_pdf dictionary\r\n if prob_mass > 0:\r\n bid_pdf[bid_vector] = prob_mass\r\n\r\n return bid_pdf\r\n else:\r\n return None",
"def test_fit(self):\n\n # Generate data with known parameters\n a, loc, scale = 1.0, 3.0, 5.0\n data = gamma.rvs(a, loc, scale, size=100000)\n\n # Fit the model and check parameters\n copula = GammaUnivariate()\n copula.fit(data)\n self.assertAlmostEqual(copula.a, a, places=1)\n self.assertAlmostEqual(copula.loc, loc, places=1)\n self.assertAlmostEqual(copula.scale, scale, places=1)",
"def _fit(self, init_step_size, y):\n step_size = np.zeros(self.max_iter + 1)\n beta = np.zeros((self.max_iter + 1, self.d))\n theta = np.zeros((self.max_iter + 1, self.d))\n beta_mask = np.zeros((self.max_iter + 1, self.n))\n theta_mask = np.zeros((self.max_iter + 1, self.n))\n grad_beta = np.zeros((self.max_iter + 1, self.d))\n grad_theta = np.zeros((self.max_iter + 1, self.d))\n norm_grad_beta = np.zeros(self.max_iter + 1)\n norm_grad_theta = np.zeros(self.max_iter + 1)\n\n step_size[0] = init_step_size\n grad_beta[0, :], beta_mask[0, :] = self._gradient(beta[0, :], y)\n grad_theta[0, :], theta_mask[0, :] = self._gradient(theta[0, :], y)\n norm_grad_beta[0] = np.linalg.norm(grad_beta[0, :])\n norm_grad_theta[0] = np.linalg.norm(grad_theta[0, :])\n for t in range(self.max_iter):\n if self.verbose:\n print(\"ITERATION {}\".format(t))\n\n step_size[t + 1], beta_mask[t + 1, :] = self._backtracking(step_size[t], theta[t], theta_mask[t, :], grad_theta[t, :], norm_grad_theta[t], y)\n beta[t + 1, :] = theta[t, :] - step_size[t + 1]*grad_theta[t, :]\n theta[t + 1, :] = beta[t + 1, :] + t/(t + 3)*(beta[t + 1, :] - beta[t, :])\n\n grad_beta[t + 1, :], _ = self._gradient(beta[t + 1, :], y, mask=beta_mask[t + 1, :])\n grad_theta[t + 1, :], theta_mask[t + 1, :] = self._gradient(theta[t + 1, :], y)\n norm_grad_beta[t + 1] = np.linalg.norm(grad_beta[t + 1, :])\n norm_grad_theta[t + 1] = np.linalg.norm(grad_theta[t + 1, :])\n\n if norm_grad_beta[t + 1] <= self.epsilon:\n break\n\n if self.verbose and t + 1 == self.max_iter:\n print(\"Maximum iterations reached\")\n\n return beta[t + 1, :]",
"def cdf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n if x < 0:\n return mp.zero\n if x > 1:\n return mp.one\n return mp.betainc(a, b, x1=0, x2=x, regularized=True)",
"def jacobian_fitznagumo(v, w, a, b, tau, I):\n return np.array([[- 3 * v**2 + 1 , -1],\n [1/tau, -b/tau]])",
"def _calc_vanmarcke1975_ccdf(n, a):\n args = numba.carray(a, n)\n x = args[0]\n num_zero_crossings = args[1]\n bandwidth_eff = args[2]\n\n return (1 - (1 - np.exp(-x ** 2 / 2)) * np.exp(-1 * num_zero_crossings * (\n 1 - np.exp(-1 * np.sqrt(np.pi / 2) * bandwidth_eff * x)) /\n (np.exp(x ** 2 / 2) - 1)))"
] | [
"0.5953164",
"0.5694468",
"0.5640919",
"0.56255734",
"0.5607747",
"0.55408704",
"0.551601",
"0.54907",
"0.5482917",
"0.5444024",
"0.54390156",
"0.5406749",
"0.5400015",
"0.53816324",
"0.53800315",
"0.5361456",
"0.5336376",
"0.5291689",
"0.5291689",
"0.5280807",
"0.5275895",
"0.5274033",
"0.52714396",
"0.52674",
"0.5265126",
"0.52224386",
"0.52153623",
"0.5199062",
"0.5198235",
"0.5186114"
] | 0.6380507 | 0 |
This is the periodic component for u in , with peaks derived from the margin fit, shift, and first shape parameter of the beta kernel. | def beta_periodic(u, shift, n, peaks):
res = np.zeros_like(u)
safe_n = min(50, max(1.1,n))
safe_shift = min(0.05, max(-0.05, shift))
size = np.mean(np.diff(peaks))
for m in peaks:
z = (u - m + safe_shift)/size
indices = np.abs(z) < 0.5
res[indices] += 0.5 * (beta.pdf(0.5 + z[indices], safe_n, safe_n) + beta.pdf(0.5 + z[indices], 5, 5))
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, u):\n # get index of grid point left of u\n index = self.get_index(u)\n # get current controlpoints\n current_controlpoints = self.get_controlpoints(index)\n # setup matrix to store the values in the de Boor array:\n # deBoorvalues =\n # d[I-2, I-1, I]\n # d[I-1, I, I+1] d[u, I-1, I]\n # d[I, I+1, I+2] d[u, I, I+1] d[u, u, I]\n # d[I+1, I+2, I+3] d[u, I+1, I+2] d[u, u, I+1] d[u, u, u]\n deBoorvalues = scipy.column_stack((current_controlpoints,\n scipy.zeros((4, 6))))\n # calculate values for de Boor array\n for i in range(1, 4):\n for j in range(1, i + 1):\n leftmostknot = index + i - 3 # current leftmost knot\n rightmostknot = leftmostknot + 4 - j # current rightmost knot\n alpha = self.get_alpha(u, [leftmostknot, rightmostknot])\n deBoorvalues[i, j*2:j*2+2] = (\n alpha * deBoorvalues[i-1, (j-1)*2:(j-1)*2+2] +\n (1 - alpha) * deBoorvalues[i, (j-1)*2:(j-1)*2+2]\n )\n return deBoorvalues[3, -2:]",
"def bla(self):\n # TODO bla expects m, R, P, F = U.shape\n self.U = fft(self.u, axis=0)[self.lines].transpose((1, 2, 3, 0))\n self.Y = fft(self.y, axis=0)[self.lines].transpose((1, 2, 3, 0))\n self.G, self.covG, self.covGn = bla_periodic(self.U, self.Y)\n self.G = self.G.transpose((2, 0, 1))\n if self.covG is not None:\n self.covG = self.covG.transpose((2, 0, 1))\n if self.covGn is not None:\n self.covGn = self.covGn.transpose((2, 0, 1))\n return self.G, self.covG, self.covGn",
"def CoolMassFluxPDF(self,u,w,sfr=1.0,params=None):\n\n vout = 10.**u\n cs = 10.**w\n\n if params is None:\n A_v = self.cool_params['A_v']\n p_v = self.cool_params['p_v']\n d_v = self.cool_params['d_v']\n A_cs = self.cool_params['A_cs']\n cs0 = self.cool_params['cs0']\n sigma = self.cool_params['sigma']\n else:\n from scipy.special import gamma\n p_v,d_v,cs0,sigma = params\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n\n vout0 = self._vout0(sfr)\n v=(vout/vout0)\n PDF_v = A_v*v**d_v*np.exp(-v**p_v)\n PDF_cs = A_cs*np.exp(-0.5*(np.log(cs/cs0)/sigma)**2)\n\n return PDF_cs*PDF_v",
"def update(self, env, u, z, marker_id):\n # YOUR IMPLEMENTATION HERE\n\n new_particles_bar = np.zeros((self.num_particles, 3))\n importance_weights = np.ones(self.num_particles)\n ita = 0\n for m in range(self.num_particles):\n u_noisy = env.sample_noisy_action(u, self.alphas)\n xt = env.forward(self.particles[m,:].reshape(-1, 1), u_noisy)\n zt_hat = env.observe(xt, marker_id)\n importance_weights[m] = env.likelihood(minimized_angle(z - zt_hat), self.beta)\n new_particles_bar[m,:] = xt.reshape(1, -1)\n ita += importance_weights[m]\n \n importance_weights = importance_weights/ita\n\n self.particles, self.weights = self.resample(new_particles_bar, importance_weights)\n mean, cov = self.mean_and_variance(self.particles)\n return mean, cov",
"def apx_mon_val(mdp, u, pi, v0, x, eps, delta):\n v_tilde = np.zeros((mdp.nb_s, 1))\n pi_tilde = np.zeros((mdp.nb_s, 1))\n q, w = apx_val(mdp, u, v0, x, eps, delta)\n\n for i in range(mdp.nb_s):\n if q[i, 0] - 2 * mdp.gamma * eps > u[i, 0]:\n v_tilde[i, 0] = q[i, 0] - 2 * mdp.gamma * eps\n pi_tilde[i, 0] = w[i, 0]\n else:\n v_tilde[i, 0] = u[i, 0]\n pi_tilde[i, 0] = pi[i, 0]\n\n return v_tilde, pi_tilde",
"def y01(x):\r\n # return pini*((special.gamma(k1+p1))/(special.gamma(k1)*special.gamma(p1))*((x/l)**(k1-1))*(1-(x/l))**(p1-1))/7.3572\r\n return 1/100*x*epsilon*1/q*1e21\r\n # return 1e13*1/sigma*np.sqrt(2*np.pi) * np.exp(-np.power(x - u, 2.) / (2 * np.power(sigma, 2.)))-1e15*1/sigma\r",
"def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. - self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc",
"def angular1(brdf_settings):\n # const\n scaleconst = 2*np.pi/366\n\n locals().update(brdf_settings)\n\n def scale(x, a=5, b=10, xmin=-1, xmax=1):\n \"\"\"\n rescale the sin\n a new min\n b = new max\n xmin = min of x\n xmax = max of x\n \"\"\"\n return (b - a)*(x - xmin)/(xmax - xmin) + a\n\n t = np.linspace(0, 2*np.pi, 366)\n\n\n noise = np.random.normal(0, 2*np.pi/100.0, size=366)\n\n szaMAX = 60\n szaMIN = 10\n sza_off = 0.5*np.pi # in pi\n\n sza_t = np.sin(noise + t + sza_off)\n SZA = scale(sza_t, a=szaMIN, b=szaMAX)\n\n\n # noisy it a bit?\n\n \"\"\"\n vza cycle\n \"\"\"\n vzaMAX = 45\n vzaMIN = 0\n vza_cycle = 6 # in days\n\n vza_t = np.sin(noise + t/(vza_cycle/366.0))\n VZA = scale(vza_t, a=vzaMIN, b=vzaMAX)\n\n \"\"\"\n raa cycle\n \"\"\"\n raaMAX = 360\n raaMIN = 0\n raa_cycle = 32 # in days\n\n raa_t = np.sin(t/(raa_cycle/366.0))\n RAA = scale(noise + vza_t, a=raaMAX, b=raaMIN)\n\n\n \"\"\"\n only need to return kernels really\n \"\"\"\n kerns = Kernels(VZA, SZA, RAA,\n LiType='Sparse', doIntegrals=False,\n normalise=True, RecipFlag=True, RossHS=False, MODISSPARSE=True,\n RossType='Thick',nbar=0.0)\n return kerns, VZA, SZA, RAA",
"def HotMassFluxPDF(self,u,w,sfr=1.0,params=None):\n\n vout = 10.**u\n cs = 10.**w\n\n vB = np.sqrt(5.0*cs**2+vout**2)\n Mach = vout/cs\n\n if params is None:\n A_vB = self.hot_params['A_vB']\n p_vB = self.hot_params['p_vB']\n d_vB = self.hot_params['d_vB']\n A_M = self.hot_params['A_M']\n p_M = self.hot_params['p_M']\n d_M = self.hot_params['d_M']\n Mach0 = self.hot_params['Mach0']\n else:\n from scipy.special import gamma\n p_vB,d_vB,Mach0,p_M,d_M = params\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n vB0 = self._vB0(sfr)\n v=(vB/vB0)\n PDF_v = A_vB*v**d_vB*np.exp(-v**p_vB)\n m=(Mach/Mach0)\n PDF_M = A_M*m**d_M*np.exp(-m**p_M)\n\n return PDF_v*PDF_M",
"def UCB2(x, gp, ndim, t,delta = 0.1,v=1):\n\td=ndim\n\t#t=X_init.shape[0]\n#\tv=3\n#\tdelta=0.1\n\tx1=np.array(x).reshape(-1,ndim)\n\tmuNew, stdNew = gp.predict(x1, return_std=True)\n\t#fMax=max(Y_init)\n\t#Kappa = np.sqrt( v* (2* np.log((t**(d/2. + 2))*(np.pi**2)/(3. * delta) )))\n\tKappa = delta*((v**d)/t) \n\t#plt.plot(t,Kappa,'o')\n\treturn -(muNew + Kappa * stdNew)",
"def PeriodicConv(out_chan, filter_shape,\n strides=None, padding='VALID', dimension_numbers=('NHWC', 'HWIO', 'NHWC'), W_init=None,\n b_init=normal(1e-6), ignore_b=False, dtype=jnp.float64):\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n one = (1,) * len(filter_shape)\n strides = strides or one\n W_init = W_init or glorot_normal(rhs_spec.index('I'), rhs_spec.index('O'))\n\n def init_fun(rng, input_shape):\n\n # add padding dimensions for periodic BC; move this line into conv_general_shape_tuple after defining padding='PERIODIC'\n\n\n add_input = list(np.array(filter_shape) - 1) # new\n input_shape += np.array([0]+add_input+[0]) # only works with stride=(1,1)\n\n filter_shape_iter = iter(filter_shape)\n kernel_shape = [out_chan if c == 'O' else\n input_shape[lhs_spec.index('C')] if c == 'I' else\n next(filter_shape_iter) for c in rhs_spec]\n\n output_shape = lax.conv_general_shape_tuple(\n input_shape, kernel_shape, strides, padding, dimension_numbers)\n\n\n k1, k2 = random.split(rng)\n\n if not ignore_b:\n bias_shape = [out_chan if c == 'C' else 1 for c in out_spec]\n bias_shape = tuple(itertools.dropwhile(lambda x: x == 1, bias_shape))\n\n W, b = W_init(k1, kernel_shape, dtype=dtype), b_init(k2, bias_shape, dtype=dtype)\n return tuple(output_shape), (W, b)\n else:\n W = W_init(k1, kernel_shape, dtype=dtype)\n return output_shape, (W, )\n\n def apply_fun(params, inputs, **kwargs):\n\n # move this line into lax.conv_general_dilated after defining padding='PERIODIC'\n inputs = periodic_padding(inputs.astype(dtype), filter_shape, strides)\n # print(inputs.shape)\n if not ignore_b:\n W, b = params\n return lax.conv_general_dilated(inputs, W, strides, padding, one, one,\n dimension_numbers) + b\n else:\n W = params\n return lax.conv_general_dilated(inputs, W, strides, padding, one, one,\n dimension_numbers)\n\n return init_fun, apply_fun",
"def regularize_bwd(X, y, mu0, mu1, v1, nz, K, verbose=False):\n \n if verbose: sss=0#print '\\ncompute bath between mu=%.4f and mu=%.4f'%(mu0, mu1)\n \n n, m = X.shape\n X_nz = np.atleast_2d(X[:, nz])\n b = np.dot(X.T, y)\n G = np.dot(X.T, X)\n \n nbr = 0\n mu = mu0\n trans_type = -1\n trans_sign = 0\n trans_ind = -1\n if verbose: nbr=0#print 'initial active features =', nz\n \n while mu > mu1:\n \n # find the breakpoints where coefficients become zero\n b_nz = b[nz]\n Kv1 = np.dot(K, v1)\n Kb_nz = np.dot(K, b_nz)\n mu_0 = Kb_nz / Kv1\n \n # find the breakpoints where new coefficients become active\n z = np.setdiff1d(np.arange(m), nz)\n X_z = np.atleast_2d(X[:, z])\n b_z = b[z]\n M = G[np.ix_(z, nz)]\n MKb_nz = np.dot(M, Kb_nz)\n MKv1 = np.dot(M, Kv1)\n mu_1 = (b_z - MKb_nz) / (1 - MKv1)\n mu_m1 = (b_z - MKb_nz) / (-1 - MKv1)\n \n if trans_type > 0: mu_0[-1] = mu1\n mu_0[mu_0 >= mu] = mu1\n if len(mu_0) > 0: \n mu_0_argmax = mu_0.argmax()\n mu_0_max = mu_0[mu_0_argmax][0]\n else:\n mu_0_max = mu1\n if trans_type == 0:\n if trans_sign == 1: mu_1[np.where(z == trans_ind)[0]] = mu1 - 1\n else: mu_m1[np.where(z == trans_ind)[0]] = mu1 - 1\n mu_1[mu_1 >= mu] = mu1\n if len(mu_1) > 0: \n mu_1_argmax = mu_1.argmax()\n mu_1_max = mu_1[mu_1_argmax][0]\n else:\n mu_1_max = mu1\n mu_m1[mu_m1 >= mu] = mu1\n if len(mu_m1) > 0: \n mu_m1_argmax = mu_m1.argmax()\n mu_m1_max = mu_m1[mu_m1_argmax][0]\n else:\n mu_m1_max = mu1\n \n # compute the breakpoint\n mu_br_all = np.array([mu_0_max, mu_1_max, mu_m1_max])\n trans_type = mu_br_all.argmax()\n mu_br = mu_br_all[trans_type]\n \n if mu_br > mu1:\n \n nbr += 1\n mu = mu_br\n \n if trans_type == 0: # an element of theta(t) goes to zero\n trans_ind = nz[mu_0_argmax]\n trans_sign = v1[mu_0_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is inactive'%(mu, trans_ind)\n nzind = range(len(nz))\n rr=np.where(nz==trans_ind)[0][0]\n #print 'longa:',len(nz),len(nzind),len(v1)\n #print 'c:',nz.index(trans_ind)\n nzind=np.delete(nzind,rr)#nzind=np.delete(nzind,np.where(nzind==nz.index(trans_ind)))#nzind.remove(nz.index(trans_ind))\n v1 = v1[nzind]\n nz=np.delete(nz,rr)#nz=np.delete(nz,np.where(nz==trans_ind))#nz.remove(trans_ind)\n #print 'longa2:',len(nz),len(nzind),len(v1)\n X_nz = X[:, nz]\n K = invupdatered(K, mu_0_argmax)\n else: # new active element\n if trans_type == 1: # it is positive\n trans_ind = z[mu_1_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is positive'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)#nz.append(trans_ind)\n v1 = np.vstack([v1, 1])\n else: # it is negative\n trans_ind = z[mu_m1_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is negative'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)#nz.append(trans_ind)\n v1 = np.vstack([v1, -1])\n X_new = np.atleast_2d(X[:, trans_ind]).T\n K = invupdateapp(K, np.dot(X_nz.T, X_new), np.dot(X_new.T, X_nz), \n np.dot(X_new.T, X_new))\n X_nz = X[:, nz]\n \n else: # compute solution at mu1\n \n if verbose: sss=0#print 'compute solution at mu =', mu1\n theta_nz = Kb_nz - mu1*Kv1\n mu = mu1\n \n return theta_nz, nz, K, nbr",
"def __call__(self, u, apply_at=None):\n if apply_at is not None:\n lbound = apply_at - self.loffset\n rbound = apply_at + self.roffset + 1\n if rbound == 0: \n rbound = None\n return np.dot(self.stencil_coefs, u[lbound : rbound])\n else:\n \"\"\"The convolve method does not quite do the right thing, it\n flips the direction of iteration. Hence the ::-1 below.\n \n Note also that the convolve method is based on the \n multiarray.correlate a C routine.\"\"\"\n return np.convolve(u,self.stencil_coefs[::-1],mode='same')",
"def generate_pbc(self):\n s = \" - using 2D periodic boundaries -\"\n print_text(s, cls=self)\n\n xmin = MPI.min(mpi_comm_world(), self.mesh.coordinates()[:,0].min())\n xmax = MPI.max(mpi_comm_world(), self.mesh.coordinates()[:,0].max())\n ymin = MPI.min(mpi_comm_world(), self.mesh.coordinates()[:,1].min())\n ymax = MPI.max(mpi_comm_world(), self.mesh.coordinates()[:,1].max())\n \n self.use_periodic_boundaries = True\n \n class PeriodicBoundary(SubDomain):\n \n def inside(self, x, on_boundary):\n \"\"\"\n Return True if on left or bottom boundary AND NOT on one \n of the two corners (0, 1) and (1, 0).\n \"\"\"\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)\n\n def map(self, x, y):\n \"\"\"\n Remap the values on the top and right sides to the bottom and left\n sides.\n \"\"\"\n if near(x[0], xmax) and near(x[1], ymax):\n y[0] = x[0] - xmax\n y[1] = x[1] - ymax\n elif near(x[0], xmax):\n y[0] = x[0] - xmax\n y[1] = x[1]\n elif near(x[1], ymax):\n y[0] = x[0]\n y[1] = x[1] - ymax\n else:\n y[0] = x[0]\n y[1] = x[1]\n\n self.pBC = PeriodicBoundary()",
"def trackBeam(self, u):\n if not self.quiet:\n print(\"\"\"Particle start position:\nx = {0:.3f} mm, x' = {1:.3f} mrad\ny = {2:.3f} mm, y' = {3:.3f} mrad\"\"\".format(*np.asarray(u).A1 * 1e3, **globals()))\n for i, M in enumerate(self.M_array):\n self.u_array[i] = u.T\n u = M * u\n self.u_array[-1] = u.T\n if not self.quiet:\n print(u'''Particle final position:\nx = {0:.3f} mm, x' = {1:.3f} mrad\ny = {2:.3f} mm, y' = {3:.3f} mrad'''.format(*u.A1 * 1e3, **globals()))\n return u",
"def buffer_points_for_periodicBC(xy, PV, check=False):\n Epts = xy + PV[0]\n Npts = xy + PV[1]\n Wpts = xy - PV[0]\n Spts = xy - PV[1]\n NEpts = xy + PV[0] + PV[1]\n NWpts = xy - PV[0] + PV[1]\n SWpts = xy - PV[0] - PV[1]\n SEpts = xy + PV[0] - PV[1]\n xyout = np.vstack((xy, Epts, NEpts, Npts, NWpts, Wpts, SWpts, Spts, SEpts))\n if check:\n eps = 0.1\n plt.scatter(xy[:, 0], xy[:, 1], c='r', edgecolor='none')\n plt.scatter(Epts[:, 0] + eps, Epts[:, 1], c='y', edgecolor='none')\n plt.scatter(NEpts[:, 0] + eps, NEpts[:, 1] + eps, c='g', edgecolor='none')\n plt.scatter(Npts[:, 0], Npts[:, 1] + eps, c='b', edgecolor='none')\n plt.scatter(NWpts[:, 0] - eps, NWpts[:, 1] + eps, c='w')\n plt.scatter(Wpts[:, 0] - eps, Wpts[:, 1], c='m', edgecolor='none')\n plt.scatter(SWpts[:, 0] - eps, SWpts[:, 1] - eps, c='k', edgecolor='none')\n plt.scatter(Spts[:, 0], Spts[:, 1] - eps, c='lightgrey', edgecolor='none')\n plt.scatter(SEpts[:, 0] - eps, SEpts[:, 1] - eps, c='c', edgecolor='none')\n plt.show()\n return xyout",
"def notebook_01():\n\n freq_list, volt_list = las.load_freq_volt()\n\n n_steps, n_det, n_f, _ = np.shape(volt_list)\n\n #y_sym_mat_o = ds.by_sym_mat(volt_list, det_ind=0)\n #y_sym_mat_i = ds.by_sym_mat(volt_list, det_ind=1)\n\n # print(np.shape(y_sym_mat_o))\n # print(np.shape(y_sym_mat_i))\n # (mu_o, sigma_o) = stats.norm.fit(y_sym_mat_o[:,0])\n # (mu_i, sigma_i) = stats.norm.fit(y_sym_mat_i[:,0])\n # print(mu_o, sigma_o)\n # print(mu_i, sigma_i)\n # print(mu_o*89000, mu_i*89000.0, -mu_i*89000.0, -mu_o*89000.0)\n\n volt_list_sym = ds.volt_list_sym_calc(volt_list)\n\n fit_params_mat = fp.fit_params(ff.f_b_field, volt_list_sym)\n\n fit_params_mat_s = fp.fit_params(ff.f_b_field_off, volt_list_sym)\n\n # pbd.plot_bare_signal_and_fit_norm_shifted(0, volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)\n\n # pfp.plot_fit_sym_comp(volt_list_sym, fit_params_mat, fit_params_mat_s, freq_list)\n\n\n # pfp.plot_fit_sym_comp_2(volt_list_sym, fit_params_mat_s, freq_list)\n\n #pfp.plot_symmetry_along_z(volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)\n\n fp.fit_params_FH_data(ff.f_b_field)\n\n # pbd.plot_rel_diff_bare_signal_and_fit_norm_shifted(0, volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)",
"def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return",
"def f_x_pbe(x, kappa=0.804, mu=0.2195149727645171):\n c = 1 / (2 * (3 * jnp.pi ** 2) ** (1 / 3))\n s = c * x\n f_x = 1 + kappa - kappa / (1 + mu * s ** 2 / kappa)\n return f_x",
"def pre_process_pat(x_data, y_data, background, z_data, fig=None):\n backgr_sm = scipy.ndimage.gaussian_filter(background, sigma=5)\n\n imq = z_data - backgr_sm\n imq = imq - np.mean(imq, axis=1).reshape((-1, 1))\n\n ks = 5\n w = np.ones((1, ks)) / ks\n imx = scipy.ndimage.convolve(imq, w, mode='nearest')\n\n qq = np.percentile(imx, [5, 50, 95])\n imx = imx - qq[1]\n qq = np.percentile(imx, [2, 50, 98])\n scale = np.mean([-qq[0], qq[2]])\n imx = imx / scale\n\n if fig is not None:\n # y_data = np.arange(imq.shape[0])\n plt.figure(fig)\n plt.clf()\n plt.subplot(2, 2, 1)\n plt.pcolormesh(x_data, y_data, z_data, shading='auto')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n plt.title('Input data')\n plt.subplot(2, 2, 2)\n plt.pcolormesh(x_data, y_data, imq, shading='auto')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n plt.title('imq')\n plt.subplot(2, 2, 3)\n plt.pcolormesh(x_data, y_data, imx, shading='auto')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n plt.title('imx')\n plt.tight_layout()\n\n return imx, imq, backgr_sm",
"def beta_gen_mnt(p):\n return np.array([-1.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))",
"def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary condition are not periodic, the grid_size is double but particle kept in the first quadrant so \n #that the particles cannot feel the effect of the particles closed to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the partticle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial position are givem, place the particle to the right place on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial mometa are given, intialize it \n if len(momentum) != 0:\n self.particles.momentum = momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))",
"def nonsignaling_value(self) -> float:\n alice_out, bob_out, alice_in, bob_in = self.pred_mat.shape\n dim_x, dim_y = 2, 2\n\n constraints = []\n\n # Define K(a,b|x,y) variable.\n k_var = defaultdict(cvxpy.Variable)\n for a_out in range(alice_out):\n for b_out in range(bob_out):\n for x_in in range(alice_in):\n for y_in in range(bob_in):\n k_var[a_out, b_out, x_in, y_in] = cvxpy.Variable(\n (dim_x, dim_y), hermitian=True\n )\n constraints.append(k_var[a_out, b_out, x_in, y_in] >> 0)\n\n # Define \\sigma_a^x variable.\n sigma = defaultdict(cvxpy.Variable)\n for a_out in range(alice_out):\n for x_in in range(alice_in):\n sigma[a_out, x_in] = cvxpy.Variable((dim_x, dim_y), PSD=True)\n\n # Define \\rho_b^y variable.\n rho = defaultdict(cvxpy.Variable)\n for b_out in range(bob_out):\n for y_in in range(bob_in):\n rho[b_out, y_in] = cvxpy.Variable((dim_x, dim_y), PSD=True)\n\n # Define \\tau density operator variable.\n tau = cvxpy.Variable((dim_x, dim_y), PSD=True)\n\n p_win = cvxpy.Constant(0)\n for a_out in range(alice_out):\n for b_out in range(bob_out):\n for x_in in range(alice_in):\n for y_in in range(bob_in):\n p_win += self.prob_mat[x_in, y_in] * cvxpy.trace(\n self.pred_mat[a_out, b_out, x_in, y_in].conj().T\n * k_var[a_out, b_out, x_in, y_in]\n )\n\n objective = cvxpy.Maximize(cvxpy.real(p_win))\n\n # The following constraints enforce the so-called non-signaling\n # constraints.\n\n # Enforce that:\n # \\sum_{b \\in \\Gamma_B} K(a,b|x,y) = \\sigma_a^x\n for x_in in range(alice_in):\n for y_in in range(bob_in):\n for a_out in range(alice_out):\n b_sum = 0\n for b_out in range(bob_out):\n b_sum += k_var[a_out, b_out, x_in, y_in]\n constraints.append(b_sum == sigma[a_out, x_in])\n\n # Enforce non-signaling constraints on Alice marginal:\n # \\sum_{a \\in \\Gamma_A} K(a,b|x,y) = \\rho_b^y\n for x_in in range(alice_in):\n for y_in in range(bob_in):\n for b_out in range(bob_out):\n a_sum = 0\n for a_out in range(alice_out):\n a_sum += k_var[a_out, b_out, x_in, y_in]\n constraints.append(a_sum == rho[b_out, y_in])\n\n # Enforce non-signaling constraints on Bob marginal:\n # \\sum_{a \\in \\Gamma_A} \\sigma_a^x = \\tau\n for x_in in range(alice_in):\n sig_a_sum = 0\n for a_out in range(alice_out):\n sig_a_sum += sigma[a_out, x_in]\n constraints.append(sig_a_sum == tau)\n\n # Enforce that:\n # \\sum_{b \\in \\Gamma_B} \\rho_b^y = \\tau\n for y_in in range(bob_in):\n rho_b_sum = 0\n for b_out in range(bob_out):\n rho_b_sum += rho[b_out, y_in]\n constraints.append(rho_b_sum == tau)\n\n # Enforce that tau is a density operator.\n constraints.append(cvxpy.trace(tau) == 1)\n constraints.append(tau >> 0)\n\n problem = cvxpy.Problem(objective, constraints)\n ns_val = problem.solve()\n\n return ns_val",
"def beta_gen_posmnt(p):\n return np.array([0.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))",
"def _m_step(self, x, z_ik):\n # Update the parameters.\n # Update for pi (n_components, 1)\n\n # avg = np.mean(z_ik, axis=0).tolist()\n # norm = [i / sum(avg) for i in avg]\n # self._pi = np.array(norm).reshape(-1, 1)\n\n sum_ = np.sum(z_ik, axis=0)\n self._pi = sum_ / x.shape[0]\n\n # Update for mu (n_components, ndims)\n # new_mu = np.zeros_like(self._mu)\n # mu_down = np.sum(z_ik, axis=0)\n # for k in range(self._n_components):\n # mu_up = np.zeros((1, self._n_dims))\n # for i in range(x.shape[0]):\n # mu_up += z_ik[i, k] * x[i, :]\n # new_mu[k, :] = mu_up / mu_down[k]\n\n mu_up = z_ik.T.dot(x)\n mu_down = np.sum(z_ik, axis=0).reshape(-1, 1)\n self._mu = mu_up / mu_down\n\n # Update for sigma (n_components, n_dims, n_dims)\n new_sigma = np.zeros_like(self._sigma)\n sigma_down = np.sum(z_ik, axis=0)\n reg = np.zeros((x.shape[1], x.shape[1]))\n np.fill_diagonal(reg, self._reg_covar)\n\n for k in range(self._n_components):\n # mu_k = self._mu[k, :]\n # sigma_k = np.zeros((self._n_dims, self._n_dims))\n # for i in range(x.shape[0]):\n # sigma_k += z_ik[i, k] * np.diag(self._reg_covar +\n # np.diag(np.outer(x[i, :] - mu_k, x[i, :] - mu_k)))\n # new_sigma[k] = sigma_k / sigma_down[k]\n x_demean = x - self._mu[k, :]\n sigma_up = z_ik[:, k][:, np.newaxis] * x_demean\n new_sigma[k, :, :] = x_demean.T.dot(sigma_up) / sigma_down[k] + reg\n self._sigma = new_sigma",
"def beta_star(self):\n return self.reciprocal_lattice_parameters[4]",
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)",
"def __init__(self, temperatures, daytypes, consumptions, nb_days, nb_particles, sigma2, kappa, u_heat):\n self.temperatures = temperatures\n self.daytypes = daytypes\n self.consumptions = consumptions\n self.nb_days = nb_days\n self.nb_particles = nb_particles\n self.sigma2 = sigma2\n self.kappa = kappa\n self.u_heat = u_heat\n #Var init\n self.s = np.zeros((nb_days, nb_particles)) \n self.g_heat = np.zeros((nb_days, nb_particles))\n #sigma_s and sigma_g are fixed\n self.sigma_s_star_2 = np.zeros((1, nb_particles)) \n self.sigma_g_star_2 = np.zeros((1, nb_particles))\n self.x_season = np.zeros((1, nb_particles))\n self.x_heat = np.zeros((1, nb_particles))\n self.x = np.zeros((1, nb_particles))\n self.w = np.zeros((1, nb_particles))",
"def component_pdfs(x, mus, sigmas):\n n_components = mus.shape[0]\n return np.array([gaussian_pdf(x, mus[k,:], sigmas[k, :, :]) for k in range(n_components)])",
"def fitparab(z, ra, rb, theta, filt):\n # compute the interior quickly with convolutions\n a = scipy.signal.convolve2d(z, filt[:, :, 0], 'same')\n # fix border with mex file\n a = savgol_border(a, z, ra, rb, theta)\n \n return a"
] | [
"0.5633326",
"0.53691405",
"0.5280884",
"0.519096",
"0.51834625",
"0.51490057",
"0.50876623",
"0.50546724",
"0.5012297",
"0.5001122",
"0.49511448",
"0.4948063",
"0.49467683",
"0.4946201",
"0.49404076",
"0.49395758",
"0.49327973",
"0.4920169",
"0.49003652",
"0.48976943",
"0.48874596",
"0.48829293",
"0.4877312",
"0.48521537",
"0.48447096",
"0.48425117",
"0.48371172",
"0.48266685",
"0.48116234",
"0.48069564"
] | 0.66574705 | 0 |
The Kolmogorov distance is the maximum absolute difference between two cumulative distribution functions. This calculates the KD of two PDFs given as numpy arrays. | def kolmogorov_distance(pdfx, pdfy):
cdfx = np.cumsum(pdfx)/np.sum(pdfx)
cdfy = np.cumsum(pdfy)/np.sum(pdfy)
return np.max(np.abs(cdfx - cdfy)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )",
"def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)",
"def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))",
"def kolmogorov_smirnov_distance(self, other, show=False):\n from scipy.interpolate import interp1d\n cumdist1 = self.get_cumulative_distribution()\n cumdist2 = other.get_cumulative_distribution()\n \n # Normalize the x-values\n # range1 = np.max(cumdist1[\"x\"]) - np.min(cumdist1[\"x\"])\n # range2 = np.max(cumdist2[\"x\"]) - np.min(cumdist2[\"x\"])\n # cumdist1[\"x\"] -= np.min(cumdist1[\"x\"])\n # cumdist1[\"x\"] *= (range2/range1) \n # cumdist1[\"x\"] += np.min(cumdist2[\"x\"])\n\n interp_cumdist1 = interp1d(cumdist1[\"x\"], cumdist1[\"P\"], kind=\"linear\",\n fill_value=(0.0, 1.0), bounds_error=False)\n\n diff = cumdist2[\"P\"] - interp_cumdist1(cumdist2[\"x\"])\n\n ks_distance = np.max(np.abs(diff))\n\n if show:\n from matplotlib import pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(cumdist1[\"x\"], cumdist1[\"P\"], drawstyle=\"steps\", label=\"First\")\n ax.plot(cumdist2[\"x\"], cumdist2[\"P\"], drawstyle=\"steps\", label=\"Second\")\n ax.set_xlabel(\"Normalized distances\")\n ax.set_ylabel(\"Cummulative Distribution\")\n ax.legend(loc=\"best\")\n plt.show()\n return ks_distance",
"def get_distance(p1, p2):\n\n deg_rad = math.pi / 180\n\n dphi = p1[1] - p2[1]\n phim = 0.5 * (p1[1] + p2[1])\n dlam = p1[0] - p2[0]\n\n k1 = (111.13209 - 0.56605 * math.cos(2 * phim * deg_rad) + 0.00120 * \n math.cos(4 * phim * deg_rad))\n k2 = (111.41513 * math.cos(phim * deg_rad) - 0.09455 * \n math.cos(3 *phim * deg_rad) + 0.0012 * math.cos(5 * phim * deg_rad))\n\n return numpy.sqrt(k1**2 * dphi**2 + k2**2 * dlam**2)",
"def dist(p0, p1):\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)",
"def dist(a, b):\n return np.sum((a-b)**2.0)**.5",
"def hyperboloidDist(point1, point2):\n return np.arccosh(-minkowskiDot(point1, point2))",
"def distance(P1, P2):\n return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5",
"def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)",
"def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)",
"def distance(p1, p2):\n return np.linalg.norm(np.array(p1) - np.array(p2))",
"def distance(p1, p2):\n return np.linalg.norm(p2-p1)",
"def distkp(self, k1: int, k2: int) -> float:\n result = self._read_inline(f\"distkp({k1},{k2})\")\n return result",
"def getDistance(self, pt1, pt2):\n p = 2 #euclidean distance\n tot = 0\n for indexc, column in pt1.iteritems():\n if indexc in self.discrete: # need to reference VDM\n datapoint = self.VDMdict.get(indexc)\n dif = datapoint[pt1[indexc]][pt2[indexc]]\n elif indexc != \"class\": #gets distance beween 2 points\n dif = abs(float(pt1[indexc]) - float(pt2[indexc]))\n\n tot += dif ** p\n distance = tot ** (1 / p)\n return(distance)",
"def distance(p1, p2):\n dist = 0\n for k in set([*p1.keys(), *p2.keys()]):\n dist += (p1.get(k, 0) - p2.get(k, 0))**2\n return math.sqrt(dist)",
"def euclid_dist(p1, p2):\n \n return float(np.linalg.norm(np.array(p1)-np.array(p2)))",
"def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5",
"def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))",
"def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)",
"def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5",
"def compute_dist(p_1, p_2):\n return sqrt((p_2[0] - p_1[0])**2 + (p_2[1] - p_1[1])**2 +\n (p_2[2] - p_1[2])**2)",
"def distance(p_1, p_2):\n return ((p_2[0] - p_1[0]) ** 2 + (p_2[1] - p_1[1]) ** 2 \\\n + (p_2[2] - p_1[2]) ** 2) ** 0.5",
"def dist2D(a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5",
"def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))",
"def _get_dist(self, p1, p2): \r\n\r\n distance = np.sqrt(\r\n (p1[0] - p2[0]) ** 2 +\r\n (p1[1] - p2[1]) ** 2 +\r\n (p1[2] - p2[2]) ** 2)\r\n\r\n return distance",
"def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( x.flat_cpt() * np.log( x.flat_cpt() / y.flat_cpt() ) )\n\treturn distance",
"def dist_2D(v1, v2):\n return ((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 )**(0.5)",
"def dist(v1, v2):\n return ( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )**0.5"
] | [
"0.6695389",
"0.6530534",
"0.6528139",
"0.6474828",
"0.64736056",
"0.6458455",
"0.6420905",
"0.63969034",
"0.63817",
"0.6379324",
"0.6358662",
"0.6355999",
"0.63409716",
"0.63376147",
"0.6327973",
"0.63198674",
"0.6312635",
"0.6306476",
"0.62971365",
"0.6286765",
"0.6282493",
"0.6277934",
"0.6276071",
"0.6271267",
"0.6266675",
"0.6265339",
"0.6254474",
"0.6224336",
"0.6204365",
"0.6199705"
] | 0.7690672 | 0 |
Simple reconnaissance tool to harvest comments in webpages | def main(u, o):
click.echo(f"Web crawling on {u} started successfully...")
comment_regex = re.compile('<!--(.*?)-->')
with requests.Session() as session:
resp = session.get(u)
soup = BeautifulSoup(resp.text, 'lxml')
#TODO: search for hidden attributes, may be useful
comments = soup.find_all(text=comment_regex)
print(comments) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comment():",
"def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)",
"def watch2():\n\tcomments = r.get_comments('all', limit=None)\n\tfor comment in comments:\n\t\tif comment in visited:\n\t\t\tcontinue\n\t\telse:\n\t\t\tvisited[comment] = 1\n\t\t\tif \"LexiconBot define\" in comment.body:\n\t\t\t\tprint comment, \"from\", comment.permalink, \" / \", comment.submission\n\t\t\t\tmsg = define(comment.body.split()[2])\n\t\t\t\tcomment.reply(msg)\n\n\tprint \"Sleeping...\"\n\tsleep(1)",
"def test_issue_get_comments(self):\n pass",
"def getComments(source):\n\n markup = []\n for f in source:\n markup += extractMarkup(f)\n\n docs = collateDocs(markup)\n return docs",
"def parse_comments_html(advertise: Dict[str, Any]) -> Optional[List[str]]:\n if \"comments_html\" in advertise.keys():\n\n filtred_comments: str = advertise[\"comments_html\"][200::]\n\n tmp: List[str] = re.split(\"[ \\n\\t]{2,}\", filtred_comments)\n if '' in tmp:\n tmp.remove('')\n\n # Breaking comments\n master: List[List[str]] = []\n tmp_vec: List[str] = []\n for line in tmp:\n\n if re.search(\"de \\d{4,}\", line): # matches 'de 2018' that signals the end of comment\n master.append(tmp_vec)\n tmp_vec = []\n else:\n tmp_vec.append(line)\n\n # Cleaning comments\n for comment in master:\n if \"...\" in comment:\n comment.remove(\"...\")\n if \"O usuário contratou o serviço em\" in comment:\n comment.remove(\"O usuário contratou o serviço em\")\n\n return [\" \".join(m) for m in master]",
"def test_issue_get_comment(self):\n pass",
"def test_issue_get_comment_reactions(self):\n pass",
"def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)",
"def getHTMLComments(self, text):\n return self.doSpecial(text, '<!--', '-->', self.fParseHTMLComments)",
"def do_comments(self, line):\n for comment in self.review.comments():\n print(comment)",
"def test_comment_post():\n\n driver = webdriver.Chrome()\n driver.get('https://cmput404group10.herokuapp.com')\n\n username = driver.find_element_by_id(\"username\")\n username.send_keys(\"ronWeasley\")\n\n password = driver.find_element_by_name(\"password\")\n password.send_keys(\"ualberta123\")\n\n driver.find_element_by_xpath('/html/body/div/div/div/div/div/div/div[2]/div/form/div[3]/button').click()\n\n elements = driver.find_elements_by_link_text('View Comments / Comment')\n elements[0].click()\n\n comment = driver.find_element_by_xpath('//*[@id=\"id_comment\"]')\n comment.send_keys(\"Test Comment\")\n\n driver.find_element_by_xpath('//*[@id=\"content\"]/div/div[2]/form/button').click()\n\n assert driver.page_source.__contains__(\"Test Comment\")",
"def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ people found the following review helpful')\n reviewText = '<span class=\"h3color tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())",
"def core(self):\n \n \n comments = self.bot.subreddit(\n \"all\").stream.comments(\n skip_existing = True)\n \n \n for comment in comments:\n \n text = comment.body.lower().replace(\".\", \"\")\n \n for card in self.catalog:\n \n if (\n card[1].lower() in text\n and card[0].lower() in text\n and not comment.submission.id in self.responded\n and not comment.subreddit.user_is_banned):\n\n self.get_info(card)\n\n if not self.details:\n \n break\n\n audio = [\n \"audiobook\", \n \"audio book\"]\n \n author_format = [\n name.lower() for name in card[1].split(\" \") \n if len(name) >= 3]\n\n if (\n self.details[\"duration\"] > 10800\n and card[0].lower() in self.details[\n \"title\"].lower()\n and any(\n item in self.details[\n \"title\"].lower() for item in audio)\n and all(\n item in self.details[\n \"title\"].lower() for item in author_format)):\n \n \n saw_the_sign = (\n \"\"\"[^(Source Code)](https://capybasilisk.com/posts/\"\"\"\n \"\"\"2020/04/speculative-fiction-bot/) \"\"\"\n \"\"\"^| [^(Feedback)](https://www.reddit.com/message/\"\"\"\n \"\"\"compose?to=Capybasilisk&subject=Robot) \"\"\"\n \"\"\"^| [^(Programmer)](https://www.reddit.com/u/\"\"\"\n \"\"\"capybasilisk) \"\"\"\n \"\"\"^| ^(Downvote To Remove) \"\"\" \n \"\"\"^| ^(Version 1.4.0) \"\"\"\n \"\"\"^| ^(Support Robot Rights!)\"\"\")\n \n\n comment.reply(\n f\"\"\"Hi. You just mentioned *{card[0]}* by \"\"\" \n f\"\"\"{card[1]}.\\n\\nI've found an audiobook of \"\"\" \n \"\"\"that novel on YouTube. You can listen to it here\"\"\"\n f\"\"\":\\n\\n[YouTube | {self.details['title']}]\"\"\"\n f\"\"\"({self.details['webpage_url']})\\n\\n*I\\'m a bot that \"\"\" \n \"\"\"searches YouTube for science fiction and fantasy\"\"\" \n f\"\"\" audiobooks.*\\n***\\n{saw_the_sign}\"\"\")\n\n \n self.responded.append(\n comment.submission.id)\n \n with open(\n \"activity.csv\", \n \"a\", \n encoding = \"UTF-8\") as actlog:\n\n activity = clevercsv.writer(\n actlog)\n\n if actlog.tell() == 0:\n\n activity.writerow(\n [\"Book\",\n \"Comment\", \n \"Author\", \n \"Thread\", \n \"Subreddit\", \n \"Time\"])\n\n activity.writerow(\n [f\"{card[0]} by {card[1]}\",\n f\"{comment.body}\",\n f\"{comment.author}\",\n f\"{comment.submission.title}\",\n f\"{comment.subreddit}\",\n f\"{pendulum.now().to_datetime_string()}\"])\n \n self.details = None\n \n break\n \n break \n \n if pendulum.now().to_time_string().endswith(\n \"0:00\"):\n \n self.tidy()",
"def test_comments(self):\n self.resource._request.register_uri(\n 'GET', '/users/dotzero/comments?page=2', 'fixture_user.json')\n\n response = self.resource.comments('dotzero', 2)\n\n self.assertTrue('data' in response)\n self.assertTrue('server_time' in response)",
"def test_get_specific_comment_info():\n a, b, c, d = get_specific_comment_info('g99c7c0')\n print('time created:', a, 'type:', type(a))\n print('permalink:', b, 'type:', type(b))\n print('karma score:', c, 'type:', type(c))\n print('submission id:', d, 'type:', type(d))",
"def comments():\n return render_template(\"/scene/comments/comments.html\")",
"def test_issue_get_repo_comments(self):\n pass",
"def test_get_comment_information_by_id():\n get_comment_information_by_id('g99c7c0')",
"def comment_for_run (ins, exp, runnum) :\n return dict_of_recs_for_run(ins, exp, runnum)['comment']",
"def get_comments(qint,conn):\n\n comms = ('SELECT DISTINCT ip.value '\n 'FROM interaction i, interactionprop ip, cvterm cvt '\n 'WHERE i.interaction_id = ip.interaction_id AND ip.type_id = cvt.cvterm_id '\n 'AND cvt.is_obsolete=0 AND cvt.name != \\'comments on source\\' '\n 'AND cvt.name != \\'internalnotes\\' AND i.uniquename = %s')\n comnts = connect(comms, qint, conn)\n return(comnts)",
"def test_get_comments_from_submission():\n # gets a test submission\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n submission_id = threads[0].d_['id']\n\n # prints link to thread\n thread_full_link = threads[0].d_['full_link']\n print(thread_full_link)\n\n # prints submission title\n thread_title = threads[0].d_['title']\n print(thread_title)\n\n submission = get_comments_from_submission(submission_id)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)",
"async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count",
"def extract_comments(self, response):\n\n # use the comment_parser package to extract HTML and JS comments\n try:\n html_comments = comment_parser.extract_comments_from_str(response.text, mime=\"text/html\")\n except (UnterminatedCommentError, CP_ParseError):\n html_comments = []\n try:\n js_comments = comment_parser.extract_comments_from_str(response.text, mime=\"application/javascript\")\n except (UnterminatedCommentError, CP_ParseError):\n js_comments = []\n\n # put the discovered comments together\n comments = list()\n for comment in html_comments:\n comments.append({\"line\": comment.line_number(), \"comment\": \"<!--\" + comment.text() + \"-->\"})\n for comment in js_comments:\n if comment.is_multiline():\n comments.append({\"line\": comment.line_number(), \"comment\": \"/*\" + comment.text() + \"*/\"})\n else:\n comments.append({\"line\": comment.line_number(), \"comment\": \"//\" + comment.text()})\n\n # store the discovered comments w.r.t. the response's path & query\n if comments:\n parsed_url = urllib.parse.urlparse(response.url)\n if self.config[\"crawl_parameter_links\"].lower() == \"true\":\n self.comments[parsed_url.path + parsed_url.query] = comments\n else:\n self.comments[parsed_url.path] = comments",
"def get_comments(self):\n raise NotImplementedError",
"def get_comment_information_by_id(comment_id):\n comment = REDDIT.comment(comment_id)\n print(comment.body)\n print(vars(comment))",
"def get_comments(self, sort, time):\r\n from r2.models import Comment\r\n return self.get_links(sort, time, Comment)",
"def extract_comments(comments_file, output_filename=direc+\"/comments.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting comments from \" + comments_file + \"...\")\r\n comments_dict = {}\r\n with open(output_filename, \"w\", encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(comments_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n if child.attrib['PostId'] not in comments_dict:\r\n comments_dict[child.attrib['PostId']] = []\r\n comments_dict[child.attrib['PostId']].append(child.attrib['Id'])\r\n clean_comment = clean_markdown(child.attrib['Text'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['PostId'] + \"\\t\" + clean_comment + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting comments from \" + comments_file + \".\\n\")\r\n return comments_dict",
"def cli(ctx, comment, metadata=\"\"):\n return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)",
"def main():\n pattern = get_regex_pattern()\n directories = get_directories()\n files = get_js_files(directories)\n comments_for_jsdoc_exists = analyse_files_against_regex_pattern(\n files, pattern)\n set_github_env_variable(comments_for_jsdoc_exists)"
] | [
"0.65669584",
"0.6525056",
"0.6505806",
"0.6469335",
"0.6297864",
"0.6289731",
"0.62210983",
"0.6215077",
"0.61902016",
"0.6184528",
"0.6149626",
"0.61480325",
"0.61391455",
"0.6130129",
"0.60489833",
"0.60166967",
"0.6005638",
"0.6002507",
"0.59990406",
"0.59586626",
"0.59361655",
"0.58962595",
"0.58956575",
"0.5892558",
"0.5884654",
"0.58758336",
"0.5871386",
"0.5862181",
"0.58524144",
"0.58314615"
] | 0.7606145 | 0 |
Scrape charlesriverapparel.com. If url is set, scrape only the item at that url. If url_to_scrape is not set, returns a list of itemInfo dicts. If url_to_scrape is set, returns an itemInfo dict. | def scrape(url_to_scrape=None, cached_data=None):
all_items = []
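# Use the cached category map when provided; otherwise crawl the site's category tree.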
if cached_data:
cat_dict = cached_data['categories_dict']
else:
cat_dict = _traverse_categories() # {item_id: { 'id': item_id, 'category_list': [categories], 'url': item_url } }
category_list = None
brand = None
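# A specific URL was requested: look up its category list and brand, then scrape only that item.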
if url_to_scrape:
for scrape_cat_data in cat_dict.values():
if scrape_cat_data['url'] == url_to_scrape:
if scrape_cat_data.has_key('category_list'):
category_list = scrape_cat_data['category_list']
else:
category_list = None
if scrape_cat_data.has_key('brand'):
brand = scrape_cat_data['brand']
else:
brand = None
break
# Only include the item to scrape
if category_list and brand:
scrape_data_output = _get_item_attributes(url_to_scrape, category_list, brand)
elif category_list:
scrape_data_output = _get_item_attributes(url_to_scrape, category_list)
else:
scrape_data_output = _get_item_attributes(url_to_scrape)
return scrape_data_output
else:
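# No URL given: walk every category entry and scrape each item, collecting the results.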
for scrape_cat_data in cat_dict.values():
if scrape_cat_data.has_key('category_list'):
category_list = scrape_cat_data['category_list']
else:
category_list = None
if scrape_cat_data.has_key('brand'):
brand = scrape_cat_data['brand']
else:
brand = None
if category_list and brand:
get_all_data_points = _get_item_attributes(scrape_cat_data['url'], category_list, brand)
elif category_list:
get_all_data_points = _get_item_attributes(scrape_cat_data['url'], category_list)
else:
get_all_data_points = _get_item_attributes(scrape_cat_data['url'])
if TESTRUN:
print "URL is not set so scrapping data from :- ", scrape_cat_data['url']
print '-'*78
print get_all_data_points
print '-'*78
all_items.append(get_all_data_points)
return all_items | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def single_crawl(self, urlitem: str):\n # print(\"Item: \", urlitem)\n try:\n hdr = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 \",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Charset\": \"ISO-8859-1,utf-8;q=0.7,*;q=0.3\",\n \"Accept-Encoding\": \"none\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"Connection\": \"keep-alive\",\n }\n try:\n req = Request(urlitem, headers=hdr)\n html_page = urlopen(req)\n soup = BeautifulSoup(html_page, \"lxml\")\n links = [\n requests.compat.urljoin(urlitem, link.get(\"href\"))\n for link in soup.findAll(\"a\")\n ]\n links = [x for x in links if \"#\" not in x]\n except Exception as e:\n # print(e)\n pass\n return links\n\n except:\n pass",
"def scrape_url(url):\n r = requests.get(url)\n url_list = get_urls(r.text)\n email_list = get_email_addresses(r.text)\n phone_list = get_phone_numbers(r.text)\n\n print_list('Urls', url_list)\n print_list('Emails', email_list)\n print_list('Phone Numbers', phone_list)",
"def program_item(url):\n items = []\n \n soup = abcradionational.get_soup(url)\n\n playable_podcast = abcradionational.get_playable_podcast(soup)\n\n items = abcradionational.compile_playable_podcast(playable_podcast)\n\n return items",
"def scrape_site(url, headers, proxy):\n items = []\n\n # Makes request to site\n s = rq.Session()\n page = 1\n while True:\n html = s.get(url + f'?page={page}&limit=250', headers=headers, proxies=proxy, verify=False, timeout=20)\n output = json.loads(html.text)['products']\n if output == []:\n break\n else:\n # Stores particular details in array\n for product in output:\n product_item = {\n 'title': product['title'], \n 'image': product['images'][0]['src'], \n 'handle': product['handle'],\n 'variants': product['variants']}\n items.append(product_item)\n page += 1\n \n logging.info(msg='Successfully scraped site')\n s.close()\n return items",
"def getLinksToPhonesPerBrands(url):\n urls = {}\n print(\"brand link being scrapped : \", url)\n try:\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.content, \"html.parser\")\n li = sourceCode.select('#review-body div > ul > li > a')\n for link in li:\n title = link.get_text()\n url = processUrl(link['href'])\n if title not in urls.keys():\n urls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return urls",
"def parse_webpage(self, response):\n item = response.meta['item']\n print(\"Request url {}, actual requested url {}\".format(item['url'], response.request.url))\n # website url\n item['website_url'] = response.request.url\n\n item['name'] = self.guess_company_name(response)\n item['domain'] = self.get_domain(response)\n\n # get website title\n item['website_title'] = self.get_webpage_title(response)\n # get description from website\n item['website_desc'] = self.get_webpage_description(response)\n\n # get keywords from website\n item['keywords'] = self.get_webpage_keywords(response)\n\n # try to get email and phones\n item['email'] = self.extract_email(response)\n item['phone'] = self.extract_phone(response)\n\n if not item['email']:\n # try to get contact info\n # check if there is kontakt link on the page\n item = self.check_webpage_for_contact_details(item, response, \"impressum\")\n\n if not item['email']:\n try:\n # try Contact\n item = self.check_webpage_for_contact_details(item, response, \"kontakt\")\n\n except Exception as e:\n print(\"Exception\", e)\n\n if item['email']:\n item['email'] = item['email'].replace(\"(at)\", \"@\")\n yield item",
"def get_query_url_results(self, url):\n page = requests.get(url)\n soup = BeautifulSoup(page.text, \"html5lib\")\n \n items = []\n raw_items = soup.findAll(\"tbody\", {\"class\": \"item\"})\n for item in raw_items:\n description = OrderedDict()\n description[\"name\"] = item['data-name']\n description[\"seller\"] = item['data-ign']\n # sockets requires modification for counting divs\n #description[\"sockets\"] = item.find(\n # \"span\" ,\n # {\"class\" : \"sockets-raw\"}\n # ).text\n description[\"price\"] = item['data-buyout']\n # pdps will also require modification for updated html\n #description[\"pdps\"] = item.find(\n # \"td\",\n # {\"data-name\": \"quality_pdps\"},\n #).text\n items.append(description)\n \n return items",
"def parse_page(url):\n\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n\n listings = []\n\n # Loop throuhg all prices\n for offer in soup.findAll(\"div\", {\"class\": \"regular-ad\"}): # Scan regular-ad class to avoid featured ads realted to Kijiji Ads\n \n current_listing_dict = {}\n\n # Parse title\n title_list = offer.find_all(href=True)[0].text.split(\" \")\n title = [i for i in title_list if i]\n title = \" \".join(title).rstrip().strip(\"\\n\").strip(\" \")\n\n # Append title to dict\n current_listing_dict['title'] = title\n\n # Parse price\n price = \"\".join(offer.findAll(\"div\", {\"class\": \"price\"})[0].text.split(\" \")).rstrip().strip('\\n')\n\n if '$' in price:\n price = price.split('$')[-1].replace(',','')\n\n # Append price to dict\n current_listing_dict['price'] = price\n \n # Parse link\n link = offer.find_all(href=True)[0]['href']\n\n # Append link to dict\n current_listing_dict['link'] = link\n\n # Append to global listings list\n listings.append(current_listing_dict)\n\n return listings",
"def get_products_from_page(url):\n\n def get_data_from_book(book):\n \"\"\"Return data from one book.\"\"\"\n src_img = book.find(\"img\").get(\"src\")\n src_img = src_img.replace(\"../\", \"\")\n image = \"http://books.toscrape.com/\" + src_img\n\n in_stock = False\n in_stock_or_not = book.find(\"p\", {\"class\", \"instock\"}).text\n if \"In stock\" in in_stock_or_not:\n in_stock = True\n\n name = book.find(\"h3\").find(\"a\").text\n\n price = book.find(\"p\", {\"class\", \"price_color\"}).text\n price = price.replace(\"Â\", \"\")\n\n rating = book.find(\"p\", {\"class\", \"star-rating\"}).get(\"class\")[1]\n rating = w2n.word_to_num(rating)\n\n return {\n \"image\": image,\n \"in_stock\": in_stock,\n \"name\": name,\n \"price\": price,\n \"rating\": rating,\n }\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n books = soup.find_all(\"article\", {\"class\", \"product_pod\"})\n\n result = list(map(get_data_from_book, books))\n return result",
"def getLinkstoBrands(url):\n brandUrls = {}\n try:\n print(\"Maker link being crawled : \", url)\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.text, \"html.parser\")\n for td in sourceCode.findAll('td'):\n link = td.find('a', href=True)\n title = td.get_text()\n url = processUrl(link['href'])\n if title not in brandUrls.keys():\n brandUrls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return brandUrls",
"def scrape_BI(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text)\n companies = soup.find_all('h3', class_='slide-title')\n #names = []\n driver = init_driver()\n for company in companies[:]:\n name = company.getText().strip()\n # if \" \" in name:\n # name.replace(' ','+')\n html_code = load_google(driver, name)\n #name, address = scrape_google(html_code)\n url = scrape_google(html_code)\n print(name,url)\n #names.append(name)\n driver.quit()\n #print(names)",
"def collect_amenities(self, url: str) -> None:\n page_source = self.get_page_source(url, \"gmnoprint\")\n soup = BeautifulSoup(page_source, \"html.parser\")\n\n # Get latitude and longitude data\n self.get_coordinates(soup)\n\n # Open amenities url and collect additional data\n try:\n href_url_amenities = soup.find(class_=\"b6xigss dir dir-ltr\").find(\"a\")[\n \"href\"\n ]\n url_amenities = f\"https://www.airbnb.com{href_url_amenities}\"\n\n amenities_page_source = self.get_page_source(url_amenities, \"_vzrbjl\")\n soup = BeautifulSoup(amenities_page_source, \"html.parser\")\n amenities = soup.find_all(class_=\"_1cnse2m\")[1].get_text()\n\n except (AttributeError, TypeError, IndexError):\n amenities = \"\"\n\n if amenities == \"\":\n self.__collected_dic[\"kitchen\"].append(None)\n self.__collected_dic[\"refrigerator\"].append(None)\n self.__collected_dic[\"wifi\"].append(None)\n self.__collected_dic[\"washer\"].append(None)\n self.__collected_dic[\"tv\"].append(None)\n self.__collected_dic[\"parking\"].append(None)\n else:\n self.get_amenity_kitchen(amenities)\n self.get_amenity_refrigerator(amenities)\n self.get_amenity_wifi(amenities)\n self.get_amenity_washer(amenities)\n self.get_amenity_tv(amenities)\n self.get_amenity_parking(amenities)",
"def parse_item(self, response):\n NewhouseSpider.crawled_urls.append(response.url)\n item = FocusedScrapyCrawlerItem()\n item['url'] = response.url\n item['link_text'] = response.meta.get('link_text', '') if response.meta else ''\n soup = BeautifulSoup(response.body, 'html.parser')\n\n item['body_p_tags'] = self._getBodyText(soup)\n item['head_title'] = self._getHeadTitle(soup)\n item['last_crawled'] = time.time()\n links = self._getLinks(response, soup)\n\n # get score of the page based upon classifier\n if self.classifier:\n score = self.classifier.score(item['link_text'], item['head_title'], item['body_p_tags'])\n else:\n score = 0.0\n\n item['score'] = score\n yield item\n if score <= 0:\n self.log(\"item={} does not belong to new home so stop crawling\".format(item),\n logging.INFO)\n else:\n for link in links:\n req = Request(link, priority=int(score * 1000000), # after the request is done, run parse_item to train the apprentice\n callback=self.parse_item)\n yield req",
"def parse_url(self, url: str):\n time.sleep(0.1)\n resp = requests.get(url, timeout=5).content.decode('windows-1250')\n selector = Selector(text=resp)\n name_addresses = []\n if not self.is_right_page(selector):\n return []\n\n company = self.parse_business_name(selector)\n name_addresses += self.parse_management_body(selector)\n name_addresses += self.parse_partners(selector)\n\n ret = []\n for name_address in name_addresses:\n name_address = [re.sub(r'[\",;]', '', n).strip() for n in name_address]\n print(\"Found name: \", name_address)\n is_russian = self.RUSSIA in name_address[1]\n ret.append([re.sub(r'[\",;]', '', company).strip()] + name_address + [is_russian])\n return ret",
"def handle_url(url, session, res):\n print(\"Parsing\", url, file=sys.stderr)\n try:\n data, baseUrl = getPageContent(url, session)\n except IOError as msg:\n print(\"ERROR:\", msg, file=sys.stderr)\n return\n for match in url_matcher.finditer(data):\n url = match.group(1)\n name = unescape(match.group(2))\n name = asciify(name.replace('&', 'And').replace('@', 'At'))\n name = capfirst(name)\n if name in exclude_comics:\n continue\n if contains_case_insensitive(res, name):\n # we cannot handle two comics that only differ in case\n print(\"INFO: skipping possible duplicate\", repr(name), file=sys.stderr)\n continue\n res[name] = url",
"def get_item_url(self, soup: BeautifulSoup) -> Optional[str]:\n try:\n url = f\"https://www.airbnb.com{soup.find('a').get('href')}\"\n except AttributeError:\n url = None\n self.__collected_dic[\"url\"].append(url)\n return url",
"def parse_items(self, response: Response) -> RlItem:\n self.logger.info('Crawler Found Item Page: %s', response.url)\n\n # Iterate through each rocket league item and build it.\n for elem_item in response.xpath('//div[starts-with(@class, \"rlg-item__container\")]'):\n loader = RlItemLoader(item=RlItem(), selector=elem_item)\n loader.add_xpath('data_id', './/div/@data-id')\n loader.add_xpath('img_url', './/img/@src')\n loader.add_value('name', elem_item.attrib['data-name'])\n loader.add_value('category', elem_item.attrib['data-category'])\n loader.add_value('platform', elem_item.attrib['data-platform'])\n loader.add_value('rarity', elem_item.attrib['data-rarity'])\n loader.add_value('dlcpack', elem_item.attrib['data-dlcpack'])\n yield loader.load_item()",
"def get_urls(search_key = \"Roma, Roma\", select_key = 0):\n print(driver_path)\n driver = webdriver.Chrome(driver_path, options=chrome_options) # Optional argument, if not specified will search path.\n driver.get('https://www.astegiudiziarie.it/Immobili/Risultati')\n time.sleep(small_wait)\n driver.find_element_by_id('filter-category').click()\n time.sleep(small_wait)\n driver.find_element_by_xpath(\"//ul[@id='categories_filter']/li[@data-option-id='1']\").click()\n time.sleep(small_wait)\n driver.find_elements_by_xpath(\"//div[@class='clefted']/a\")[1].click()\n time.sleep(small_wait)\n while True:\n try:\n location = driver.find_element_by_xpath(\"//span[@id='location-span']/input[@id='location']\")\n location.click()\n time.sleep(small_wait)\n location.send_keys(search_key)\n time.sleep(small_wait)\n locatoin_text = driver.find_elements_by_xpath(\"//ul[@class='ui-menu ui-widget ui-widget-content ui-autocomplete highlight ui-front']/li\")[select_key].text\n loc = driver.find_elements_by_xpath(\"//ul[@class='ui-menu ui-widget ui-widget-content ui-autocomplete highlight ui-front']/li\")[select_key].click()\n print(\"The location is:\", locatoin_text)\n l1['text'] = \"The location is: \" + locatoin_text\n break\n except:\n pass\n\n n_items = 0\n while True:\n time.sleep(small_wait)\n items = driver.find_elements_by_class_name(\"listing-item\")\n hover = items[-2]\n action = webdriver.common.action_chains.ActionChains(driver)\n action.move_to_element(hover).perform()\n time.sleep(big_wait)\n if n_items < len(items):\n n_items = len(items)\n else:\n break\n\n urls = driver.find_elements_by_xpath(\"//div[@class='listing-item']/a\")\n urls = [url.get_attribute('href') for url in urls]\n urls = set(urls)\n db = mysql.connector.connect(\n user='root', database='astegiudiziarie',\n host='localhost', password='Maral1398', port=3306)\n db.autocommit = True\n cur = db.cursor()\n cur.execute(\"select url from urls where region='{}'\".format(locatoin_text))\n pre_urls = [u[0] for u in cur.fetchall()]\n for url in urls:\n print(url)\n if url not in pre_urls:\n cur.execute('insert into urls(region, url, date) values(\"{}\",\"{}\",\"{}\")'.format(locatoin_text, url, datetime.date.today()))\n print('New url added')\n else:\n print('The url exists')\n cur.close()\n db.close()\n driver.get_screenshot_as_file(\"capture.png\")\n driver.close()\n driver.quit()",
"def grab_mApe_results (searchType) :\n\n mape_main_url = 'https://www.mightyape.co.nz/'\n #Defining the url paths for search types\n mape_mv_category_url = 'movies-tv/movies?q='\n mape_mv_format_search_url = 'movieformat~blu-ray'\n\n #This is the final url string\n searchUrl = ''\n\n #Checking search type\n if searchType is SEARCH_BD_MV_TYPE :\n searchUrl = mape_main_url+mape_mv_category_url+mape_mv_format_search_url\n elif searchType is 'Title' :\n searchUrl = 'https://www.mightyape.co.nz/movies-tv/movies/all?sort=2&q=movieformat~blu-ray'\n\n\n #Using a dictionary to store data, as contains list with objects\n mape_list = {}\n\n page = requests.get(searchUrl)\n tree = html.fromstring(page.content)\n\n data = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]/div[@class=\"title\"]/a') #<--- WORKS\n\n data_alt = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]')\n\n print('Getting results from url:',searchUrl)\n print('Number of objects=',len(data_alt))\n count = 1\n\n for item in data_alt :\n simple_item = item.xpath('div[@class=\"title\"]/a')\n title = simple_item[0].text\n link = simple_item[0].get('href')\n format = item.xpath('div[@class=\"format\"]/text()')\n rating = item.xpath('div[@class=\"customer-rating\"]/span/span[@class=\"average\"]/text()')\n base_price = item.xpath('div[@class=\"price\"]/s/text()')\n hot_price = item.xpath('div[@class=\"price\"]/span[@class=\"price hot\"]/text()')\n normal_price = item.xpath('div[@class=\"price\"]/span[@class=\"price\"]/text()')\n if len(rating) > 0 :\n #temp_mv = Movie_object(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n print(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n #mape_list[title] = temp_mv\n else :\n print(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n #temp_mv = Movie_object(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n #mape_list[title] = temp_mv\n\n\n count += 1\n\n return mape_list",
"def crawl(self, url):\n return None",
"def general_scraper(section_url):\n\n prefix = \"http://mesva.univaq.it\"\n\n request = []\n news = []\n\n for i, url in enumerate(section_url):\n request.append(requests.get(url))\n news_division = BeautifulSoup(request[i].text, \"html.parser\").find(class_=\"view-content\")\n\n discab_news = news_division.find_all(\"div\", recursive=False)[0:5]\n\n for single_news in discab_news:\n news.append({\n 'description': '',\n 'title': single_news.a.string,\n 'link': prefix + single_news.a['href']\n })\n\n return news",
"def parse(self, response):\n self.driver.get(response.url)\n product_category=response.meta[\"category_text\"]\n products=response.xpath(\"//*[(@class='list-item')]\")\n \n # item containers for storing product\n items = CrawlingECommerceItem()\n \n # iterating over search results\n # for product in products:\n # # Defining the XPaths\n # XPATH_PRODUCT_LINK=\".//*[contains(concat( ' ', @class, ' ' ), concat( ' ', 'goods-tit', ' ' ))]//a\"\n # XPATH_PRODUCT_NAME=\".//div[@class='goods-introudce']//a/@href\"\n # XPATH_PRODUCT_PRICE=\".//div[@class='catalog-detail']//div[@class='detail-right']//p/text()\"\n # XPATH_PRODUCT_IMAGE_LINK=\".//img\"\n\n # raw_product_name=product.xpath(XPATH_PRODUCT_NAME).get()\n # raw_product_price=product.xpath(XPATH_PRODUCT_PRICE).get()\n # raw_product_image_link=product.xpath(XPATH_PRODUCT_IMAGE_LINK).extract()\n # raw_product_link=product.xpath(XPATH_PRODUCT_LINK).get()\n\n # # cleaning the data\n # product_name=''.join(raw_product_name).strip(\n # ) if raw_product_name else None\n # product_price=''.join(raw_product_price).strip(\n # ) if raw_product_price else None\n # product_image_link=''.join(raw_product_image_link).strip(\n # ) if raw_product_image_link else None\n # product_link=''.join(raw_product_link).strip(\n # ) if raw_product_link else None\n\n # # storing item\n # yield CrawlingECommerceItem (\n # product_name=product_name,\n # product_price=product_price,\n # product_url=product_link,\n # product_category=product_category,\n # image_urls=raw_product_image_link\n # )\n\n # # yield items\n \n # XPATH_PRAGINATION_LINK=\"//*[(@class='next right')]/a/@href\"\n\n yield response.follow(str(response.request.url), callback = self.parse, meta = {\"category_text\": product_category})",
"def extract_from_soup( target_url_soup ):\n\n datetime_now = datetime.datetime.now().strftime(\"%m.%d.%Y:%H.%M.%S\")\n\n # Declare a list for each variable extracted below.\n title_list = []\n href_list = []\n cost_list = []\n info_list = []\n loc_list = []\n datetime_pulled = []\n\n # Iteratively extract the data into a list that goes to a dictionary.\n for each in target_url_soup.find_all('p'):\n ## Filter this thing which gets caught in the craigslist data.\n if each.a.string == \"next 100 postings\":\n pass\n else:\n # Get the title, get none on an exception\n try: \n #print str(each.a.string)\n post_title = str(each.a.string)\n except:\n post_title = \"\"\n \n # Get the hyperlink, get none on an exception\n try:\n #print str(each.a.get('href'))\n post_href = str(each.a.get('href'))\n except:\n post_href = \"\"\n\n # Get the cost/info, get none on an exception\n post_cost = \"\"\n post_info = \"\"\n try:\n #print str(each.find(\"span\", \n # \"itemph\").string).strip().strip('-').strip()\n post_cost_info = str(each.find(\"span\", \n \"itemph\").string).strip().strip('-').strip()\n \n\n # Use a regular expression to parse this data further\n if re.match(\"\\$\", post_cost_info) is None:\n post_info = str(post_cost_info).strip()\n\n else:\n # If there is no /, assign as dollars.\n if re.search(\"/\", post_cost_info) is None:\n post_cost = str(post_cost_info).strip().strip('$')\n \n # chop up any entry with $COST / x br\n else:\n cost_info_list = [] # list to receive re.split()\n cost_info_list = re.split('/', post_cost_info, 1 )\n post_cost = str(cost_info_list[0]).strip().strip('$')\n post_info = str(cost_info_list[1]).strip()\n \n # Close the above try block for cost and info\n except:\n pass \n\n # Get the location, get none on an exception\n try: \n #print str(each.find(\"span\", \n # \"itempn\").string).strip().strip('()')\n post_loc = str(each.find(\"span\", \n \"itempn\").string).strip().strip('()')\n except:\n post_loc = \"\"\n \n ## Add all extracted items to their respective lists.\n ## We are still in the above loop here. All lists will get an entry.\n ## This keeps the lists in step in the case of bad entries, so they can\n ## still be zipped, but with blank spaces. Some data is better than\n ## no data.\n title_list.append( post_title )\n href_list.append( post_href )\n cost_list.append( post_cost )\n info_list.append( post_info )\n loc_list.append( post_loc )\n ## Append the datetime_now to each tuple, kept in step.\n datetime_pulled.append( datetime_now )\n\n # Zip the lists collected in the for loop into a tuple.\n # The tuple is the value of the dict/json.\n extracted_data_tuple = zip(title_list, \n href_list, \n cost_list,\n info_list,\n loc_list, \n datetime_pulled)\n \"\"\"\n This tuple is used for MD5 generation because it excludes the unique \n datetime attribute. This would salt the MD5 and we want the md5 to \n represent the data inside so we can detect duplicates.\n\n I have now also removed the href_list because of duplicate posts\n The likelihood of different people having the same title, cost, info, and\n location is still very low and won't affect data.\n \"\"\"\n extracted_data_tuple_nouniquetime = zip(title_list, \n cost_list, \n info_list, \n loc_list)\n \n md5_key_list = []\n # Generate a list of md5 keys from the data tuple, the md5s are the keys. 
\n for each in extracted_data_tuple_nouniquetime:\n eachmd5 = md5.new()\n eachmd5.update( str(each) )\n md5_key_list.append( str( eachmd5.hexdigest() ) )\n\n # Zip a tuple and convert into a dictionary for JSON extraction\n extracted_data_dict = dict( zip( md5_key_list, extracted_data_tuple ) )\n\n return ( extracted_data_dict )",
"def scrap_book_info(book_url):\n response = requests.get(book_url)\n page = response.content\n soup = BeautifulSoup(page, \"html.parser\")\n\n return {\n \"product_page_url\": book_url,\n \"upc\": soup.select_one(\"table tr:nth-child(1) > td\").text,\n \"title\": soup.select_one(\"article div.col-sm-6.product_main > h1\").text,\n \"price_including_tax\": soup.select_one(\"table tr:nth-child(4) > td\").text,\n \"price_excluding_tax\": soup.select_one(\"table tr:nth-child(3) > td\").text,\n \"number_available\": number_only(soup.select_one(\"#content_inner > article > table tr:nth-child(6) > td\").text),\n \"product_description\": soup.select_one(\"article > p\").text,\n \"category\": soup.select_one(\"#default > div > div > ul > li:nth-child(3) > a\").text,\n \"review_rating\": word_to_number(soup.select_one(\".star-rating\")[\"class\"][1]),\n \"image_url\": remove_suffix(soup.select_one(\"#product_gallery img\")[\"src\"]),\n }",
"def crawl_main_list(session, top_url, indicator):\n try:\n req = session.get(top_url)\n\n except requests.exceptions.RequestException:\n # In the case HTTP request failed.\n req_err = str(sys.exc_info()[0]) + ' : ' + str(sys.exc_info()[1])\n print('HTTP request error. ({})'.format(err))\n sbtk.show_errormessage(indicator.parent,\n 'HTTP request error. Program terminated.',\n req_err)\n raise\n\n # print('get return = {} --- {}'.format(req.url, req.reason))\n top_list = req.html.find('li.regular-search-result')\n\n # Take information of restaurants from Main Page\n for a_rest in top_list:\n time.sleep(5)\n try:\n # Get this restaurant's information.\n rest_name = a_rest.find('h3.search-result-title > span.indexed-biz-name > a.biz-name.js-analytics-click > span', first = True).text\n # Genre, Area, Address, Phone\n rest_genre_list = [ rest_genre.text for rest_genre in a_rest.find(\n 'div.price-category > span.category-str-list > a')]\n\n rest_secondattr = a_rest.find('div.secondary-attributes', first=True)\n # Some businesses don't have area.\n rest_area_elem = rest_secondattr.find('span.neighborhood-str-list', first=True)\n if not rest_area_elem:\n rest_area = ''\n else:\n rest_area = rest_area_elem.text\n # rest_area = rest_area_elem.text\n\n # Some businesses don't have <address> tag and\n # <div class=\"biz-parent-container\"> tag instead.\n rest_address_elem = rest_secondattr.find('address', first=True)\n if not rest_address_elem:\n rest_located = rest_secondattr.find('div.biz-parent-container', first=True)\n if rest_located:\n rest_address = rest_located.text.replace('\\n', ', ')\n else:\n rest_address = ''\n else:\n rest_address = rest_address_elem.text.replace('\\n', ', ')\n # Some businesses don't have phone number.\n rest_phone_elem = rest_secondattr.find('span.biz-phone', first=True)\n if not rest_phone_elem:\n rest_phone = ''\n else:\n rest_phone = rest_phone_elem.text\n\n # print(str(\"* {}\".format(rest_name).encode(encoding='cp932', errors='replace')), flush=True)\n\n # Go to the link to the individual restaurant page.\n # Get the restaurant's website, message, reservation values by Dict.\n rest_link = element_link(a_rest.find(\n 'h3.search-result-title > span.indexed-biz-name',\n first=True\n ).find('a.biz-name.js-analytics-click', first=True)\n )\n rest_page_info = each_rest_page(session, rest_link)\n\n list_num = len(rest_list)+1\n # Set information to Dict rest_list.\n rest_list[list_num] = {\n 'name' : rest_name,\n 'genre' : rest_genre_list,\n 'area' : rest_area,\n 'address' : rest_address,\n 'phone' : rest_phone,\n 'web' : rest_page_info['web'],\n 'message' : rest_page_info['message'],\n 'reservation' : rest_page_info['reservation'],\n 'takes_rsrv' : rest_page_info['takes_rsrv'],\n 'page' : rest_link\n }\n indicator.set_num_to_msg(list_num)\n # print('[{}] {} : {}'.format(len(rest_list), rest_name, rest_list[rest_name]), flush=True)\n\n except Exception:\n # When any program error occures...\n err = str(sys.exc_info()[0]) + ' : ' + str(sys.exc_info()[1])\n if not sbtk.choose_errormessage(indicator.parent, 'HTML Analysis Error.', err):\n raise # Program terminates. (otherwise go back to loop)\n\n except:\n # When a system error happens...\n raise # Profram terminates.\n\n # Return 'next page' link\n return element_link(req.html.find(\n 'a.u-decoration-none.next.pagination-links_anchor', first = True))",
"def scrapeInfoForItem(self, subpage, item):\n\t\thtmlcontent = self.HttpHandler.getHtmlContentFromLink(item.link)\n\t\tsoupPage = BeautifulSoup(htmlcontent, \"html.parser\")\n\n\t\t# brand\n\t\tresult = soupPage.findAll(\"p\", { \"class\" : \"product-brand--details\" })\n\t\tif len(result) > 0:\n\t\t\tres1 = result[0].find(\"a\")\n\t\t\tif res1 == None:\n\t\t\t\titem.Brandname = str(result[0].contents[0])\n\t\t\telif len(res1) > 0:\n\t\t\t\titem.Brandname = str(res1.contents[0])\n\n\t\t# Name\n\t\tresult = soupPage.findAll(\"h1\", { \"class\" : \"product-title--details\" })\n\t\tif len(result) > 0:\n\t\t\tres1 = result[0].find(\"span\", { \"itemprop\" : \"name\" })\n\t\t\tif len(res1) > 0:\n\t\t\t\titem.Productname = str(res1.contents[0])\n\n\t\t# Color\n\t\tresults = soupPage.findAll(\"a\", { \"class\" : \"js-switch-colourVariant\" })\n\t\tif len(results) == 0:\n\t\t\tresult2 = soupPage.findAll(\"h1\", { \"class\" : \"product-title--details\" })\n\t\t\tif len(result) > 0:\n\t\t\t\tres2 = result2[0].find(\"span\", { \"itemprop\" : \"color\" })\n\t\t\t\tif len(res2) > 0:\n\t\t\t\t\titem.Colors = str(res2.contents[0])\n\t\telse:\n\t\t\titem.Colors = \"|\".join([res[\"title\"] for res in results])\n\n\t\t# size\n\t\tresults = soupPage.findAll(\"span\", { \"class\" : \"product-sizeLabel\" })\n\t\titem.Sizes = \"|\".join([res.contents[0] for res in results])\n\n\t\t# beschreibung\n\t\tresult = soupPage.find(\"ul\", { \"class\" : \"product-infoList--twoCol\" })\n\t\tif result:\n\t\t\tresults = result.findAll(\"span\")\n\t\t\titem.Description = \"|\".join([res.contents[0] for res in results])\n\n\t\t# material \n\t\tresults = soupPage.find(\"ul\", { \"class\" : \"product-infoList\" })\n\t\tif results:\n\t\t\tresults = results.findAll(\"span\")\n\t\t\titem.Materials = \"|\".join([res.contents[0] for res in results])\n\n\t\t# pflege\n\t\tresults = soupPage.find(\"ul\", { \"class\" : \"product-care\" })\n\t\tif results:\n\t\t\tresults = results.findAll(\"li\")\n\t\t\titem.Maintenance = \"|\".join([res.get_text() for res in results])\n\n\t\t# current, regular price (current can be reduced)\n\t\tresult = soupPage.find(\"meta\", { \"itemprop\" : \"price\" })\n\t\tif result:\n\t\t\tresult = result[\"content\"]\n\t\t\tif \",\" in result:\n\t\t\t\tresult = str(result).replace(',','.')\n\t\t\tif u'\\xa0' in result:\n\t\t\t\tresult = result.replace(u'\\xa0', u' ')[:-1] # there is a € sign at the end\n\t\t\tif \"ab\" in result:\n\t\t\t\titem.CurrentPrice = result\n\t\t\telse:\n\t\t\t\titem.CurrentPrice = float(result)\n\t\tresult = soupPage.find(\"span\", { \"class\" : \"is-regular\" })\n\t\tif result:\n\t\t\tif \",\" in result.contents[0]:\n\t\t\t\tresult = str(result.contents[0]).replace(',','.')\n\t\t\tif u'\\xa0' in result:\n\t\t\t\tresult = result.replace(u'\\xa0', u' ')[:-1] # there is a € sign at the end\n\t\t\tif \"ab\" in result:\n\t\t\t\titem.RegularPrice = result\n\t\t\telse:\n\t\t\t\titem.RegularPrice = float(result)\n\t\telse:\n\t\t\titem.RegularPrice = item.CurrentPrice",
"def _scrape(self):",
"def scrape_url(url):\n html = requests.get(url).text\n return scrape_html(html)",
"def parse_item(self, response):\n item = IphoneSpiderItem()\n\n item['sku'] = response.meta.get('sku')\n item['price'] = response.meta.get('price')\n item['name'] = response.meta.get('name')\n item['seller'] = response.meta.get('seller')\n #pass the data from parse to parse_item\n\n url = response.url\n model = response.xpath('//*[@id=\"crumb-wrap\"]/div/div[1]/div[9]/text()').extract_first()\n color = response.xpath('//div[@data-type=\"颜色\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/i/text()').extract_first()\n memory = response.xpath('//div[@data-type=\"版本\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n memory2 = response.xpath('//div[@data-type=\"内存\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n #memory data can be stored in 版本 or 内存\n\n if memory2:\n memory = memory2.strip()\n elif memory:\n memory = memory.strip()\n\n item['model'] = model\n item['color'] = color\n item['memory'] = memory\n item['url'] = url\n\n return item",
"def process_item(self, item, spider):\n item['url'] = spider.config['site_domain'] + item[\"url\"]\n item[\"rating\"] = extract_rating(item[\"rating\"])\n item['price'] = get_price(item['price_integer'], item[\"price_decimal\"])\n item['no_discount_price'] = get_price(item['no_discount_price_integer'], item[\"no_discount_price_decimal\"])\n item[\"brand\"] = get_brand(item[\"brand\"])\n item[\"number_of_ratings\"] = get_number_of_ratings(item[\"number_of_ratings\"])\n del item['price_integer']\n del item['price_decimal']\n del item['no_discount_price_integer']\n del item[\"no_discount_price_decimal\"]\n return item"
] | [
"0.65837955",
"0.62235403",
"0.6197151",
"0.6080949",
"0.59467924",
"0.5946328",
"0.5896769",
"0.58487123",
"0.5833526",
"0.58186036",
"0.57552403",
"0.5735235",
"0.5734194",
"0.570501",
"0.56573653",
"0.5643743",
"0.5621614",
"0.5603681",
"0.560318",
"0.56009895",
"0.55741036",
"0.55653995",
"0.5552301",
"0.55391693",
"0.5535788",
"0.5531782",
"0.5529689",
"0.5516019",
"0.55073184",
"0.5498799"
] | 0.70564085 | 0 |
Downloads the detection model from tensorflow servers | def download_model(\
download_base='http://download.tensorflow.org/models/object_detection/', \
model_name='ssd_mobilenet_v1_coco_11_06_2017'\
):
# append .tar.gz to the model name to form the archive file name
model_file = model_name + '.tar.gz'
try:
opener = urllib.request.URLopener()
opener.retrieve(download_base + model_file, \
model_file)
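# extract only the frozen inference graph from the downloaded archive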
tar_file = tarfile.open(model_file)
for f in tar_file.getmembers():
file_name = os.path.basename(f.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(f, os.getcwd())
except Exception as e:
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_model_from_gcs(self):\n # download model\n download_file_from_gcs(self.config.model_bucket_name,\n self.config.model_gcs_path,\n self.config.model_local_path)\n\n # download lable columns\n download_file_from_gcs(self.config.model_bucket_name,\n self.config.labels_gcs_path,\n self.config.labels_local_path)",
"def download_imagenet(self):\n raise NotImplementedError('download_imagenet method not implemented.')",
"def download_model(model_arch, alt_model_name=None):\n\n download_base_url = 'http://download.tensorflow.org/models/object_detection/'\n \n tf_org_model_name = downloadable_models[model_arch]['tf.org.model_name']\n base_pipeline_config = downloadable_models[model_arch]['pipeline_file']\n\n model_archive_basename = tf_org_model_name + '.tar.gz'\n dst_dir = './downloaded_pretrained_models'\n model_archive_path = join(dst_dir, model_archive_basename)\n\n if not isdir(dst_dir):\n makedirs(dst_dir)\n\n # \"cache\"\n if not exists(model_archive_path):\n print(\"downloading ::\", download_base_url + model_archive_basename)\n urlretrieve(\n url=download_base_url + model_archive_basename,\n filename=model_archive_path\n )\n else:\n print(\"model archive was already downloaded yay :D\")\n\n # saves to ./output/$model_name/training\n dst = join('./output', alt_model_name) \\\n if alt_model_name is not None else join(\n './output', model_arch\n )\n \n print(\"extracting download.tensorflow.org model archive...\")\n tar = tarfile.open(model_archive_path)\n tar.extractall(path=dst)\n tar.close()\n\n if exists(join(dst, 'training')):\n raise FileExistsError(\n \"\\n\\nModel directory already exists. \\\n \\n- Overwriting models that were potentially trained is forbidden.\\\n \\n- If you want to train a new model from scratch, \\\n \\n pass a non-existent alt_model_name.\\\n \\n\\n>> These are the model names already in use:\\\n \\n {}\".format(\"\\n\".join(listdir('./output')))\n )\n\n # not elegant, but whatever works man\n shutil.move(\n src=join(dst, tf_org_model_name),\n dst=join(dst, 'training')\n )\n # store model_arch for future ref\n system(\"echo {} > {}\".format(model_arch, join(\n dst,\n 'training/model.arch')\n )\n )\n # make sure its integrity checks out\n assert_ckpt(join(dst, 'training'))\n \n # make export directory for inference executables\n model_name = model_arch if alt_model_name is None else alt_model_name\n export_dir = join('./output', model_name, 'export')\n if not exists(export_dir):\n makedirs(export_dir)\n\n # rename `checkpoint` file so that training can actually take off...\n # don't ask, but... \n # https://github.com/tensorflow/models/issues/5053#issuecomment-441423962\n shutil.move(join(dst, 'training/checkpoint'), join(dst, 'training/old_checkpoint'))\n \n # path to training dir and path to base training pipeline config\n return join(dst, 'training'), export_dir, base_pipeline_config",
"def download_tf_params():\n\n if not os.path.exists(MODEL_DIR):\n os.makedirs(MODEL_DIR)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(MODEL_DIR, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n statinfo = os.stat(filepath)\n print()\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n\n tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)",
"def download_model():\n logging.info(\"[genreml] Downloading model...\")\n with urllib.request.urlopen(config.FMAModelConfig.FMA_MODEL_URL) as f:\n data = f.read()\n open(config.FMAModelConfig.FMA_MODEL_PATH, 'wb').write(data)\n logging.info(\"[genreml] Model download complete\")",
"def test_downloadModel(self):\n\t\tmodel_in = \"\"\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__), \"v1_rm500\")\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[\"v1_rm500\"]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=\"v1_rm500\")\n\t\t\n\t\tself.assertTrue(os.path.isdir(model_in))\n\t\tself.assertTrue(os.path.exists(\"\".join([model_in, \"/lasagne_bestparams.npz\"])))\n\t\t\n\t\t\"\"\"Test if model can be setup correctly\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\n\t\tself.assertEqual(len(lmodel.features), 17688)",
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def load_model(model_name):\n base_url = 'http://download.tensorflow.org/models/object_detection/'\n model_file = model_name + '.tar.gz'\n model_dir = tf.keras.utils.get_file(\n fname=model_name,\n origin=base_url + model_file,\n untar=True)\n\n model_dir = pathlib.Path(model_dir)/\"saved_model\"\n\n model = tf.saved_model.load(str(model_dir))\n model = model.signatures['serving_default']\n\n return model",
"def load_feature_extractor(model_spec, device):\n\n model_type = model_spec['name']\n model_weights_fp = model_spec['weights']\n\n if model_type == 'imagenet_swav':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_supervised':\n model = models.resnet50(pretrained=True)\n\n elif model_type == 'random':\n model = models.resnet50(pretrained=False)\n\n elif model_type == 'inat2018_supervised':\n model = models.resnet50(pretrained=False)\n # This model was actually trained with 10000 classes for the fc layer\n # but only 8142 (the number in inat2018) were actually updated\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in checkpoint['state_dict'].items()}\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n 
# remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_mini_swav' or model_type == 'inat2021_mini_swav_1k':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict['state_dict'].items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n else:\n raise ValueError(\"Unknown pytorch model: %s\" % model_type)\n\n\n # remove the final fully connected layer so the model only operates with post average pool features\n model = torch.nn.Sequential(*(list(model.children())[:-1]))\n model.to(device)\n model.eval()\n\n feature_extractor = PTResNet50FeatureExtractor(model, device)\n\n return feature_extractor",
"def maybe_download_pretrained_vgg(self):\n DESTINATION_MODEL_TAR_PATH = self.PATH_TO_MODEL + self.MODEL_FILE\n\n # Check if model downloaded\n if not os.path.exists (self.PATH_TO_MODEL + self.MODEL_NAME + '/frozen_inference_graph.pb'):\n\n # Download model\n print 'Downloading pre-trained model...'\n opener = urllib.request.URLopener()\n opener.retrieve(self.DOWNLOAD_BASE + self.MODEL_FILE, DESTINATION_MODEL_TAR_PATH)\n\n # Extracting model\n print 'Extracting model...'\n tar_file = tarfile.open(DESTINATION_MODEL_TAR_PATH)\n\n for file in tar_file.getmembers():\n file_name = os.path.basename(file.name)\n if 'frozen_inference_graph.pb' in file_name:\n tar_file.extract(file, self.PATH_TO_MODEL)\n\n self.maybe_download_model = True",
"def download_pre_trained_model (model_name):\n model_url = cfg.LIST_MODEL_TO_DOWNLOAD[model_name]\n\n file_name = (model_url.split(\"/\")[-1]).split(\".\")[0] # get file name from url\n\n # create directory if not exit\n if not os.path.exists(os.path.join(cfg.PATH_PRE_TRAINED_MODELS,file_name)):\n if not os.path.exists(cfg.PATH_PRE_TRAINED_MODELS):\n os.mkdir(cfg.PATH_PRE_TRAINED_MODELS)\n try:\n click.echo(click.style(f\"\\n Downloading the {file_name} model \\n\", bg='green', bold=True, fg='white'))\n file_location = save_zip_from_url(model_url, cfg.PATH_PRE_TRAINED_MODELS)\n \n click.echo(click.style(f'\\n Extraction of {file_name} ...\\n', bg='blue', bold=True, fg='white'))\n\n # Extract tar-file to annotation directory\n with tarfile.open(name=file_location) as tar:\n for member in tqdm(iterable=tar.getmembers(), total=len(tar.getmembers())):\n tar.extract(member=member, path=cfg.PATH_PRE_TRAINED_MODELS)\n # delete tar-file\n os.remove(file_location)\n return 0\n\n except expression as identifier:\n return -1\n else:\n click.echo(click.style(f\"\\n {file_name} have been downloaded \\n\", bg='blue', bold=True, fg='white'))\n return 1",
"def download_and_unpack_model(model_name, model_date='20200711'):\n\n # pretrained models\n pretrained_models_dir = os.path.join(ROOT_DIR, 'pre-trained-models')\n create_directory_if_not_exists(pretrained_models_dir)\n\n # pretrained model archives\n model_archive_dir = os.path.join(pretrained_models_dir, 'archives')\n create_directory_if_not_exists(model_archive_dir)\n\n # determine the (expected) path of the downloaded model\n model_file = f'{model_name}.tar.gz'\n model_archive_path = os.path.join(model_archive_dir, model_file)\n\n # if the tar.gz exists, try to unpack it\n retry = False\n if os.path.exists(model_archive_path):\n logger.info(f'Attempting to unpack {full_path(model_archive_path)}...')\n try:\n shutil.unpack_archive(model_archive_path, os.path.dirname(model_archive_dir))\n except EOFError:\n logger.info('Cannot unpack. Archive is corrupt. Attempting to retry...')\n retry = True\n\n # if the tar.gz does not exist or is corrupt (unpacking failed), download it, then unpack it\n if not os.path.exists(model_archive_path) or retry:\n base_url = 'http://download.tensorflow.org/models/object_detection/tf2/'\n url = base_url + model_date + '/' + model_file\n logger.info(f'Downloading from {url}...')\n\n # download file as stream\n response = requests.get(url, stream=True)\n with open(model_archive_path, 'wb') as handle:\n progress_bar = tqdm(unit=\"B\", total=int(response.headers['Content-Length']), unit_scale=True, unit_divisor=1024)\n for data in response.iter_content(chunk_size=8192):\n progress_bar.update(len(data))\n handle.write(data)\n progress_bar.close()\n\n # try to unpack tar.gz\n logger.info(f'Attempting to unpack {full_path(model_archive_path)}...')\n try:\n shutil.unpack_archive(model_archive_path, os.path.dirname(model_archive_dir))\n except EOFError:\n # give up if unpacking failed\n logger.info('Archive cannot be unpacked')\n sys.exit(1)\n\n logger.info('Successfully downloaded and unpacked model')",
"def __init__(self):\n self.classes_to_detect = ['person']\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Load Tensorflow model into memory\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(GRAPH_PATH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with self.detection_graph.as_default():\n self.sess = tf.Session(graph=self.detection_graph, config=tf_config)\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n # Each box represents a part of the image where a particular\n # object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each of\n # the objects. Score is shown on the result image, together\n # with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name(\n 'num_detections:0')\n\n logger.info('Model graph loaded.')",
"def serve(self):\n\n with self.graph.as_default():\n\n if self.builder == None:\n self.builder = tf.saved_model.builder.SavedModelBuilder(self.path + '/build/')\n\n # Generate softmax output.\n prediction = tf.nn.softmax(self.output, name='predict_probability')\n prediction_adv = tf.nn.softmax(self.output_adv, name='prediction_probability_adv')\n \n\n # Build `SignatureDef`.\n # See https://www.tensorflow.org/serving/signature_defs .\n inputs = {k.name: tf.saved_model.utils.build_tensor_info(k) for k in self.eval_config}\n inputs[SIGNATURE_INPUT] = tf.saved_model.utils.build_tensor_info(self.feature_holder)\n\n outputs = {SIGNATURE_OUTPUT: tf.saved_model.utils.build_tensor_info(prediction), SIGNATURE_OUTPUT_ADV: tf.saved_model.utils.build_tensor_info(prediction_adv)}\n\n signature = tf.saved_model.signature_def_utils.build_signature_def(inputs, outputs, SIGNATURE_METHOD_NAME)\n self.builder.add_meta_graph_and_variables(self.session, tags=[tf.saved_model.tag_constants.SERVING], signature_def_map={SIGNATURE_KEY: signature})\n self.builder.save()",
"def download(fnames):\n download_path = Path('./models')\n if not download_path.exists() or not download_path.is_dir():\n print('The directory \\'models\\' does not exist!')\n print('Please ensure you are in the top level of the visual-attention-networks repository')\n print(' and that the \\'models\\' directory exists')\n sys.exit()\n\n server_url = 'https://github.com/davidmascharka/tbd-nets/releases/download/v1.0/'\n if isinstance(fnames, str): # a single file\n fnames = [fnames]\n for fname in fnames:\n if (download_path / fname).exists():\n print('Skipping {}: the file already exists'.format(fname))\n continue\n\n print('Downloading {}'.format(fname))\n urlretrieve(server_url + fname, str((download_path/fname).absolute()), _download_info)\n print('Finished')",
"def load_model(self, directory):\n with tf.Session() as sess:\n tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], directory)\n self.model = tf.contrib.predictor.from_saved_model(directory)",
"def load_model(self, sess, pb_model_path):\n\n logging.info(\"Import yolo model from pb start .......\")\n\n with sess.as_default():\n with sess.graph.as_default():\n with tf.gfile.FastGFile(pb_model_path, 'rb') as f_handle:\n logging.info(\"ParseFromString start .......\")\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_handle.read())\n logging.info(\"ParseFromString end .......\")\n\n tf.import_graph_def(graph_def, name='')\n logging.info(\"Import_graph_def end .......\")\n\n logging.info(\"Import yolo model from pb end .......\")",
"def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n 
workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def create_model(model_url, num_classes = 10):\n # Download the pretrained model and save it as a Keras layer\n feature_extractor_layer = hub.KerasLayer(model_url,\n trainable = False,\n name=\"feature_extraction_layer\",\n input_shape = IMAGE_SHAPE + (3,)) #freeze the already learned layers\n \n # Create our own model\n model = tf.keras.Sequential([\n feature_extractor_layer,\n layers.Dense(num_classes, activation = 'softmax', name = 'output_layer')\n ])\n\n return model",
"def downloadModel(url):\n print('Model download started...')\n fileName = url.split('/')[-1]\n filePath = f'../kerasModels/{fileName}'\n\n if not os.path.exists(os.path.dirname(filePath)):\n try:\n os.makedirs(os.path.dirname(fileName))\n except OSError as exc:\n if exc.errno != errno.EXIST:\n raise\n\n urlretrieve(url, fileName)\n print('Model download completed')\n return fileName",
"def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model",
"def download_model(model, model_hash, model_dir):\n if not os.path.isdir(model_dir):\n os.mkdir(model_dir)\n filename = os.path.join(model_dir, model)\n\n logger.info(f\"Downloading {model} to {filename}...\")\n urllib.request.urlretrieve(\n os.path.join(\"http://genesis.ugent.be/uvpublicdata/ms2pip/\", model), filename\n )\n if not check_model_integrity(filename, model_hash):\n raise InvalidXGBoostModelError()",
"def download_model(source, target, filename):\n if not os.path.exists(target):\n os.mkdir(target) \n target_file = str(Path(target).joinpath(filename))\n if os.path.exists(target_file):\n print('model already exists, skipping download')\n return\n print(\"Downloading from {} to {}\".format(source, target))\n wget.download(source, target_file) \n print(\"\\nDone!\")",
"def get_workload(model_path):\n\n repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/'\n model_name = os.path.basename(model_path)\n model_url = os.path.join(repo_base, model_path)\n\n from mxnet.gluon.utils import download\n download(model_url, model_name)\n\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\"./\", model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.import_graph_def(graph_def, name='')\n return graph_def",
"def download(self, root='./'):\n dir = os.path.join(root, 'tiny-imagenet-200')\n dir_train = os.path.join(dir, 'train')\n if os.path.exists(dir) and os.path.exists(dir_train):\n print('==> Already downloaded.')\n return\n\n path = Path(os.path.join(root, 'tiny-imagenet-200.zip'))\n if not os.path.exists(path):\n os.makedirs(path.parent, exist_ok=True)\n\n print('==> Downloading TinyImagenet200...')\n with urllib.request.urlopen(self.url) as response, \\\n open(str(path), 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n\n print('==> Extracting TinyImagenet200...')\n with zipfile.ZipFile(str(path)) as zf:\n zf.extractall(root)",
"def pretrained(name=\"multiclassifierdl_use_toxic\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(MultiClassifierDLModel, name, lang, remote_loc)",
"def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break",
"def download_pretrained_models(\n models_root_dir='/tmp/sketch_rnn/models',\n pretrained_models_url=PRETRAINED_MODELS_URL):\n tf.gfile.MakeDirs(models_root_dir)\n zip_path = os.path.join(\n models_root_dir, os.path.basename(pretrained_models_url))\n if os.path.isfile(zip_path):\n tf.logging.info('%s already exists, using cached copy', zip_path)\n else:\n tf.logging.info('Downloading pretrained models from %s...',\n pretrained_models_url)\n urlretrieve(pretrained_models_url, zip_path)\n tf.logging.info('Download complete.')\n tf.logging.info('Unzipping %s...', zip_path)\n with zipfile.ZipFile(zip_path) as models_zip:\n models_zip.extractall(models_root_dir)\n tf.logging.info('Unzipping complete.')",
"def pretrained(name=\"classifierdl_use_trec6\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(ClassifierDLModel, name, lang, remote_loc)"
] | [
"0.6803891",
"0.6580865",
"0.6538049",
"0.6379923",
"0.63532656",
"0.6349845",
"0.63104683",
"0.62438935",
"0.6224687",
"0.6155579",
"0.6152166",
"0.6123481",
"0.6121159",
"0.6108375",
"0.6105838",
"0.60727674",
"0.60591805",
"0.60474735",
"0.6028902",
"0.59698546",
"0.595215",
"0.59299666",
"0.58981824",
"0.58892703",
"0.5884731",
"0.5876882",
"0.5859347",
"0.58537453",
"0.58311474",
"0.5830266"
] | 0.75390905 | 0 |
binning and binarise outputs a csv into "cleaned" folder "_bin" | def bin_binarise(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _binarization(self):\n for feat in self.cat_feats:\n lbl = preprocessing.LabelBinarizer()\n lbl.fit(self.dataframe[feat].values)\n val = lbl.transform(self.dataframe[feat].values)\n self.dataframe_d_copy = self.dataframe_d_copy.drop(feat,axis=1)\n \n for j in range(val.shape[1]):\n new_col_name = feat + f'__bin_{j}'\n self.dataframe_d_copy[new_col_name] = val[:,j] \n self.binary_encoders[feat] = lbl\n joblib.dump(self.binary_encoders, f\"{self.output_path}/_binary_encoder.pkl\")\n return self.dataframe_d_copy",
"def filter_binaries_beamprofile(bin_arr, beamprofile, cutoff=0.75, dilate=0):\r\n bp_bool = beamprofile < cutoff * beamprofile.max()\r\n out_binary = np.empty_like(bin_arr, dtype=int)\r\n total_cells = 0\r\n removed_cells = 0\r\n\r\n for i, img in enumerate(bin_arr):\r\n labeled, n = mh.labeled.label(img)\r\n total_cells += n\r\n for l in np.unique(labeled)[1:]:\r\n selected_binary = multi_dilate(labeled == l, dilate)\r\n if np.any(np.logical_and(selected_binary, bp_bool)): # Cell lies outside of\r\n labeled[labeled == l] = 0\r\n removed_cells += 1\r\n out_binary[i] = labeled\r\n print('Removed {} cells out of a total of {} cells.'.format(removed_cells, total_cells))\r\n return out_binary",
"def binAnalysis(self):\n self.mode = 'binned'\n # --------------------------------------------------------------------------------------------- #\n # Make sure that another working directory is selected\n if self.workpath == self.datapath:\n print(\"\\t=== Variable 'self.workpath' is equal to 'self.datapath', provide another ===\")\n return\n else:\n if os.path.isfile(self.outgtlike):\n print(\"\\t=== Directory {} already contains a complete analysis, remove the .dat file ===\".format(self.workpath))\n return\n else:\n pass\n print(\"\\t=== Binned analysis will be computed in '{}' ===\".format(self.workpath))\n\n # --------------------------------------------------------------------------------------------- #\n # Create a temporary python script and launch the Science Tools\n fil = os.path.join(self.workpath, 'tmp_BinnedAnalysis'+self.suffix+'.py')\n tmp = open(fil, 'w')\n tmp.write(\"import algamma; import os; a=algamma.algamma(); a.ft1='{}';\\\n a.ft2='{}'; a.metstart={}; a.metstop={}; a.emin={}; a.emax={}; a.suffix='{}';\\\n a.workpath='{}'; a._gtSelect(); a._gtMktime();\\\n a._gtLtcube(); a._gtBincube(); a._gtExpmap(); a._gtSrcmap();\\\n a._gtLike(); os.remove('{}')\".format(self.ft1, self.ft2, \n self.metstart, self.metstop, self.emin, self.emax,\n self.suffix, self.workpath, fil))\n # Launch the file\n os.popen(\"nohup python {} &\".format(fil))\n tmp.close()\n\n return",
"def reduceBin(bin, size, binLabel):\n print(\"reducing bin [\" + str(binLabel) + \"] (size: \" + str(len(bin)) + \")\")\n np.random.shuffle(bin)\n chosenImages = bin[:size]\n newRatings = open(new_ratings_file_path, 'a')\n for image in chosenImages:\n newRatings.write(getRatingsLine(image[0], image[1]))\n newRatings.close()",
"def unbinAnalysis(self):\n self.mode = 'unbinned'\n # --------------------------------------------------------------------------------------------- #\n # Make sure that another working directory is selected\n if self.workpath == self.datapath:\n print(\"\\t=== Variable 'self.workpath' is equal to 'self.datapath', provide another ===\")\n return\n else:\n if os.path.isfile(self.outgtlike):\n print(\"\\t=== Directory {} already contains a complete analysis, remove the .dat file ===\".format(self.workpath))\n return\n else:\n pass\n print(\"\\t=== Unbinned analysis will be computed in '{}' ===\".format(self.workpath))\n\n # --------------------------------------------------------------------------------------------- #\n # Create a temporary python script and launch the Science Tools\n fil = os.path.join(self.workpath, 'tmp_UnbinnedAnalysis'+self.suffix+'.py')\n tmp = open(fil, 'w')\n tmp.write(\"import algamma; import os; a=algamma.algamma(); a.ft1='{}';\\\n a.ft2='{}'; a.metstart={}; a.metstop={}; a.emin={}; a.emax={}; a.suffix='{}';\\\n a.workpath='{}'; a._gtSelect(); a._gtMktime(); a._gtLtcube(); a._gtExpmap();\\\n a._gtLike(); os.remove('{}')\".format(self.ft1, self.ft2, \n self.metstart, self.metstop, self.emin, self.emax,\n self.suffix, self.workpath, fil))\n # Launch the file\n os.popen(\"nohup python {} &\".format(fil))\n tmp.close()\n\n return",
"def findBins(): \n\n df = pd.read_csv('significantData.csv')\n df = df.sort_values('RecordingTimestamp')\n df.to_csv('significantData.csv', index=False)\n read_in = pd.read_csv('significantData.csv')\n count = 0\n this = []\n return_bins = {}\n word = (read_in['AOI[Neutral_Left]Hit_0']).tolist()\n \n if word[0] == '1':\n return_bins.update({'start_value': 1})\n else: \n return_bins.update({'start_value': 0})\n for v, w in zip(word[:-1], word[1:]):\n if v == w and v != '': \n print v\n count = count + 1\n else: \n total = count\n this.append(count)\n my_list = sorted(list(set(this)))\n return_bins.update({'my_list': my_list})\n return return_bins",
"def rebin_bdt_output(samples, source, binning):\n # the number of new histogram contributions\n n_histos = 0\n\n # get the source histogram and the list of contributing sample folders\n sf_list = TList()\n samples.getHistogram(\".\", source, \"\", sf_list)\n\n # the name of the rebinned histogram is the old name, but \"FINE\" replaced with \"REBIN\"\n destination = TString(source.replace(\"FINE\", \"REBIN\"))\n new_name = TQFolder.getPathTail(destination) # destination becomes everything before last /\n new_name_remap = TString(new_name).ReplaceAll(\"REBIN\", \"REMAP\")\n logging.debug(\"New histogram names are %s and %s\", new_name, new_name_remap)\n\n # a list to keep track of sample folders that have already been handled\n done = TList()\n\n for sf in sf_list:\n # skip sample folders that have already been handled\n if done.FindObject(sf):\n logging.debug(\"Skipping %s, already handled\", sf.getName())\n continue\n done.Add(sf)\n\n # get the individual source histogram contribution\n hist = sf.getHistogram(\".\", source, \"\")\n if not hist:\n # we failed for some reason to obtain it\n logging.warning(\"Failed to obtain histogram %s in sample folder %s\", source, sf.getName())\n continue\n\n # rebin histogram\n hist = hist.Rebin(len(binning) - 1, \"\", array.array(\"d\", binning))\n\n # remap histogram\n hist_remap = remap_bdt_output(hist)\n\n # store as new histograms\n success_rebin = sf.addObject(hist, TQFolder.concatPaths(\".histograms\", destination) + \"+::\" + new_name)\n success_remap = sf.addObject(\n hist_remap, TQFolder.concatPaths(\".histograms\", destination) + \"+::\" + new_name_remap\n )\n if success_rebin and success_remap:\n n_histos += 1\n else:\n logging.warning(\"Failed to merge histogram %s in sample folder %s\", source, sf.getName())\n\n logging.info(\"Merged and remapped %d histograms for %s\", n_histos, source)",
"def makeBinaryMatrix(self):\n getPrimary = self.openFile.getPrimary()\n getSecondary = self.openFile.getSecondary()\n totalLen =len(getPrimary)*len(getSecondary)\n counterTime = 0\n counterPrimary = 0\n topList = getPrimary+getSecondary\n for p in getPrimary:\n\n for s in getSecondary:\n if \" or \" in s or \" OR \" in s:\n query = p+\" AND (\"+s+\")\"\n else:\n query = str(p+\" AND \"+s)\n print query\n ids = self.PubMed.searchPMID(query)\n for id in ids:\n self.makeBinTable(counterPrimary,s,id,topList)\n counterTime +=1\n #perc = (((counterTime)/(totalLen))*100)\n #print perc\n\n counterPrimary+=1\n\n self.splitCSV(topList)",
"def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()",
"def _get_dataset_bin(self):\n df_raw = pd.read_csv(self.path_bin)\n # kick all features we don't want\n features = get_features_from_questions(self.path_questions, remove_cond=True)\n features.append(self.target)\n df = df_raw[features]\n return df",
"def filter_binaries(bin_arr, remove_bordering=True, min_size=None, max_size=None, min_minor=None, max_minor=None,\r\n min_major=None, max_major=None):\r\n\r\n out = np.empty_like(bin_arr)\r\n for i, img in enumerate(bin_arr):\r\n if len(np.unique(img)) > 2: # Image is already labeled\r\n labeled = img\r\n else:\r\n labeled, n = mh.labeled.label(img)\r\n labeled, n = mh.labeled.filter_labeled(labeled, remove_bordering=remove_bordering, min_size=min_size, max_size=max_size)\r\n out[i] = (labeled > 0).astype(int) * labeled # Restore labels\r\n\r\n for j, img in enumerate(out):\r\n for i in np.unique(img)[1:]:\r\n selected_binary = (img == i).astype('int')\r\n min1, max1, min2, max2 = mh.bbox(selected_binary)\r\n selection = selected_binary[min1:max1, min2:max2]\r\n major, minor = mh.features.ellipse_axes(selection)\r\n\r\n if min_minor and minor < min_minor:\r\n img[img == i] = 0\r\n if max_minor and minor > max_minor:\r\n img[img == i] = 0\r\n if min_major and major < min_major:\r\n img[img == i] = 0\r\n if max_major and major > max_major:\r\n img[img == i] = 0\r\n\r\n return out",
"def bin(serie, bins):\n return serie.apply(lambda x: _bin(bins, x))",
"def generate_biz_directory():\n bizdf = pd.read_csv(inpath+bizfile, escapechar='\\\\')\n \n # drop rows with empty column entries\n bizdf = bizdf.dropna()\n\n # add a column to calculate the gid\n bizdf['gid'] = np.arange(1, bizdf.shape[0]+1)\n\n # write it out\n print \"[Generating the business directory...]\"\n # print stats to make sure printing write\n print_stats(bizdf) \n bizdf.to_csv(genpath+directory, index=False)",
"def make_RH_bins(path,start_date,end_date,bin_opt):\r\n \r\n # Load full output to read relative humidities (RH)\r\n time=[]\r\n with open(path, \"r\") as f:\r\n reader = csv.reader(f,delimiter=',')\r\n ct=1\r\n for row in reader:\r\n if ct==2:\r\n header = row\r\n elif ct>3:\r\n curtime = datetime.strptime('{} {}'.format(row[1],row[2]),\r\n '%Y-%m-%d %H:%M')\r\n time.append(curtime)\r\n ct+=1\r\n \r\n # Remove text columns from data and corresponding headers\r\n header = header[3:]\r\n data = np.genfromtxt(path,delimiter=',',skip_header=3) \r\n data = data[:,3:]\r\n \r\n sdatetime = datetime.strptime(start_date,'%Y-%m-%d')\r\n edatetime = datetime.strptime(end_date,'%Y-%m-%d')\r\n edatetime = edatetime+timedelta(1) # Add a day to include whole end_date\r\n # Cut to selected dates\r\n timei = [(t>sdatetime)&(t<=edatetime) for t in time]\r\n data = data[timei,:]\r\n \r\n RHi = [h=='RH' for h in header]\r\n RHi = np.where(RHi)[0][0]\r\n RH = np.ndarray.flatten(data[:,RHi])\r\n nni = ~np.isnan(RH)\r\n # Get the indices for each selected option\r\n bindex = np.zeros((len(RH),len(bin_opt)))\r\n for boi in range(len(bin_opt)):\r\n # Apply RH bin options to get bin limits\r\n if bin_opt[boi].lower()=='quantile':\r\n RH_bins = np.hstack((np.nanmin(RH)-0.01,\r\n np.quantile(RH[nni],[0.25,0.5,0.75]),\r\n np.nanmax(RH)+0.01))\r\n elif bin_opt[boi].lower()=='none':\r\n RH_bins = np.array([0,100])\r\n \r\n # Label each point with a bin (e.g. 0=exclude, 1=first bin, etc.)\r\n for bi in range(1,len(RH_bins)):\r\n thisbin = (RH>=RH_bins[bi-1]) & (RH<RH_bins[bi])\r\n bindex[thisbin,boi] = bi\r\n \r\n return bindex, time, data, header",
"def stopped_bus_data():\n os.chdir(\"../Data\")\n with open(\"Sorted Data/stopped_bus_data.csv\", \"w\", newline=\"\") as result_file:\n wr = csv.writer(result_file, dialect='excel')\n for file in glob.glob(\"*.csv\"):\n print(file)\n reader = csv.reader(open(file))\n for line in reader:\n try:\n if int(line[14]) == 1:\n wr.writerow(line)\n except:\n continue",
"def path_to_bin_files(path):\r\n files_list=list_of_files(path)\r\n for file in files_list:\r\n asm_lines = parse_data(file)\r\n symbols_dict = init_symbols_dictionary()\r\n collect_symbols_and_ignore_coments(asm_lines, symbols_dict)\r\n bin_lines = translate_to_binary(asm_lines, symbols_dict)\r\n create_output(bin_lines, file)",
"def csv_to_constant_bin_number(filepath,\n weight_column,\n N_bin,\n has_header=False,\n delim=',',\n quotechar='\"',\n lower_bound=None,\n upper_bound=None,\n ):\n\n data, weight_column, header = load_csv(filepath,\n weight_column,\n has_header=has_header,\n delim=delim,\n quotechar=quotechar,\n )\n\n bins = to_constant_bin_number(data,\n N_bin,\n weight_pos=weight_column,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n )\n print_binsizes(bins, weight_column)\n\n save_csvs(bins,\n filepath,\n header,\n delim=delim,\n quotechar=quotechar,\n )",
"def imageRebin(dir=None,rebinsize=(40,40)):\n if dir == None:\n dir = os.getcwd()+'/'\n filenameAll = gl.glob(dir+'*.fit*')\n filenameAll.sort()\n for filename in filenameAll:\n hdu = pf.open(filename)\n n = len(hdu)\n hdu[0].header.set('npix',rebinsize[0])\n hdu[0].header.set('scale',hdu[0].header.set('scale')*hdu[0].header.set('npix')/rebinsize[0])\n for i in range(1,n):\n img=hdu[i].data[0][4:].reshape(npix,npix)\n img = img.astype('f')\n if fft == False:\n covimg = sg.convolve2d(img,kern,mode='same')\n newfname = filename.replace('_noseeing_','_withseeing_')+'_fwhm_'+str(fwhm)+'_e1_'+str(e1)+'_e2_'+str(e2)+'.fit'\n else:\n covimg = sg.fftconvolve(img,kern,mode='same')\n newfname = filename.replace('_noseeing_','_withseeing_')+'_fwhm_'+str(fwhm)+'_e1_'+str(e1)+'_e2_'+str(e2)+'_fftconvolve.fit'\n covimg = covimg/covimg.sum()\n hdu[i].data[0][4:] = covimg.flatten()\n hdu.writeto(newfname)\n #os.system('gzip '+newfname)\n return 'done'",
"def preprocess(self):\n filtered_data = pd.read_csv(self.input)\n\n if self.config.getboolean(\"filterMissingsInGenes\"):\n # first filter out the genes that have more missings than threshold\n filtered_data = self.filterMissings(self.config[\"threshold\"], filtered_data)\n if self.config.getboolean(\"filterMissingsInSamples\"):\n # second transpose matrix and filter out samples that have more missings than threshold\n filtered_samples = self.filterMissings(self.config[\"threshold\"], filtered_data.T)\n filtered_data = filtered_samples.T\n\n # transpose back into original orientation and save\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_filtered.csv\"\n filtered_data.to_csv(filename, index=False)\n return filename",
"def splitCSV(self, topList):\n\n maxKeys = 45000\n fileCounter = 0\n topList.insert(0,\"\")\n print \"Bin grootte: \"+len(self.bin)\n for i in range (0,len(self.bin.keys()), maxKeys):\n fileCounter +=1\n #fileName = \"CRACKeLITe\"\n fileName = \"Corbion\"\n fileNameList = \"%s_Binary_Matrix_%i\" % (fileName,fileCounter)\n dic = OrderedDict(self.bin.items()[i:i+maxKeys])\n self.makeCSV(fileNameList, dic, topList)\n print \"CSV %i created\" % fileCounter\n print \"All CSVs created\"",
"def binary_encoding(df, bin_cols):\n for col in bin_cols:\n enc = BinaryEncoder(cols=col)\n bin_enc = enc.fit_transform(df[col])\n df = pd.concat([df, bin_enc], axis=1)\n df.drop(col, axis=1, inplace=True)\n return df",
"def bin_inscriptions(corpus):\n doc_bin = DocBin(attrs=[\"LEMMA\",\"TAG\",\"POS\",\"DEP\",\"HEAD\"], store_user_data=True)\n for c in corpus:\n doc = nlp(c)\n doc_bin.add(doc)\n\n with open('dbg.bin','wb') as f:\n f.write(doc_bin.to_bytes())",
"def to_bins(filein, fileout, window, numbins, chr=None, generator=None):\n if not generator:\n generator = hg38_generator()\n bam = pysam.AlignmentFile(filein, 'rb')\n cm = []\n for row in generator: # iterate over each chromosome\n if chr is None or (chr is not None and row[0] in chr): # checks for chr #\n count = int(int(row[1]) / window) # number of windows\n res = int(window / numbins)\n chr_i = row[0]\n for i in range(count): # iterate over each window\n win_start = i * window\n win_finish = (i + 1) * window - 1\n cm_i = np.zeros(3 + numbins, dtype=object) # array to hold bin counts info\n cm_i[0] = chr_i\n cm_i[1] = win_start\n cm_i[2] = win_finish\n for j in range(numbins): # iterate over each bin\n bin_start = win_start + j * res\n bin_finish = win_start + (j + 1) * res - 1\n cm_i[j + 3] = bam.count(chr_i, bin_start, bin_finish)\n cm.append(cm_i)\n status_statement(i, count, 20, chr_i)\n np.savetxt(fileout + \".csv\", np.asarray(cm), fmt='%s', delimiter=',')\n bam.close()",
"def augmentBin(bin, size, binLabel, data_path):\n # copy ratings of the original images to the new ratings file\n newRatings = open(new_ratings_file_path, 'a')\n for imagePath, rating in bin:\n newRatings.write(getRatingsLine(imagePath, rating))\n newRatings.close()\n # determine number of left images and generate them\n augmentationFactor = np.ceil(float(size) / len(bin))\n print(\"augmenting bin [\" + str(binLabel) + \"] (size: \" + str(len(bin)) + \", augmentationFactor: \" + str(\n augmentationFactor) + \")\")\n if augmentationFactor <= 1:\n return\n leftImages = size - len(bin)\n augmentedBin = []\n for imagePath, rating in bin:\n # determine how many images should be generated\n num_to_generate = augmentationFactor - 1\n actual_to_generate = num_to_generate if num_to_generate <= leftImages else leftImages\n num_generated = augmentImageByRotation(imagePath, actual_to_generate, binLabel, data_path)\n leftImages -= num_generated\n # break if no more images needed\n if leftImages <= 0:\n break",
"def binarize(self, image, threshold):\n\n bin_img = image.copy()\n [h, w] = bin_img.shape\n opt_threshold = threshold\n print(opt_threshold)\n for row in range(h):\n for col in range(w):\n if bin_img[row, col] > opt_threshold: #greater than threshld white(general)\n bin_img[row, col] = 255 #0 instead of 1\n else: #less than threshold black(general)\n bin_img[row, col] = 0 #0 instead of 1\n\n\n #reverse the cases\n\n return bin_img",
"def dump_all_binaries_to_CSV():\n ## TODO\n timenow = datetime.now()",
"def main(in_path, keep_path, out_path):\n\t# First open the input csv\n\tcsv_hndl = lambda x: np.array([np.array(r) for r in x])\n\tdata, headers = read_csv(in_path, csv_hndl, use_headers=True, delimiter=',')\n\n\t# Read headers to keep\n\tkeeps = []\n\n\t# Regex for ignoring comments\n\tcmnt_re = re.compile(\"^#\")\n\n\t# Open and read the file\n\twith open(keep_path) as f_obj:\n\t\tfor line in f_obj:\n\t\t\tline = line.strip()\n\t\t\t# If line is commented out, ignore\n\t\t\tif cmnt_re.match(line):\n\t\t\t\tcontinue\n\t\t\t# Otherwise add to list of keeps\n\t\t\tkeeps.append(line)\n\n\t# Prune the csv\n\tnew_data, new_headers = prune_csv(data,headers,keeps)\n\n\t# Write to output csv file\n\twrite_csv(\n\t\tout_path, \n\t\tnew_data, \n\t\tnew_headers, \n\t\tdelimiter=',', \n\t\tquotechar='\"',\n\t\tquoting=csv.QUOTE_MINIMAL\n\t)",
"def save_bin(data, file_path):\n np.save(file_path, data)",
"def seqff(self):\r\n\r\n start = time.time()\r\n\r\n # load bininfo\r\n bininfo = load_bininfo(self.bininfodata_loc)\r\n\r\n # load input files\r\n if os.path.isdir(self.input_loc):\r\n input_list = [self.input_loc + x for x in os.listdir(self.input_loc)]\r\n\r\n elif os.path.isfile(self.input_loc):\r\n input_list = [self.input_loc]\r\n\r\n else:\r\n raise FileNotFoundError(\"error occurred : inputData is not a Directory or File\")\r\n\r\n for i, file in enumerate(input_list):\r\n filetype = file.split(\".\")[-1]\r\n # filetype : 'sam' or 'bam' or 'newtemp'\r\n if 'sam' in filetype:\r\n bincount = load_sam(file)\r\n\r\n elif 'newtemp' in filetype:\r\n bincount = load_counts(file)\r\n file = file.replace(\".newtemp\", \"\") # TEMP .newtemp -> .bam\r\n\r\n elif 'bam' in filetype:\r\n bincount = load_bam(file)\r\n\r\n else:\r\n continue\r\n\r\n #CREATE newtemp file in \"output_loc\"/newtemp/\r\n create_newtemp(bincount, file, self.newtemp_loc)\r\n\r\n newtemp = pd.DataFrame.from_dict(bincount, orient='index')\r\n newtemp.reset_index(level=0, inplace=True)\r\n newtemp.rename(columns={'index': 'binName', 0: 'counts'}, inplace=True)\r\n\r\n temp_bininfo = bininfo.copy(deep=True)\r\n temp_bininfo = temp_bininfo.merge(newtemp, on='binName',\r\n how='left') # missing value : NaN, not NA in pandas\r\n temp_bininfo['counts'] = temp_bininfo['counts'].fillna(0)\r\n\r\n temp_bininfo.sort_values(by='binorder', inplace=True)\r\n temp_bininfo.reset_index(drop=True)\r\n\r\n ####DATA PROCESSING #######################\r\n autosomebinsonly = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != 'NA') and \\\r\n (float(temp_bininfo['GC'][index]) > 0.316) and \\\r\n (temp_bininfo['CHR'][index] != 'chrX') and \\\r\n (temp_bininfo['CHR'][index] != 'chrY')\r\n autosomebinsonly.append(boolean)\r\n autosomebinsonly = pd.Series(autosomebinsonly)\r\n\r\n alluseablebins = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != \"NA\") and (float(temp_bininfo['GC'][index]) > 0.316)\r\n alluseablebins.append(boolean)\r\n alluseablebins = pd.Series(alluseablebins)\r\n\r\n #CREATE alluseablebins file in \"output_loc\"/alluseablebins\r\n #create_alluseablebins(alluseablebins, file, self.alluseablebins_loc)\r\n\r\n sum_counts = pd.Series(temp_bininfo['counts'])\r\n sum_counts = sum_counts[autosomebinsonly].sum(skipna=True)\r\n\r\n autoscaledtemp = pd.Series(temp_bininfo['counts'].loc[(autosomebinsonly)],\r\n copy=True) / sum_counts # NA-related code removed\r\n allscaledtemp = pd.Series(temp_bininfo['counts'].loc[(alluseablebins)], copy=True) / sum_counts\r\n\r\n gc_index = {}\r\n cnt = 0\r\n for index, isauto in enumerate(autosomebinsonly):\r\n if isauto:\r\n if temp_bininfo['GC'].iat[index] in gc_index:\r\n gc_index[temp_bininfo['GC'].iat[index]].append(float(autoscaledtemp.iat[cnt]))\r\n cnt += 1\r\n\r\n else:\r\n gc_index[temp_bininfo['GC'].iat[index]] = [float(autoscaledtemp.iat[cnt])]\r\n cnt += 1\r\n\r\n key_list = []\r\n val_list = []\r\n for key, val in gc_index.items():\r\n key_list.append(key)\r\n val_list.append(np.median(val))\r\n\r\n loess_var = loess(key_list, val_list) # default span : 0.75\r\n loess_var.fit()\r\n # y = loess.loess_prediction(newData, loessVar)\r\n # temp_loessPredict.loess_debugging(loessVar)\r\n\r\n ###prediction###\r\n loess_x = [float(gc) for index, gc in enumerate(temp_bininfo['GC']) if (alluseablebins[index])]\r\n # print(temp_bininfo['GC'])\r\n loess_fitted = loess_var.predict(loess_x)\r\n loess_fitted = list(loess_fitted.values)\r\n # 
print(loess_fitted)\r\n\r\n median_autoscaledtemp = np.median(autoscaledtemp)\r\n median_autoscaledtemp = float(median_autoscaledtemp) # for fixed constant\r\n\r\n normalizedbincount = [(x + (median_autoscaledtemp - loess_fitted[index])) for index, x in\r\n enumerate(allscaledtemp)]\r\n\r\n #CREATE normalizedbincount in \"output_loc\"/normalizedbincount\r\n create_normalizedbincount(normalizedbincount, file, self.normalizedbincount_loc)\r\n\r\n bincounts = pd.Series(data=np.repeat(a=0.0, repeats=61927), index=temp_bininfo['binName'], dtype=np.float64)\r\n\r\n sum_normalizedbincount = sum([val for val in normalizedbincount if not math.isnan(val)])\r\n sum_normalizedbincount = float(sum_normalizedbincount) # deep copy temporarily\r\n\r\n cnt = 0\r\n for index, x in enumerate(alluseablebins):\r\n if x == True:\r\n data = (normalizedbincount[cnt] / sum_normalizedbincount) * len(normalizedbincount)\r\n bincounts.iat[index] = data\r\n cnt += 1\r\n\r\n #CREATE bincounts in \"output_loc\"/bincounts\r\n create_bincounts(bincounts, file, self.bincounts_loc)\r\n\r\n wrsc = self.prediction(bincounts, self.B, self.mu, self.parameter_1, self.parameter_2)\r\n enet = np.dot(bincounts, (self.elnetbeta)) + (self.elnetintercept)\r\n ff = (wrsc+enet) / 2\r\n\r\n result_lines = list()\r\n result_lines.append(\"SeqFF\\tEnet\\tWRSC\")\r\n result_lines.append(\"{}\\t{}\\t{}\".format(ff, enet, wrsc))\r\n\r\n #CREATE results of seqff (seqff paper result covered) in \"output_loc\"/results\r\n create_results(result_lines, file, self.results_loc)\r\n\r\n end = time.time()\r\n elapsed = end - start\r\n h = int(elapsed) // 3600\r\n m = (int(elapsed) - (h * 3600)) // 60\r\n s = (int(elapsed) % 60)\r\n print(\"elapsed time: %d hr %d min %d sec\" % (h, m, s))\r\n print(\"elapsed :\", elapsed)\r\n print(\"progress : {} / {}\".format(i + 1, self.progress))",
"def zip_imagenet100c():\n #First make sure the directory we are given is correct!\n if not os.path.isdir(DATA_SRC_ROOT):\n raise Exception(\"Bad filepath given\")\n\n #create the destiantion directories if they don't exist\n if not os.path.isdir(IMAGENET100_DIR):\n os.mkdir(IMAGENET100_DIR)\n\n #grab the subset wnids for the 100 class-subset\n with open(IMAGENET100_CLASSES) as f:\n subset_wnids = f.readlines()\n subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab\n\n #Grab the names of all of the folders inside the root data source\n #Structure is distortion/sub_distortion/level/wnids\n for distortion in os.listdir(DATA_SRC_ROOT):\n if distortion != \"meta.bin\":\n print(distortion)\n\n folder_path = os.path.join(DATA_SRC_ROOT, distortion)\n\n if not os.path.isdir(folder_path):\n continue\n\n for sub_distortion in os.listdir(folder_path):\n print(sub_distortion)\n\n subfolder_path = os.path.join(folder_path, sub_distortion)\n\n if not os.path.isdir(subfolder_path):\n continue\n\n for level in os.listdir(subfolder_path):\n print(level)\n\n level_path = os.path.join(subfolder_path, level)\n\n #grab the correcrt validation d9recotires\n for wnid in os.listdir(level_path):\n wnid_path = os.path.join(level_path, wnid)\n\n if not os.path.isdir(wnid_path):\n continue\n\n if wnid in subset_wnids:\n dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)\n\n shutil.copytree(wnid_path, dest_path)\n\n #copy the metadata bin file\n meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')\n meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')\n\n shutil.copy(meta_file, meta_dest)\n\n #Zip the destinatio file\n shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)"
] | [
"0.64596856",
"0.6200215",
"0.6026526",
"0.57897776",
"0.55271953",
"0.5480028",
"0.54477143",
"0.54473263",
"0.5424563",
"0.53364795",
"0.5334798",
"0.53143245",
"0.5308128",
"0.5306266",
"0.5300616",
"0.53005636",
"0.5279393",
"0.5272336",
"0.5262879",
"0.52385354",
"0.52289677",
"0.52249473",
"0.52089643",
"0.52083874",
"0.51999617",
"0.5190059",
"0.5165602",
"0.51508456",
"0.51453066",
"0.5145202"
] | 0.6602217 | 0 |
returns row indices where more than threshold entries are missing, e.g. 0.5 | def _remove_non_informative_rows(self, df, threshold):
df_tmp = pd.DataFrame()
n_features = len(df.columns)
# calculating ratio of rows that have more than "ratio" missing values
df_tmp['ratio'] = df.apply(lambda row: row.isnull().sum()/n_features, axis='columns')
# kick too noisy rows
return df[df_tmp['ratio'] <= threshold] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filterMissings(self, threshold, data):\n\n #replace NAs by 0 for counting\n data.fillna(0).astype(bool).sum(axis=1)\n\n filtered_columns = data.columns\n\n\n #find out threshold, i.e. minimum number of non-zero in real numbers\n rowNumber = data.shape[0]\n min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))\n\n zero_counts = data.astype(bool).sum(axis=0)\n\n for columnID, nonZeros in zero_counts.items():\n if nonZeros <= min_nonZeros:\n filtered_columns = filtered_columns.drop(columnID)\n\n\n return data[filtered_columns]",
"def high_null_count(df, thresh):\n cols_remove = []\n for col in df.columns:\n if df[col].isna().sum() / df.shape[0] >= thresh:\n cols_remove.append(col)\n\n return df.drop(columns=cols_remove, axis=1)",
"def remove_missing(self, threshold=1):\n qualified = np.sum(self.table.isnull().values, 1) >= threshold\n print(f\"@Timetable.remove_missing: {np.sum(qualified)} with more than {threshold} missing data will be removed.\")\n self.table = self.table[np.logical_not(qualified)]",
"def filter_rows_by_non_empty(df, threshold=1):\n # Boolean DataFrame where `True` means the cell value is non-zero.\n non_zeros = df.applymap(lambda cell: cell != 0)\n\n # Boolean Series where `True` means the row has enough non-zeros.\n enough_non_zeros = non_zeros.apply(\n # Check that the row contains `True`, meaning it has a non-zero.\n # check that the row has enough non-zeros, i.e. more than the threshold.\n lambda row: True in row.value_counts() and row.value_counts()[True] > threshold,\n axis=1\n )\n result = df[enough_non_zeros]\n if df.shape != result.shape:\n print('THRESHOLDING: filter_rows_by_non_empty')\n return result",
"def locate_nan_rows(arr):\n # Count the number of NaNs in each row\n nan_counts = np.sum(~np.isfinite(arr), axis=1)\n # Trigger on a NaN appearing anywhere in a line/row\n nans, = np.where(nan_counts > 1)\n return frozenset(nans)",
"def group_by_missing_percent(df, threshold=0.5):\n percent = list_missing_pct(df)\n condition = percent < threshold\n return df[percent[condition].index], df[percent[~condition].index]",
"def get_min_filled_threshold(df):\n percentage = 0.1\n return df.shape[0] * percentage",
"def remove_empty_columns(x, threshold=0.4):\n # For each column compute the ratio of nan values over the number of rows\n prop_empty_column = (np.isnan(x)).sum(axis=0) / len(x)\n column_mask = prop_empty_column < threshold\n return x[:, column_mask], column_mask",
"def missing_stats(X, missing_threshold, axis=1):\n a = 1-axis\n missing_series = X.isnull().sum(axis = a) / X.shape[a]\n # Calculate the fraction of missing in each column \n missing_series = X.isnull().sum() / X.shape[0]\n if axis == 1:\n missing_stats_cols = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})\n # Sort with highest number of missing values on top\n missing_stats_cols = missing_stats_cols.sort_values('missing_fraction', ascending = False)\n missing_threshold_cols_grid = pd.DataFrame(missing_series[missing_series >= missing_threshold]).reset_index().rename(columns = {'index': 'cols', 0: 'missing_fraction'})\n return missing_threshold_cols_grid\n elif axis == 0:\n missing_stats_rows = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})\n # Sort with highest number of missing values on top\n missing_stats_rows = missing_stats_rows.sort_values('missing_fraction', ascending = False)\n missing_threshold_rows_grid = pd.DataFrame(missing_series[missing_series > missing_threshold]).reset_index().rename(columns = {'index': 'rows', 0: 'missing_fraction'})\n return missing_threshold_rows_grid",
"def remove_rows(df, threshold, log=False):\n if log: section_timer = Timer(log=f\"removing rows with more than {threshold * 100}% of NaNs\")\n\n non_nan_values = int(df.shape[1] * (1 - threshold))\n df_clean = df.dropna(thresh=non_nan_values, axis=0)\n\n if log: section_timer.end_timer(log=f\"removed {df.shape[0] - df_clean.shape[0]} rows\")\n return df_clean",
"def get_first_N_above_thresh(N, freqs, thresh, decimals=3):\n unique_freqs, unique_indices = np.unique(np.round(freqs, decimals=decimals), return_index=True)\n nonzero = unique_freqs > thresh\n unique_freqs, unique_indices = unique_freqs[nonzero], unique_indices[nonzero]\n return unique_freqs[:N], unique_indices[:N]",
"def count_abs_index(arr1d, threshold):\n count = 0\n for ele in arr1d:\n if ele <= threshold:\n count = count + 1\n return count",
"def drop_nan_streaks_above_threshold(df, df_nan_table, thresholds):\n\n # Check for NaN streaks > threshold and drop them from the df\n length = len(df_nan_table['Amount of NaNs'])\n print('df_nan_table length: %s' % length)\n\n indices_to_drop = []\n for i, amount in enumerate(df_nan_table['Amount of NaNs']):\n selected_column = df_nan_table['Column name'][i]\n try:\n if amount > thresholds[selected_column]:\n start_index = (df_nan_table['Start index'][i])\n stop_index = (df_nan_table['Stop index'][i])\n indices = df[start_index:stop_index].index\n print('Enumeration %s of %s | From \\t %s \\t to \\t %s | column %s | NaN streak length: %s'\n % (i, length, start_index, stop_index, selected_column, (len(indices))))\n try:\n indices_to_drop += indices\n except:\n print('Could not add indices to indices_to_drop list')\n else:\n #print('amount < threshold')\n pass\n except:\n #print('No threshold detected for %s' % selected_column)\n pass\n\n print('Dropping NaN streaks > threshold')\n l1 = len(df)\n df = df.drop(indices_to_drop)\n l2 = len(df)\n print('Removed %s rows' % (l1-l2))\n return df",
"def find_nonexceed(trainy, train_tree_node_ID, pred_tree_node_ID, thres):\n \n npred = pred_tree_node_ID.shape[0]\n out = np.zeros((npred, thres.shape[0]))*np.nan\n for i in prange(pred_tree_node_ID.shape[0]):\n for j in prange(thres.shape[0]):\n idxs = np.where(train_tree_node_ID == pred_tree_node_ID[i, :])[0]\n sample = trainy[idxs]\n out[i, j] = (sample < thres[j]).sum() / float(sample.shape[0])\n return out",
"def num_larger(threshold, values):\n num = sum([1 for n in values if (n>threshold)])\n return num",
"def drop_high_nan(df, threshold=0.5):\n n_nans = df.isnull().sum()\n freq_nans = n_nans/float(len(df)) #in percentage\n to_drop = (freq_nans > threshold).values\n columns_drop = df.columns.values[to_drop].tolist()\n return df.drop(columns_drop, axis=1)",
"def relevant_indexes(data, min_threshold):\n\n start_index = 1\n end_index = len(data) - 1\n\n for i in range(len(data)):\n if data[i] > min_threshold:\n start_index = i\n break\n\n for i in range(len(data)):\n if data[::-1][i] > min_threshold:\n end_index = i\n break\n\n return start_index, end_index",
"def nancnt_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.sum(~np.isnan(a[:, col]))\n return out",
"def size_filter(db, threshold):\n idx2rm = []\n for i, x in db.iterrows():\n if x['size'] < threshold:\n idx2rm.append(i)\n\n print(\"Numero de imagenes: \",len(db)-len(idx2rm),\" de \", len(db))\n return db.drop(idx2rm)",
"def check_miss_count(self):\n first = self.attk_arry[-1]\n second = self.attk_arry[-2]\n third = self.attk_arry[-3]\n fourth = self.attk_arry[-4]\n sum_of_attk = first + second + third + fourth\n if sum_of_attk == 8:\n self.column_arry.append(10)\n self.row_arry.append(10)\n else:\n pass",
"def get_communities_above_threshold(data, threshold_size):\n index_counts = data.index.value_counts()\n count_list = index_counts[index_counts > threshold_size].index.values.tolist()\n return count_list",
"def delete_outliers_of_data_before(data: np.ndarray, qi_inspect: int, threshold: int):\n idx_to_del = []\n done = False\n for j in range(data.shape[0]):\n if data[j, qi_inspect] < threshold:\n if not done:\n idx_to_del = j\n done = True\n else:\n idx_to_del = np.append(idx_to_del, j)\n return np.delete(data, idx_to_del, axis=0)",
"def outlier_determine_threshold(df, col):\r\n df = df.copy(deep=True)\r\n keep_looping = True\r\n number_of_loops = 1\r\n thresh = 5\r\n while keep_looping:\r\n if number_of_loops >= 10:\r\n break\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n pct_outliers = len(dfout_index)/len(df)\r\n if pct_outliers == 0:\r\n if thresh > 5:\r\n thresh = thresh - 5\r\n elif thresh == 5:\r\n return thresh\r\n else:\r\n thresh = thresh - 1\r\n elif pct_outliers <= 0.01:\r\n keep_looping = False\r\n else:\r\n thresh_multiplier = int((pct_outliers/0.01)*0.5)\r\n thresh = thresh*thresh_multiplier\r\n number_of_loops += 1\r\n print(' %s Outlier threshold = %d' %(col, thresh))\r\n return thresh",
"def check_unique(df, threshold=3):\n counts = {}\n insuf_cols = []\n has_empty_cols = []\n cols = df.columns.values\n for col in cols:\n uniq_vals = df[col].unique() \n count_uniq = len(uniq_vals)\n print(\"%20s: %5d\" % (col, count_uniq), end=\" \")\n counts[col] = count_uniq\n if count_uniq <= threshold:\n insuf_cols.append(col)\n print(uniq_vals, end= \" \")\n if '' in uniq_vals:\n print(\"Has empty\", end=\" \")\n has_empty_cols.append(col)\n print(\"\")\n return counts, insuf_cols, has_empty_cols",
"def noisy_cells(self, hists, thresholds):\n return [[[x + 1, z + 1, i + 1] for x in range(h.GetNbinsX()) for z in range(h.GetNbinsY()) if h.GetBinContent(x + 1, z + 1) > threshold] for i, (h, threshold) in enumerate(zip(hists, thresholds))]",
"def apply_threshold(da, threshold=1.):\n with np.errstate(all='ignore'):\n result = xr.where(da < threshold, np.nan, da)\n result.attrs = da.attrs\n return result",
"def _check_preceding_time_interval_threshold(data, index, time_window, threshold, min_count):\n\n\t# define the start slice (note that we look backwards here)\n\tstart_slice = index - time_window\n\t# define the end slice, since python does not include the item defined in the end slice, we do not have to subtract -1. For example, 100:120 does not include 120\n\tend_slice = index\n\n\t# if the start slice is negative, then we set it to 0 since there are no values with indexes lower than 0\n\tif start_slice < 0:\n\t\t# set start slice to zero to indicate the beginning of the list\n\t\tstart_slice = 0\n\t\n\t# return True or False if the window contains more than the min_count\n\treturn ((data[start_slice:end_slice] > threshold).sum()) >= min_count",
"def filter_out_rare_points(points, threshold_pct=0.5):\n \n c = Counter(points)\n total = sum(c.values())\n l = []\n for p in points:\n v = c[p]\n if v/total * 100 <= threshold_pct:\n l.append(np.nan)\n else:\n l.append(p)\n \n return l",
"def remove_cols_high_missing_rates(data, min_missing_rate=0.4):\n cols_keep = list(data.isna().mean()[data.isna().mean() < min_missing_rate].index)\n return data[cols_keep], cols_keep",
"def filter_rows_by_non_empty_until(df, max_=MAX_NUM_ROWS):\n print('Starting shape: %s' % str(df.shape))\n threshold = 1\n while df.shape[0] > max_:\n df = filter_rows_by_non_empty(df, threshold=threshold)\n print('THRESHOLDING: to shape: %s' % str(df.shape))\n threshold += 1\n print('Ending shape: %s' % str(df.shape))\n return df"
] | [
"0.6818863",
"0.6373628",
"0.6308382",
"0.6293913",
"0.62112623",
"0.6198604",
"0.6150571",
"0.6133567",
"0.5942086",
"0.5931809",
"0.5892998",
"0.58830494",
"0.5831952",
"0.58169055",
"0.58158576",
"0.57656103",
"0.5757377",
"0.5756951",
"0.5692738",
"0.5690874",
"0.568918",
"0.5683435",
"0.567797",
"0.56775707",
"0.56623787",
"0.56552887",
"0.56208444",
"0.56002945",
"0.5568236",
"0.55292594"
] | 0.6447905 | 1 |
creates a dataframe with r samples for each feature | def _get_sample_df(self, df, features, r):
grouped = df.groupby('feature')
df_sample = pd.DataFrame()
for feature in features:
group = grouped.get_group(feature)
samples = group.sample(n=r)
df_sample = df_sample.append(samples)
return df_sample | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()",
"def create_sample_dataframe():\n ax_readings = []\n ay_readings = []\n az_readings = []\n mx_readings = []\n my_readings = []\n mz_readings = []\n gx_readings = []\n gy_readings = []\n gz_readings = []\n activity_list = [LABELS_NAMES[0] for _ in range(SEGMENT_TIME_SIZE)]\n\n\n for _ in range(SEGMENT_TIME_SIZE):\n ax_readings.append(random.uniform(-10,10))\n ay_readings.append(random.uniform(-10,10))\n az_readings.append(random.uniform(-10,10))\n mx_readings.append(random.uniform(-10,10))\n my_readings.append(random.uniform(-10,10))\n mz_readings.append(random.uniform(-10,10))\n gx_readings.append(random.uniform(-10,10))\n gy_readings.append(random.uniform(-10,10))\n gz_readings.append(random.uniform(-10,10))\n\n data_dict = {\n COLUMN_NAMES[0]: activity_list, COLUMN_NAMES[1]: ax_readings,\n COLUMN_NAMES[2]: ay_readings, COLUMN_NAMES[3]: az_readings,\n COLUMN_NAMES[4]: gx_readings, COLUMN_NAMES[5]: gy_readings,\n COLUMN_NAMES[6]: gz_readings, COLUMN_NAMES[7]: mx_readings,\n COLUMN_NAMES[8]: my_readings, COLUMN_NAMES[9]: mz_readings\n }\n\n df = pd.DataFrame(data=data_dict)\n return df",
"def generate_trajectories_feature(self):\n if self.df_feature is not None:\n return self.df_feature\n trajs_feature = [traj.get_basic_feature() for traj in self.trajectories]\n self.df_feature = pd.DataFrame(trajs_feature)\n self.df_feature[\"LABEL\"] = self.df[\"LABEL\"]\n return self.df_feature",
"def create_samples(self):\n sample_list = []\n genes = []\n for record in range(len(self.data_dict[\"samples\"])):\n sample_id = self.data_dict[\"samples\"][record]\n genes_cols = list(self.data_dict.keys())[2:]\n for gene in genes_cols:\n genes.append(self.data_dict[gene][record])\n label = self.data_dict[\"type\"][record]\n sample_list.append(Sample(sample_id, genes, label))\n genes = []\n return sample_list",
"def sample_row(row, columns):\n\tsampled_row = pd.Series( index = columns)\n\n\t# Sampleo cada feature segun la distribucion de la fila\n\tfor c in columns:\n\t\tc = c.rstrip('.mean')\n\t\tsampled_row[c + '.mean'] = random.normal(row[c + '.mean'], row[c + '.std'])\n\n\t# Agrego la columna clase\n\tsampled_row['class'] = row['class']\n\n\treturn sampled_row",
"def get_features_dataframe(tids):\n\n Data = {}\n for tid in tids:\n Data[tid] = get_song_features(tid)\n return pd.DataFrame.from_dict(Data, orient='index')",
"def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df",
"def samples(self):\n sample = {}\n for label, df in self.data.groupby(by='Sample'):\n sample[label] = Sample(label, df)\n return sample",
"def sample_rows(df, nrows):",
"def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)",
"def read_samples(path: PathType) -> pd.DataFrame:\n df = pd.read_csv(path, sep=\" \", skiprows=[1], usecols=[0])\n df.columns = [\"sample_id\"]\n return df",
"def run(model, num_samples=5000, ignore_unnamed=True) -> pd.DataFrame:\n samples: Dict[str, List[float]] = {}\n for i in tqdm.trange(num_samples):\n trace = pyro.poutine.trace(model).get_trace()\n for name in trace.nodes.keys():\n if trace.nodes[name][\"type\"] == \"sample\":\n if not ignore_unnamed or not name.startswith(\"_var\"):\n samples.setdefault(name, [])\n samples[name].append(trace.nodes[name][\"value\"].item()) # FIXME\n return pd.DataFrame(samples) # type: ignore ",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def samples(self, feature_object):\n db = ImageCollection()\n sample_cache = \"{}-{}\".format(feature_object.name+'-based', 'all-products')\n\n try:\n samples = cPickle.load(\n open(os.path.join(Config.engine_cache_path(), sample_cache), \"rb\", True))\n return samples\n except:\n samples = []\n\n i = 0\n data = db.get_collection()\n for d in data.itertuples():\n try:\n cls1, cls2, img = getattr(d, \"cls1\"), getattr(d, \"cls2\"), getattr(d, \"img\")\n image = Config.product_image_path()\n if len(cls1) > 0:\n image = image + '/' + cls1\n if len(cls2) > 0:\n image = image + '/' + cls2\n image = image + '/' + img\n samples.append(self.sample(image, cls1, cls2, feature_object))\n i = i + 1\n print(i)\n except:\n continue\n with open(Config.engine_cache_path()+'/'+sample_cache, 'wb') as fp:\n dill.dump(samples, fp)\n\n return samples",
"def random_sample(df, batch_size):\n sample = df.sample(n=batch_size)\n #print(sample)\n video_ids = list(sample.video_id.values.astype(str))\n labels = list(sample.label.values)\n\n return video_ids, labels",
"def generate_features(df):\n df_new = pd.DataFrame()\n \n # 6 original features\n df_new['open'] = df['open']\n df_new['open_1'] = df['open'].shift(1)\n df_new['close_1'] = df['close'].shift(1)\n df_new['high_1'] = df['high'].shift(1)\n df_new['low_1'] = df['low'].shift(1)\n df_new['volume_1'] = df['volume'].shift(1)\n \n # 50 original features\n # average price\n df_new['avg_price_5'] = df['close'].rolling(window=5).mean().shift(1)\n df_new['avg_price_30'] = df['close'].rolling(window=21).mean().shift(1)\n df_new['avg_price_90'] = df['close'].rolling(window=63).mean().shift(1)\n df_new['avg_price_365'] = df['close'].rolling(window=252).mean().shift(1)\n \n # average price ratio\n df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']\n df_new['ratio_avg_price_905_'] = df_new['avg_price_5'] / df_new['avg_price_90']\n df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']\n df_new['ratio_avg_price_30_90'] = df_new['avg_price_30'] / df_new['avg_price_90']\n df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']\n df_new['ratio_avg_price_90_365'] = df_new['avg_price_90'] / df_new['avg_price_365'] \n \n \n # average volume\n df_new['avg_volume_5'] = df['volume'].rolling(window=5).mean().shift(1)\n df_new['avg_volume_30'] = df['volume'].rolling(window=21).mean().shift(1)\n df_new['avg_volume_90'] = df['volume'].rolling(window=63).mean().shift(1)\n df_new['avg_volume_365'] = df['volume'].rolling(window=252).mean().shift(1)\n \n #average volume ratio\n df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']\n df_new['ratio_avg_volumee_5_90'] = df_new['avg_volume_5'] / df_new['avg_volume_90'] \n df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_30_90'] = df_new['avg_volume_30'] / df_new['avg_volume_90']\n df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_90_365'] = df_new['avg_volume_90'] / df_new['avg_volume_365'] \n \n \n # standard deviation of prices\n df_new['std_price_5'] = df['close'].rolling(window=5).std().shift(1)\n df_new['std_price_30'] = df['close'].rolling(window=21).std().shift(1)\n df_new['std_price_90'] = df['close'].rolling(window=63).std().shift(1) \n df_new['std_price_365'] = df['close'].rolling(window=252).std().shift(1)\n \n # standard deviation ratio of prices \n df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30']\n df_new['ratio_std_price_5_90'] = df_new['std_price_5'] / df_new['std_price_90']\n df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']\n df_new['ratio_std_price_30_90'] = df_new['std_price_30'] / df_new['std_price_90'] \n df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365'] \n df_new['ratio_std_price_90_365'] = df_new['std_price_90'] / df_new['std_price_365'] \n \n \n # standard deviation of volumes\n df_new['std_volume_5'] = df['volume'].rolling(window=5).std().shift(1)\n df_new['std_volume_30'] = df['volume'].rolling(window=21).std().shift(1)\n df_new['std_volume_90'] = df['volume'].rolling(window=63).std().shift(1)\n df_new['std_volume_365'] = df['volume'].rolling(window=252).std().shift(1)\n \n #standard deviation ratio of volumes\n df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30']\n df_new['ratio_std_volume_5_90'] = df_new['std_volume_5'] / df_new['std_volume_90']\n df_new['ratio_std_volume_5_365'] = 
df_new['std_volume_5'] / df_new['std_volume_365'] \n df_new['ratio_std_volume_30_90'] = df_new['std_volume_30'] / df_new['std_volume_90']\n df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365']\n df_new['ratio_std_volume_90_365'] = df_new['std_volume_90'] / df_new['std_volume_365'] \n \n # return\n df_new['return_1'] = ((df['close'] - df['close'].shift(1)) / df['close'].shift(1)).shift(1)\n df_new['return_5'] = ((df['close'] - df['close'].shift(5)) / df['close'].shift(5)).shift(1)\n df_new['return_30'] = ((df['close'] - df['close'].shift(21)) / df['close'].shift(21)).shift(1)\n df_new['return_90'] = ((df['close'] - df['close'].shift(63)) / df['close'].shift(63)).shift(1) \n df_new['return_365'] = ((df['close'] - df['close'].shift(252)) / df['close'].shift(252)).shift(1)\n \n #average of return\n df_new['moving_avg_5'] = df_new['return_1'].rolling(window=5).mean()\n df_new['moving_avg_30'] = df_new['return_1'].rolling(window=21).mean()\n df_new['moving_avg_90'] = df_new['return_1'].rolling(window=63).mean()\n df_new['moving_avg_365'] = df_new['return_1'].rolling(window=252).mean()\n \n # the target\n df_new['close'] = df['close']\n df_new = df_new.dropna(axis=0)\n return df_new",
"def rand_data():\n # 100 examples, with seq_len=10, each holding 300 features\n return torch.randn((100, 10, 300))",
"def make_tutorial_data(n: int) -> pd.DataFrame:\n np.random.seed(1111)\n\n dataset = pd.DataFrame({\n \"id\": list(map(lambda x: \"id%d\" % x, np.random.randint(0, 100, n))),\n \"date\": np.random.choice(pd.date_range(\"2015-01-01\", periods=100), n),\n \"feature1\": np.random.gamma(20, size=n),\n \"feature2\": np.random.normal(40, size=n),\n \"feature3\": np.random.choice([\"a\", \"b\", \"c\"], size=n)})\n\n dataset[\"target\"] = (dataset[\"feature1\"]\n + dataset[\"feature2\"]\n + dataset[\"feature3\"].apply(lambda x: 0 if x == \"a\" else 30 if x == \"b\" else 10)\n + np.random.normal(0, 5, size=n))\n\n # insert some NANs\n dataset.loc[np.random.randint(0, n, 100), \"feature1\"] = nan\n dataset.loc[np.random.randint(0, n, 100), \"feature3\"] = nan\n\n return dataset",
"def random_dataframe(pool, shape, columns):\n return pd.DataFrame(\n np.random.randint(pool[0], pool[1], size=shape), columns=columns\n )",
"def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df",
"def __init__(self, samples_per_class=10, n_classes=10, n_features=1):\n self.samples_per_class = samples_per_class\n self.n_classes = n_classes\n self.n_features = n_features\n\n # Create a dataframe to be consistent with other Datasets\n self.df = pd.DataFrame({\n 'class_id': [i % self.n_classes for i in range(len(self))]\n })\n self.df = self.df.assign(id=self.df.index.values)",
"def generate_features(X, shuffle_data = False, which_slice = 0,how_many_slices = 1,dummies = False):\n start = time.clock()\n \n \n df_X = pd.DataFrame(X)\n if dummies ==True:\n print('dummies')\n \n df_tmp = df_X.cumsum(axis=1)\n \n df_tmp_2 = df_X.cumprod(axis=1)\n df_tmp_5 = df_X.cummax(axis=1)\n\n df_tmp_6 = df_X.cummin(axis=1)\n df_tmp_3 = df_X**2\n df_tmp_4 = df_X.T.diff(2).T.dropna(axis=1,how='any')\n#df_X**3\n df_tmp_7 = df_X.T.diff(1).T.dropna(axis=1,how='any')\n df_tmp_8 = df_X**3\n df_tmp_9 = df_X**5\n df_tmp_10 = df_X - df_X.apply(np.mean,axis=0)#will this work?\n df_test = pd.stats.moments.rolling_mean(df_X.T,window = 19,axis=1)\n df_test = df_test.T.dropna(axis=1,how='any')\n\n df_X['nonzero_sum'] = df_X.apply(np.count_nonzero,axis=1)\n # df_X['binc'] = df_X.apply(np.unique,axis=1)\n corr = df_X.corr()\n df_tmp_11 = df_X.dot(corr)\n\n \n \n \n df_X['mean'] = df_X.apply(np.mean,axis=1,raw=True)\n df_X['std'] = df_X.apply(np.std,axis=1,raw=True)\n df_X['median'] = df_X.apply(np.median,axis=1,raw=True)\n df_X['amax'] = df_X.apply(np.amax,axis=1,raw=True)\n df_X['amin'] = df_X.apply(np.amin,axis=1,raw=True)\n\n\n df_X['ptp'] = df_X.apply(np.ptp,axis=1)\n \n \n df_X = pd.concat([df_X,\n df_tmp,\n df_tmp_2,\n #df_tmp_3,\n df_tmp_4,\n df_tmp_5,\n df_tmp_6,\n #df_tmp_7,\n #df_tmp_8,\n #df_tmp_9,\n df_tmp_11,\n df_test\n ],axis=1)\n \n X = df_X.values.astype(np.float32)\n\n if shuffle_data == True:\n z = shuffle(X.T)\n X = z.T\n print('shape')\n print(X.shape)\n \n slice_length = round(X.shape[1]/how_many_slices)\n print(slice_length, how_many_slices)\n X = X[:,which_slice*slice_length:(which_slice+1)*slice_length]\n \n print('shape new')\n print(X.shape)\n end = time.clock()\n\n return X",
"def prepare_data_features(raw=False, round_ratings=False):\n df = prepare_data(raw=False)\n print(\"prepare features\")\n df_categories = df['categories'].str.get_dummies(sep=\", \")\n df_categories = df_categories[df_categories.columns[\n df_categories.sum() > len(df)*0.01]]\n df = pd.concat([df.drop('categories', 1), df_categories], axis=1)\n print(\"end prepare features\")\n return df",
"def create_sample(self, sent, head_pred_id):\n return pandas.DataFrame({\"word\": sent,\n \"run_id\": [-1] * len(sent), # Mock running id\n \"head_pred_id\": head_pred_id})",
"def create_sample(self, sent, head_pred_id):\n return pandas.DataFrame({\"word\": sent,\n \"run_id\": [-1] * len(sent), # Mock running id\n \"head_pred_id\": head_pred_id})",
"def create_samples(self):\n for s_id in range(len(self.data[\"sample\"])):\n self.samples.add(Sample(s_id, [self.data[key][s_id] for key in self.data.keys() if key not in WRONG_KEYS],\n self.data[\"label\"][s_id]))",
"def samples(self):\n pass",
"def features_dataset(features_pickle_path, train=True):\n if train:\n start = 0\n stop = 48000\n else:\n start = 48000\n stop = 50000\n\n features = pd.read_hdf(features_pickle_path, start=start, stop=stop).values\n\n labels = torch.zeros(features.shape[0]).float()\n features = torch.from_numpy(features).float()\n\n return torch.utils.data.TensorDataset(features, labels)"
] | [
"0.66862357",
"0.6420824",
"0.6418054",
"0.6401807",
"0.6305013",
"0.61754525",
"0.61719656",
"0.6149171",
"0.6146527",
"0.6113724",
"0.60907054",
"0.60811317",
"0.6028008",
"0.60071933",
"0.60071933",
"0.59889555",
"0.59559",
"0.5942348",
"0.5926523",
"0.59253466",
"0.59128505",
"0.59105045",
"0.5908983",
"0.5888121",
"0.58706397",
"0.58560205",
"0.58560205",
"0.58496404",
"0.58327705",
"0.5791717"
] | 0.8428525 | 0 |
Aggregates crowd answers and evaluates for all crowd answers | def evaluate_crowd_all_answers(self, mode=CSFSCrowdAggregator.Mode.EXTENDED, fake_features={}):
df_clean = CSFSCrowdCleaner(self.path_questions, self.path_answers_raw, self.target).clean()
for f in fake_features:
df_clean = df_clean.append({'answer': fake_features[f], 'answerUser': 'FAKE', 'feature': f}, ignore_index=True)
df_clean.to_csv(self.path_answers_clean, index=True)
df_clean_grouped = CSFSCrowdAnswergrouper.group(df_clean)
df_clean_grouped.to_pickle(self.path_answers_clean_grouped)
df_aggregated = CSFSCrowdAggregator(df_clean, target=self.target, mode=mode, fake_features=fake_features).aggregate()
df_aggregated.to_csv(self.path_answers_aggregated, index=True)
df_combined = CSFSCrowdAnalyser().get_combined_df(self.path_answers_aggregated, self.path_meta)
df_combined.to_csv(self.path_answers_metadata, index=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate_question(self):\n self.get_question_fequency()\n self.count_answers()",
"def evaluate_questions(self):\n for question in self.question_list:\n question.evaluate_question()",
"def run_compute_reread_counts(self):\n questions = []\n contexts = []\n student_data = self.responses[:]\n for response in student_data:\n if response.question.text not in questions:\n questions.append(response.question.text)\n if response.context.text not in contexts:\n contexts.append(response.context.text)\n\n compute_reread_counts_data = []\n\n for question in questions:\n for context in contexts:\n compute_reread_counts_data.append(self.compute_reread_counts(\n question, context))\n\n return compute_reread_counts_data",
"def evaluate_result(my_answers, solutions):\n results = [None, None, None, None, None]\n for x in range(len(results)):\n results[x] = check_answer(my_answers[x], solutions[x])\n count_correct = 0\n count_incorrect = 0\n for result in results:\n if result is True:\n count_correct += 1\n else:\n count_incorrect += 1\n if count_correct / 5 > 0.7:\n return \"Congratulations, you passed the test! You scored \" + str(count_correct) + \" out of 5.\"\n elif count_incorrect / 5 >= 0.3:\n return \"Unfortunately, you did not pass. You scored \" + str(count_correct) + \" out of 5.\"",
"def evaluate(self,**d):\r\n\t\t\r\n\t\t# evaluate terms\r\n\t\tv = [i.evaluate(**d) for i in self]\r\n\t\t\r\n\t\t# sum results\r\n\t\tc = Pa(v).sum()\r\n\t\t\r\n\t\treturn c",
"def evaluate_no_answers(self):\n answer_range = self.answer_range\n feature_range = self.feature_range\n repetitions = self.repetitions\n\n df_cleaned_bin = pd.read_csv(self.path_bin)\n df_answers_grouped = pd.read_pickle(self.path_answers_clean_grouped)\n df_actual_metadata = pd.read_csv(self.path_answers_metadata, index_col=0, header=[0, 1])\n df_actual_metadata = df_actual_metadata['actual']\n\n # # feature_range = [2,3]\n # # answer_range = [2,10]\n # repetitions=5\n\n result = {}\n for no_answers in answer_range:\n print('calculating. number of answers: ', no_answers)\n evaluator = ERNofeaturesEvaluator(None, None, df_cleaned_bin, df_actual_metadata=df_actual_metadata, target=self.target, dataset_name=self.dataset_name, df_answers_grouped=df_answers_grouped, bootstrap_n=no_answers, repetitions=repetitions, replace=False)\n raw_data = evaluator.evaluate(feature_range, condition=ERCondition.CSFS) # raw_data is dict: {CONDITION: {NOFEATURES: [AUCS]}}\n result[no_answers] = raw_data[ERCondition.CSFS]\n\n # result is dict: {no_answers: {NOFEATURES: [AUCS]}}\n result_restructured = dict()\n for no_features in feature_range:\n result_restructured[no_features] = {no_answers: result[no_answers][no_features] for no_answers in answer_range}\n # {no_features: {no_answers: result[no_answers][no_features]} for no_features in feature_range for no_answers in answer_range }\n result = result_restructured # { 2 features: {2answers: [], 3 answers: [], 4 answers: [],...}, 3 features: [2answers:[], 3answers:[]},...}\n\n # print(result)\n data_aggregated = dict()\n for no_features in result:\n print('aggregating. number of features: ', no_features)\n data = {\n 'mean': [np.mean(result[no_features][no_answers]) for no_answers in answer_range],\n 'ci_lo': [ssw.DescrStatsW(result[no_features][no_answers]).tconfint_mean()[0] for no_answers in answer_range],\n 'ci_hi': [ssw.DescrStatsW(result[no_features][no_answers]).tconfint_mean()[1]for no_answers in answer_range],\n 'std': [np.std(result[no_features][no_answers]) for no_answers in answer_range],\n }\n\n df = pd.DataFrame(data)\n # print(no_features)\n # print(tabulate(df))\n data_aggregated[no_features] = df\n df_combined = pd.concat(data_aggregated, axis='columns')\n # exit()\n df_combined.index = answer_range\n df_combined.to_pickle(self.path_no_answers_vs_auc)",
"def eval(self):\n\n # How many questions we get right at precision@1.\n correct = 0\n\n total = self._analogy_questions.shape[0]\n start = 0\n while start < total:\n limit = start + 2500\n sub = self._analogy_questions[start:limit, :]\n idx = self._predict(sub)\n start = limit\n for question in xrange(sub.shape[0]):\n if sub[question, 3] in idx[question]:\n # print(sub[question, 3], idx[question])\n correct += 1\n\n print()\n print(\"Eval %4d/%d accuracy @ top5= %4.1f%%\" % (correct, total,\n correct * 100. / total)\n )",
"def results(self):\n\n\t\tresults = {'answer':42}\n\n\t\treturn results",
"def evaluate(self, solution, total = 0):\n for objective in self.objectives:\n total = total + objective(solution)\n return total",
"def calc_overall_evaluation(count_by_type_dict: dict):\n assert len(count_by_type_dict) > 0, \"count by domain class should not be empty!\"\n for domain_name, domain_cnt in count_by_type_dict.items():\n print('domain_cnt', domain_cnt)\n for mode, res in OverallEval[domain_name].items():\n OverallEval[domain_name][mode]['precision'] = res['precision'] / domain_cnt\n OverallEval[domain_name][mode]['recall'] = res['recall'] / domain_cnt\n OverallEval[domain_name][mode]['f1_score'] = res['f1_score'] / domain_cnt\n log = \"Domain:{}, mode:{}, P:{:.3f}, R:{:.3f}, f1:{:.3f}\".format(\n domain_name, mode, OverallEval[domain_name][mode]['precision'],\n OverallEval[domain_name][mode]['recall'],\n OverallEval[domain_name][mode]['f1_score'])\n print(log)\n #logger.info(log)\n return OverallEval",
"def quiz():\n\n global score\n\n questions = [question1,question2,question3,question4,question5,question6,question7,question8,question9,question10]\n\n for question in questions:\n if question() == 1:\n score += 1\n results()",
"def evaluate_solution(self, chosen):\n self.candidate_counter += 1\n\n # evaluation function in abstract superclass\n \n solved_clauses = np.any(self.truth_clauses & np.array([chosen, ~chosen]), axis=(2, 1)) \n num_solved_clauses = np.sum(solved_clauses)\n # calculate evaluation with weight adaption heuristic\n evaluation = np.sum(solved_clauses * self.clause_weights)\n\n if self.candidate_counter == self.WEIGHT_ADAPTION_DURATION:\n # increase weights for unsatisfied clauses\n self.clause_weights += ~solved_clauses\n self.candidate_counter = 0\n\n return evaluation, num_solved_clauses",
"def conlleval_overall_results(documents):\n counts = conlleval_evaluate(documents)\n overall, by_type = metrics(counts)\n return overall",
"def evaluate(self):\n scores = []\n scores.append(self.word_analogy())\n print(\"Word Analogy (acc): \", scores[0])\n scores.append(self.word_similarity())\n print(\"Word Similarity (MSE): \", scores[1])\n scores.append(self.concept_categorization())\n print(\"Concept Categorization (purity): \", scores[2])\n scores.append(self.sentiment_analysis())\n print(\"Sentiment Analysis (acc): \", scores[3])\n return scores",
"def get_answers(self):\r\n pass",
"def get_answers(self):\r\n return self.answer_values",
"def sum_over_occurences(answers):\n answer_dict = {}\n for answer in answers:\n a = answer['answer']\n if a not in answer_dict:\n answer_dict[a] = 0\n answer_dict[a] += 1\n return answer_dict",
"def show_answers(queries, answers, aggregation_predictions_string):\n\n ans_list = []\n for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string):\n print(query)\n print(answer,type(answer))\n print(predicted_agg)\n answer = [i.strip() for i in answer.split(',')]\n print(answer)\n if (len(answer) == 1):\n if (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n\n if (len(answer) > 1):\n if (predicted_agg == 'SUM'):\n try:\n answer = sum([float(i) for i in answer])\n except ValueError:\n answer = predicted_agg\n elif (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n elif (predicted_agg == 'AVERAGE'):\n answer = sum([float(i) for i in answer]) / len([i for i in answer])\n elif (predicted_agg == 'NONE'):\n answer = answer\n else:\n answer = 'None'\n # if predicted_agg == \"NONE\":\n # print(\"Predicted answer: \" + answer)\n # else:\n # print(\"Predicted answer: \" + predicted_agg + \" > \" + answer)\n\n ans_list.append(answer)\n\n return ans_list",
"def overall_correct(self, interpreter):\n r = self.results[0]\n num_inst = len(self.all_indices)\n num_class = r.goldstandard.shape[1]\n num_res = len(self.results)\n\n retval = numpy.ones((num_inst, num_class, num_res)) * numpy.nan\n for r_i, r in enumerate(self.results):\n correct = r.correct(interpreter)\n retval[r.instance_indices,:,r_i] = correct\n return retval",
"def generate_score(self, answered_question, date, time_value):\n correct_answer_objects = map(lambda correct_key: Answer.get_by_id(correct_key.id_or_name()),\n answered_question.question.correctAnswers)\n invite = answered_question.invite\n earned_knowledge_area_score = {}\n max_knowledge_area_score = {}\n total_earned_value = 0.0\n\n for correct_answer in correct_answer_objects:\n for knowledge_area in correct_answer.knowledgeAreas:\n #We don't want negative values to influence the max score; skip those\n if correct_answer.value > float(0):\n earned_knowledge_area_score[knowledge_area] = 0.0\n if knowledge_area in max_knowledge_area_score.keys():\n max_knowledge_area_score[knowledge_area] += correct_answer.value\n else:\n max_knowledge_area_score[knowledge_area] = correct_answer.value\n round(max_knowledge_area_score[knowledge_area], 2)\n\n for given_answer_key in answered_question.givenAnswers:\n given_answer_object = Answer.get_by_id(given_answer_key.id_or_name())\n earned_value = float(given_answer_object.value)\n total_earned_value = float(round(total_earned_value + earned_value, 2))\n for knowledge_area in given_answer_object.knowledgeAreas:\n if knowledge_area in earned_knowledge_area_score.keys():\n earned_knowledge_area_score[knowledge_area] += earned_value\n else:\n earned_knowledge_area_score[knowledge_area] = earned_value\n round(earned_knowledge_area_score[knowledge_area], 2)\n\n multiple_choice_score_object = MultipleChoiceScore()\n multiple_choice_score_object.parse(correct_answer_objects, total_earned_value, earned_knowledge_area_score,\n max_knowledge_area_score, invite, date, time_value)\n multiple_choice_score_object.put()\n return multiple_choice_score_object",
"def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def main(duck, abstract, answer):\n set_duck = sorted(set(duck))\n for i in set_duck:\n if i > abstract / 2:\n break\n answer += duck.count(abstract - i) * duck.count(i)\n print(answer)",
"def part_one(answer_data):\n group_counts = []\n for answer_group in parse_groups(answer_data):\n group_counts.append(count_answer_set_union(answer_group))\n return sum(group_counts)",
"def rescore_existing_answers(self):\r\n return self._grade_answers(None)",
"def evaluate(self, data, category, dims=None, overall=True):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n if dims == None:\n eval_dims = self.dimensions\n else:\n assert isinstance(dims, list)\n eval_dims = dims\n\n for dim in eval_dims:\n output_list, ref_list = [], []\n for i in range(n_data):\n output_list.append(data[i]['system_output'])\n ref_list.append(data[i]['reference'])\n\n input_list = add_question(dimension=dim, output=output_list, ref=ref_list, task=self.task)\n score = self.scorer.score(input_list, self.task, category, dim)\n\n for i in range(n_data):\n eval_scores[i][dim] = score[i]\n\n # Customize your overall score here.\n if overall == True:\n for i in range(n_data):\n eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values()))\n\n return eval_scores",
"def answers_all(self):\n return self.answer_set.all()",
"def get_exit_survey_results(survey, date):\n token = settings.SURVEYGIZMO_API_TOKEN\n secret = settings.SURVEYGIZMO_API_TOKEN_SECRET\n answers = []\n page = 1\n more_pages = True\n survey_id = SURVEYS[survey][\"exit_survey_id\"]\n\n # Aggregate results.\n summary = {\n \"yes\": 0,\n \"no\": 0,\n \"dont-know\": 0,\n }\n\n # Can't do anything without credentials.\n if token is None or secret is None:\n return summary\n\n while more_pages:\n response = requests.get(\n \"https://restapi.surveygizmo.com/v2/survey/{survey}\"\n \"/surveyresponse?\"\n \"filter[field][0]=datesubmitted\"\n \"&filter[operator][0]=>=&filter[value][0]={start}+0:0:0\"\n \"&filter[field][1]=datesubmitted\"\n \"&filter[operator][1]=<&filter[value][1]={end}+0:0:0\"\n \"&filter[field][2]=status&filter[operator][2]==\"\n \"&filter[value][2]=Complete\"\n \"&resultsperpage=500\"\n \"&page={page}\"\n \"&api_token={token}\"\n \"&api_token_secret={secret}\".format(\n survey=survey_id,\n start=date,\n end=date + timedelta(days=1),\n page=page,\n token=token,\n secret=secret,\n ),\n timeout=300,\n )\n\n results = json.loads(response.content)\n total_pages = results.get(\"total_pages\", 0)\n more_pages = page < total_pages\n answers = answers + [r.get(\"[question(2)]\") for r in results.get(\"data\", [])]\n page += 1\n\n for answer in answers:\n lower_stripped = answer.lower().strip()\n if lower_stripped in [\"no\", \"yes\"]:\n summary[lower_stripped] += 1\n else:\n summary[\"dont-know\"] += 1\n\n return summary"
] | [
"0.610443",
"0.5850755",
"0.5820487",
"0.58010185",
"0.57327366",
"0.56864023",
"0.5647703",
"0.56394255",
"0.55239356",
"0.551901",
"0.5397282",
"0.53928953",
"0.5356523",
"0.5340461",
"0.5325382",
"0.5309455",
"0.52819633",
"0.5268165",
"0.52674896",
"0.52351356",
"0.52271175",
"0.52254957",
"0.5224211",
"0.5224211",
"0.52106255",
"0.519038",
"0.51880246",
"0.5178031",
"0.5169683",
"0.5158074"
] | 0.59926075 | 1 |
Appends n user answers for feature with given value to df | def _append_fake_user_answers(self, df, feature, value, n=1):
data = {'answer': value, 'answerUser': 'FAKE', 'feature': feature}
df = df.append([data]*n, ignore_index=True) # need to append it several times in order to allow random selection
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nunique_stats(df, df_history, uid_key, value, feat):\n add = pd.DataFrame(df_history.groupby(uid_key)[value].nunique()).reset_index()\n add = add.rename(columns={value: feat})\n df = pd.merge(df, add, on=uid_key, how='left')\n return df",
"def alspostprocess(data, prediction, features, user_features, movie_features, n_features=10):\r\n \r\n\r\n data['ALS'] = prediction[data.loc[:, 'userID']-1, data.loc[:, 'movieID']-1]\r\n features.append('ALS')\r\n \r\n total_features = len(movie_features)\r\n if n_features>total_features:\r\n n_features = total_features\r\n \r\n for i in range(n_features):\r\n data[\"UserFeature{}\".format(i)] = user_features[data.loc[:, 'userID']-1, i]\r\n features.append(\"UserFeature{}\".format(i))\r\n data[\"MovieFeature{}\".format(i)] = movie_features[i, data.loc[:, 'movieID']-1]\r\n features.append(\"MovieFeature{}\".format(i))\r\n return data, features",
"def append_new_user(self,interaction_df, user_pref):\n\n\n\t\tuser_pref = pd.DataFrame(user_pref).T\n\t\tuser_pref.columns = list(interaction_df.columns)\n\t\tuser_pref.rename(index = {0:max(interaction_df.index)+1 }, \n\t\t inplace = True) \n\t\tframes = [interaction_df, user_pref]\n\t\tnew_df = pd.concat(frames)\n\t\treturn new_df",
"def add_features(df):\n \n assert df.columns.str.contains(\"query|value|keyword|ranking|timestamp|geo\").all(), \"Add features failed. \\\n Missing one of [query, value, keyword, ranking, timestamp, geo]\"\n \n # feature engineering: totals and normalize\n grouped = df.groupby(['ranking']).value # group values by ranking\n df['value_total'] = grouped.transform('sum') # total sum \n df['value_normalized'] = (df.value-grouped.transform('min'))/(grouped.transform('max')-grouped.transform('min')) # normalize \n df['value_normalized_total'] = df.groupby(['ranking']).value_normalized.transform('sum') # total sum of normalized values \n df['date'] = pd.to_datetime(df.query_timestamp).dtd\n \n return df",
"def cnt_stats(df, df_history, uid_key, value, feat):\n add = pd.DataFrame(df_history.groupby(uid_key)[value].count()).reset_index()\n add = add.rename(columns={value: feat})\n df = pd.merge(df, add, on=uid_key, how='left')\n return df",
"def any_cnt_stats(df, df_history, uid_key, value, feat, certain_value):\n add = pd.DataFrame(df_history.groupby(uid_key)[value] \\\n .apply(lambda x: len(np.where(x == certain_value)[0]))).reset_index()\n add = add.rename(columns={value: feat})\n df = pd.merge(df, add, on=uid_key, how='left')\n return df",
"def expand_features(data):\n\t## combine num of siblings and parents to feature of family size\n\tdata[\"Fam_size\"] = data[\"SibSp\"] + data[\"Parch\"]\n\n\t## add friend category defined as either sharing a ticket with someone not family\n\t## or share a room with someone not registered as family\n\tfriends = np.zeros((data['PassengerId'].size))\n\tfor i,iid in enumerate(data[\"PassengerId\"]):\n\t\tticket_temp = data.loc[data[\"PassengerId\"]==iid, \"ticket_no\"].values[0]\n\t\tcabin_temp = data.loc[data[\"PassengerId\"]==iid, \"cabin_no\"].values[0]\n\t\tif data.loc[data[\"ticket_no\"]==ticket_temp, \"Ticket\"].count()>1:\n\t\t\tif data.loc[data[\"PassengerId\"]==iid,\"Fam_size\"].values[0]==0:\n\t\t\t\tfriends[i] = 1\n\t\telif cabin_temp!=0:## corresponds to NaN cabin value\n\t\t\tif data.loc[data[\"cabin_no\"]==cabin_temp, \"cabin_no\"].count()>1:\n\t\t\t\tif data.loc[data[\"PassengerId\"]==iid,\"Fam_size\"].values[0]==0:\n\t\t\t\t\tfriends[i] = 1\n\tdata[\"friend\"] = pd.Series(friends,dtype=int)\n\n\t## fare per person\n\tfare_per_person = np.zeros((data['PassengerId'].size))\n\tfor i,ifare in enumerate(data[\"ticket_no\"].unique()):\n\t\tshared_ticket_temp = data.loc[data[\"ticket_no\"]==ifare,\"Fare\"]\n\t\tfare_per_person[i] = 1.*shared_ticket_temp.values[0]/shared_ticket_temp.count()\n\tdata[\"Fare_person\"] = pd.Series(fare_per_person,dtype=float)\n\n\t## add child feature\n\tdata.loc[data['Age'] <= 9, 'Child'] = 1\n\tdata.loc[data['Age'] > 9, 'Child'] = 0",
"def five_more(df):\n\n i = 0\n while True:\n more_data = input('Would you like to see raw data? Please enter yes or no: ').lower()\n if more_data not in ('yes', 'y'):\n break\n else:\n print(df.iloc[i:i+5])\n i += 5",
"def make_tutorial_data(n: int) -> pd.DataFrame:\n np.random.seed(1111)\n\n dataset = pd.DataFrame({\n \"id\": list(map(lambda x: \"id%d\" % x, np.random.randint(0, 100, n))),\n \"date\": np.random.choice(pd.date_range(\"2015-01-01\", periods=100), n),\n \"feature1\": np.random.gamma(20, size=n),\n \"feature2\": np.random.normal(40, size=n),\n \"feature3\": np.random.choice([\"a\", \"b\", \"c\"], size=n)})\n\n dataset[\"target\"] = (dataset[\"feature1\"]\n + dataset[\"feature2\"]\n + dataset[\"feature3\"].apply(lambda x: 0 if x == \"a\" else 30 if x == \"b\" else 10)\n + np.random.normal(0, 5, size=n))\n\n # insert some NANs\n dataset.loc[np.random.randint(0, n, 100), \"feature1\"] = nan\n dataset.loc[np.random.randint(0, n, 100), \"feature3\"] = nan\n\n return dataset",
"def fifty_fifty(dataframe) -> pd.DataFrame:\n dataframe[\"allocation\"] = 0.5\n return dataframe",
"def evaluate_no_answers(self):\n answer_range = self.answer_range\n feature_range = self.feature_range\n repetitions = self.repetitions\n\n df_cleaned_bin = pd.read_csv(self.path_bin)\n df_answers_grouped = pd.read_pickle(self.path_answers_clean_grouped)\n df_actual_metadata = pd.read_csv(self.path_answers_metadata, index_col=0, header=[0, 1])\n df_actual_metadata = df_actual_metadata['actual']\n\n # # feature_range = [2,3]\n # # answer_range = [2,10]\n # repetitions=5\n\n result = {}\n for no_answers in answer_range:\n print('calculating. number of answers: ', no_answers)\n evaluator = ERNofeaturesEvaluator(None, None, df_cleaned_bin, df_actual_metadata=df_actual_metadata, target=self.target, dataset_name=self.dataset_name, df_answers_grouped=df_answers_grouped, bootstrap_n=no_answers, repetitions=repetitions, replace=False)\n raw_data = evaluator.evaluate(feature_range, condition=ERCondition.CSFS) # raw_data is dict: {CONDITION: {NOFEATURES: [AUCS]}}\n result[no_answers] = raw_data[ERCondition.CSFS]\n\n # result is dict: {no_answers: {NOFEATURES: [AUCS]}}\n result_restructured = dict()\n for no_features in feature_range:\n result_restructured[no_features] = {no_answers: result[no_answers][no_features] for no_answers in answer_range}\n # {no_features: {no_answers: result[no_answers][no_features]} for no_features in feature_range for no_answers in answer_range }\n result = result_restructured # { 2 features: {2answers: [], 3 answers: [], 4 answers: [],...}, 3 features: [2answers:[], 3answers:[]},...}\n\n # print(result)\n data_aggregated = dict()\n for no_features in result:\n print('aggregating. number of features: ', no_features)\n data = {\n 'mean': [np.mean(result[no_features][no_answers]) for no_answers in answer_range],\n 'ci_lo': [ssw.DescrStatsW(result[no_features][no_answers]).tconfint_mean()[0] for no_answers in answer_range],\n 'ci_hi': [ssw.DescrStatsW(result[no_features][no_answers]).tconfint_mean()[1]for no_answers in answer_range],\n 'std': [np.std(result[no_features][no_answers]) for no_answers in answer_range],\n }\n\n df = pd.DataFrame(data)\n # print(no_features)\n # print(tabulate(df))\n data_aggregated[no_features] = df\n df_combined = pd.concat(data_aggregated, axis='columns')\n # exit()\n df_combined.index = answer_range\n df_combined.to_pickle(self.path_no_answers_vs_auc)",
"def generate_feature_vectors(scores_final, views_final, interests_final):\n\tusers = pd.merge(scores_final, views_final, how='outer', on='user_handle')\n\tusers = pd.merge(users, interests_final, how='outer', on='user_handle')\n\tusers.fillna(value=0, inplace=True)\n\tusers.set_index('user_handle', inplace=True)\n\treturn users",
"def evaluate_all_results(self, nbr_items: int = -1, val_size: float = 0.2, n: int = 3) -> pd.DataFrame:\n results = list(map(\n lambda x: self.evaluate_one_user(x, val_size, n),\n self.database.users.data['user_id'].tolist()[:nbr_items]\n ))\n return pd.DataFrame.from_records(results)",
"def __add_answered_on_feature(samples: List[TrainSample], all_features: np.array) -> np.array:\n new_features = []\n for sample, features in zip(samples, all_features):\n if isinstance(sample.selected_player, bool):\n answered_on = 1.0 if sample.selected_player else 0.0\n else:\n answered_on = 1.0 if sample.selected_player in sample.answer else 0.0\n features = np.append(features, answered_on * features)\n features = np.append(features, [answered_on])\n new_features.append(features)\n return np.array(new_features)",
"def max_stats(df, df_history, uid_key, value, feat):\n add = pd.DataFrame(df_history.groupby(uid_key)[value].max()).reset_index()\n add = add.rename(columns={value: feat})\n df = pd.merge(df, add, on=uid_key, how='left')\n return df",
"def test_limit_num_users(self):\n survey = self._create_test_survey()\n\n survey.save_user_answers(self.student, self.student_answers, self.course_id)\n survey.save_user_answers(self.student2, self.student2_answers, self.course_id)\n\n # even though we have 2 users submitted answers\n # limit the result set to just 1\n all_answers = survey.get_answers(limit_num_users=1)\n assert len(list(all_answers.keys())) == 1",
"def create_dataframe(G, paths, feature_list):\n n_feats = len(feature_list)\n choice_features = []\n observation_ids = []\n choice_indicators = []\n choice_ids = []\n counter = 0\n\n for path_index in range(len(paths)):\n \n path = paths[path_index]\n\n for i in range(len(path)-1):\n\n # append all the edge options to a list of lists called choice_features\n current_node = path[i]\n end_node = path[-1]\n neighbors = list(G.neighbors(current_node))\n n_choices = len(neighbors)\n observation_ids.append(counter*np.ones((n_choices,))) \n # 'i' is the \"index\" of the observation, or the reason why we know which observation is which\n for neighbor in neighbors:\n current_attribute_dict = find_attribute_dict(G, current_node, neighbor, end_node)\n # iteratively adding each feature value to the observation\n current_observation_choice_features = []\n \n for feature in feature_list:\n\n try:\n current_feature = current_attribute_dict[feature]\n except KeyError as e:\n current_feature = 0\n current_observation_choice_features.append(current_feature)\n \n choice_features.append(current_observation_choice_features)\n\n # marking the choiceID's choice as '1' among zeros\n choice_indicators.append(np.zeros((n_choices,)))\n chosen = neighbors.index(path[i+1])\n choice_indicators[-1][chosen] = 1\n # All the possible choices out at this observation:\n choice_ids.append(np.arange(n_choices))\n \n counter += 1\n\n # preparing columns for the dataframe (long) format\n overall_observation_ids = np.concatenate(observation_ids)\n choice_features_overall = np.vstack(choice_features)\n overall_choice_indicators = np.concatenate(choice_indicators)\n overall_choice_ids = np.concatenate(choice_ids)\n\n df = pd.DataFrame()\n\n df['obs_ids'] = overall_observation_ids\n df['choices'] = overall_choice_indicators\n df['alt_ids'] = overall_choice_ids\n\n # The next few lines just mean the columns will be consistent across choices\n spec_names = OrderedDict()\n specs = OrderedDict()\n for i in range(n_feats):\n spec = feature_list[i]\n spec_names[spec] = spec\n specs[spec] = 'all_same'\n df[spec] = choice_features_overall[:,i]\n return (df, n_feats, specs, spec_names)",
"def make_Y_for_user(user_id, Q):\n data = GlobalVar.data\n _user_truth = []\n for _, row in data[data.user_id == user_id].iterrows():\n _user_truth.extend([row.item_id.replace(\"I\",\"\")] * row.frequency)\n Ui = len(_user_truth)\n _user_truth = _user_truth + [\"0\"]*(72-Ui)\n #print(\"Put\", user_id, Len.value)\n Q.put([user_id, _user_truth])",
"def user_stats(df, selected_city):",
"def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )",
"def any_stats(df, df_history, uid_key, value, feat, certain_value):\n add = pd.DataFrame(df_history.groupby(uid_key)[value] \\\n .apply(lambda x: 1 if len(np.where(x == certain_value)[0]) != 0 else 0)) \\\n .reset_index()\n add = add.rename(columns={value: feat})\n df = pd.merge(df, add, on=uid_key, how='left')\n return df",
"def gen_ppmi_dataframe(df):\n print(\"Finding ppmi values.\")\n total_playcount = sum(df.sum())\n user_playcounts = df.sum(axis=1)\n artist_playcounts = df.sum(axis=0)\n ppmi_df = copy.copy(df)\n count = 0\n for user, user_artist_playcounts in df.iterrows():\n count += 1\n for artist in user_artist_playcounts.index:\n user_artist_playcount = user_artist_playcounts[artist]\n if user_artist_playcount == 0.0:\n ppmi = 0.0\n else:\n x = total_playcount * user_artist_playcount\n y = user_playcounts[user] * artist_playcounts[artist]\n ppmi = max(0.0, x / y)\n ppmi_df.at[user, artist] = ppmi\n print(str(count) + \"/\" + str(len(user_playcounts)) + \" users counted.\")\n\n return ppmi_df",
"def store_predictions(self, preds, df, feature):\n\n prev_values = list(df[feature].iloc[:len(df) - len(self.players)])\n prev_values.extend(preds)\n\n df[feature] = prev_values\n\n return df",
"def create_features(self, answer):\n # Get the teacher's stuff\n a_stopwords = sf.remove_stopwords(self.teacher_answer)\n a_stemmed = sf.stem_sentence(a_stopwords)\n a_stemmed_ordered = sf.order_sentence(a_stemmed)\n teacher_answers = [\n a_stemmed,\n a_stemmed_ordered,\n ]\n \n # Change sentence into multiple versions\n log = dict()\n log['student_answer'] = answer\n log['teacher_answer'] = self.teacher_answer\n log['q_answer'] = answer\n log['q_stopwords'] = sf.remove_stopwords(answer)\n log['q_stemmed'] = sf.stem_sentence(answer)\n log['q_stem_ordered'] = sf.order_sentence(log['q_stemmed'])\n \n # Might need to save scaling until jsut before modeling\n log['wordcount'] = sf.word_count(answer)\n log['wordcount'] = sf.scale_column(self.word_scaler, log['wordcount'])\n\n\n# Stem sim\n log['stem_g_similarity'] = sf.generic_similarity(log['q_stemmed'], a_stemmed)\n log['stem_j_similarity'] = sf.jaccard_similarity(log['q_stemmed'], a_stemmed)\n log['stem_c_similarity'] = sf.cosine_similarity(log['q_stemmed'], a_stemmed)\n # Ordered\n log['stem_ordered_g_similarity'] = sf.generic_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_j_similarity'] = sf.jaccard_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_c_similarity'] = sf.cosine_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n\n\n \n # Appending New Answer\n self.new_answers = self.new_answers.append(log, ignore_index = True)\n \n # Entity Extraction\n types_of_sentences = [\n 'q_stemmed',\n 'q_stem_ordered',\n ]\n \n for sent_type, teach_ans in zip(types_of_sentences, teacher_answers):\n \n self.new_answers = sf.unigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.bigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.trigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)",
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def collect_scores(true_values, pred_df):\n csv_data = []\n for index in true_values.index.unique():\n if index not in pred_df.index:\n continue\n true_confirmed = true_values.loc[index][\"confirmed\"]\n pred_confirmed = pred_df.loc[index][\"prediction_confirmed\"]\n\n csv_data.append(\n [\n index[0],\n index[1],\n true_values.loc[index][\"geoname_code\"],\n ale(true_confirmed, pred_confirmed),\n ]\n )\n\n csv_data = pd.DataFrame(csv_data)\n csv_data.columns = [\"region_code\", \"date\", \"geoname_code\", \"cases_male\"]\n return csv_data.set_index([\"region_code\", \"geoname_code\", \"date\"])",
"def test_sample_users():\n ratings = lktu.ml_test.ratings\n ratings = ratings.set_index('user') ##forces non-unique index\n with pytest.raises(ValueError):\n for split in xf.sample_users(ratings, 5, 100, xf.SampleN(5)):\n pass",
"def gen_questions(self, number_of_questions):",
"def new_features(df):\n print(\"Add new features ...\")\n # distinguish Spring, Fall and pregnant females (don't care about juvenilles/unknown)\n df[\"gender_plus\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_plus\"] = \"f_gra\"\n\n df[\"gender_seasons\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_seasons\"] = \"f_gra\"\n\n # add features\n df[\"Age_To_Weight\"] = df[\"Annuli\"] / df[\"Weight\"]\n\n # Calcuate Number of recaptures\n df_captures = df[[\"ID\", \"Date\"]].groupby(\"ID\").count()\n df_captures.columns = [\"recapture_count\"]\n df_captures.reset_index(inplace=True)\n df = pd.merge(df, df_captures, how=\"outer\", on=\"ID\")\n\n # recalculate annuli\n df_min = pd.pivot_table(\n df[df.Annuli > 0],\n values=[\"Date\", \"Annuli\"],\n index=[\"ID\"],\n aggfunc={\"Date\": min, \"Annuli\": min},\n )\n df_min.columns = [\"annuli_min\", \"date_min\"]\n df_min.reset_index(inplace=True)\n\n df = pd.merge(df, df_min, how=\"outer\", on=\"ID\")\n df[\"year\"] = df.Date.map(lambda x: x.year)\n df[\"year_min\"] = df.date_min.map(lambda x: x.year)\n df[\"Annuli_orig\"] = df.Annuli\n df.Annuli = df.year - df.year_min + df.annuli_min\n df.Annuli = np.nan_to_num(df.Annuli)\n df[\"Annuli\"] = pd.to_numeric(df[\"Annuli\"], downcast=\"integer\")\n\n # Annuli Buckets\n buckets = 5\n interval = int(df[\"Annuli\"].max() / buckets)\n buckets = [i for i in range(0, df[\"Annuli\"].max() + interval, interval)]\n labels = [\"'{0} - {1}'\".format(i, i + interval) for i in buckets]\n df[\"Annuli_Group\"] = pd.cut(\n df.Annuli, buckets, labels=labels[:-1], include_lowest=True\n )\n\n return df",
"def get_user_feature_matrix(user_dict, user_index, aspect_index, N):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n count_dict[aspect] += 1\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = 1 + (N - 1) * (2 / (1 + exp(-count)) - 1)\n return result"
] | [
"0.56743383",
"0.56375664",
"0.55274326",
"0.5411185",
"0.53533185",
"0.5296851",
"0.52790606",
"0.5259034",
"0.52224356",
"0.5220007",
"0.5216666",
"0.51741165",
"0.51733893",
"0.51651996",
"0.51016253",
"0.50685185",
"0.5061694",
"0.504281",
"0.5034425",
"0.50202996",
"0.5014695",
"0.5003391",
"0.5002189",
"0.49946314",
"0.49852672",
"0.49813342",
"0.4980764",
"0.49754724",
"0.49670848",
"0.49645784"
] | 0.8131136 | 0 |
Calculates KendallTau Correlation for binary features | def autocorrelation(self):
# For all features calculate kendall's tau with every other feature.
df_bin = pd.read_csv(self.path_bin)
features = sorted(list(df_bin.columns))
df_correlation = pd.DataFrame({f: [np.nan] * len(features) for f in features}, index=features)
for f1 in features:
for f2 in features:
x = list(df_bin[f1])
y = list(df_bin[f2])
corr, p = scipy.stats.kendalltau(x, y)
df_correlation.loc[f1, f2] = "{} (p={:.3f})".format(corr, p)
if f1 == f2:
break
df_correlation.to_csv(self.path_autocorrelation, index=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_feature_corr(self):\n \n return self.train_data.astype(float).corr(method='kendall')",
"def kendall(features, labels):\n n, d = features.shape\n coefficients = np.asarray([stats.kendalltau(features[:, idx], labels,\n method='asymptotic', variant='b',\n alternative='two-sided')[0]\n for idx in range(d)])\n # Scale coefficients using the definition in the paper\n coefficients = np.abs((n /2) * coefficients)\n return coefficients",
"def cka_tall(X, Y):\n X = X.copy()\n Y = Y.copy()\n\n X -= X.mean(0)\n Y -= Y.mean(0)\n \n XTX = X.T @ X\n YTY = Y.T @ Y\n YTX = Y.T @ X\n\n # Equation (4)\n top = (YTX ** 2).sum()\n bottom = np.sqrt((XTX ** 2).sum() * (YTY ** 2).sum())\n c = top / bottom\n\n return c",
"def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc",
"def calculate_cof_int(rslt, init_dict, data_frame, mte, quantiles):\n # Import parameters and inverse hessian matrix\n hess_inv = rslt[\"AUX\"][\"hess_inv\"] / data_frame.shape[0]\n params = rslt[\"AUX\"][\"x_internal\"]\n numx = len(init_dict[\"TREATED\"][\"order\"]) + len(init_dict[\"UNTREATED\"][\"order\"])\n\n # Distribute parameters\n dist_cov = hess_inv[-4:, -4:]\n param_cov = hess_inv[:numx, :numx]\n dist_gradients = np.array([params[-4], params[-3], params[-2], params[-1]])\n\n # Process data\n covariates = init_dict[\"TREATED\"][\"order\"]\n x = np.mean(data_frame[covariates]).tolist()\n x_neg = [-i for i in x]\n x += x_neg\n x = np.array(x)\n\n # Create auxiliary parameters\n part1 = np.dot(x, np.dot(param_cov, x))\n part2 = np.dot(dist_gradients, np.dot(dist_cov, dist_gradients))\n # Prepare two lists for storing the values\n mte_up = []\n mte_d = []\n\n # Combine all auxiliary parameters and calculate the confidence intervals\n for counter, i in enumerate(quantiles):\n value = part2 * (norm.ppf(i)) ** 2\n aux = np.sqrt(part1 + value)\n mte_up += [mte[counter] + norm.ppf(0.95) * aux]\n mte_d += [mte[counter] - norm.ppf(0.95) * aux]\n\n return mte_up, mte_d",
"def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n coh_var[i, j] = tsu.jackknifed_coh_variance(\r\n self.spectra[i],\r\n self.spectra[j],\r\n self.eigs,\r\n adaptive=self._adaptive\r\n )\r\n\r\n idx = triu_indices(self.input.data.shape[0], 1)\r\n coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()\r\n\r\n coh_mat_xform = tsu.normalize_coherence(self.coherence,\r\n 2 * self.df - 2)\r\n\r\n lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n\r\n # convert this measure with the normalizing function\r\n tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)\r\n tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)\r\n\r\n return ub - lb",
"def estimateCs(y, inp):\n\treturn 1 -(math.tanh(getK1(inp) - (y/getY90(inp)) / (2 * getD0(inp)) + (y/getY90(inp) - 1/3.0)**3 / (3 * getD0(inp))))**2",
"def calculate_correlation(data):\n pass",
"def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn",
"def cramers_v1(confusion_matrix):\n chi2 = ss.chi2_contingency(confusion_matrix)[0]\n n = confusion_matrix.sum()\n phi2 = chi2 / n\n r, k = confusion_matrix.shape\n phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))\n rcorr = r - ((r-1)**2)/(n-1)\n kcorr = k - ((k-1)**2)/(n-1)\n return np.sqrt(phi2corr / min((kcorr-1), (rcorr-1)))",
"def compute_autocorrelation_and_timescale(rootdir, folder_prefix, cluster_path, te): #{{{\n\n print 'compute_autocorrelation_and_timescale'\n\n ####################################################################################################\n # set up paths and clusters\n ####################################################################################################\n\n rlzn_path_list = get_realization_paths(rootdir, folder_prefix)\n\n fopen_list = open_netcdf_files(rlzn_path_list,'output*nc')\n\n indicesToParticle, indicesOnCluster, maxIndices = get_clusters(cluster_path)\n\n # just eddy part\n rhouu, rhovv, up2, vp2, lonp, latp, lon, lat, hull = compute_autocorrelation_rlzn_ensemble(fopen_list, te)\n \n np.save('rhouu'+str(te),rhouu)\n np.save('rhovv'+str(te),rhovv)\n np.save('up'+str(te),np.sqrt(up2))\n np.save('vp'+str(te),np.sqrt(vp2))\n np.save('lonp'+str(te),lonp)\n np.save('latp'+str(te),latp)\n np.save('lon'+str(te),lon)\n np.save('lat'+str(te),lat)\n np.save('hullsimplicies'+str(te),hull.simplices)\n \n rhouu = compute_cluster_ensemble(rhouu, indicesOnCluster, maxIndices, indicesToParticle)\n rhovv = compute_cluster_ensemble(rhovv, indicesOnCluster, maxIndices, indicesToParticle)\n up2 = compute_cluster_ensemble(up2, indicesOnCluster, maxIndices, indicesToParticle)\n vp2 = compute_cluster_ensemble(vp2, indicesOnCluster, maxIndices, indicesToParticle)\n lonp = compute_cluster_ensemble(lonp, indicesOnCluster, maxIndices, indicesToParticle)\n latp = compute_cluster_ensemble(latp, indicesOnCluster, maxIndices, indicesToParticle)\n\n np.save('rhouu_cluster'+str(te),rhouu)\n np.save('rhovv_cluster'+str(te),rhovv)\n np.save('up_cluster'+str(te),np.sqrt(up2))\n np.save('vp_cluster'+str(te),np.sqrt(vp2))\n np.save('lonp_cluster'+str(te),lonp)\n np.save('latp_cluster'+str(te),latp)\n\n close_netcdf_files(fopen_list)\n\n print 'compute_autocorrelation_and_timescale done'\n return rhouu, rhovv, np.sqrt(up2), np.sqrt(up2), lonp, latp, lon, lat, hull.simplices #}}}",
"def phi_coefficient (subgroup,target1,target2):\r\n return matthews_corrcoef(subgroup[target1], subgroup[target2])",
"def kge(self, return_all=False):\n cc = np.corrcoef(self.true, self.predicted)[0, 1]\n alpha = np.std(self.predicted) / np.std(self.true)\n beta = np.sum(self.predicted) / np.sum(self.true)\n return post_process_kge(cc, alpha, beta, return_all)",
"def corrcoef(self):\r\n return np.corrcoef(self.input.data)",
"def calc_trig(self, tau):\n if self.A[self.k,self.p] != 0.0:\n if tau > 0:\n t = -tau + np.sqrt(tau**2 + 1.0)\n else:\n t = -tau - np.sqrt(tau**2 + 1.0)\n \n c = 1.0/(1.0 + t**2)\n s = t*c\n else:\n c = 1.0\n s = 0.0\n return c, s",
"def __calc_CoagS(self):\n\n Dp_small = self.dp_lim[0]*1e-9 # in m\n temp = self.temp_data # Kelvin\n pres = self.pres_data # Pascal\n Dp = self.par_diam*1e-9 # m\n time = self.par_time # days\n N = self.__dNdlog2dN(Dp,self.smoothed_par_data) # cm-3\n findex = np.argwhere(Dp>=Dp_small).flatten()\n big_R = Dp[findex]/2.\n big_N = N[:,findex]\n k_B = 1.38064852e-23 # Boltzmann constant m2 kg s-2 K-1\n r0=Dp_small/2.\n r1=r0\n dens=1000.\n self.CoagS=np.zeros(time.shape)\n for i in range(0,len(time)):\n lamda=(6.73e-8*temp[i]*(1+(110.4/temp[i])))/(296*pres[i]/101325.0*1.373)\n myy=(1.832e-5*(temp[i]**(1.5))*406.4)/(5093*(temp[i]+110.4))\n kn1=lamda/r1\n kn=lamda/big_R\n CC= 1.+(kn*(1.142+(0.558*np.exp((-.999)/kn))))\n CC1= 1. + (kn1*(1.142+(0.558*np.exp((-.999)/kn1))))\n D = (k_B*temp[i]*CC)/(6.*np.pi*myy*big_R)\n D1 = (k_B*temp[i]*CC1)/(6.*np.pi*myy*r1)\n M = 4./3.*np.pi*(big_R**3)*dens\n M1 = 4./3.*np.pi*(r1**3)*dens\n c= np.sqrt((8.*k_B*temp[i])/(np.pi*M))\n c1= np.sqrt((8.*k_B*temp[i])/(np.pi*M1))\n c12= np.sqrt((c**2)+(c1**2))\n r12= big_R+r1\n D12= D+D1\n CCONT = 4.*np.pi*r12*D12\n CFR = np.pi*r12*r12*c12\n L=(8.*D)/(np.pi*c)\n L1=(8.*D1)/(np.pi*c1)\n SIG=(1./(3.*r12*L))*((r12+L)**3-(r12*r12+L*L)**1.5)-r12\n SIG1=(1./(3.*r12*L1))*((r12+L1)**3-(r12*r12+L1*L1)**1.5)-r12\n SIG12= np.sqrt((SIG**2)+(SIG1**2))\n KO=CCONT/((r12/(r12+SIG12))+(CCONT/CFR))\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)\n if (r0==big_R[0]):\n self.CoagS[i] = 0.5*KO*big_N[i,0]*1e6+np.nansum(KO*big_N[i,1:]*1e6)\n else:\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)",
"def ctc(target):\n network = target.project.network\n throats = network.throats(target.name)\n cn = network['throat.conns'][throats]\n C1 = network['pore.coords'][cn[:, 0]]\n C2 = network['pore.coords'][cn[:, 1]]\n value = _norm(C1 - C2, axis=1)\n return value",
"def cramers_corrected_stat(confusion_matrix):\n chi2 = ss.chi2_contingency(confusion_matrix)[0]\n n = confusion_matrix.sum().sum()\n phi2 = chi2/n\n r,k = confusion_matrix.shape\n phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1)) \n rcorr = r - ((r-1)**2)/(n-1)\n kcorr = k - ((k-1)**2)/(n-1)\n return np.sqrt(phi2corr / min( (kcorr-1), (rcorr-1)))",
"def mcc(y_true, y_pred):\n \n tp = true_positive(y_true, y_pred)\n tn = true_negative(y_true, y_pred)\n fp = false_positive(y_true, y_pred)\n fn = false_negative(y_true, y_pred)\n numerator = (tp * tn) - (fp * fn)\n denominator = (\n (tp + fp) *\n (fn + tn) *\n (fp + tn) *\n (tp + fn)\n )\n \n denominator = denominator ** 0.5\n return numerator/denominator",
"def ccc_a(y_true, y_pred):\n x = y_true[:, 1]\n y = y_pred[:, 1]\n mx = K.mean(x, axis=0)\n my = K.mean(y, axis=0)\n xm, ym = x - mx, y - my\n rho = K.sum(xm * ym) / (K.sqrt(K.sum(xm ** 2)) * K.sqrt(K.sum(ym ** 2)))\n x_s = K.std(x)\n y_s = K.std(y)\n ccc = 2 * rho * x_s * y_s / (x_s ** 2 + y_s ** 2 + (mx - my) ** 2)\n return ccc",
"def kts(self):\n return CAL_TO_J * 0.0077 * (self.rho/1000.0) * (self.rho/1000.0)",
"def ccc_v(y_true, y_pred):\n x = y_true[:, 0]\n y = y_pred[:, 0]\n mx = K.mean(x, axis=0)\n my = K.mean(y, axis=0)\n xm, ym = x - mx, y - my\n rho = K.sum(xm * ym) / (K.sqrt(K.sum(xm ** 2)) * K.sqrt(K.sum(ym ** 2)))\n x_s = K.std(x)\n y_s = K.std(y)\n ccc = 2 * rho * x_s * y_s / (x_s ** 2 + y_s ** 2 + (mx - my) ** 2)\n return ccc",
"def sensitivity(confusion):\n conf = np.zeros(confusion.shape[0])\n for i in range(confusion.shape[0]):\n tp = confusion[i][i]\n fn = np.sum(confusion, axis=1) - tp\n conf[i] = tp / (tp + fn[i])\n return conf",
"def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall",
"def CC_REC_INC_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','CC_REC_INC']]\n Feature_DF.loc[:,'CC_REC_INC_TRS'] = Feature_DF.loc[:,'CC_REC_INC'].apply(lambda x : (1+x)**(-1/3))\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_REC_INC_TRS']]\n\n return Feature_DF",
"def t(self,k,cosTheta,pk,c):\n pk = c.pkInterp(k)\n f2term = (self.tf21(0,1,2, k,cosTheta,pk,c)+self.tf21(1,2,0, k,cosTheta,pk,c)+self.tf21(2,0,1, k,cosTheta,pk,c)+ \\\n self.tf21(1,2,3, k,cosTheta,pk,c)+self.tf21(2,3,1, k,cosTheta,pk,c)+self.tf21(3,1,2, k,cosTheta,pk,c)+ \\\n self.tf21(2,3,0, k,cosTheta,pk,c)+self.tf21(3,0,2, k,cosTheta,pk,c)+self.tf21(0,2,3, k,cosTheta,pk,c)+ \\\n self.tf21(3,0,1, k,cosTheta,pk,c)+self.tf21(0,1,3, k,cosTheta,pk,c)+self.tf21(1,3,0, k,cosTheta,pk,c)) * 4.\n\n f3term = (self.tf31(M.array([0,1,2]),k,cosTheta,pk) + self.tf31(M.array([1,2,3]),k,cosTheta,pk) + \\\n self.tf31(M.array([2,3,1]),k,cosTheta,pk) + self.tf31(M.array([3,1,2]),k,cosTheta,pk)) * 6.\n\n #print cosTheta,f2term, f3term, ft2term+f3term\n return f2term + f3term",
"def test_calc_k_c():\n\n P_x0 = ufloat(1.75789868673e-12, 1.75789868673e-14) * u.nm**2/u.Hz # 1/100\n f_c = ufloat(50000, 0.5) * u.Hz # 1/100000 relative\n Q = ufloat(10000, 100) * u.dimensionless # 1/100\n T = ufloat(300, 3) * u.K # 1/100\n # ex_k_c is no longer a nice number because I switched from a rounded to\n # more exact value for Boltzmann's constant\n ex_k_c = ufloat(2.9999965233852217, 0.05196147267057527) * u.N/u.m\n k_c = calc_k_c(f_c, Q, P_x0, T)\n assert_almost_equal(k_c.magnitude.n, ex_k_c.magnitude.n)\n assert_almost_equal(k_c.magnitude.s, ex_k_c.magnitude.s)",
"def inner_cca_objective(y_true, y_pred):\n\n r1 = 1e-4\n r2 = 1e-4\n eps = 1e-12\n o1 = o2 = y_pred.shape[1]//2\n\n # unpack (separate) the output of networks for view 1 and view 2\n H1 = tf.transpose(y_pred[:, 0:o1])\n H2 = tf.transpose(y_pred[:, o1:o1+o2])\n\n m = H1.shape[1]\n\n H1bar = H1 - (tf.math.divide(1, m)) * tf.dot(H1, tf.ones([m, m]))\n H2bar = H2 - (tf.math.divide(1, m)) * tf.dot(H2, tf.ones([m, m]))\n\n SigmaHat12 = (tf.math.divide(1, m-1)) * \\\n tf.dot(H1bar, tf.transpose(H2bar))\n SigmaHat11 = (tf.math.divide(1, m-1)) * tf.dot(H1bar,\n tf.transpose(H1bar)) + r1 * tf.eye(o1)\n SigmaHat22 = (tf.math.divide(1, m-1)) * tf.dot(H2bar,\n tf.transpose(H2bar)) + r2 * tf.eye(o2)\n\n # Calculating the root inverse of covariance matrices by using eigen decomposition\n [D1, V1] = tf.nlinalg.eigh(SigmaHat11)\n [D2, V2] = tf.nlinalg.eigh(SigmaHat22)\n\n # Added to increase stability\n posInd1 = tf.gt(D1, eps).nonzero()[0]\n D1 = D1[posInd1]\n V1 = V1[:, posInd1]\n posInd2 = tf.gt(D2, eps).nonzero()[0]\n D2 = D2[posInd2]\n V2 = V2[:, posInd2]\n\n SigmaHat11RootInv = tf.dot(\n tf.dot(V1, tf.nlinalg.diag(D1 ** -0.5)), tf.transpose(V1))\n SigmaHat22RootInv = tf.dot(\n tf.dot(V2, tf.nlinalg.diag(D2 ** -0.5)), tf.transpose(V2))\n\n Tval = tf.dot(tf.dot(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv)\n\n if use_all_singular_values:\n # all singular values are used to calculate the correlation\n corr = tf.sqrt(tf.nlinalg.trace(tf.dot(tf.transpose(Tval), Tval)))\n else:\n # just the top outdim_size singular values are used\n [U, V] = tf.nlinalg.eigh(T.dot(tf.transpose(Tval), Tval))\n U = U[tf.gt(U, eps).nonzero()[0]]\n U = U.sort()\n corr = tf.sum(tf.sqrt(U[0:outdim_size]))\n\n return -corr",
"def corr_coeff(self) -> float:\n correlation_coefficient = np.corrcoef(self.true, self.predicted)[0, 1]\n return float(correlation_coefficient)",
"def compute_ctf(freqs,rots,akv,cs,wgh,dfmid1f,dfmid2f,angastf,dscale,bfactor=None): \n av = akv * 1e3 # Convert kilovots to volts\n cs = cs * 1e7 # Convert spherical aberation from mm to A\n \n # wavelength of electrons\n elambda = 12.2643247 / n.sqrt(av + av**2 * 0.978466e-6)\n \n wgh1 = dscale*n.sqrt(1.0 - wgh**2)\n wgh2 = dscale*wgh\n\n ix = freqs[:,0]\n iy = freqs[:,1]\n freq_radius = n.sqrt(ix**2 + iy**2)\n\n angle = elambda*freq_radius\n angspt = n.arctan2(iy,ix)\n if rots is not None:\n angspt = n.mod(angspt.reshape((-1,1)) + rots.reshape((1,-1)),2.0*n.pi)\n angle = angle.reshape((-1,1)) \n c1 = 2.0*n.pi*angle**2/(2.0*elambda)\n c2 = -c1*cs*angle**2/2.0\n angdif = angspt - angastf\n ccos = n.cos(2.0*angdif)\n df = 0.5*(dfmid1f + dfmid2f + ccos*(dfmid1f-dfmid2f))\n chi = c1*df + c2\n\n ctf = -wgh1*n.sin(chi) - wgh2*n.cos(chi)\n \n if bfactor is not None:\n ctf *= envelope_function(freq_radius, bfactor)\n\n return n.require(ctf,dtype = freqs.dtype)"
] | [
"0.7024077",
"0.60572654",
"0.592135",
"0.57627964",
"0.57089365",
"0.5693804",
"0.56896114",
"0.563734",
"0.558531",
"0.5533107",
"0.5526995",
"0.55004585",
"0.54897",
"0.54560375",
"0.5441303",
"0.54251665",
"0.54239964",
"0.54215145",
"0.54153144",
"0.54020524",
"0.53774697",
"0.5365337",
"0.53543794",
"0.5342899",
"0.5339916",
"0.5334517",
"0.5320325",
"0.532004",
"0.5318056",
"0.5308204"
] | 0.65094215 | 1 |
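The two Cramér's V snippets in the list above (`cramers_v1` and `cramers_corrected_stat`) implement the same bias-corrected Cramér's V; the only real difference is `confusion_matrix.sum()` versus `.sum().sum()`, which matters when the table is a pandas DataFrame (per-column sums) rather than a NumPy array (grand total). A minimal self-contained sketch of the same statistic, assuming a plain NumPy contingency table; the function name and the sample table are illustrative, not taken from the rows:

```python
import numpy as np
from scipy.stats import chi2_contingency

def cramers_v(confusion_matrix: np.ndarray) -> float:
    """Bias-corrected Cramér's V for a 2-D contingency table of counts."""
    chi2 = chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum()                      # grand total of counts
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    # bias-corrected phi^2 and effective numbers of rows/columns
    phi2corr = max(0.0, phi2 - (k - 1) * (r - 1) / (n - 1))
    rcorr = r - (r - 1) ** 2 / (n - 1)
    kcorr = k - (k - 1) ** 2 / (n - 1)
    return float(np.sqrt(phi2corr / min(kcorr - 1, rcorr - 1)))

# Example: a 2x3 table of observed counts
table = np.array([[10, 20, 30],
                  [20, 15, 5]])
print(cramers_v(table))    # a value in [0, 1]
```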
Plots a bar chart for each number of features and condition showing the distribution of AUCs | def crowd_auc_plot(self, auto_open=False):
def get_name(nofeat, cond):
plural = nofeat > 1
return "{} features (condition {})".format(nofeat, cond) if plural else "{} feature (condition {})".format(nofeat, cond)
def get_trace(values, nofeat, cond):
name = get_name(nofeat, cond)
return go.Histogram(
name=name,
x=values,
histnorm='probability',
autobinx=False,
xbins=dict(
start=0.5,
end=1,
size=0.025,
),
marker=dict(
color=self.colours[cond]
),)
aucs = pd.read_pickle(self.path_final_evaluation_aucs)
nofeatures = sorted(set([nofeat for nofeat in aucs[1]]))
conditions = [1, 2, 3, 4, 5]
subplot_titles = [get_name(no_feat, c) for no_feat in nofeatures for c in conditions]
fig = plotly.tools.make_subplots(rows=len(nofeatures), cols=len(conditions), subplot_titles=subplot_titles)
row_index=1
for no_feat in nofeatures:
for i in range(len(conditions)):
trace = get_trace(aucs[conditions[i]][no_feat], no_feat, conditions[i])
fig.append_trace(trace, row_index, i+1)
row_index += 1
fig['layout'].update(showlegend=False, height=2500, width=1200, title='AUC Histograms for {} ({})'.format(self.dataset_name, self.experiment_name),)
plotly.offline.plot(fig, auto_open=auto_open, filename=self.path_auc_plots)
from IPython.display import Image
Image('a-simple-plot.png') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_histogram_assess(assess_input, figure_output):\n\n sns.set_style(\"white\")\n raw_auc = pd.read_table(assess_input, index_col=\"Motif\")\n raw_auc = raw_auc.drop_duplicates()\n # df = df.T.drop_duplicates().T\n raw_auc = raw_auc.sort(columns=\"MNCP\", axis=0, ascending=False)\n labels = raw_auc.index\n x = 10\n if len(labels) > 50:\n x = 15\n elif len(labels) < 10:\n x = 5\n f, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(x, 10), sharex=True)\n a = sns.barplot(x=labels, y=raw_auc[\"AUC\"],\n palette='colorblind', x_order=labels, ax=ax1)\n b = sns.barplot(x=labels, y=raw_auc[\"MNCP\"],\n palette=\"colorblind\", x_order=labels, ax=ax2)\n c = sns.barplot(x=labels, y=raw_auc[\"Spearman\"],\n palette=\"colorblind\", x_order=labels, ax=ax3)\n d = sns.barplot(x=labels, y=raw_auc[\"Pearson\"],\n palette=\"colorblind\", x_order=labels, ax=ax4)\n d.set_xticklabels(labels, rotation=90)\n\n sns.despine()\n f.savefig(figure_output + \".eps\", bbox_inches='tight')\n f.savefig(figure_output + \".png\", bbox_inches='tight')",
"def plot_results(\n predictor_names, validation_auc_values, is_forward_test,\n axes_object=None, num_predictors_to_plot=None):\n\n if num_predictors_to_plot is None:\n num_predictors_to_plot = len(predictor_names)\n\n num_predictors_to_plot = max([num_predictors_to_plot, 2])\n num_predictors_to_plot = min([\n num_predictors_to_plot, len(predictor_names)\n ])\n\n y_coords = numpy.linspace(\n 0, num_predictors_to_plot - 1, num=num_predictors_to_plot, dtype=float\n )[::-1]\n\n if axes_object is None:\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n axes_object.barh(\n y_coords, validation_auc_values[:num_predictors_to_plot],\n color=BAR_FACE_COLOUR, edgecolor=BAR_EDGE_COLOUR,\n linewidth=BAR_EDGE_WIDTH\n )\n\n axes_object.set_yticks([], [])\n axes_object.set_xlabel('Validation AUC')\n\n if is_forward_test:\n axes_object.set_ylabel('Predictor added')\n else:\n axes_object.set_ylabel('Predictor removed')\n\n axes_object.set_ylim(\n numpy.min(y_coords) - 0.75, numpy.max(y_coords) + 0.75\n )\n axes_object.set_xlim(\n 0, numpy.max(validation_auc_values[:num_predictors_to_plot])\n )\n\n for j in range(num_predictors_to_plot):\n axes_object.text(\n 0., y_coords[j], ' ' + predictor_names[j],\n color=BAR_TEXT_COLOUR, fontsize=BAR_FONT_SIZE,\n horizontalalignment='left', verticalalignment='center'\n )\n\n return axes_object",
"def plot_featurewise_barplot(\n utr5_counts, cds_counts, utr3_counts, ax=None, saveto=None, **kwargs\n):\n fig = None\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n barlist = ax.bar([0, 1, 2], [utr5_counts, cds_counts, utr3_counts])\n barlist[0].set_color(\"#1b9e77\")\n barlist[1].set_color(\"#d95f02\")\n barlist[2].set_color(\"#7570b3\")\n ax.set_xticks([0, 1, 2])\n ax.set_xticklabels([\"5'UTR\", \"CDS\", \"3'UTR\"])\n max_counts = np.max(np.hstack([utr5_counts, cds_counts, utr3_counts]))\n setup_axis(\n ax=ax, axis=\"y\", majorticks=max_counts // 10, minorticks=max_counts // 20\n )\n ax.set_ylabel(\"# RPFs\")\n # sns.despine(trim=True, offset=10)\n if saveto:\n fig.tight_layout()\n fig.savefig(saveto, dpi=DPI)\n return ax, fig",
"def generate_barplot(predictions):\n # TODO: Add hover functionality\n plot = figure(x_range=IMAGE_LABELS, plot_height=300, plot_width=400)\n plot.vbar(x=IMAGE_LABELS, top=squeeze(predictions), width=0.8)\n plot.xaxis.major_label_orientation = pi / 2.\n\n return components(plot)",
"def visualize_confidence_level(prediction_proba):\n data = (prediction_proba[0]*100).round(2)\n grad_percentage = pd.DataFrame(data = data,columns = ['Porcentage'],index = ['Est','Int','Int_Est','Rob','Rob_Est','Rob_Int','Rob_Int_Est'])\n ax = grad_percentage.plot(kind='barh', figsize=(7, 4), color='#0067e7', zorder=10, width=0.8)\n ax.legend().set_visible(False)\n ax.set_xlim(xmin=0, xmax=100)\n \n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(True)\n ax.spines['bottom'].set_visible(True)\n\n ax.tick_params(axis=\"both\", which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\", labelleft=\"on\")\n \n vals = ax.get_xticks()\n for tick in vals:\n ax.axvline(x=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)\n\n ax.set_xlabel(\" Porcentage(%) Nivel de confianza\", labelpad=2, weight='bold', size=12)\n ax.set_ylabel(\"Victimización\", labelpad=10, weight='bold', size=12)\n ax.set_title('Nivel de confianza de la predicción ', fontdict=None, loc='center', pad=None, weight='bold')\n\n st.pyplot()\n \n return",
"def generate_barplot(predictions, labels):\n plot = figure(x_range=labels, plot_height=300, plot_width=400)\n plot.vbar(x=labels, top=predictions, width=0.8)\n # plot.xaxis.major_label_orientation = pi / 2.\n # plot.xaxis.axis_label_text_font_size = \"40pt\"\n # plot.yaxis.axis_label_text_font_size = \"40pt\"\n\n return components(plot)",
"def plot_uv_bar(df, colname, colorid=0):\n if (colname in list(df.columns)):\n \n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n \n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n\n # variable counts to calculate percentage\n cdict_count = df[colname].value_counts().to_dict() \n total_count = df.shape[0]\n \n \n if (len(list(cdict_count.keys())) > 5):\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.125\n # max. no. of categories Vs % rotation \n rottext_pct = 90 \n # font size for % display\n fontsiz_pct = 12\n else:\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.075\n # max. no. of categories Vs % rotation \n rottext_pct = 0 \n # font size for % display\n fontsiz_pct = 16\n \n \n # plotting...\n sns.countplot(data = df, x = colname\n , order = list(cdict_count.keys())\n , color = base_color\n , saturation = 0.7)\n\n # title and labels\n plt.title('Order of '+ colname, fontsize=20)\n plt.xlabel(colname + ' Type', fontsize=16)\n plt.ylabel('Count', fontsize=16)\n \n # x-,y- ticks\n locs, labels = plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n # display % count information on each tower of bar plot\n for loc, label in zip(locs, labels):\n count = cdict_count[label.get_text()]\n pct_string = '{:0.1f}%'.format(count*100/total_count)\n plt.text(loc, count-maxcount_pct, pct_string, ha='center', color='w', fontsize=fontsiz_pct, rotation=rottext_pct)\n\n return plt.show()\n\n else:\n \n print(' >>>Error:',colname,' is not in DataFrame')",
"def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()",
"def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()",
"def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()",
"def accuracy_hist(pred_proba_samples, labels):\n\tsampled_acc = sampled_accuracies(pred_proba_samples, labels)\n\tavg_accuracy = round(np.mean(sampled_acc) * 100, 3)\n\tprint(\"average accuracy across \" + str(pred_proba_samples.shape[0]) + \" samples: \" + str(avg_accuracy) + \"%\\n\")\n\tfig, ax = plt.subplots(figsize=(10,5))\n\tsns.distplot(100*sampled_acc, ax=ax, rug=True, kde=False)\n\tax.set_xlabel(\"Test set accuracy (%)\", fontsize=30)\n\tax.set_ylabel(\"Frequency density\", fontsize=30);\n\tax.tick_params(\"both\", labelsize=15)\n\treturn sampled_acc",
"def performanceBarCharts(): \n ##tauopathy HCS pearson\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_pearson_special_HCS_model.png\", dpi=300)\n\n ##tauopathy HCS MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_mse_special_HCS_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.075, 0.1156] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 
'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.02)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_cross_val.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.15, .2312] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.01)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_mse.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.1288, .1385] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, 
linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_trained_ablation_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.2576, .2771] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_MSE_trained_ablation_model.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI performance\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n YFP_ml_model = pickle.load(open(\"pickles/single_channel_YFP_ml_model_perf.pkl\", \"rb\"))\n DAPI_ml_model = pickle.load(open(\"pickles/single_channel_DAPI_ml_model_perf.pkl\", \"rb\"))\n y = np.array([YFP_ml_model[0], DAPI_ml_model[0]]).round(decimals=2)\n stds = [YFP_ml_model[1], DAPI_ml_model[1]]\n x = [1, 2]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"cornflowerblue\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with\\nSingle Channel Input Learning\",fontname=\"Times New Roman\", fontsize=17, y=1.01)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI, input similarity to prediction\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n y = np.array([0.94894628, 0.98718720]).round(decimals=2)\n stds = [0.1673864, 0.039042]\n x = [1, 2]\n rects = 
ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"orange\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Similarity Between\\nInput Channel and Predicted Channel\",fontname=\"Times New Roman\", fontsize=17)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1.13))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning_pearson_similarity_input_and_predicted.png\", dpi=300)",
"def featuresBarPlot(barNames,barValues):\n plt.bar(range(0,len(barNames)),barValues)\n plt.xticks(range(0,len(barNames)), barNames,rotation='vertical')\n plt.show()",
"def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)",
"def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()",
"def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")",
"def plot(var):\n # MISSCHIEN KUNNEN WE HIER NOG IETS MEE\n # total_dead = len(train_data[\"Survived\"] == 0)\n # total_survived = len(train_data[\"Survived\"] == 1)\n # died = train_data[train_data[\"Survived\"] == 0][var].value_counts() / total_dead\n # survived = train_data[train_data[\"Survived\"] == 1][var].value_counts() / total_survived\n sns.set()\n sns.set_color_codes(\"pastel\")\n\n # order bars for family size variable\n if var == \"FamSize\":\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=.7, order=[\"alone\", 1, 2, 3, \"4 or more\"]).\\\n tick_params(labelsize=18)\n else:\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=1.1).tick_params(labelsize=18)\n\n # plot style properties\n ax = plt.gca()\n\n for ax in plt.gcf().axes:\n x = ax.get_xlabel()\n y = ax.get_ylabel()\n ax.set_xlabel(x, fontsize=20)\n ax.set_ylabel(y, fontsize=20)\n\n plt.title(\"Ratio of survivors for variable \" + str(var), fontsize=22)\n t = ax.title\n t.set_position([.5, 1.05])\n plt.ylim([0, 1])\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/survived_\" + str(var) + \".png\", bbox_inches=\"tight\")\n\n plt.show()",
"def plot_metrics(self, figsize=(12, 4)):\n\n plt.figure(figsize=figsize)\n\n # Plot the BIC scores\n spl = plt.subplot(1, 2, 1)\n color_iter = itertools.cycle(['k', 'r', 'b', 'g', 'c', 'm', 'y'])\n bars = []\n self.bic = np.array(self.bic)\n\n for i, (self.cov_type, color) in enumerate(zip(self.cov_types,\n color_iter)):\n xpos = np.array(self.n_components_range) + .2 * (i - 2)\n bars.append(plt.bar(xpos,\n self.bic[i*len(self.n_components_range):(i + 1)\n * len(self.n_components_range)],\n width=.2, color=color))\n plt.xticks(self.n_components_range)\n plt.ylim([self.bic.min() * 1.01 - .01 * self.bic.max(),\n self.bic.max()])\n plt.title('BIC score per model')\n\n xpos = np.min(self.n_components_range) - 0.4\n + np.mod(self.bic.argmin(), len(self.n_components_range))\n + .2 * np.floor(self.bic.argmin() / len(self.n_components_range))\n\n plt.text(xpos, self.bic.min() * 0.97 + .03 * self.bic.max(),\n '*', fontsize=14)\n spl.set_xlabel('Number of components')\n spl.legend([b[0] for b in bars], self.cov_types)\n\n # Plot the AIC scores\n spl = plt.subplot(1, 2, 2)\n color_iter = itertools.cycle(['k', 'r', 'b', 'g', 'c', 'm', 'y'])\n bars = []\n self.aic = np.array(self.aic)\n\n for i, (self.cov_type, color) in enumerate(zip(self.cov_types,\n color_iter)):\n xpos = np.array(self.n_components_range) + .2 * (i - 2)\n bars.append(plt.bar(xpos,\n self.aic[\n i * len(self.n_components_range):\n (i + 1) * len(self.n_components_range)],\n width=.2, color=color))\n plt.xticks(self.n_components_range)\n plt.ylim([self.aic.min() * 1.01 - .01 * self.aic.max(),\n self.aic.max()])\n plt.title('AIC score per model')\n\n xpos = np.min(self.n_components_range) - 0.4\n + np.mod(self.aic.argmin(), len(self.n_components_range))\n + .2 * np.floor(self.aic.argmin() / len(self.n_components_range))\n\n plt.text(xpos, self.aic.min() * 0.97 + .03 * self.aic.max(),\n '*', fontsize=14)\n spl.set_xlabel('Number of components')\n spl.legend([b[0] for b in bars], self.cov_types)\n\n plt.tight_layout()\n # plt.show()\n return(plt)",
"def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)",
"def visualize_test_results(X, y, pred, signnames):\n assert(X.shape[0] == 14)\n nrows = 2\n ncols = 7\n nlabels = 43\n fig, axes = plt.subplots(nrows = 2 * nrows, ncols = ncols, figsize = (10, 10))\n for i in range(nrows):\n for j in range(ncols):\n aximg = axes[2*i, j]\n axprobs = axes[2*i + 1, j]\n idx = i*ncols + j\n\n img = X[idx]\n aximg.imshow(img)\n aximg.set_axis_off()\n\n probs = pred[idx]\n label = y[idx]\n colors = probs.shape[0] * [\"red\"]\n colors[label] = \"green\"\n\n n_top = 5\n topindices = sorted(np.arange(probs.shape[0]), key = lambda i: probs[i])[-n_top:]\n topprobs = probs[topindices]\n topcolors = [colors[i] for i in topindices]\n ypos = np.arange(n_top)\n axprobs.barh(ypos, topprobs, color = topcolors)\n axprobs.set_yticks(ypos)\n for ypos, l in zip(ypos, topindices):\n axprobs.text(0.025, ypos, textwrap.fill(signnames[l], 20), fontsize = 6)\n axprobs.set_axis_off()\n fig.savefig(os.path.join(img_dir, \"test_results.png\"))",
"def plot_bar_important_features(important_features, title, xlabel, ylabel, fname):\r\n plt.figure(figsize=(20, 21))\r\n plt.barh(important_features.index.astype(str).tolist(), important_features.values.tolist())\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n plt.savefig(fname, bbox_inches='tight')\r\n plt.close()",
"def plot(self):\n\t\traw_labels = self.make_raw_data()[1]\n\t\tbalanced_labels = self.get_extra()[1]\n\t\tfig, ax1 = subplots()\n\t\tax2 = ax1.twinx()\n\t\tx = array(range(1, NCLASSES + 1))\n\t\tl1 = ax1.bar(x - 0.3, self.prior_sizes, width = 0.25, color = 'b', align = 'center', label = 'train')\n\t\tl2 = ax2.bar(x, bincount(raw_labels - 1), width = 0.25, color = 'r', align = 'center', label = 'confident')\n\t\tl3 = ax2.bar(x + 0.3, bincount(balanced_labels - 1), width = 0.25, color = 'g', align = 'center', label = 'rebalanced')\n\t\tconfident_frac = len(raw_labels) / float(self.predictions.shape[0])\n\t\tusable_frac = len(balanced_labels) / float(self.predictions.shape[0])\n\t\tax1.set_title('at >{0:.1f}%, {1:.1f}% reliable, {2:.1f}% usable'.format(self.confidence * 100, confident_frac * 100, usable_frac * 100))\n\t\tax1.legend([l1, l2, l3], [l1.get_label(), l2.get_label(), l3.get_label()], loc = 'upper right')\n\t\tax1.set_xticks(x)",
"def plot_results(sgd_train_acc, sgd_train_std, sgd_heldout_acc, sgd_heldout_std, sgd_test_acc,\n dt_train_acc, dt_train_std, dt_heldout_acc, dt_heldout_std, dt_test_acc,\n dt4_train_acc, dt4_train_std, dt4_heldout_acc, dt4_heldout_std, dt4_test_acc,\n stumps_train_acc, stumps_train_std, stumps_heldout_acc, stumps_heldout_std, stumps_test_acc):\n train_x_pos = [0, 4, 8, 12]\n cv_x_pos = [1, 5, 9, 13]\n test_x_pos = [2, 6, 10, 14]\n ticks = cv_x_pos\n\n labels = ['sgd', 'dt', 'dt4', 'stumps (4 x 50)']\n\n train_accs = [sgd_train_acc, dt_train_acc, dt4_train_acc, stumps_train_acc]\n train_errors = [sgd_train_std, dt_train_std, dt4_train_std, stumps_train_std]\n\n cv_accs = [sgd_heldout_acc, dt_heldout_acc, dt4_heldout_acc, stumps_heldout_acc]\n cv_errors = [sgd_heldout_std, dt_heldout_std, dt4_heldout_std, stumps_heldout_std]\n\n test_accs = [sgd_test_acc, dt_test_acc, dt4_test_acc, stumps_test_acc]\n\n fig, ax = plt.subplots()\n ax.bar(train_x_pos, train_accs, yerr=train_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='train')\n ax.bar(cv_x_pos, cv_accs, yerr=cv_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='held-out')\n ax.bar(test_x_pos, test_accs, align='center', alpha=0.5, capsize=10, label='test')\n ax.set_ylabel('Accuracy')\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels)\n ax.set_title('Models')\n ax.yaxis.grid(True)\n ax.legend()\n plt.tight_layout()",
"def plot_bv_bar(df, xcolname, ycolname, icol=0):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... box\n sns.barplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()",
"def feature_importance_plot(algorithm,X_train,y_train,of_type):\r\n if of_type == \"coef\":\r\n algorithm.fit(X_train,y_train)\r\n coef = pd.DataFrame(algorithm.coef_.ravel())\r\n coef[\"coef\"] = X_train.columns\r\n plt.figure(figsize=(14,4))\r\n ax1 = sns.barplot(coef[\"coef\"],coef[0],palette=\"jet_r\",\r\n linewidth=2,edgecolor=\"k\"*coef[\"coef\"].nunique())\r\n #ax1.set_facecolor(\"lightgrey\")\r\n ax1.axhline(0,color=\"k\",linewidth=2)\r\n plt.ylabel(\"coefficients\")\r\n plt.xlabel(\"features\")\r\n plt.xticks(rotation='vertical')\r\n plt.title('FEATURE IMPORTANCES')\r\n \r\n elif of_type == \"feat\":\r\n algorithm.fit(X_train,y_train)\r\n coef = pd.DataFrame(algorithm.feature_importances_)\r\n coef[\"feat\"] = X_train.columns\r\n plt.figure(figsize=(14,4))\r\n ax2 = sns.barplot(coef[\"feat\"],coef[0],palette=\"jet_r\",\r\n linewidth=2,edgecolor=\"k\"*coef[\"feat\"].nunique())\r\n #ax2.set_facecolor(\"lightgrey\")\r\n ax2.axhline(0,color=\"k\",linewidth=2)\r\n plt.ylabel(\"coefficients\")\r\n plt.xlabel(\"features\")\r\n plt.xticks(rotation='vertical')\r\n plt.title('FEATURE IMPORTANCES')",
"def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax",
"def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')",
"def plot(profile, filename=\"area_plot.png\", colors=None):\n \n if profile.abundance_data.shape[1] > MAX_DATA_POINTS:\n print(\"Too many data points to plot area plot.\")\n return\n\n col_label = sort_for_area_plot(profile.abundance_data)\n\n profile.abundance_data.sort(columns=col_label, axis=0, inplace=True)\n\n if colors == None:\n colors = generate_colors(len(profile.abundance_data.columns))\n\n w = 0 # x coordinate to plot the new bar on\n \n prev = dict()\n plt.clf()\n plt.title(\"Area Plot\")\n lgd_labels = dict() # stores information for the plot legend \n \n for cls in profile.references.keys():\n df = profile.abundance_data.loc[profile.references[cls]]\n sort_by_most_abundant(df)\n \n # change order of columns so most abundant attribute is plotted first\n l = list(df.columns)[::-1]\n df = df[l]\n \n for sample in df.index:\n for i in range(len(df.columns)):\n attr = df.columns[i] \n if i == 0:\n prev[sample] = 0\n plt.bar(w, df.loc[sample, attr], linewidth=0, bottom=prev[sample], color=colors[i])\n prev[sample] += df.loc[sample, attr]\n if attr not in lgd_labels.keys():\n lgd_labels[attr] = mpatches.Patch(color=colors[i], label=attr)\n w += 0.8\n\n ticks = list()\n ticks.append(0)\n running = 0\n for cls in profile.references.keys():\n running = running + len(profile.references[cls]) * 0.8\n ticks.append(running)\n plt.axvline(x=running, color='black')\n \n plt.xticks(ticks, list(profile.references.keys()))\n plt.xlim(0, len(profile.abundance_data.index) * 0.8)\n plt.ylim(0,1)\n plt.xlabel(\"Samples\")\n plt.ylabel(\"Abundance\")\n lgd = plt.legend(title=\"Attributes\", handles=list(lgd_labels.values()), \n loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=2, fontsize=8)\n plt.savefig(filename, bbox_extra_artists=(lgd,), bbox_inches='tight', \n dpi=(400), figsize=(24, 24))",
"def plot_chrom_classification(df_status, ax=None, add_cbar=True, cbar_ax=None):\n df = df_status.copy()\n df.replace('Pass', 0, inplace=True)\n df.replace('Possible loss', 1, inplace=True)\n df.replace('Possible gain', 2, inplace=True)\n df.replace('Fail', 3, inplace=True)\n\n cp = sns.color_palette()\n c_loss, c_neut, c_gain = sns.diverging_palette(255, 133, l=60, n=3)\n cmap = matplotlib.colors.ListedColormap([c_neut, c_loss, c_gain, cp[2]])\n\n if not ax:\n f = plt.figure()\n ax = f.add_subplot(111)\n\n nd = df.as_matrix()\n\n cax = ax.imshow(nd, aspect='equal', cmap=cmap, interpolation=None, vmin=0, vmax=3)# , vmax=1, vmin=-1)\n ax.set_yticks(np.arange(0, df.shape[0]))\n ax.set_xticks(np.arange(0, df.shape[1]))\n ax.set_xticks(np.arange(0.5, df.shape[1]+0.5), minor=True)\n\n for y in np.arange(0.5, df.shape[0], 1):\n ax.axhline(y, linestyle='--', color='black', linewidth=1)\n \n ax.set_yticklabels([s.replace('_', '') for s in df.index])\n ax.set_xticklabels(df.columns);\n ax.grid(which='minor', color='black', linewidth=1)\n ax.set_xlabel('chromosome')\n \n #colorbar\n if add_cbar:\n cbar = ax.figure.colorbar(cax, ticks=[0.375, 0.75+0.375, 1.5+0.375, 2.25+0.375], cax=cbar_ax, orientation='horizontal')\n cbar.ax.set_xticklabels(['Pass', 'Pos. Loss', 'Pos. Gain', 'Fail'])\n cbar.ax.xaxis.tick_top()\n cbar.ax.tick_params(axis='x', which='major', pad=0)\n for y in [0.25, 0.5, 0.75]:\n cbar.ax.axvline(y, color='black', linewidth=1)\n \n return ax",
"def plot_bootstrap_statistics(stats, test_stat, ci, alpha, test_stat_label, x_label, title=None):\n\n _, ax = plt.subplots(figsize=(8, 6))\n ax.hist(stats)\n ax.axvline(x=test_stat, color='blue', linestyle='-',\n label=test_stat_label + str(round(test_stat, 3)))\n ci_label = ['lower ' + str(round(100 * (1 - alpha))) + '%',\n 'upper ' + str(round(100 * (1 - alpha))) + '%']\n linestyles = ['--', '-.']\n for i, val in enumerate(ci):\n label = str(ci_label[i])\n linestyle = linestyles[i]\n ax.axvline(x=val, color='blue', linestyle=linestyle,\n label=label + ' CI: ' + str(round(val, 3)))\n\n ax.set_ylabel('Frequency')\n ax.set_xlabel(x_label)\n ax.set_title(title)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.legend()\n return ax"
] | [
"0.7165492",
"0.65022504",
"0.6471554",
"0.6354328",
"0.63000333",
"0.6286439",
"0.6243622",
"0.6206506",
"0.6189631",
"0.6127064",
"0.6123013",
"0.6092255",
"0.60813063",
"0.6076884",
"0.60670537",
"0.60670257",
"0.6065567",
"0.60621655",
"0.6054025",
"0.60462296",
"0.6043612",
"0.6037677",
"0.603267",
"0.60219234",
"0.6014031",
"0.6007413",
"0.5981874",
"0.5971387",
"0.5969531",
"0.5960233"
] | 0.6859097 | 1 |
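The `crowd_auc_plot` document above lays out one probability-normalised AUC histogram per (feature count, condition) cell using plotly subplots. A minimal runnable sketch of that layout, written against `plotly.subplots.make_subplots` (the `plotly.tools` variant used in the row is the older, deprecated entry point) and with randomly generated stand-in AUC values instead of the pickled evaluation results:

```python
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots

rng = np.random.default_rng(0)
n_features = [1, 2, 3]           # rows of the grid: number of features
conditions = [1, 2, 3, 4, 5]     # columns of the grid: condition id

titles = [f"{n} feature{'s' if n > 1 else ''} (condition {c})"
          for n in n_features for c in conditions]
fig = make_subplots(rows=len(n_features), cols=len(conditions),
                    subplot_titles=titles)

for i, n in enumerate(n_features, start=1):
    for j, c in enumerate(conditions, start=1):
        aucs = rng.uniform(0.5, 1.0, size=200)   # stand-in AUC sample
        fig.add_trace(
            go.Histogram(x=aucs, histnorm="probability", autobinx=False,
                         xbins=dict(start=0.5, end=1.0, size=0.025)),
            row=i, col=j,
        )

fig.update_layout(showlegend=False, height=900, width=1200,
                  title_text="AUC histograms per feature count and condition")
fig.write_html("auc_histograms.html", auto_open=False)
```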
substring of pattern without the last letter | def prefix(pattern):
return pattern[0:len(pattern)-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prefix(pattern):\n return pattern[0:len(pattern)-1]",
"def sufix(pattern):\n return pattern[1:len(pattern)]",
"def without_end(s):\n string = s[1:-1]\n return string",
"def getFinal(endstr):\n if not endstr:\n return ''\n if endstr.endswith('ng'):\n return 'ng'\n lastchar = endstr[-1]\n if lastchar in ['m', 'b', 'n', \"x\", 'r', 'l', 't', 'x']:\n return lastchar\n return ''",
"def extract_sub(s: str):\n subject = re.search(r'sub-\\d+', s)[0]\n return subject",
"def lstripfirst(cls, s, pattern=None):\n if pattern is None or len(pattern) == 0:\n return StringCleaner.lstrip(s)\n\n if s.find(pattern) == 0:\n return s[len(pattern):]\n\n else:\n return s",
"def only_source_name(full_name):\n if full_name.count(\"-\") >= 3:\n tokens = full_name.split(\"-\")\n return \"-\".join(tokens[:-3])\n else:\n return full_name",
"def shorten(string, maxLen, last):\n if len(string) <= maxLen:\n return string\n string = string[:maxLen]\n string = string[::-1]\n found = re.search(re.escape(last), string)\n if found:\n string = string[found.start():]\n string = string[::-1]\n return string",
"def ending_cutter(name: str):\n if name.endswith('ID') and re.match(r'^(?=\\w+[A-Z])(?=\\w+[a-z])\\w+$', name):\n return name[:-2]\n return name",
"def get_suffix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n start = len(word) - length\n return word[start:]\n else:\n return word.rjust(length, \"*\")",
"def StripSuffix(string, suffix):\n assert string.endswith(suffix)\n return string[:-len(suffix)]",
"def filter_pathext(val: Optional[str]) -> str:\n return os.path.splitext(val or '')[1]",
"def remove(somestring, sub):\n location = somestring.find(sub)\n length = len(sub)\n part_before = somestring[:length+location]\n part_after = somestring[location+length:]\n return part_before + part_after",
"def stripSuffix(suffix, string):\n\n if string.endswith(suffix):\n return string[:-len(suffix)]\n\n return string",
"def suffix(sequence, l):\n if l > len(sequence):\n return sequence\n else:\n return sequence[-l:]",
"def formatPattern(self, pat):\n\n if not pat:\n return ''\n else:\n return pat",
"def last(word):\n return word[-1]",
"def getsubString(w, c):\n count = 0\n for x in w:\n if x == c:\n break\n count=count+1\n return w[:count]",
"def cleaning_sequence_regex(sequence):\n amb = re.compile(r\"[^ACGT]\")\n return amb.sub(\"\", sequence)",
"def strip_optional_suffix(string, suffix):\n if string.endswith(suffix):\n string = string[:-len(suffix)]\n return string",
"def basename_sans(path):\n return os.path.splitext(os.path.basename(path))[0]",
"def _last_name(self, full_name):\n name_partition = full_name.partition(u',')\n no_suffix = name_partition[0].strip()\n suffix = name_partition[2].strip()\n name_parts = no_suffix.split()\n part_count = len(name_parts)\n if part_count == 1 or part_count == 2:\n return name_parts[-1], suffix\n else:\n assert part_count > 2\n if name_parts[-2].islower():\n return u' '.join(name_parts[-2:]), suffix\n else:\n return name_parts[-1], suffix",
"def get_last_part_of_path(path: str) -> str:\n multi_os_path = path.replace(\"\\\\\", \"/\")\n return re.search(\"(?:[^/](?!/))+$\", multi_os_path).group(0)",
"def slice(str):\n\tnew_string = ''\n\tfor i in reversed(str): #reading in str reversed\n \t\tif i != '/': #stopping building once we hit '/'\n \t\t\tnew_string += i\n \t\telse:\n \t\t\tnew_string = new_string[::-1] #re-reversing\n \t\t\tif new_string.endswith('.fastq.gz'):\n \t\t\t\tnew_string = new_string[:-9]\n \t\t\tif new_string.endswith('.fastq'): \n \t\t\t\tnew_string = new_string[:-6] #cutting out .fastq\n \t\t\treturn new_string",
"def first_last_chop(seq):\n return seq[4:-4:2]",
"def last(word):\n\treturn word[-1]",
"def _format_pattern(pattern: str) -> str:\n return pattern.rstrip('*') + '**'",
"def fix_extra(in_str):\n spaced = camel_re.sub(\"_\", in_str)\n return spaced.split(\"_\")[0]",
"def middle(word):\n return word[1:-1]",
"def remove_ending(self, value: str, ending: str):\n length = len(ending)\n if len(value) < length: return value\n\n if value[-1*length:].lower() == ending:\n return value[:-1*length]\n else:\n return value"
] | [
"0.70545506",
"0.69251823",
"0.65390664",
"0.6211243",
"0.61586004",
"0.6141123",
"0.60177153",
"0.5907959",
"0.5879715",
"0.58760023",
"0.57343626",
"0.57218856",
"0.5695807",
"0.5687177",
"0.56706446",
"0.5668733",
"0.5656999",
"0.5640331",
"0.5611039",
"0.5600304",
"0.5589669",
"0.55875504",
"0.55868274",
"0.5585504",
"0.55823344",
"0.55816597",
"0.5574258",
"0.5568343",
"0.5555527",
"0.55542696"
] | 0.70895904 | 0 |
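The `prefix` document in the row above and the `sufix` snippet among its negatives are complementary slicing helpers of the kind used for k-mer overlap checks: one drops the last character, the other drops the first. A short self-contained check of both (names and test strings are illustrative):

```python
def prefix(pattern: str) -> str:
    """Everything except the last character ('' for the empty string)."""
    return pattern[:-1]          # same as pattern[0:len(pattern) - 1]

def suffix(pattern: str) -> str:
    """Everything except the first character ('' for the empty string)."""
    return pattern[1:]

assert prefix("GTACG") == "GTAC"
assert suffix("GTACG") == "TACG"
assert prefix("") == "" and suffix("") == ""
```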
Tests that an org can successfully be renamed. | def test_rename_org(client: Client) -> None:
with dev_login(client, 'admin'):
# Create an org
resp = client.post('api/v1/org', json={
'name': 'testorg12'
})
org_id = resp.json['id']
assert 200 <= resp.status_code <= 300
# Create the second test org
resp = client.post('api/v1/org', json={
'name': 'renameorgtest'
})
org_rename_test_id = resp.json['id']
org_rename_test_name = resp.json['name']
assert 200 <= resp.status_code <= 300
new_name = 'testorgkevinwashere'
# Test that user can successful rename the org
client.put(f'/api/v1/org/{org_id}/rename/{new_name}')
assert 200 <= resp.status_code <= 300
# Get the org name of org_id and check if it has changed
resp = client.get(f'/api/v1/org/{org_id}')
assert 200 <= resp.status_code <= 300
assert resp.json['name'] == new_name
# Check that we cannot rename the org to an org that already exists
resp = client.put(f'/api/v1/org/{org_rename_test_id}/rename/{org_rename_test_name}')
assert resp.status_code == 403
# Test that renaming an org that doesn't exist won't work
resp = client.put(f'/api/v1/org/THISORGDOESN\'TEXIST/rename/{org_rename_test_id}')
assert resp.status_code == 404 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_rename_org_permissions(client: Client) -> None:\n with dev_login(client, 'admin'):\n # Create an org\n resp = client.post('/api/v1/org', json={\n 'name': 'testorg12'\n })\n assert 200 <= resp.status_code <= 300\n org_id = resp.json['id']\n\n with dev_login(client, 'user'):\n # Check that can't rename the org\n resp = client.put(f'/api/v1/org/{org_id}/rename/kevinwasheretestrename')\n assert resp.status_code == 403",
"def test_organization_valid_name(self):\n hufflepuffs = models.Organization(name='hufflepuffs', title='Huffle Puffs')\n self.assertFalse(hufflepuffs.is_valid_name('#$%#%___2836273untitled'))\n self.assertTrue(hufflepuffs.is_valid_name('hufflepuffs'))",
"def test_rename_fail(cleandir, fake_db):\n cmd = commands.Rename(['bad_alias', 'NEW'])\n with pytest.raises(errors.AliasNotDefinedError):\n cmd()",
"def test_organization_name(self):\n insurgent = models.Organization(title='Insurgent')\n with self.assertRaises(ValueError):\n insurgent.name = '35453496*%&^$%^'\n with self.assertRaises(ValueError):\n insurgent.name = 'Insurgent'\n insurgent.name = 'insurgent'\n self.assertEqual(insurgent.name, 'insurgent')",
"def test_rename_cmd_line(self):\n\n cmd = ['pydroid', 'rename', 'name:%s' % NEW_APP_NAME,\n 'domain:%s' % NEW_DOMAIN]\n\n subprocess.call(cmd)\n self.assertTrue(os.path.exists(RENAMED_PROJECT_DIR))",
"def testRename(self):\n def _check(results):\n self.assertEqual(results[0], b'')\n self.assertEqual(results[1], b'testfile2')\n return self.runCommand('rename testfile2 testfile1')\n\n d = self.runScript('rename testfile1 testfile2', 'ls testfile?')\n d.addCallback(_check)\n d.addCallback(self.assertEqual, b'')\n return d",
"def test_simple_rename_success(run_line, go_ep1_id):\n load_response_set(\"cli.transfer_activate_success\")\n load_response_set(\"cli.rename_result\")\n\n result = run_line(f\"globus rename {go_ep1_id} foo/bar /baz/buzz\")\n assert \"File or directory renamed successfully\" in result.output",
"def test_component_rename_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component rename component1 changed_name')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_component_rename_error_bad_new_name(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component rename component1 component2')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_milestone_rename_error_bad_milestone(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('milestone rename bad_milestone changed_name')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_milestone_rename_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('milestone rename milestone1 changed_milestone')\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_rename_python_api(self):\n\n rename.rename([NEW_APP_NAME, NEW_DOMAIN])\n self.assertTrue(os.path.exists(RENAMED_PROJECT_DIR))",
"def test_version_rename_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('version rename 1.0 9.9')\n rv, output = self._execute('version list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_update_org(session): # pylint:disable=unused-argument\n org = factory_org_service()\n\n org.update_org(TestOrgInfo.org2)\n\n dictionary = org.as_dict()\n assert dictionary['name'] == TestOrgInfo.org2['name']",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def test_edit_rename(self):\n group = groups.get_by_name(\"First Group\")\n new_name = 'BRAND-NEW-NAME'\n self.open_url('/group/edit/{0}'.format(group.id))\n el = self.wd.find_element(By.ID, \"name\")\n el.clear()\n el.send_keys(new_name)\n self.submit_form(\"group_form\")\n self.assertEquals('Group List', self.wd.title)\n self.assert_in_list_table(new_name)",
"def change_nm(src,dst):\n\timport os\n\ttry:\n\t\tos.rename(src,dst)\n\texcept:\n\t\tprint \"this is a mistake\"\n\t\treturn -1\n\n\treturn 0",
"def test_version_rename_error_bad_version(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version rename bad_version changed_name')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_component_rename_error_bad_component(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component rename bad_component changed_name')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_rename_overwrite(getch, y_or_n, cleandir, fake_db, alias_dict):\n getch.side_effect = lambda x: y_or_n\n fnames = [name for name in alias_dict]\n OLD, NEW = fnames[0], fnames[1]\n cmd = commands.Rename([OLD, NEW])\n cmd()\n\n loaded_aliases = shared.load_aliases()\n if y_or_n == \"y\":\n cmd_string = alias_dict[OLD]\n else:\n cmd_string = alias_dict[NEW]\n\n assert loaded_aliases[NEW] == cmd_string",
"def test_organization_pickername(self):\n # scenario 1: when only title is given\n abnegation = models.Organization(title=\"Abnegation\")\n self.assertIsInstance(abnegation.pickername, str)\n self.assertEqual(abnegation.pickername, abnegation.title)\n\n # scenario 2: when both name and title are given\n name = 'cullens'\n title = 'The Cullens'\n olympic_coven = models.Organization(title=title)\n olympic_coven.name = name\n db.session.add(olympic_coven)\n db.session.commit()\n self.assertIsInstance(olympic_coven.pickername, str)\n assert (\n '{title} (@{name})'.format(title=title, name=name)\n in olympic_coven.pickername\n )",
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_invalid_stream_rename(self) -> None:\n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n stream = self.subscribe(user_profile, \"stream_name1\")\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n # Check for empty name\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"\"})\n self.assert_json_error(result, \"Stream name can't be empty!\")\n # Check for long name\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"a\" * 61})\n self.assert_json_error(result, \"Stream name too long (limit: 60 characters).\")\n # Check for Cc characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"test\\n\\rname\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")\n # Check for Cn characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"test\\uFFFEame\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")",
"def test_name_collision(self):\r\n org = \"myorg\"\r\n course = \"another_course\"\r\n name = \"running_again\"\r\n course_location = Location(org, course, name, 'course', name)\r\n course_xlate = loc_mapper().translate_location(course_location, add_entry_if_missing=True)\r\n self.assertEqual(course_location, loc_mapper().translate_locator_to_location(course_xlate))\r\n eponymous_block = course_location.replace(category='chapter')\r\n chapter_xlate = loc_mapper().translate_location(eponymous_block, add_entry_if_missing=True)\r\n self.assertEqual(course_location, loc_mapper().translate_locator_to_location(course_xlate))\r\n self.assertEqual(eponymous_block, loc_mapper().translate_locator_to_location(chapter_xlate))\r\n # and a non-existent one w/o add\r\n eponymous_block = course_location.replace(category='problem')\r\n with self.assertRaises(ItemNotFoundError):\r\n chapter_xlate = loc_mapper().translate_location(eponymous_block, add_entry_if_missing=False)",
"def test_it_has_a_name():\n rob = Unicorn('Robert')\n assert rob.name == 'Robert'",
"def check_deletion(oc_name, org):\n duplicate_name = org['name']\n\n distance = org_tools.getDistance(oc_name, duplicate_name)\n\n if distance <= 0.35:\n org['can_delete'] = 1\n else:\n org['can_delete'] = 0\n\n return org",
"def test_validate_valid_org(self):\r\n assert self.org_tree != 0",
"def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)",
"def _should_be_renamed(old_name, new_name):\n # type: (str, str) -> bool\n\n # There's no point to rename into default name\n if _is_default_name(new_name):\n return False\n\n # Strip prefixes and check if names are the same\n return old_name.lstrip('_') != new_name.lstrip('_')"
] | [
"0.7659885",
"0.6947406",
"0.67307895",
"0.66063076",
"0.65478325",
"0.6541099",
"0.64136106",
"0.6373427",
"0.6351179",
"0.6306828",
"0.6300058",
"0.6207878",
"0.6204783",
"0.61821",
"0.6181979",
"0.6157419",
"0.61501044",
"0.6108966",
"0.610483",
"0.60282815",
"0.6019909",
"0.59880763",
"0.59731126",
"0.59682703",
"0.58934605",
"0.58866066",
"0.5794345",
"0.5789672",
"0.5759843",
"0.5714731"
] | 0.737055 | 1 |
Tests that a user cannot rename an org if they are not an admin | def test_rename_org_permissions(client: Client) -> None:
with dev_login(client, 'admin'):
# Create an org
resp = client.post('/api/v1/org', json={
'name': 'testorg12'
})
assert 200 <= resp.status_code <= 300
org_id = resp.json['id']
with dev_login(client, 'user'):
# Check that can't rename the org
resp = client.put(f'/api/v1/org/{org_id}/rename/kevinwasheretestrename')
assert resp.status_code == 403 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_admin_from_org(self):\n pass",
"def test_add_admin_to_org(self):\n pass",
"def test_get_one_for_other_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n coAdmin = User.create(name='coAdmin', email='[email protected]',\n owned_organizations=[org.uid])\n coAdmin.put()\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users/{}'.format(org.uid, coAdmin.uid),\n headers=self.login_headers(user),\n status=403,\n )",
"def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)",
"def test_modify_nonexist_username(self):\n print('(' + self.test_modify_nonexist_username.__name__+')',\n self.test_modify_nonexist_username.__doc__)\n self.assertIsNone(self.connection.modify_user(\n NON_EXIST_PATIENT_USERNAME, PATIENT['public_profile'],\n PATIENT['restricted_profile']))",
"def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))",
"def be_admin(username):\n user_data = my_users.get(username)\n if not user_data or 'admin' not in user_data.get('roles', []):\n return \"User does not have admin role\"",
"def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))",
"def test_admin_cannot_create_users_with_same_name(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'This name is already registered!')\n self.assertEqual(resp.status_code, 400)",
"def test_set_display_name_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )",
"def test_admin_cannot_update_user_with_invalid_name(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover3',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Enter name in a correct string format, (john doe)!')\n self.assertEqual(resp.status_code, 400)",
"def test_admin_cannot_create_users_with_same_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Paul Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This username is already taken!')\n self.assertEqual(resp.status_code, 400)",
"def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)",
"def test_rename_org(client: Client) -> None:\n with dev_login(client, 'admin'):\n # Create an org\n resp = client.post('api/v1/org', json={\n 'name': 'testorg12'\n })\n org_id = resp.json['id']\n assert 200 <= resp.status_code <= 300\n\n # Create the second test org\n resp = client.post('api/v1/org', json={\n 'name': 'renameorgtest'\n })\n org_rename_test_id = resp.json['id']\n org_rename_test_name = resp.json['name']\n assert 200 <= resp.status_code <= 300\n\n new_name = 'testorgkevinwashere'\n\n # Test that user can successful rename the org\n client.put(f'/api/v1/org/{org_id}/rename/{new_name}')\n assert 200 <= resp.status_code <= 300\n\n # Get the org name of org_id and check if it has changed\n resp = client.get(f'/api/v1/org/{org_id}')\n assert 200 <= resp.status_code <= 300\n assert resp.json['name'] == new_name\n\n # Check that we cannot rename the org to an org that already exists\n resp = client.put(f'/api/v1/org/{org_rename_test_id}/rename/{org_rename_test_name}')\n assert resp.status_code == 403\n\n # Test that renaming an org that doesn't exist won't work\n resp = client.put(f'/api/v1/org/THISORGDOESN\\'TEXIST/rename/{org_rename_test_id}')\n assert resp.status_code == 404",
"def allowed_user_access_create_different_org(user):\n return user.has_perm(\"vnswww.userprofile_create_different_org\")",
"def test_handle_edit_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"brS\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"edit brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()",
"def test_admin_cannot_update_user_with_different_roles(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='supervisor'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'role should either be admin or attendant')\n self.assertEqual(resp.status_code, 400)",
"def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)",
"def test_set_display_name_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )",
"def test_username_is_writable_for_user_creation(self):\n request = Mock()\n assert 'username' not in self.admin.get_readonly_fields(request)",
"def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)",
"def test_modify_access_bad_role(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {\r\n 'unique_student_identifier': self.other_staff.email,\r\n 'rolename': 'robot-not-a-roll',\r\n 'action': 'revoke',\r\n })\r\n self.assertEqual(response.status_code, 400)",
"def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)",
"def test_unauthorized_mod(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.modify_user(user=existing_user_id, password=id(self), code=403)",
"def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])",
"def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])",
"def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)",
"def test_change_permission(self):\r\n self.assertTrue(self.creator_admin.has_change_permission(self.request))\r\n\r\n self.request.user = self.user\r\n self.assertFalse(self.creator_admin.has_change_permission(self.request))",
"def test_admin_cannot_update_user_with_invalid_username(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love summer',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Enter username in a correct string format no spaces, (johndoe)!')\n self.assertEqual(resp.status_code, 400)",
"def test_remove_last_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_organizations=[org.uid])\n user.put()\n\n self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(user),\n )\n\n # not changed in the db\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)"
] | [
"0.67709506",
"0.6742589",
"0.6593983",
"0.6527626",
"0.64782",
"0.6455436",
"0.64416325",
"0.64155805",
"0.64003736",
"0.63265234",
"0.63119966",
"0.6310905",
"0.63084286",
"0.62969965",
"0.62476504",
"0.62471217",
"0.6205791",
"0.61786944",
"0.6114555",
"0.60967547",
"0.6096085",
"0.60781544",
"0.60705584",
"0.6065757",
"0.6039098",
"0.6039098",
"0.60373855",
"0.6032585",
"0.60289365",
"0.60263294"
] | 0.7595818 | 0 |
tax_id is None, defaults to 9606, if 0, means load all supported species, entrez_gene is only used in local mode to accelerate Symbol retrieval | def load(tax_id=9606, l_use_GPDB=True, user_go=None, l_L1k=False):
if tax_id is None:
util.error_msg('tax_id must be an int, or 0 means all supported species')
tax_id=abs(tax_id)
s_key=Cache.key(l_use_GPDB, l_L1k=l_L1k)
if tax_id!=0 and tax_id in Cache.TOTAL_GENE_COUNT[s_key]: return
S_tax_id=[]
# performance optimization
if l_L1k: return Cache.loadL1k()
if not l_use_GPDB:
if tax_id not in (0,9606):
util.error_msg('Local database only supports human!')
tax_id=9606
if tax_id in Cache.TOTAL_GENE_COUNT[s_key]: return
S_tax_id=[tax_id]
else:
mydb=db.DB('METASCAPE')
if tax_id>0:
S_tax_id=[tax_id]
else:
t=mydb.from_sql('SELECT DISTINCT tax_id FROM gid2source_id')
S_tax_id=[x for x in t.tax_id.astype(int).tolist() if x not in Cache.TOTAL_GENE_COUNT[s_key]]
if len(S_tax_id)==0: return
s_tax_id=",".join(util.iarray2sarray(S_tax_id))
print("Load %s GO database for tax_id: %s ..." % (s_key, s_tax_id))
if l_use_GPDB:
s_where_L1k="term_category_id>=91" if l_L1k else "term_category_id<91"
if Cache.CATEGORY[s_key] is None:
t=mydb.from_sql("SELECT term_category_id,category_name FROM term_category where "+s_where_L1k)
Cache.CATEGORY[s_key] = {t.ix[i,'term_category_id']:t.ix[i,'category_name'] for i in t.index}
t=mydb.from_sql("SELECT t.term_id GO,term_name AS DESCRIPTION,term_category_id CATEGORY_ID FROM term t where "+s_where_L1k)
X=t.DESCRIPTION.isnull()
if sum(X):
t.ix[X, 'DESCRIPTION']=t.ix[X, 'GO']
#if not util.is_python3():
# t['DESCRIPTION']=t['DESCRIPTION'].apply(lambda x: unicode(x, encoding="ISO-8859-1", errors='ignore')) # L1000 has micro Mol
Cache.GO_DESCRIPTION[s_key]=dict(zip(t.GO, t.DESCRIPTION))
t['CATEGORY_ID']=t['CATEGORY_ID'].astype(int)
Cache.GO_CATEGORY[s_key]={re.sub(r'^\d+_', '', row.GO):int(row.CATEGORY_ID) for row in t.itertuples() }
if tax_id==0:
t=mydb.from_sql("SELECT COUNT(*) as N,tax_id FROM annotation a where a.annotation_type_id=3 AND content='protein-coding' group by tax_id")
else:
t=mydb.sql_in("SELECT COUNT(*) as N,tax_id FROM annotation a where a.annotation_type_id=3 AND content='protein-coding' and tax_id in (", ") group by tax_id", S_tax_id)
Cache.TOTAL_GENE_COUNT[s_key]=dict(zip(t.tax_id, t.N))
if tax_id==0:
t=mydb.from_sql("SELECT term_id GO,gids GENES,tax_id FROM term2gids where "+s_where_L1k)
else:
t=mydb.sql_in("SELECT term_id GO,gids GENES,tax_id FROM term2gids WHERE "+s_where_L1k+" and tax_id in (", ")", S_tax_id)
#tmp=t[t.GO.apply(lambda x: x.startswith('6'))]
#print tmp[:4]
else:
DATA_FILE=setting.go['DATA_FILE']
#TAX_ID,GeneID
t_gene=pd.read_csv(DATA_FILE)
t_gene=t_gene[t_gene.TAX_ID==tax_id]
C_GENE=set(t_gene['GeneID'].astype(str).tolist())
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(C_GENE)
if user_go is not None:
if os.path.isfile(user_go):
if user_go.upper().endswith(".CSV"):
t=pd.read_csv(user_go)
else:
t=pd.read_table(user_go)
elif os.path.isfile(Cache.DATA_DIR+"AllAnnotations.tsv"):
t=pd.read_csv(Cache.DATA_DIR+"AllAnnotations.tsv", sep='\t')
if t is None:
util.error_msg('No GO Annotations available.')
#GO TYPE GENES DESCRIPTION
S=util.unique(t.TYPE)
Cache.CATEGORY[s_key] = dict(zip(S, S))
Cache.GO_CATEGORY[s_key]=dict(zip(t.GO, t.TYPE))
Cache.GO_DESCRIPTION[s_key]=dict(zip(t.GO, t.DESCRIPTION))
t['tax_id']=tax_id
for x in S_tax_id:
Cache.ALL_GENE[s_key][x]=set()
Cache.GENE_GO[s_key][x]={}
Cache.GO_GENE[s_key][x]={}
Cache.CATEGORY_COUNT[s_key][x]={}
Cache.GO_GENE_ENRICH[s_key][x]=set()
#sw=util.StopWatch("AAAAAAA")
for tax_id2,t_v in t.groupby('tax_id'):
#t_v=t_v.copy()
GENE_GO={}
GO_GENE={}
GO_GENE_ENRICH=set()
ALL_GENE=set()
CATEGORY_COUNT={}
s_cat=0
S_genes=[ (row.GO, row.GENES.split(",")) for row in t_v.itertuples() ]
if not l_use_GPDB:
S_genes=[ (x, [y for y in Y if (y in C_GENE)]) for x,Y in S_genes ]
GO_GENE={x: set(Y) for x,Y in S_genes if (len(Y)>0 and len(Y)<=Cache.N_TRIVIAL) }
GO_GENE_ENRICH=set(GO_GENE.keys())
if l_use_GPDB:
for x in GO_GENE_ENRICH:
if re.sub(r'^\d+_', '', x) not in Cache.GO_CATEGORY[s_key]:
print(">>>>>>>>>>>>>>>>>>>", x, s_key, re.sub(r'^\d+_', '', x))
exit()
S_cat=[ Cache.GO_CATEGORY[s_key][re.sub(r'^\d+_','', x)] for x in GO_GENE_ENRICH ]
else:
S_cat=[ Cache.GO_CATEGORY[s_key][x] for x in GO_GENE_ENRICH ]
CATEGORY_COUNT=util.unique_count(S_cat)
# reduce is slower
#ALL_GENE=reduce(lambda a,b : a|b, GO_GENE.values())
ALL_GENE=set([x for Y in GO_GENE.values() for x in Y])
#for row in t_v.itertuples():
##for i in t_v.index:
# s_go=row.GO #t_v.ix[i, 'GO']
# S_genes=row.GENES.split(",") #t_v.ix[i, 'GENES'].split(",")
# if not l_use_GPDB:
# ### warning, gene ids not recognized are treated as tax ID 0!!!
# S_genes=[s for s in S_genes if s in C_GENE]
# if len(S_genes)==0: continue
# if len(S_genes)<=Cache.N_TRIVIAL:
# GO_GENE_ENRICH.add(s_go)
# if l_use_GPDB:
# s_cat=Cache.GO_CATEGORY[s_key].get(re.sub(r'^\d+_','',s_go), 0)
# CATEGORY_COUNT[s_cat]=CATEGORY_COUNT.get(s_cat, 0)+1
# GO_GENE[s_go]=set(S_genes)
# ALL_GENE.update(GO_GENE[s_go])
#sw.check("TTTTTTTTT "+str(tax_id))
for k,v in GO_GENE.items():
for s_gene in v:
if s_gene not in GENE_GO:
GENE_GO[s_gene]={k}
else:
GENE_GO[s_gene].add(k)
Cache.ALL_GENE[s_key][tax_id2]=ALL_GENE
Cache.GENE_GO[s_key][tax_id2]=GENE_GO
Cache.TOTAL_GENE_COUNT[s_key][tax_id2]=max(Cache.TOTAL_GENE_COUNT[s_key][tax_id2], len(GENE_GO))
Cache.CATEGORY_COUNT[s_key][tax_id2]=CATEGORY_COUNT
Cache.GO_GENE[s_key][tax_id2]=GO_GENE
Cache.GO_GENE_ENRICH[s_key][tax_id2]=GO_GENE_ENRICH
if l_L1k:
s_path=setting.go['L1000_PATH']
S_gene=util.read_list(s_path+'/L1kAllGenes.txt')
Cache.ALL_GENE[s_key][tax_id]=set(S_gene)
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(S_gene) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, taxid, species_name = None, lineage=None):\n self.genes = dict()\n self.taxid = taxid\n self.species = species_name\n self.lineage = lineage",
"def fetch_by_id(self, taxon):\n res = self.ensembl.get_taxonomy_by_id(taxon)\n return res",
"def tax_id(self, tax_id: str):\n\n self._tax_id = tax_id",
"def init_taxon():\n if not exists('./data/taxdmp.zip'):\n ftp = FTP('ftp.ncbi.nih.gov')\n ftp.login()\n ftp.cwd('pub/taxonomy')\n ftp.retrbinary('RETR taxdmp.zip', open('./data/taxdmp.zip', 'wb').write)\n ftp.quit\n with ZipFile('./data/taxdmp.zip', 'r') as dumpfile:\n dumpfile.extractall(path='./data/')\n taxon_id = dict()\n data = list()\n name = dict()\n specie = list()\n son = dict()\n greatson = dict()\n parent = dict()\n rank = dict()\n global taxon\n taxon = list()\n with open('./data/names.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n if add[0] not in name or add[2] == 'scientific name':\n name[add[0]] = add[1]\n with open('./data/nodes.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n # 1696063|Sarcocystis corvusi||scientific name|\n taxon_id[add[0]] = add[1]\n rank[add[0]] = add[3]\n if add[2] == 'species':\n specie.append(add[0])\n for specie in specie:\n record = [specie, ]\n while taxon_id[specie] != '1':\n record.append(taxon_id[specie])\n specie = taxon_id[specie]\n # if '33090' in record:\n # record.pop()\n # record.pop()\n data.append(record)\n for data in data:\n for n in range(len(data)):\n if data[n] not in parent:\n parent[data[n]] = data[(n + 1):]\n if n == 0:\n continue\n if data[n] not in son:\n son[data[n]] = {data[n - 1], }\n else:\n son[data[n]].add(data[n - 1])\n if data[n] not in greatson:\n greatson[data[n]] = {data[0], }\n else:\n greatson[data[n]].add(data[0])\n for specie in name.items():\n if specie[0] not in son:\n son[specie[0]] = set()\n if specie[0] not in parent:\n parent[specie[0]] = list()\n if specie[0] not in greatson:\n greatson[specie[0]] = set()\n record = [specie[0], name[specie[0]], rank[specie[0]], son[specie[0]], parent[specie[0]], greatson[specie[0]]]\n taxon.append(record)\n\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS taxon (Id TEXT, Name TEXT, Rank TEXT, Son TEXT, Parent TEXT, GreatSon TEXT);')\n for line in taxon:\n son = ' '.join(line[3])\n parent = ' '.join(line[4])\n greatson = ' '.join(line[5])\n cur.execute('INSERT INTO taxon (Id, Name, Rank, Son, Parent, GreatSon) VALUES (?, ?, ?, ?, ?, ?);',\n (line[0], line[1], line[2], son, parent, greatson))\n con.commit()\n cur.close()\n con.close()\n print('Done.\\n')",
"def tax_id(self, tax_id):\n\n self._tax_id = tax_id",
"def get_taxid(xid,conn):\n\n taxid = ('SELECT DISTINCT dx.accession, o.genus, o.species '\n 'FROM feature x, organism o, organism_dbxref od, dbxref dx, db '\n 'WHERE x.organism_id = o.organism_id AND o.organism_id = od.organism_id '\n 'AND od.dbxref_id =dx.dbxref_id AND dx.db_id = db.db_id AND db.name = \\'NCBITaxon\\' '\n 'AND x.uniquename = %s')\n tid = connect(taxid,xid,conn)\n if len(tid) > 0:\n return(tid[0][0],tid[0][1],tid[0][2])\n else:\n return(None)",
"def load_gene_set(self, gene_set:List[str], taxon:str=None):\n self.gene_set = gene_set\n self.taxon = taxon",
"def tax_id(self):\n return self._tax_id",
"def testTaxaData(self):\n try:\n numEukaryota = 0\n numBacteria = 0\n numVirus = 0\n numArchaea = 0\n numOther = 0\n numUnclass = 0\n logger.info(\"Loading taxonomy data\")\n tU = TaxonomyUtils()\n logger.info(\"Done loading taxonomy data\")\n iCount = 0\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n for entryId in entryD:\n for entityId, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n taxId = eD[\"ncbi_taxonomy_id\"] if \"ncbi_taxonomy_id\" in eD else None\n if taxId is None:\n logger.debug(\"Missing taxId entryId %s entityId %s\", entryId, entityId)\n continue\n # lin = tU.getLineage(taxId)\n # nmL = tU.getLineageNames(taxId)\n ok1 = tU.isEukaryota(taxId)\n if ok1:\n numEukaryota += 1\n ok3 = tU.isVirus(taxId)\n if ok3:\n numVirus += 1\n ok2 = tU.isBacteria(taxId)\n if ok2:\n numBacteria += 1\n #\n ok4 = tU.isArchaea(taxId)\n if ok4:\n numArchaea += 1\n #\n ok5 = tU.isOther(taxId)\n if ok5:\n numOther += 1\n #\n ok6 = tU.isUnclassified(taxId)\n if ok6:\n numUnclass += 1\n\n if ok1 and (ok1 and ok2):\n logger.info(\"taxid %r conflicting lineage\", taxId)\n #\n if not ok1 and not ok2 and not ok3 and not ok4 and not ok5 and not ok6:\n logger.info(\"unassigned taxid %r\", taxId)\n\n logger.debug(\"taxId %r entryId %s entityId %s\", taxId, entryId, entityId)\n iCount += 1\n # if iCount > 5000:\n # break\n logger.info(\"Eukaryota %d Bacteria %d Virus %d Archaea %d Other/Syn %r Unclass %d\", numEukaryota, numBacteria, numVirus, numArchaea, numOther, numUnclass)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def tax_id(self) -> str:\n return self._tax_id",
"def _compute_tax_id(self):\n for order in self:\n order.order_line._compute_tax_id()",
"def tax_id(self, tax_id):\n if tax_id is not None and len(tax_id) > 15:\n raise ValueError(\"Invalid value for `tax_id`, length must be less than or equal to `15`\")\n\n self._tax_id = tax_id",
"def __init__(\n self,\n gene_lists,\n taxon,\n requests_per_sec=10,\n padj_threshold=0.05,\n log2_fc_threshold=0,\n fc_threshold=None,\n enrichment_fdr=0.05,\n annot_col=\"Name\",\n ):\n Ontology.__init__(self)\n PlotGOTerms.__init__(self)\n\n self.gene_lists = gene_lists\n self.enrichment_fdr = enrichment_fdr\n\n # users can set the fold change threshold in the log2 scale or normal\n # scale.\n assert log2_fc_threshold >= 0, \"log2 fc_threshold must be >=0\"\n if fc_threshold is not None:\n log2_fc_threshold = pylab.log2(fc_threshold)\n\n from bioservices import panther, quickgo\n\n self.quick_go_graph = QuickGOGraph()\n\n self.panther = panther.Panther(cache=True)\n self.valid_taxons = [x[\"taxon_id\"] for x in self.panther.get_supported_genomes()]\n self.summary = {}\n\n self._taxon = None\n self.taxon = taxon\n\n self.quickgo = quickgo.QuickGO(cache=True)\n self.quickgo.requests_per_sec = requests_per_sec\n self.quickgo.services.settings.TIMEOUT = 120\n\n self._ancestors = {\n \"MF\": \"GO:0003674\",\n \"CC\": \"GO:0005575\",\n \"BP\": \"GO:0008150\",\n \"SLIM_MF\": \"GO:0003674\",\n \"SLIM_CC\": \"GO:0005575\",\n \"SLIM_BP\": \"GO:0008150\",\n }\n self.ontologies.extend(\n [\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_MF\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_BP\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_CC\",\n \"ANNOT_TYPE_ID_PANTHER_PC\",\n \"ANNOT_TYPE_ID_PANTHER_PATHWAY\",\n \"ANNOT_TYPE_ID_REACTOME_PATHWAY\",\n ]\n )\n\n self.ontology_aliases.extend(\n [\n \"SLIM_MF\",\n \"SLIM_BP\",\n \"SLIM_CC\",\n \"PROTEIN\",\n \"PANTHER_PATHWAY\",\n \"REACTOME_PATHWAY\",\n ]\n )\n\n # panther accepts onyl ~2-3000 genes at max. Let us restrict the analysis\n # to the first 2000 genes based on their log2 fold change 2000 + and\n # 2000 negatives\n\n msg = \"Ignoring DEGs with adjusted p-value > {} and fold change in [{}, {}]\".format(\n padj_threshold, 1 / (2**log2_fc_threshold), 2**log2_fc_threshold\n )\n logger.info(msg)\n\n # used in report module\n self.summary[\"fold_change_range\"] = [\n 1 / (2**log2_fc_threshold),\n 2**log2_fc_threshold,\n ]\n self.summary[\"padj_threshold\"] = padj_threshold\n\n fc_threshold = log2_fc_threshold\n\n for x in sorted(gene_lists.keys()):\n\n N = len(gene_lists[x])\n logger.info(f\"Starting with {N} genes from category '{x}'\")\n\n self.summary[\"DGE_after_filtering\"] = {k: len(v) for k, v in gene_lists.items()}\n\n self.enrichment = {}\n self.stats = {}\n self.obsolets = []",
"def get_gene(self, gene_id, source=\"eid\"):\n\n gene_id = str(gene_id)\n\n \n try:\n valid_gene = self.gene_cache[self.source_cache[source][gene_id]]\n return valid_gene\n except KeyError:\n pass\n \n valid_eid = None\n\n if source == \"eid\":\n try:\n eid = int(gene_id)\n except ValueError:\n raise ValueError(\"gene_id must be an integer if source \" + \\\n \"is \\\"Entrez\\\"\")\n\n self.cursor.execute(\"\"\"\n SELECT EXISTS(\n SELECT * \n FROM genes \n WHERE entrez_id = %(eid)s\n )\"\"\", {'eid': eid})\n if self.cursor.fetchone()[0] == 1:\n valid_eid = eid\n\n else:\n\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM discontinued_genes\n WHERE discontinued_id = %(eid)s\"\"\", {'eid': eid})\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n raise KeyError(\"Entrez ID %d was not found in the database\" % eid)\n\n elif source == \"symbol\":\n\n args = {\"symbol\": gene_id}\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM genes\n WHERE symbol = %(symbol)s\"\"\", args)\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM discontinued_genes\n WHERE discontinued_symbol = %(symbol)s\"\"\", args)\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM gene_synonyms\n WHERE symbol = %(symbol)s\"\"\", args)\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n raise KeyError(\"Symbol %s not found in the database\" % gene_id)\n else:\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM gene_xrefs\n WHERE Xref_db = %(db)s\n AND Xref_id = %(id)s\"\"\", {'db': source, 'id': gene_id})\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n raise KeyError((\"Gene ID %s from source %s was not found \" + \\\n \"in the database\") % (gene_id, source))\n\n if valid_eid is None:\n raise KeyError(\"Unable to find a valid Entrez ID for %s from %s\" % (gene_id, source))\n\n valid_eid = int(valid_eid)\n if source not in self.source_cache:\n self.source_cache[source] = {}\n self.source_cache[source][gene_id] = valid_eid\n self.gene_cache[valid_eid] = Gene(valid_eid, self)\n\n return self.gene_cache[valid_eid]",
"def load_taxa(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.load_taxa',\n [params], self._service_ver, context)",
"def get_functional_classification(self, mygenes, taxon): # pragma: no cover ; too slow\n logger.warning(\"Very slow. Please wait\")\n if isinstance(mygenes, list):\n mygenes = \",\".join(mygenes)\n\n res = self.panther.get_mapping(mygenes, taxon)\n res = res[\"mapped\"]\n\n for i, item in tqdm(enumerate(res)):\n accession = item[\"accession\"]\n res[i][\"persistent_id\"] = self._get_name_given_accession(accession)\n return res",
"def load_taxdict():\n tax = {}\n with open(\"../../data/taxonomy/tree_taxid.txt\", 'r') as file:\n for line in file:\n current_line = line.split() \n current_taxid = current_line[0]\n current_name = current_line[1]\n tax[current_taxid] = current_name \n\n return tax",
"def test_make_core_taxa(self):\n basic_test_runner(self, 'core_taxa', nrows=-1)",
"def get_taxonomy_info(self,taxonomy_path):\n Taxon = 'Taxon'\n p1 = '.*[Tt][Aa][Xx][Oo].*'\n pattern = re.compile(p1)\n feature_id = 'Feature ID'\n p2 = '.*[Ii][Dd].*'\n pattern2 = re.compile(p2)\n \n try:\n taxonomy_df = pd.read_csv(taxonomy_path, sep= '\\t')\n print('valid taxonomy file')\n except:\n print('unvalid taxonomy path')\n for ele in taxonomy_df.columns:\n if len(pattern.findall(ele)) >0:\n if pattern.findall(ele)[0]>3 :\n Taxon = ele\n else:\n pass\n break\n for ele in taxonomy_df.columns:\n if len(pattern2.findall(ele)) > 0:\n if len(pattern2.findall(ele)[0])>3 :\n feature_id = ele\n else:\n pass\n break\n taxonomy_df = taxonomy_df.set_index(feature_id)\n self.lineage = taxonomy_df[Taxon]",
"def test_client_tax_information_retrieve(self):\n pass",
"def name_to_taxid(name):\n name = name.replace(' ', '+').strip()\n search = Entrez.esearch(term=name, db='taxonomy', retmode='xml')\n return Entrez.read(search)['IdList'][0]",
"def species_lookup_by_taxonid(self, taxon_id):\n return self.species_name_lookup(taxon_id)",
"def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details",
"def set_tax_id_label(self, tax_id_label):\n self.tax_id_label = tax_id_label",
"def load_gene_ontology(self, file_path):\n\t\tpass",
"def test_taxonomy(n=5):\n ecoli_file = join(this_dir, \"e_coli_core.xml.gz\")\n ids = [\"Escherichia_coli_{}\".format(i) for i in range(1, n + 1)]\n taxa = pd.DataFrame({\"id\": ids})\n taxa[\"genus\"] = \"Escherichia\"\n taxa[\"species\"] = \"Eschericia coli\"\n taxa[\"reactions\"] = 95\n taxa[\"metabolites\"] = 72\n taxa[\"file\"] = ecoli_file\n return taxa",
"def entrez_gene_id(gene: GeneInfo):\n if (gene.identifiers is not None and gene.identifiers.entrez is not None):\n if (gene.identifiers.entrez.startswith('NCBIGene:')):\n return gene.identifiers.entrez[9:]\n else:\n return gene.identifiers.entrez\n return None",
"def get_full_tax(idx):\n logging.info('Compiling the taxonomy for all genomes...')\n tax_idx = collections.defaultdict(dict)\n for cluster_id,v in idx.items():\n for tax,vv in v.items():\n for genome_id,x in vv.items():\n tax_idx[tax][genome_id] = x['genome_len']\n n_genomes = 0\n for tax,v in tax_idx.items():\n n_genomes += len(v.keys())\n logging.info(' Total number of genomes: {}'.format(n_genomes))\n # return\n return tax_idx",
"def _load_gene(self, gene, batch) -> None:\n try:\n assert Gene(**gene)\n except pydantic.error_wrappers.ValidationError as e:\n logger.warning(f\"Unable to load {gene} due to validation error: \"\n f\"{e}\")\n else:\n concept_id = gene['concept_id'].lower()\n gene['label_and_type'] = f\"{concept_id}##identity\"\n gene['src_name'] = \\\n PREFIX_LOOKUP[gene['concept_id'].split(':')[0].lower()]\n gene['item_type'] = 'identity'\n\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in gene:\n value = gene[attr_type]\n if value is not None and value != []:\n if isinstance(value, str):\n items = [value.lower()]\n else:\n gene[attr_type] = list(set(value))\n items = {item.lower() for item in value}\n for item in items:\n batch.put_item(Item={\n 'label_and_type': f\"{item}##{item_type}\",\n 'concept_id': concept_id,\n 'src_name': gene['src_name'],\n 'item_type': item_type\n })\n else:\n del gene[attr_type]\n batch.put_item(Item=gene)\n self._processed_ids.append(concept_id)",
"def get_taxa(taxa_fname, sample_ids_kept=None):\r\n # future: pass in open file object instead\r\n taxa_f = open(taxa_fname, 'U')\r\n\r\n sample_ids, otu_ids, otu_table, lineages =\\\r\n parse_otu_table(taxa_f, count_map_f=float, remove_empty_rows=True)\r\n if sample_ids_kept:\r\n sam_idxs = [sample_ids.index(sam) for sam in sample_ids_kept]\r\n otu_table = otu_table[:, sam_idxs]\r\n return otu_ids, otu_table"
] | [
"0.6346636",
"0.625637",
"0.6142789",
"0.61208576",
"0.60079664",
"0.6005236",
"0.5974465",
"0.59199226",
"0.58072674",
"0.57864153",
"0.5702004",
"0.5685285",
"0.56351686",
"0.55774224",
"0.5565969",
"0.5563953",
"0.5563647",
"0.5512005",
"0.55049115",
"0.5504886",
"0.54905075",
"0.54894483",
"0.5480695",
"0.5460665",
"0.5459967",
"0.54398817",
"0.5435387",
"0.543057",
"0.5421524",
"0.54182863"
] | 0.6361208 | 0 |
return a dict of GO and the number of genes appear in each GO if S_go is provided, only counts for those go terms | def go_count(self, S_hit, S_go=None):
c={}
if S_go is not None: S_go=set(S_go)
for x in S_hit:
Y=self.GENE_GO.get(x, [])
if S_go is not None: Y = set(Y).intersection(S_go)
for y in Y:
c[y]=c.get(y,0)+1
return c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gene_count(self, S_go, S_gene=None):\n c={}\n if S_gene is not None: S_gene=set(S_gene)\n for x in S_go:\n Y=self.GO_GENE.get(x, [])\n if S_gene is not None: Y = set(Y).intersection(S_gene)\n for y in self.GO_GENE.get(x, []):\n c[y]=c.get(y,0)+1\n return c",
"def membership_count(self, S_go, S_gene):\n return self.go_count(S_gene, S_go)\n #c=self.go_count(S_gene)\n #if type(S_go)!=set:\n # S_go=set(S_go)\n #c={ k:v for k,v in c.items() if k in S_go }\n #return c",
"def result_count(sol,Nt,G):\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq",
"def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass",
"def result_count(sol,Nt,G):\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq",
"def count_go_term(self, query_term=\"growth\"):\n count = 0\n for go_term in self.dict_go.values():\n if query_term in go_term.name:\n count += 1\n return count",
"def analysis(self, S_hit, S_score=None, S_go=None, SRC_GENE=None, min_overlap=3, min_enrichment=0, p_cutoff=0.01, n_CPU=0, l_rsa_keep_most=True, S_go_category=None, l_background_by_ontology=False):\n\n def go_filtered(S_go, S_go_category):\n return [x for x in S_go if self.get_category_id(x) in S_go_category]\n\n S_all_go_filtered=[]\n def all_go_filtered(S_go_category):\n if len(S_all_go_filtered)==0:\n S_go=self.GO_GENE_ENRICH\n S_all_go_filtered.append(go_filtered(S_go, S_go_category))\n return S_all_go_filtered[0]\n\n N_go=0\n if S_go_category is not None and len(S_go_category)>0:\n # hard code for now, to be fixed later\n if type(S_go_category) in (int, str):\n S_go_category=[S_go_category]\n S_go_category={int(x) for x in S_go_category if self.CATEGORY_COUNT.get(x,0)>0 }\n for x in S_go_category:\n N_go+=self.CATEGORY_COUNT[x]\n else:\n N_go=sum(self.CATEGORY_COUNT.values())\n\n l_multi_list=type(S_hit) is dict\n if S_go is None:\n if l_multi_list:\n S_go={}\n for k in S_hit.keys():\n S_go[k]=all_go_filtered(S_go_category)\n else:\n S_go=all_go_filtered(S_go_category)\n else:\n if l_multi_list:\n for k in S_hit.keys():\n if S_go.get(k, None) is None:\n S_go[k]=all_go_filtered(S_go_category)\n else:\n S_go[k]=go_filtered(S_go[k], S_go_category)\n else:\n S_go=go_filtered(S_go, S_go_category)\n\n if SRC_GENE is not None:\n if type(SRC_GENE) is list:\n SRC_GENE=set(SRC_GENE)\n SRC_GENE=self.ALL_GENE.intersection(SRC_GENE) # remove genes from background, if it is not in self.ALL_GENE\n N_total=len(SRC_GENE) #self.ALL_GENE.intersection(SRC_GENE))\n elif l_background_by_ontology:\n # GeneGo uses this\n if l_multi_list:\n X=set()\n for x in S_go.values():\n X.add(set(x))\n src_genes=self.gene_count(list(X))\n else:\n src_genes=self.gene_count(S_go)\n N_total=len(src_genes)\n SRC_GENE=set(src_genes.keys())\n else:\n if self.is_L1000():\n N_total=len(self.ALL_GENE)\n else:\n N_total=len(self.GENE_GO) #len(self.ALL_GENE), only count genes that has GO annotation\n #N_total=len(self.ALL_GENE)\n # prefiltering uninteresting GO terms\n # already converted to multiple hit list situation\n sw=util.StopWatch()\n L=[] # list of (S_hit, s_go)\n\n def spread_input(S_hit, S_go, key):\n #S_hit, S_go, key=(X[0], X[1], X[2])\n # may not worth it\n #c_cnt=self.go_count(S_hit, S_go)\n #S_go=[s_go for s_go in S_go if c_cnt.get(s_go,0)>=min_overlap ]\n # minimum size\n MIN_BATCH=2000\n S_go2=util.split(S_go, chunk_size=MIN_BATCH)\n return [(key, S_hit, x) for x in S_go2]\n\n #sw.check('To spreadout')\n if l_multi_list:\n #mp=parallel.MP()\n #m=1 if len(S_hit)<=3 else n_CPU\n #mp.start(f=spread_input, n_CPU=m)\n #L=[(X, S_go[k], k) for k,X in S_hit.items() if len(X)>=min_overlap]\n #out=mp.map(L)\n #L=[y for X in out for y in X]\n L=[]\n for k,X in S_hit.items():\n if len(X)<min_overlap: continue\n L.extend(spread_input(X, S_go[k], k))\n random.shuffle(L)\n else:\n if len(S_hit)>=min_overlap:\n L=spread_input(S_hit, S_go, 'Default')\n\n if self.eg is None:\n self.eg=ez.EntrezGene(tax_id=self.tax_id, l_use_GPDB=self.GPDB)\n if n_CPU==0: n_CPU=1\n #print \">>>>>>>>>>>>>>\", len(L)\n S_chunk=util.split(L, n_chunk=n_CPU)\n #sw.check('Spreadout tasks: %d' % len(L))\n\n def analyze(L):\n \"\"\"L is a list of [[s_name, S_hit, s_go]], s_go can also be a list\"\"\"\n rslt=[]\n #p=util.Progress(len(L))\n i=0\n import multiprocessing\n s_pid=str(multiprocessing.current_process().pid)\n for s_name, S_hit, S_go in L:\n i+=1\n #if (i % 50000): p.check(i, s_pid)\n if type(S_go) is str: S_go=[S_go]\n for s_go in S_go:\n if 
s_go not in self.GO_GENE: continue\n if S_score is None:\n c=self.analysis_go(s_go, S_hit, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff)\n else:\n c=self.analysis_go_RSA(s_go, S_hit, S_score, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff, l_keep_most=l_rsa_keep_most)\n if c is None:\n continue\n c['Name']=s_name\n if min_enrichment>0 and c['Enrichment']<min_enrichment: continue\n if p_cutoff<1 and 10**c['LogP']>p_cutoff: continue\n c['Description']= self.go_description(s_go)\n S_gene=c['GeneID'].split('|')\n S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]\n c['Hits']='|'.join(S_symbol)\n if 'GeneID_All' in c:\n S_gene=c['GeneID_All'].split('|')\n S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]\n c['Hits_All']='|'.join(S_symbol)\n if self.GPDB:\n c['CategoryID'] = self.get_category_id(c['GO'])\n c['Category'] = self.CATEGORY.get(self.get_category_id(c['GO']))\n c['GO'] = self.get_source_id(c['GO'])\n rslt.append(c)\n return rslt\n out=parallel.parmap(analyze, S_chunk, n_CPU=n_CPU)\n #if n_CPU>1:\n # mp=parallel.MP()\n # mp.start(f=analyze, n_CPU=n_CPU)\n # out=mp.map(S_chunk)\n #else:\n # out=[analyze(x) for x in S_chunk]\n\n #mp.start(n_CPU=n_CPU)\n #sw.check('P-value Calculation')\n #sw.check('P-value Calculation Done')\n rslt=[]\n for x in out:\n if len(x): rslt.extend(x)\n\n if len(rslt):\n #sw.check('Length: %d' % len(rslt))\n t=pd.DataFrame(rslt)\n #sw.check('Table DONE')\n if S_score is None:\n t=t.sort_values(['LogP','Enrichment','#GeneInGOAndHitList'], ascending=[True,False,False])\n cols = ['Name','GO','Description','LogP','Enrichment','Z-score','#TotalGeneInLibrary',\n '#GeneInGO','#GeneInHitList','#GeneInGOAndHitList','%InGO','STDV %InGO','GeneID','Hits']\n else:\n t=t.sort_values(['LogP','Enrichment','#HitInGORemain','#GeneInGOAndHitList'], ascending=[True,False,False,False])\n cols = ['Name','GO','Description','LogP','Enrichment','Z-score','#TotalGeneInLibrary',\n '#GeneInGO','#HitRemain','#HitInGORemain','Cutoff','#GeneInHitList','#GeneInGOAndHitList','%InGO','STDV %InGO','GeneID','Hits','GeneID_All','Hits_All']\n if self.GPDB:\n #cols.insert(1,'field1')\n cols.insert(1,'CategoryID')\n cols.insert(1,'Category')\n #sw.check('sorted DONE')\n t=t.reindex(columns=cols)\n # FDR\n #print \">>> N_go: \", N_go\n #sw.check('reindex DONE')\n t['Log(q-value)']=np.log10(np.clip(stats.adjust_p(np.power(10, t.LogP.values), N=N_go), 1e-100, 1.0))\n #sw.check('q-value DONE')\n if not l_multi_list:\n t.drop('Name', axis=1, inplace=True)\n return t\n else:\n return None",
"def get_stats(sents):\n import os\n import re \n # first, put the relevant trees into temp file\n if 'outname' in kwargs.keys():\n to_open = 'tmp-%s.txt' % kwargs['outname']\n else:\n to_open = 'tmp.txt'\n with open(to_open, \"w\") as fo:\n for sent in sents:\n statsmode_results['Sentences'] += 1\n fo.write(sent.parse_string.rstrip().encode('utf-8', errors = 'ignore') + '\\n')\n deps = get_deps(sent, dep_type)\n numpass = len([x for x in deps.links if x.type.endswith('pass')])\n statsmode_results['Passives'] += numpass\n statsmode_results['Tokens'] += len(sent.tokens)\n statsmode_results['Words'] += len([w for w in sent.tokens if w.word.isalnum()])\n #statsmode_results['Unique words'] += len(set([w.word.lower() for w in sent.tokens if w.word.isalnum()]))\n #statsmode_results['Unique lemmata'] += len(set([w.lemma.lower() for w in sent.tokens if w.word.isalnum()]))\n\n # count moods via trees (/\\?/ !< __)\n from dictionaries.process_types import processes\n from corpkit.other import as_regex\n tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',\n #'Open interrogative': r'ROOT < SBARQ <<- (/\\?/ !< __)', \n #'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\\?/ !< __))',\n 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',\n 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',\n 'Open class words': r'/^(NN|JJ|VB|RB)/ < __',\n 'Closed class words': r'__ !< __ !> /^(NN|JJ|VB|RB)/',\n 'Clauses': r'/^S/ < __',\n 'Interrogative': r'ROOT << (/\\?/ !< __)',\n 'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.mental, boundaries = 'w'),\n 'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.verbal, boundaries = 'w'),\n 'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.relational, boundaries = 'w')}\n\n for name, q in sorted(tregex_qs.items()):\n res = tregex_engine(query = q, \n options = ['-o', '-C'], \n corpus = to_open, \n root = root)\n statsmode_results[name] += int(res)\n global numdone\n numdone += 1\n if root:\n root.update()\n if not root:\n tot_string = str(numdone + 1) + '/' + str(total_files * len(tregex_qs.keys()))\n if 'outname' in kwargs.keys():\n tot_string = '%s: %s' % (kwargs['outname'], tot_string)\n animator(p, numdone, tot_string, **par_args)\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set((numdone * 100.0 / (total_files * len(tregex_qs.keys())) / denom) + startnum)\n os.remove(to_open)",
"def count_houses_delivered_with_robot(s):\n s_santa, s_robot = s[::2], s[1::2]\n deliveries_santa = make_deliveries(s_santa)\n deliveries_robot = make_deliveries(s_robot)\n all_deliveries = combine_dicts(deliveries_santa, deliveries_robot, lambda x,y: x+y, 0)\n return len(all_deliveries)",
"def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults",
"def get_GO_presence_labels(genes_of_interest, min_GO_size=200, max_GO_size=300):\n genes = pd.Series(genes_of_interest)\n go_group_presence = {}\n\n for GO in go2geneIDs:\n gene_ids = go2geneIDs[GO]\n\n # boolean vector (length is num of genes in embedding)\n in_go_group_vector = genes.isin(gene_ids)\n\n if (in_go_group_vector.sum() > min_GO_size) & (in_go_group_vector.sum() < max_GO_size):\n go_group_presence[GO] = in_go_group_vector\n\n result = pd.DataFrame(go_group_presence)\n result.index = genes\n result.index.name = 'entrezgene'\n return result",
"def analysis_go_RSA(self, s_go, S_hit, S_score, N_total=0, SRC_GENE=None, min_overlap=3, p_cutoff=0.01, l_keep_most=True):\n c={'GO':s_go, '#TotalGeneInLibrary':N_total, '#GeneInGO':0, '#GeneInHitList':0, '#GeneInGOAndHitList':0, 'Cutoff':None, '#HitRemain':0, '#HitInGORemain':0, 'LogP':0.0, 'Enrichment':0}\n #if SRC_GENE is not None:\n # print \"SRC_GENE: \"+str(len(SRC_GENE))\n S_gene=self.GO_GENE[s_go]\n if len(S_gene)>=Cache.N_TRIVIAL:\n return None\n if not N_total:\n N_total=len(self.GENE_GO) #len(self.ALL_GENE), only count genes that has GO annotation\n t_hit=pd.DataFrame(data={'Hit':S_hit, 'Score':S_score})\n if SRC_GENE is not None:\n S_gene=S_gene.intersection(SRC_GENE)\n t_hit=t_hit[ t_hit.Hit.apply(lambda x: x in SRC_GENE) ]\n S_hit=set(t_hit.Hit)\n else:\n S_hit=set(S_hit)\n t_hit.sort_values('Score', inplace=True)\n c['#GeneInGO']=len(S_gene)\n c['#GeneInHitList']=len(S_hit)\n if c['#GeneInGO']<min_overlap or c['#GeneInHitList']<min_overlap:\n return None\n S_both=S_gene.intersection(S_hit)\n c['#GeneInGOAndHitList']=len(S_both)\n if c['#GeneInGOAndHitList']<min_overlap:\n return None\n\n I_index=np.arange(len(t_hit))[t_hit.Hit.apply(lambda x: x in S_gene).values]\n I_rank=stats.RSA_rank(t_hit.Score.values, I_index)\n rslt=stats.RSA_score(I_rank, N_total, l_BonferroniCorrection=True, l_keep_most=l_keep_most, p_cutoff=p_cutoff)\n c['#HitInGORemain']=rslt[\"cutoff\"]+1\n if c['#HitInGORemain']<min_overlap: return None\n c['#HitRemain']=I_rank[rslt[\"cutoff\"]]\n c['Cutoff']=t_hit.Score.values[rslt[\"cutoff\"]]\n c['%InGO']=c['#HitInGORemain']*100.0/c['#HitRemain']\n q=min(max(c['%InGO']/100, 1.0/c['#HitRemain']), 1-1.0/c['#HitRemain'])\n c['STDV %InGO']=np.sqrt(q*(1-q)/c['#HitRemain'])*100\n c['Enrichment']=c['%InGO']/100.0*N_total/c['#GeneInGO']\n S=[int(x) for x in S_both]\n S.sort()\n c['GeneID_All']='|'.join([str(x) for x in S])\n S=[int(x) for x in list(t_hit.Hit[: rslt[\"cutoff\"]+1])]\n S.sort()\n c['GeneID']='|'.join([str(x) for x in S])\n c['LogP']=rslt['logP']\n return c",
"def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq",
"def count_genres(type_: str, value_=''):\n count = factory.get_elem_count(Genre, type_, value_)\n return count",
"def count_words_and_dublicates(novel):",
"def get_counts(self):\n counts = {}\n for document in self.docs:\n for word in document:\n if word not in counts.keys():\n counts[word] = 1\n else:\n counts[word] += 1\n return counts",
"def subtype_counts(node_set, G, log=False):\n subtypes = Counter()\n for n in node_set:\n subtype = G.node[n]['subtype']\n subtypes[subtype] += 1\n\n if log:\n for k, v in subtypes.items():\n subtypes[k] = np.log10(v)\n \n return subtypes",
"def count_hits(bh, vc, verbose=False):\n\n hc = {}\n for g in bh:\n hc[g] = {}\n for b in bh[g]:\n hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1\n besthit = None\n bhc = 0\n for h in hc[g]:\n if hc[g][h] > bhc:\n bhc = hc[g][h]\n besthit = h\n #print(f\"{g}\\t{besthit}\\t{bhc}\\t{len(bh[g])}\")\n print(f\"{g}\\t{besthit}\")\n\n return hc",
"def count_freq(self, types=1):\n count_dict = {}\n if types == 1:\n for cat in self.categories:\n num_images = sum(\n [1 for i in self.data['annotations'] if i['category_id'] == self.cats_idx[cat]])\n count_dict[cat] = num_images\n elif types == 2:\n pass\n\n return count_dict",
"def test_count_genomic_types(self):\n \n result, bed_result = parse_AS_STRUCTURE_dict(\"test\", clipper.test_dir())\n result = count_genomic_types(result)\n \n self.assertDictEqual(result, {\"CE:\" : 14})",
"def count_pathologies(graph):\n return Counter(_pathology_iterator(graph))",
"def make_global_state(self, shreds_tags):\n doc_counts = collections.defaultdict(int)\n\n for doc, tags in shreds_tags.items():\n for tag in tags:\n doc_counts[tag] += 1\n\n num_docs = float(len(shreds_tags))\n\n idf = {}\n for tag, count in doc_counts.items():\n idf[tag] = math.log(num_docs / count)\n return {\n 'idf_map': idf,\n 'all_terms': sorted(idf.keys()),\n }",
"def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)",
"def get_count_words(novel, words):\n dic_word_counts = {}\n for word in words:\n dic_word_counts[word] = novel.get_count_of_word(word)\n return dic_word_counts",
"def _char_and_word_ngrams_counts(sentence: str, n_char_order: int, n_word_order: int, lowercase: bool) ->Tuple[Dict[int, Dict[Tuple[str, ...], Tensor]], Dict[int, Dict[Tuple[str, ...], Tensor]]]:\n if lowercase:\n sentence = sentence.lower()\n char_n_grams_counts = _ngram_counts(_get_characters(sentence, whitespace), n_char_order)\n word_n_grams_counts = _ngram_counts(_get_words_and_punctiation(sentence), n_word_order)\n return char_n_grams_counts, word_n_grams_counts",
"def count_synonymous(self):\n if not self.is_coding():\n log_message(\n logging_callback=logging.warning,\n msg=\"Cannot count synonymous mutations in noncoding data\",\n extra={\"oname\": self.name},\n )\n return\n\n if self.check_store(\"/main/synonymous/counts\"):\n return\n\n log_message(\n logging_callback=logging.info,\n msg=\"Counting synonymous variants\",\n extra={\"oname\": self.name},\n )\n df_dict = dict()\n\n for variant, count in self.store[\"/main/variants/counts\"].iterrows():\n if variant == WILD_TYPE_VARIANT:\n df_dict[variant] = count[\"count\"]\n else:\n variant = protein_variant(variant)\n if len(variant) == 0:\n variant = SYNONYMOUS_VARIANT\n try:\n df_dict[variant] += count[\"count\"]\n except KeyError:\n df_dict[variant] = count[\"count\"]\n\n self.save_counts(\"synonymous\", df_dict, raw=False)\n del df_dict",
"def count(words):\n word_count = {}\n num_words = 0\n unique_words = 0\n for word in words:\n num_words += 1\n if word_count.has_key(word):\n word_count[word] += 1\n else:\n word_count[word] = 1\n unique_words += 1\n word_count[\"total\"] = num_words\n word_count[\"unique\"] = unique_words\n return word_count",
"def get_gc_count(dataset):\n\n gc_count_dict = {}\n\n for sequence in SeqIO.parse(dataset, 'fasta'):\n c_count = sequence.seq.count('C')\n g_count = sequence.seq.count('G')\n gc_count = ((c_count + g_count)/len(sequence))*100\n gc_count_dict[sequence.id] = gc_count\n\n\n return gc_count_dict",
"def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter",
"def clothing_type_count(clothes_list):\n types_count = {}\n for garment in clothes_list:\n if garment.db.clothing_type:\n type = garment.db.clothing_type\n if type not in types_count.keys():\n types_count[type] = 1\n else:\n types_count[type] += 1\n return types_count"
] | [
"0.8170838",
"0.748599",
"0.59387326",
"0.5920755",
"0.591932",
"0.58161604",
"0.5669939",
"0.5669421",
"0.56335974",
"0.5606092",
"0.55677086",
"0.54992825",
"0.5486521",
"0.5461191",
"0.54585624",
"0.54531324",
"0.5440361",
"0.5438768",
"0.5429132",
"0.5395651",
"0.5392278",
"0.5392177",
"0.5339845",
"0.53387517",
"0.5332772",
"0.5302133",
"0.529536",
"0.52808875",
"0.52425224",
"0.5233506"
] | 0.7851776 | 1 |
return a dict of Gene and the number of GOs that appear for each gene; if S_gene is provided, only counts for those genes | def gene_count(self, S_go, S_gene=None):
c={}
if S_gene is not None: S_gene=set(S_gene)
for x in S_go:
Y=self.GO_GENE.get(x, [])
if S_gene is not None: Y = set(Y).intersection(S_gene)
for y in Y:
c[y]=c.get(y,0)+1
return c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def membership_count(self, S_go, S_gene):\n return self.go_count(S_gene, S_go)\n #c=self.go_count(S_gene)\n #if type(S_go)!=set:\n # S_go=set(S_go)\n #c={ k:v for k,v in c.items() if k in S_go }\n #return c",
"def go_count(self, S_hit, S_go=None):\n c={}\n if S_go is not None: S_go=set(S_go)\n for x in S_hit:\n Y=self.GENE_GO.get(x, [])\n if S_go is not None: Y = set(Y).intersection(S_go)\n for y in Y:\n c[y]=c.get(y,0)+1\n return c",
"def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes = gene.split(\",\")\r\n for g in genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()",
"def routine():\n genes = g.genes\n gene_db = db['ncbi_gene_docs']\n for gene in genes:\n count = gene_db.count({\"gene_id\": gene})\n if count is not 1:\n logger.debug(\"FixMe: {0};\\tCount: {1}\".format(gene, count))",
"def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)",
"def get_gc_count(dataset):\n\n gc_count_dict = {}\n\n for sequence in SeqIO.parse(dataset, 'fasta'):\n c_count = sequence.seq.count('C')\n g_count = sequence.seq.count('G')\n gc_count = ((c_count + g_count)/len(sequence))*100\n gc_count_dict[sequence.id] = gc_count\n\n\n return gc_count_dict",
"def count_synonymous(self):\n if not self.is_coding():\n log_message(\n logging_callback=logging.warning,\n msg=\"Cannot count synonymous mutations in noncoding data\",\n extra={\"oname\": self.name},\n )\n return\n\n if self.check_store(\"/main/synonymous/counts\"):\n return\n\n log_message(\n logging_callback=logging.info,\n msg=\"Counting synonymous variants\",\n extra={\"oname\": self.name},\n )\n df_dict = dict()\n\n for variant, count in self.store[\"/main/variants/counts\"].iterrows():\n if variant == WILD_TYPE_VARIANT:\n df_dict[variant] = count[\"count\"]\n else:\n variant = protein_variant(variant)\n if len(variant) == 0:\n variant = SYNONYMOUS_VARIANT\n try:\n df_dict[variant] += count[\"count\"]\n except KeyError:\n df_dict[variant] = count[\"count\"]\n\n self.save_counts(\"synonymous\", df_dict, raw=False)\n del df_dict",
"def analysis_go_RSA(self, s_go, S_hit, S_score, N_total=0, SRC_GENE=None, min_overlap=3, p_cutoff=0.01, l_keep_most=True):\n c={'GO':s_go, '#TotalGeneInLibrary':N_total, '#GeneInGO':0, '#GeneInHitList':0, '#GeneInGOAndHitList':0, 'Cutoff':None, '#HitRemain':0, '#HitInGORemain':0, 'LogP':0.0, 'Enrichment':0}\n #if SRC_GENE is not None:\n # print \"SRC_GENE: \"+str(len(SRC_GENE))\n S_gene=self.GO_GENE[s_go]\n if len(S_gene)>=Cache.N_TRIVIAL:\n return None\n if not N_total:\n N_total=len(self.GENE_GO) #len(self.ALL_GENE), only count genes that has GO annotation\n t_hit=pd.DataFrame(data={'Hit':S_hit, 'Score':S_score})\n if SRC_GENE is not None:\n S_gene=S_gene.intersection(SRC_GENE)\n t_hit=t_hit[ t_hit.Hit.apply(lambda x: x in SRC_GENE) ]\n S_hit=set(t_hit.Hit)\n else:\n S_hit=set(S_hit)\n t_hit.sort_values('Score', inplace=True)\n c['#GeneInGO']=len(S_gene)\n c['#GeneInHitList']=len(S_hit)\n if c['#GeneInGO']<min_overlap or c['#GeneInHitList']<min_overlap:\n return None\n S_both=S_gene.intersection(S_hit)\n c['#GeneInGOAndHitList']=len(S_both)\n if c['#GeneInGOAndHitList']<min_overlap:\n return None\n\n I_index=np.arange(len(t_hit))[t_hit.Hit.apply(lambda x: x in S_gene).values]\n I_rank=stats.RSA_rank(t_hit.Score.values, I_index)\n rslt=stats.RSA_score(I_rank, N_total, l_BonferroniCorrection=True, l_keep_most=l_keep_most, p_cutoff=p_cutoff)\n c['#HitInGORemain']=rslt[\"cutoff\"]+1\n if c['#HitInGORemain']<min_overlap: return None\n c['#HitRemain']=I_rank[rslt[\"cutoff\"]]\n c['Cutoff']=t_hit.Score.values[rslt[\"cutoff\"]]\n c['%InGO']=c['#HitInGORemain']*100.0/c['#HitRemain']\n q=min(max(c['%InGO']/100, 1.0/c['#HitRemain']), 1-1.0/c['#HitRemain'])\n c['STDV %InGO']=np.sqrt(q*(1-q)/c['#HitRemain'])*100\n c['Enrichment']=c['%InGO']/100.0*N_total/c['#GeneInGO']\n S=[int(x) for x in S_both]\n S.sort()\n c['GeneID_All']='|'.join([str(x) for x in S])\n S=[int(x) for x in list(t_hit.Hit[: rslt[\"cutoff\"]+1])]\n S.sort()\n c['GeneID']='|'.join([str(x) for x in S])\n c['LogP']=rslt['logP']\n return c",
"def count_genes(node_id):\n node = tree.node(node_id)\n\n if options.loglevel >= 6:\n options.stdlog.write(\"# node_id=%i\\n\" % node_id)\n if options.loglevel >= 10:\n options.stdlog.write(\"# sets=%s\\n\" % (str(genes)))\n\n # species in pattern\n num_species_in_pattern = len(positive_set)\n\n if node.succ:\n # process non-leaf node\n for s in node.succ:\n\n # propagate: terminated nodes force upper nodes to terminate\n # (assigned to None).\n if not genes[s]:\n genes[node_id] = None\n return\n\n # total number of genes at node\n num_genes_at_node = 0\n # total number of species at node\n num_species_at_node = 0\n\n # compute new gene set for each species at node\n for x in positive_set:\n genes[node_id][x] = genes[node_id][x].union(genes[s][x])\n\n num_genes_for_species = len(genes[node_id][x])\n if exit_function(num_genes_for_species):\n genes[node_id] = None\n return\n num_genes_at_node += num_genes_for_species\n if num_genes_for_species:\n num_species_at_node += 1\n\n if options.loglevel >= 6:\n print \"node=\", node_id, \"species_at_node\", num_species_at_node, \"genes_at_node=\", num_genes_at_node, \\\n \"num_genes_for_species=\", num_genes_for_species, \"ngenes=\", sum(\n map(lambda x: len(x), genes[node_id]))\n options.stdlog.write(\n \"# genes at node %i\\t%s\\n\" % (node_id, genes[node_id]))\n if outgroups:\n print sum([len(genes[node_id][x]) for x in outgroups])\n print check_outgroup_function(genes[node_id])\n\n # check stop criterion\n if total_species_function(num_species_at_node, num_species_in_pattern):\n # check if positive requirements are fulfilled\n for x in positive_set:\n if not keep_function(len(genes[node_id][x])):\n if options.loglevel >= 6:\n options.stdlog.write(\n \"# keep function false for species %i\\n\" % x)\n break\n else:\n if total_genes_function(num_genes_at_node, num_species_in_pattern):\n if options.loglevel >= 6:\n options.stdlog.write(\"# recording node %i\\n\" % x)\n ortholog_nodes.append((node_id, genes[node_id]))\n genes[node_id] = None\n return\n elif check_outgroup_function(genes[node_id]):\n ortholog_nodes.append((node_id, genes[node_id]))\n genes[node_id] = None\n return\n elif negative_set:\n if total_genes_function(num_genes_at_node, num_species_in_pattern):\n if options.loglevel >= 6:\n options.stdlog.write(\"# recording node %i\\n\" % node_id)\n ortholog_nodes.append((node_id, genes[node_id]))\n\n else:\n # process leaf\n s, t, g, q = parseIdentifier(node.data.taxon, options)\n c = options.org2column[s]\n if c in positive_set:\n genes[node_id][c].add(g)\n elif c in negative_set:\n genes[node_id] = None",
"def num_protogenes(self):\n return len(self.protogenes.keys())",
"def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes",
"def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)",
"def group_data_by_gs(data_table):\n gene_data = collections.defaultdict(lambda: collections.defaultdict(list))\n for _idx, row in data_table.iterrows():\n samp = row['sample']\n gene = row['gene']\n gene_data[gene][samp].append({\n 'muttype': row['type'].strip(),\n 'normalized': row['Normalized'], # NMAF in the manuscript\n 'consequence': row['MissenseConsequence'].strip(),\n })\n return gene_data",
"def test_count_genomic_types(self):\n \n result, bed_result = parse_AS_STRUCTURE_dict(\"test\", clipper.test_dir())\n result = count_genomic_types(result)\n \n self.assertDictEqual(result, {\"CE:\" : 14})",
"def count_kmer(gene_list, codon_seqs, R, kmer_size=3):\n\n kmer = kmer_size\n MM = 'yes'\n\n list_seqfile = list( codon_seqs.keys() )\n kmer_dict = {}\n\n for orf in gene_list:\n if orf in list_seqfile:\n current_seq = np.array(codon_seqs[orf])\n\n for pos in range(len(current_seq) - (kmer + 1) ):\n if MM == 'yes' and orf in list( mm_consensus.keys() ):\n current_mm = mm_consensus[orf]\n if np.all(current_mm[pos:(pos+kmer)]): # check that no kmer position is MM\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n elif MM == 'no':\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n new_dict = {}\n list_redundant = []\n for k in kmer_dict.keys():\n if kmer_dict[k] > R:\n if k not in list_redundant:\n \t list_redundant.append(k)\n \n return list_redundant",
"def count_matching_genes(genome1, genome2):\n count = 0\n\n inno1 = max(genome1.nodes.keys())\n inno2 = max(genome2.nodes.keys())\n\n for i in range(max(inno1, inno2) + 1):\n n1 = genome1.nodes.get(i, None)\n n2 = genome2.nodes.get(i, None)\n if not (n1 is None or n2 is None):\n count += 1\n\n inno1 = max(genome1.connections.keys())\n inno2 = max(genome2.connections.keys())\n\n for i in range(max(inno1, inno2) + 1):\n c1 = genome1.connections.get(i, None)\n c2 = genome2.connections.get(i, None)\n if not (c1 is None or c2 is None):\n count += 1\n\n return count",
"def tallying_genes():\n #Creating a tallying Mechanism of genes with multiple sequences in file and\n # an output file for future alignment of sequences \n blast_hit_results = open('blast_hits_report.txt', 'r')\n gene_dict={}\n\n for line in blast_hit_results:\n data = line.split(\"\\t\")\n \n if line.startswith('SeqID'):\n continue\n else:\n #Test to see if organism in dictionary\n verdict = gene_dict.get(data[6])\n \n if str(verdict) == \"None\":\n #creating new entry\n key = data[6]\n seq_info=str(data[0])+\"|\"+str(data[1])\n counter = 1\n #Value[Counts, Trimmed_Length, Blast Length, Blast_Score, Blast_Percent_Identity]\n value=[data[5], counter, [seq_info]]\n gene_dict.update({key:value})\n else:\n #Fills dictionary based on organism name\n seq_info=str(data[0])+\"|\"+str(data[1])\n gene_dict[data[6]][1]+=1\n gene_dict[data[6]][2].append(seq_info)\n blast_hit_results.close()\n return(gene_dict)",
"def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults",
"def Genes_Per_Genome(Input, Gene_Separator,Contig_Separator):\n Number_Genes = {}\n Gene_Length = {}\n with open(Input) as FastAInput:\n for line in FastAInput:\n if \">\" in line:\n Gene = line.split()[0].replace(\">\",\"\")\n Gene_Length[Gene] = 0\n Genome = Gene.split(Contig_Separator)\n Genome = Contig_Separator.join(Genome[:-1])\n Number_Genes[Genome] = Number_Genes.get(Genome, 0) + 1\n else:\n line = line.strip()\n Gene_Length[Gene] += len(line)\n return (Number_Genes, Gene_Length)",
"def get_GO_presence_labels(genes_of_interest, min_GO_size=200, max_GO_size=300):\n genes = pd.Series(genes_of_interest)\n go_group_presence = {}\n\n for GO in go2geneIDs:\n gene_ids = go2geneIDs[GO]\n\n # boolean vector (length is num of genes in embedding)\n in_go_group_vector = genes.isin(gene_ids)\n\n if (in_go_group_vector.sum() > min_GO_size) & (in_go_group_vector.sum() < max_GO_size):\n go_group_presence[GO] = in_go_group_vector\n\n result = pd.DataFrame(go_group_presence)\n result.index = genes\n result.index.name = 'entrezgene'\n return result",
"def create_gene_dict(self, variants):\n \n # organise the variants into entries for each gene\n genes = {}\n for var in variants:\n # variants (particularly CNVs) can span multiple genes, so we need\n # to check each gene separately, and then collapse duplicates later\n for gene_list in var.get_genes():\n for gene in gene_list:\n if gene not in genes:\n genes[gene] = []\n # add the variant to the gene entry\n genes[gene].append(var)\n \n return genes",
"def get_gene_frequencies(self):\n path = os.path.join(self.parent_path, \"gene_frequencies.txt\")\n with open(path, \"w\") as freqs:\n freqs.write(\"Organism,Gene Frequency\\n\")\n for org, data in self.organisms.items():\n freqs.write(\"{},{}\\n\".format(org, data.get(self.FREQ_KEY)))",
"def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass",
"def analysis(self, S_hit, S_score=None, S_go=None, SRC_GENE=None, min_overlap=3, min_enrichment=0, p_cutoff=0.01, n_CPU=0, l_rsa_keep_most=True, S_go_category=None, l_background_by_ontology=False):\n\n def go_filtered(S_go, S_go_category):\n return [x for x in S_go if self.get_category_id(x) in S_go_category]\n\n S_all_go_filtered=[]\n def all_go_filtered(S_go_category):\n if len(S_all_go_filtered)==0:\n S_go=self.GO_GENE_ENRICH\n S_all_go_filtered.append(go_filtered(S_go, S_go_category))\n return S_all_go_filtered[0]\n\n N_go=0\n if S_go_category is not None and len(S_go_category)>0:\n # hard code for now, to be fixed later\n if type(S_go_category) in (int, str):\n S_go_category=[S_go_category]\n S_go_category={int(x) for x in S_go_category if self.CATEGORY_COUNT.get(x,0)>0 }\n for x in S_go_category:\n N_go+=self.CATEGORY_COUNT[x]\n else:\n N_go=sum(self.CATEGORY_COUNT.values())\n\n l_multi_list=type(S_hit) is dict\n if S_go is None:\n if l_multi_list:\n S_go={}\n for k in S_hit.keys():\n S_go[k]=all_go_filtered(S_go_category)\n else:\n S_go=all_go_filtered(S_go_category)\n else:\n if l_multi_list:\n for k in S_hit.keys():\n if S_go.get(k, None) is None:\n S_go[k]=all_go_filtered(S_go_category)\n else:\n S_go[k]=go_filtered(S_go[k], S_go_category)\n else:\n S_go=go_filtered(S_go, S_go_category)\n\n if SRC_GENE is not None:\n if type(SRC_GENE) is list:\n SRC_GENE=set(SRC_GENE)\n SRC_GENE=self.ALL_GENE.intersection(SRC_GENE) # remove genes from background, if it is not in self.ALL_GENE\n N_total=len(SRC_GENE) #self.ALL_GENE.intersection(SRC_GENE))\n elif l_background_by_ontology:\n # GeneGo uses this\n if l_multi_list:\n X=set()\n for x in S_go.values():\n X.add(set(x))\n src_genes=self.gene_count(list(X))\n else:\n src_genes=self.gene_count(S_go)\n N_total=len(src_genes)\n SRC_GENE=set(src_genes.keys())\n else:\n if self.is_L1000():\n N_total=len(self.ALL_GENE)\n else:\n N_total=len(self.GENE_GO) #len(self.ALL_GENE), only count genes that has GO annotation\n #N_total=len(self.ALL_GENE)\n # prefiltering uninteresting GO terms\n # already converted to multiple hit list situation\n sw=util.StopWatch()\n L=[] # list of (S_hit, s_go)\n\n def spread_input(S_hit, S_go, key):\n #S_hit, S_go, key=(X[0], X[1], X[2])\n # may not worth it\n #c_cnt=self.go_count(S_hit, S_go)\n #S_go=[s_go for s_go in S_go if c_cnt.get(s_go,0)>=min_overlap ]\n # minimum size\n MIN_BATCH=2000\n S_go2=util.split(S_go, chunk_size=MIN_BATCH)\n return [(key, S_hit, x) for x in S_go2]\n\n #sw.check('To spreadout')\n if l_multi_list:\n #mp=parallel.MP()\n #m=1 if len(S_hit)<=3 else n_CPU\n #mp.start(f=spread_input, n_CPU=m)\n #L=[(X, S_go[k], k) for k,X in S_hit.items() if len(X)>=min_overlap]\n #out=mp.map(L)\n #L=[y for X in out for y in X]\n L=[]\n for k,X in S_hit.items():\n if len(X)<min_overlap: continue\n L.extend(spread_input(X, S_go[k], k))\n random.shuffle(L)\n else:\n if len(S_hit)>=min_overlap:\n L=spread_input(S_hit, S_go, 'Default')\n\n if self.eg is None:\n self.eg=ez.EntrezGene(tax_id=self.tax_id, l_use_GPDB=self.GPDB)\n if n_CPU==0: n_CPU=1\n #print \">>>>>>>>>>>>>>\", len(L)\n S_chunk=util.split(L, n_chunk=n_CPU)\n #sw.check('Spreadout tasks: %d' % len(L))\n\n def analyze(L):\n \"\"\"L is a list of [[s_name, S_hit, s_go]], s_go can also be a list\"\"\"\n rslt=[]\n #p=util.Progress(len(L))\n i=0\n import multiprocessing\n s_pid=str(multiprocessing.current_process().pid)\n for s_name, S_hit, S_go in L:\n i+=1\n #if (i % 50000): p.check(i, s_pid)\n if type(S_go) is str: S_go=[S_go]\n for s_go in S_go:\n if 
s_go not in self.GO_GENE: continue\n if S_score is None:\n c=self.analysis_go(s_go, S_hit, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff)\n else:\n c=self.analysis_go_RSA(s_go, S_hit, S_score, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff, l_keep_most=l_rsa_keep_most)\n if c is None:\n continue\n c['Name']=s_name\n if min_enrichment>0 and c['Enrichment']<min_enrichment: continue\n if p_cutoff<1 and 10**c['LogP']>p_cutoff: continue\n c['Description']= self.go_description(s_go)\n S_gene=c['GeneID'].split('|')\n S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]\n c['Hits']='|'.join(S_symbol)\n if 'GeneID_All' in c:\n S_gene=c['GeneID_All'].split('|')\n S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]\n c['Hits_All']='|'.join(S_symbol)\n if self.GPDB:\n c['CategoryID'] = self.get_category_id(c['GO'])\n c['Category'] = self.CATEGORY.get(self.get_category_id(c['GO']))\n c['GO'] = self.get_source_id(c['GO'])\n rslt.append(c)\n return rslt\n out=parallel.parmap(analyze, S_chunk, n_CPU=n_CPU)\n #if n_CPU>1:\n # mp=parallel.MP()\n # mp.start(f=analyze, n_CPU=n_CPU)\n # out=mp.map(S_chunk)\n #else:\n # out=[analyze(x) for x in S_chunk]\n\n #mp.start(n_CPU=n_CPU)\n #sw.check('P-value Calculation')\n #sw.check('P-value Calculation Done')\n rslt=[]\n for x in out:\n if len(x): rslt.extend(x)\n\n if len(rslt):\n #sw.check('Length: %d' % len(rslt))\n t=pd.DataFrame(rslt)\n #sw.check('Table DONE')\n if S_score is None:\n t=t.sort_values(['LogP','Enrichment','#GeneInGOAndHitList'], ascending=[True,False,False])\n cols = ['Name','GO','Description','LogP','Enrichment','Z-score','#TotalGeneInLibrary',\n '#GeneInGO','#GeneInHitList','#GeneInGOAndHitList','%InGO','STDV %InGO','GeneID','Hits']\n else:\n t=t.sort_values(['LogP','Enrichment','#HitInGORemain','#GeneInGOAndHitList'], ascending=[True,False,False,False])\n cols = ['Name','GO','Description','LogP','Enrichment','Z-score','#TotalGeneInLibrary',\n '#GeneInGO','#HitRemain','#HitInGORemain','Cutoff','#GeneInHitList','#GeneInGOAndHitList','%InGO','STDV %InGO','GeneID','Hits','GeneID_All','Hits_All']\n if self.GPDB:\n #cols.insert(1,'field1')\n cols.insert(1,'CategoryID')\n cols.insert(1,'Category')\n #sw.check('sorted DONE')\n t=t.reindex(columns=cols)\n # FDR\n #print \">>> N_go: \", N_go\n #sw.check('reindex DONE')\n t['Log(q-value)']=np.log10(np.clip(stats.adjust_p(np.power(10, t.LogP.values), N=N_go), 1e-100, 1.0))\n #sw.check('q-value DONE')\n if not l_multi_list:\n t.drop('Name', axis=1, inplace=True)\n return t\n else:\n return None",
"def histogram_genres(our_data):\n genre_list = []\n for album in our_data:\n genre_list.extend(genre.strip() for genre in album['genre'].split(','))\n genre_dict = {}\n for genre in genre_list:\n if genre in genre_dict:\n genre_dict[genre] += 1\n else:\n genre_dict[genre] = 1\n return genre_dict",
"def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))",
"def get_singles_counts(sname, seglen, mincounts):\n counts = defaultdict(int)\n with open(sname) as fin:\n infile = csv.DictReader(fin, delimiter='\\t')\n for line in infile:\n ints = int(line['interactions'])\n if ints < mincounts:\n continue\n r1_reg = (\n line['RNA1 chromosome'],int(int(line['Start of RNA1 first read'])/seglen)*seglen,\n line['RNA1 strand'])\n r2_reg = (\n line['RNA2 chromosome'],int(int(line['Start of RNA2 last read'])/seglen)*seglen,\n line['RNA2 strand'])\n counts[r1_reg] += ints\n counts[r2_reg] += ints\n return counts",
"def N_genes_in_dataset(self):\n return len(self.all_genes_in_dataset)",
"def num_species_on_map(self):\n # tot_herbivores = 0\n # tot_carnivores = 0\n # for cells in itertools.chain.from_iterable(self.map):\n # curr_herbivore, curr_carnivore = cells.num_species_per_cell()\n # tot_herbivores += curr_herbivore\n # tot_carnivores += curr_carnivore\n\n return (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))\n\n # (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))",
"def result_count(sol,Nt,G):\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq"
] | [
"0.76964927",
"0.71774846",
"0.62404954",
"0.5983294",
"0.5660781",
"0.5644812",
"0.5643061",
"0.56363577",
"0.56089014",
"0.5598228",
"0.5502627",
"0.5496427",
"0.5490733",
"0.5489337",
"0.5476215",
"0.5476014",
"0.5450471",
"0.5441512",
"0.5429098",
"0.5408223",
"0.540261",
"0.5362888",
"0.53599864",
"0.53104144",
"0.53020376",
"0.5275741",
"0.52749866",
"0.52542925",
"0.52432513",
"0.5239306"
] | 0.8732856 | 0 |
L is a list of [[s_name, S_hit, s_go]], s_go can also be a list | def analyze(L):
rslt=[]
#p=util.Progress(len(L))
i=0
import multiprocessing
s_pid=str(multiprocessing.current_process().pid)
for s_name, S_hit, S_go in L:
i+=1
#if (i % 50000): p.check(i, s_pid)
if type(S_go) is str: S_go=[S_go]
for s_go in S_go:
if s_go not in self.GO_GENE: continue
if S_score is None:
c=self.analysis_go(s_go, S_hit, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff)
else:
c=self.analysis_go_RSA(s_go, S_hit, S_score, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff, l_keep_most=l_rsa_keep_most)
if c is None:
continue
c['Name']=s_name
if min_enrichment>0 and c['Enrichment']<min_enrichment: continue
if p_cutoff<1 and 10**c['LogP']>p_cutoff: continue
c['Description']= self.go_description(s_go)
S_gene=c['GeneID'].split('|')
S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]
c['Hits']='|'.join(S_symbol)
if 'GeneID_All' in c:
S_gene=c['GeneID_All'].split('|')
S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]
c['Hits_All']='|'.join(S_symbol)
if self.GPDB:
c['CategoryID'] = self.get_category_id(c['GO'])
c['Category'] = self.CATEGORY.get(self.get_category_id(c['GO']))
c['GO'] = self.get_source_id(c['GO'])
rslt.append(c)
return rslt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def label(l):\r\n def action(string, loc, tokens):\r\n newlist = [l]\r\n newlist.extend(tokens)\r\n return newlist\r\n return action",
"def scatter_list(self, l):\n pass",
"def Student_names(l:list)->list:\n result=[]\n for s in l:\n result.append(s.name)\n return result",
"def make_list(unused_s, unused_l, toks):\n result = []\n for item in toks:\n result.append(item.asList())\n return result",
"def Students_at_level(l:list,c:str)->list:\n result=[]\n for s in l:\n if s.level==c:\n result.append(s)\n return result",
"def s2l(sents,i,f,freq):\n return [str(l) for _,_,l in sents[i]]",
"def retrieve_smiles(l):\n\t\n\tl = str(l)\n\tl = l.split(\"\\\\t\")\n\tentry_in_dataset = [l[0].split(\"'\")[1], l[1].split(\"\\\\n\")[0]] \n\t# print (entry_in_dataset)\n\treturn entry_in_dataset",
"def __init__(self, Ls, germs, prepStrs, effectStrs, aliases=None,\n sequenceRules=None):\n self.Ls = Ls[:]\n self.germs = germs[:]\n self.prepStrs = prepStrs[:]\n self.effectStrs = effectStrs[:]\n self.aliases = aliases.copy() if (aliases is not None) else None\n self.sequenceRules = sequenceRules[:] if (sequenceRules is not None) else None\n\n self.allstrs = []\n self.allstrs_set = set()\n self.unindexed = [] # unindexed strings\n self._plaquettes = {}\n self._firsts = []\n self._baseStrToLGerm = {}\n super(LsGermsStructure, self).__init__()",
"def make_label_names(name_lsit):\n\n hover_label_names = []\n for x in range(len(name_lsit)):\n temp1 = name_lsit[x]\n hover_label_names.append(temp1)\n\n return hover_label_names",
"def parse_ls(self,ins):\n global Creg\n if ins.instr == 'lb':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'lbu':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lh':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lhu':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lw':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'dlw':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'dmfc1':\n if len(ins.args) == 2:\n self.need = [ins.args[1]] \n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'l.s':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'l.d':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'sb': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'sbu': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: 
\", ins.instr) \n \n elif ins.instr == 'sh': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'shu': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sw': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = [ins.args[0]] + self.need\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'dsw': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = self.double_reg(ins.args[0]) + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'dsz': \n if len(ins.args) == 1:\n ins.args[0] = str(ins.args[0]) \n g = re.match(Creg, ins.args[0])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[0]] \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 's.s': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 's.d': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = self.double_reg(ins.args[0]) + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'move':\n if len(ins.args) == 2:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mov.d':\n if len(ins.args) == 2:\n self.need = self.double_reg(ins.args[1])\n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mov.s':\n if len(ins.args) == 2:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'li':\n if len(ins.args) == 2:\n self.gen = [ins.args[0]]\n self.ival = ins.args[1]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)",
"def lst() :\n return s.lst()",
"def magic_ll(self, parameter_s=''):\n self.magic_lc(parameter_s+' | grep ^l')",
"def __init__(self):\n self.l = []",
"def printL(L, L_name='List', verbose=True):\n if verbose:\n ('\\n[' + L_name + ']:')\n if verbose:\n for item in list(L):\n print('\\t' + str(item))\n print('[' + L_name + '] length: ' + str(len(L)) + '\\n')",
"def __init__(self, s=None):\n self._index_map = {}\n self._list = []\n\n if s is not None:\n for item in s:\n self.add(item)",
"def make_lists(sv):\r\n \r\n mark_delayed(sv) # identify delayed objects\r\n make_pin_list(sv) # detect and initialize inputs (to false) \r\n make_old_list(sv) # create a list of used old/old \r",
"def lstString() :\n return s.lstString()",
"def parseGoalList(s):\n return map(Parser._convertGoal, goalListNT.parseString(s).asList())",
"def add(self,l,s=True):\r\n\t\t\t\t\r\n\t\t# make line\r\n\t\ts = self.copy()\r\n\t\tl = Li(l)\r\n\t\ta = Li._condense(l,s)\r\n\t\ta = Li(a,c=False)\r\n\t\t\t\r\n\t\t# sort?\r\n\t\tif s:\r\n\t\t\ta = a.sort()\r\n\t\t\t\r\n\t\treturn a",
"def list(self, arg: SeField[Any]) -> str:\n if is_bare_list(arg.type):\n return arg.varname\n else:\n earg = arg[0]\n earg.name = \"v\"\n return f\"[{self.render(earg)} for v in {arg.varname}]\"",
"def __init__(self):\n self.l = {}\n self.s = {}",
"def test_createGlossaryByList(self):\n li = []\n li.append(['term', 'tags', 'value'])\n li.append(['foo', 'a', '1'])\n li.append(['bar', 'a, b', '2'])\n li.append(['gnark', 'a, c', '3'])\n self.g = glossary.Glossary(li)",
"def handleList(self, _): # pylint: disable=invalid-name",
"def Students_in_class(l:list,d:str,c:str)->list:\n result=[]\n for s in l:\n if Student_is_enrolled(s,d,c):\n result.append(s)\n return result",
"def check_for_list(check):",
"def _list(self, source, what, *args):\n result = []\n assert source\n raw = getattr(source, 'resolv_%s' % what)(*args)\n for line in raw.splitlines():\n map(result.append, line.strip().split())\n return result",
"def get_lvs() -> List[Dict[str, str]]:\n p = subprocess.run(\n [\"lvs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n result = []\n for lv in output[\"report\"][0][\"lv\"]:\n lvname = lv[\"lv_name\"]\n vgname = lv[\"vg_name\"]\n devname = f\"{vgname}-{lvname}\"\n path = f\"/dev/mapper/{devname}\"\n result.append({\"lv\": lvname, \"vg\": vgname, \"devname\": devname, \"devpath\": path})\n return result",
"def lad_lut(lads):\n for lad in lads:\n yield lad['properties']['name']",
"def __init__(self):\n self.l = []\n self.length = 0",
"def get_spei(datalist):\n dtime_retlist = []\n lat_retlist = []\n lon_retlist = []\n spei_retlist = []\n\n dtime_retlist.append(datalist[0])\n lat_retlist.append(datalist[1])\n lon_retlist.append(datalist[2])\n spei_retlist.append(spei.data[datalist[0], datalist[1], datalist[2]])\n\n return dtime_retlist, lat_retlist, lon_retlist, spei_retlist"
] | [
"0.5778658",
"0.56123483",
"0.5564983",
"0.5401471",
"0.53305316",
"0.51975167",
"0.5192305",
"0.5182059",
"0.513531",
"0.5119218",
"0.50108427",
"0.49965453",
"0.49447706",
"0.49369943",
"0.49039128",
"0.48992655",
"0.4888057",
"0.48818138",
"0.48812246",
"0.48663783",
"0.48335665",
"0.48003584",
"0.4791281",
"0.47487447",
"0.4738337",
"0.4725935",
"0.4717128",
"0.4681826",
"0.46741578",
"0.46684867"
] | 0.57596344 | 1 |
Look for terms present in both lists where the p-value is even better in the combined list. S_old is the set of genes in the Old set; if None, it is set to all genes in the t_old table. This method is to be used by analyze_key_terms. | def key_terms(self, t_old, t_new, t_union, S_old=None, t_over=None):
if t_old is None or t_new is None or t_union is None:
return None
print("Old: %d, New: %d, Union: %d" % (len(t_old), len(t_new), len(t_union)))
if S_old is None:
S_old=set([y for x in [ t_old.ix[i, 'GeneID'].split("|") for i in t_old.index ] for y in x])
elif type(S_old) is list:
S_old=set(S_old)
t_old=t_old[["GO","LogP"]]
t_old.rename2({"LogP":"LogP_Hit"})
t_new=t_new[["GO","LogP"]]
t_new.rename2({"LogP":"LogP_OverConnect"})
#t_old.rename2({"LogP":"LogP_Union"})
t=pd.merge(t_union, t_old, on="GO")
if len(t)==0: return None
t=pd.merge(t, t_new, on="GO")
if len(t)==0: return None
t.sort_values(["LogP", "Enrichment"], ascending=[True, False], inplace=True)
S_id1=[]
S_id2=[]
S_name1=[]
S_name2=[]
S_count=[]
for i in t.index:
S_id=t.ix[i, 'GeneID'].split("|")
S_name=t.ix[i, "Hits"].split("|")
Idx={ j for j,id in enumerate(S_id) if id in S_old }
S_id1.append("|".join([ x for j,x in enumerate(S_id) if j in Idx ]))
S_name1.append("|".join([ x for j,x in enumerate(S_name) if j in Idx ]))
S_id2.append("|".join([ x for j,x in enumerate(S_id) if j not in Idx ]))
S_name2.append("|".join([ x for j,x in enumerate(S_name) if j not in Idx ]))
S_count.append("%d|%d" % (len(Idx), t.ix[i, "#GeneInGOAndHitList"]-len(Idx)))
# separate gene lists into those in Hits and those not in Hits
t["GeneID_InHit"]=S_id1
t["Gene_InHit"]=S_name1
t["GeneID_InOverConnect"]=S_id2
t["Gene_InOverConnect"]=S_name2
t["GeneCount_InHit_OverConnect"]=S_count
S_over=list(t_over.Node) if t_over is not None else None
other={'OldGO':t_old, 'NewGO':t_new, 'OverConnect':t_over, 'OldGeneList':list(S_old), 'OverGeneList':S_over}
return (t, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fix_missing(self, set_tweets, original_sents):\n res = []\n full_original_sents = [\" \".join(s[\"tokens\"]) for s in original_sents]\n for s1, s2 in zip(set_tweets, full_original_sents):\n if not s1:\n res.append([s2]) # add the original as candidate if no candidates are found\n else:\n res.append(s1)\n return res",
"def difference_update(self, *others):\r\n return self.sdiffstore(self.r_key, slef.r_key, *[o.r_key for o in others])",
"def _known_in(self, words):\n return set(word for word in words if self._word_2_frequency.get(word))",
"def update_values(self, old_values, new_values):\n\n to_add = []\n to_remove = []\n count = 0\n values_new_length = len(new_values)\n\n #print(\"\\nold: %s\\n\" % old_values)\n #print(\"\\nnew: %s\\n\" % new_values)\n for obj_new in new_values:\n to_add.append(obj_new)\n\n for obj_old in old_values:\n #print \"old %s\" % obj_old.id\n for obj_new in new_values:\n #print \"new %s\" % obj_new.id\n if obj_old.id == obj_new.id:\n #already present, does not need \n #to be added\n to_add.remove(obj_new)\n break\n else:\n count = count + 1\n\n if values_new_length == count:\n #the old value is not present in the new set\n #of selected values\n to_remove.append(obj_old)\n\n count = 0\n \n return to_add, to_remove",
"def _compare_dictionaries(self, _biblical=dict(), _fallback=dict(), _old=dict(), lang='en-US'):\n# global approved, conflicts, suggestions, unknown, cldr\n\n if _biblical:\n biblical = _biblical\n else:\n biblical = self.Biblical_Terms\n \n if _fallback:\n fallback = _fallback\n else:\n fallback = self.Fallback\n \n if _old:\n old = _old\n else:\n old = self.Old_Target\n# messagebox.showwarning('_compare_dictionaries','{}'.format(biblical))\n# messagebox.showwarning('_compare_dictionaries','{}'.format(fallback))\n if self.dict_in_changed.get() or self.terms_in_changed.get() or len(list(self.trout)) == 0:\n #for term in fallback, decide where it goes,\n #all terms should be under one of these five categories\n #messagebox.showwarning('_compare_dictionaries','dict {}, terms{}, trout {}'.format(self.dict_in_changed.get(), self.terms_in_changed.get(), len(list(self.trout))))\n approved = self.tree.insert('', 'end', iid='approved', values=['', ''], \\\n text=LOCALIZED_TEXT[lang]['Approved'], tags=('approved',))\n suggestions = self.tree.insert('', 'end', iid='suggestions', values=['', ''], \\\n text=LOCALIZED_TEXT[lang]['Suggestions'], tags=('suggestions',))\n conflicts = self.tree.insert('', 'end', iid='conflicts', values=['', ''], \\\n text=LOCALIZED_TEXT[lang]['Conflicts'], tags=('conflicts',))\n unknown = self.tree.insert('', 'end', iid='unknown', values=['', ''], \\\n text=LOCALIZED_TEXT[lang]['Unknown'], tags=('unknown',))\n cldr = self.tree.insert('', 'end', iid='cldr', values=['', ''], \\\n text=LOCALIZED_TEXT[lang]['CLDR'], tags=('cldr',))\n # if in biblical\n for term in fallback:\n if term in biblical:\n #how many renderings?\n if biblical[term] is None:\n renderings = ['',]\n else:\n renderings = biblical[term].split(',')\n if len(renderings) > 0:\n renderings = [r.strip() for r in renderings]\n renderings = [r.strip('*') for r in renderings]\n else:\n renderings = [\"\",]\n #remove any duplicates\n renderings = set(renderings)\n #put it in target anyway\n# if len(renderings) > 1: #at least one rendering exists\n# messagebox.showwarning('_compare_dictionaries', 'term=>{}<, renderings=>{}'.format(term, renderings))\n if len(renderings[0]) > 0: #at least one rendering exists\n #messagebox.showwarning('_compare_dictionaries', 'term is =>{}<'.format(term))\n #if term in target and has single non zero rendering\n # put in approved tab (stripping *?)\n if len(renderings) == 1: #is just one renderings\n if term in old and fallback[term] != old[term]:\n #put in conflicts\n item = self.tree.insert('conflicts', 'end', \\\n values=[\"{}\".format(term), \"{}\".format(renderings[0])], \\\n text='term', tags=('conflicts',))\n child = self.tree.insert(item, 'end', values=['', old[term]], text='old rendering', tags=('conflicts',))\n else:\n item = self.tree.insert('approved', 'end', \\\n values=[\"{}\".format(term), \\\n \"{}\".format(renderings[0])], \\\n text='term', tags=('approved',))\n \n else: #multiple possibilities so put in Suggestions\n item = self.tree.insert('suggestions', 'end', \\\n values=[\"{}\".format(term), \"\"], \\\n text='term', tags=('suggestions',))\n for rendering in renderings:\n# messagebox.showwarning('_compare_dictionaries', 'term=>{}<, rendering=>{}'.format(term, rendering))\n if len(rendering) > 0:\n child = self.tree.insert(item, 'end', \\\n values=[\"\", \\\n \"{}\".format(rendering)], \\\n text='rendering', tags=('suggestions',))\n # each rendering shown as child of term, terms with * put at top of list sans * or with and just strip when apply it\n if term in old 
and fallback[term] != old[term]:\n #put in conflicts\n child = self.tree.insert(item, 'end', \\\n values=[\"\", \\\n \"{}\".format(rendering.strip('*'))], \\\n text='old rendering', \\\n tags=('suggestions',))\n else:\n if term in old and old[term]:\n item = self.tree.insert('suggestions', 'end', \\\n values=[term, old[term]], text='term', \\\n tags=('suggestions'))\n child = self.tree.insert(item, 'end', \\\n values=[\"\", \"{}\".format(fallback[term])], \\\n text='fallback', \\\n tags=('old-rendering', 'suggestions',))\n elif term.isdigit() and fallback[term].isdigit() \\\n and self.accept_regional_digits.get() > 0:\n item = self.tree.insert('approved', 'end', \\\n values=[\"{}\".format(term), \\\n \"{}\".format(fallback[term])], \\\n text='term', tags=('approved',))\n else:\n \n item = self.tree.insert('unknown', 'end', \\\n values=[term, ''], text='term', \\\n tags=('unknown',))\n child = self.tree.insert(item, 'end', \\\n values=['', fallback[term]], \\\n text='fallback', \\\n tags=('suggestions',))\n self.dict_in_changed.set(0)\n self.terms_in_changed.set(0)\n else:\n #retain old tree\n #messagebox.showwarning(\"_compare_dictionaries pre-tree\",\"{}\".format(self.tree.get_children('')))\n self._from_etree_to_tree()\n if self.old_dict_changed.get():\n approved_terms = dict()\n #messagebox.showwarning(\"old_dict changed post-tree\",\"{}\".format(self.tree.get_children('')))\n for child in self.tree.get_children('approved'):\n values = self.tree.item(child)['values']\n approved_terms[values[0]] = child\n for term in approved_terms:\n if term in old:\n if fallback[term] != old[term]:\n #move to conflicts and add old suggestion\n self.tree.move(term, 'conflicts', 'end')\n self.tree.item(term, tags='conflicts')\n item = self.tree.insert(term, 'end', \\\n values=[term, old[term]], \\\n text='old rendering', \\\n tags=\"('conflicts',)\")\n\n self._make_suggestions()",
"def updates_sets(self, parent_yes, parent_no, parent_ignore):\n self.yes_words = set.union(self.yes_words, parent_yes) # this seems redundant\n self.no_words = set.union(self.no_words, parent_no) # this seems redundant\n self.ignore_words = set.union(self.ignore_words, parent_ignore, self.ignore) # this seems redundant\n if self.yes_no == 'y' or self.yes_no == 'yes':\n self.yes_words.add(self.word)\n elif self.yes_no == 'n' or self.yes_no == 'no':\n self.no_words.add(self.word)",
"def intersection_update(self, *others):\r\n return self.r.sinterstore(self.r_key, self.r_key, *[o.r_key for o in others])",
"def _available_origlangs(test_sets, langpair):\n origlangs = set()\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])\n if rawfile.endswith('.sgm'):\n with smart_open(rawfile) as fin:\n for line in fin:\n if line.startswith('<doc '):\n doc_origlang = re.sub(r'.* origlang=\"([^\"]+)\".*\\n', '\\\\1', line)\n origlangs.add(doc_origlang)\n return sorted(list(origlangs))",
"def match_variants(self,state,variants):\r\n for v in variants:\r\n terms = self.match_variant(state,v)\r\n if terms is not None:\r\n return terms\r\n return None",
"def get_common(self, other, mapping):\n\n self_oov = defaultdict(lambda: 0)\n other_oov = defaultdict(lambda: 0)\n self_word_id = deepcopy(self.word_id)\n other_word_id = deepcopy(other.word_id)\n new_words = []\n map_ = mapping.map\n for i, w in enumerate(self.word_id):\n if w not in map_:\n self_oov[w] += 1\n del self_word_id[w]\n continue\n\n if map_[w] not in other.word_id:\n other_oov[map_[w]] += 1\n del self_word_id[w]\n\n for i, w in enumerate(other.word_id):\n if w not in map_:\n del other_word_id[w]\n\n logging.info(\"We could not find {} {} words in our dictionary.\".format(\n len(self_oov), self.lang))\n logging.info(\"We could not find {} {} words in our target words.\".format(\n len(other_oov), other.lang))\n logging.info(\"Our {} vocabulary has {} valid words.\".format(\n self.lang, len(self_word_id)))\n\n sorted_self_word_id = Embeddings.sorted_words(self_word_id)\n self_vectors = asarray([self.vectors[i] for w, i in sorted_self_word_id])\n self_words = [w for w, i in sorted_self_word_id]\n new_self = Embeddings(lang=self.lang, vectors=self_vectors, words=self_words)\n\n sorted_other_word_id = Embeddings.sorted_words(other_word_id)\n other_vectors = asarray([other.vectors[i] for w, i in sorted_other_word_id])\n other_words = [w for w, i in sorted_other_word_id]\n new_other = Embeddings(lang=self.lang, vectors=other_vectors, words=other_words)\n\n return (new_self, new_other)",
"def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow",
"def disambiguateWordsOld(self, word_list, tag_list):\n\t\t# print u\" \".join(word_list).encode('utf8');\n\t\t# print u\" \".join(tag_list).encode('utf8');\t\t\t\n\t\n\t\tif len(word_list)==0 or len(word_list)!=len(tag_list):\n\t\t\treturn word_list;\n\t\telse:\n\t\t\tnewwordlist=[];\n\t\t\twordtaglist=zip(word_list,tag_list);\n\t\t\t# print wordtaglist\n\t\t\tfor i in range(len(wordtaglist)):\n\t\t\t\tif i+1<=len(wordtaglist):\n\t\t\t\t\t# do tests with next word\n\t\t\t\t\t# إذا كانت الكلمة الحالية \"أن\" تكون \"أنْ\" حرف نصب إذا سبقت فعلا\n\t\t\t\t\t# وتكون أنّ، من أخوات إنّ إذا كان ما بعدها اسما\n\t\t\t\t\tif wordtaglist[i][0]==u'أن' and self.tagger.isVerbTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case1';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنْ','t');\n\t\t\t\t\telif wordtaglist[i][0]==u'أن' and self.tagger.isNounTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case 2';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنَّ','t');\n\t\t\t\tnewwordlist.append(wordtaglist[i][0]);\n\t\t\treturn newwordlist;",
"def fixup(self, l):\n\n\n fudges = [ ('A', 'B'),\n ('E', 'F') ]\n\n for x,y in fudges:\n if x in l and y not in l:\n l += y\n if y in l and x not in l:\n l += x\n\n return l",
"def __init__(self, initial_tags, other_tags, tagger):\n self.itags = set([i for i in initial_tags])\n self.otags = set([i for i in other_tags])\n self.tagger = tagger",
"def get_matching_tags(self, existing_tags):\n return list(set(existing_tags).intersection(self._ruleset))",
"def set_subhead_searched_words(self):\n\n searched_words = []\n for i in range(self.subhead.shape[0]):\n if (\n self.subhead[\"searched_pair_word\"][i]\n in self.subhead[\"searched_unique_single_word_synonym\"][i]\n ):\n searched_words.append(\n self.subhead[\"searched_unique_single_word_synonym\"][i]\n )\n else:\n searched_words.append(\n self.subhead[\"searched_unique_single_word_synonym\"][i]\n + self.subhead[\"searched_pair_word\"][i]\n )\n self.subhead[\"searched_words\"] = searched_words",
"def candidates(word):\r\n return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])",
"def used_phrases(self, start, end):\n used = set()\n for ngram in self.ngram_history:\n for occurance in self.ngram_history[ngram]['occurances']:\n if occurance['date'] >= start and occurance['date'] < end:\n used.add(ngram)\n return list(used)",
"def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))",
"def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))",
"def unchanged(self):\n return set(o for o in self.intersect\n if self.past_dict[o] == self.current_dict[o])",
"def set_gold_standard_and_priors(self):\n self.priors_data = self.input_dataframe(self.priors_file)\n\n if self.split_priors_for_gold_standard:\n self.split_priors_into_gold_standard()\n else:\n self.gold_standard = self.input_dataframe(self.gold_standard_file)\n\n if self.split_gold_standard_for_crossvalidation:\n self.cross_validate_gold_standard()\n\n try:\n check.index_values_unique(self.priors_data.index)\n except ValueError as v_err:\n utils.Debug.vprint(\"Duplicate gene(s) in prior index\", level=0)\n utils.Debug.vprint(str(v_err), level=0)\n\n try:\n check.index_values_unique(self.priors_data.columns)\n except ValueError as v_err:\n utils.Debug.vprint(\"Duplicate tf(s) in prior index\", level=0)\n utils.Debug.vprint(str(v_err), level=0)",
"def update(self, tree: \"Tree\") -> List[ValueObject]:\n new_values = set([])\n not_matched = set([])\n to_delete = set([])\n # Trees are lazy and need to be initialized before use.\n self.init()\n tree.init()\n # self.tree doesn't have labels -> there are no labels to query.\n if not self.tree and tree.vos:\n del self.vos[:]\n not_matched = range(len(tree.vos))\n else:\n # search_hits saves the intersection of all label matches.\n # The indices in the sets at the end are the search hits.\n search_hits = {ix: set([]) for ix in range(len(tree.vos))}\n for label in self.label_grid:\n if label in (\"_auto\",):\n continue\n if label in tree.tree and label in self.tree:\n # All label values that exist in both trees.\n for label_value in (\n tree.tree[label].keys() & self.tree[label].keys()\n ):\n for new_ix in tree.tree[label][label_value]:\n if new_ix in search_hits:\n if search_hits[new_ix]:\n search_hits[new_ix] &= self.tree[label][\n label_value\n ]\n else:\n search_hits[new_ix] |= self.tree[label][\n label_value\n ]\n # All label values in the new tree that are not in this tree.\n # Value objects that have a label value that is not included\n # in the current tree means that they will not be matched.\n for label_value in (\n tree.tree[label].keys() - self.tree[label].keys()\n ):\n for new_ix in tree.tree[label][label_value]:\n search_hits.pop(new_ix)\n not_matched.add(new_ix)\n elif label in self.tree:\n # All value objects with labels not specified in the other\n # tree are treated as search hits (for this label).\n unused_label = set.union(*self.tree[label].values())\n for new_ix in search_hits:\n if search_hits[new_ix]:\n search_hits[new_ix] &= unused_label\n else:\n search_hits[new_ix] |= unused_label\n elif label in tree.tree:\n raise ParamToolsError(\n f\"Label {label} was not defined in the defaults.\"\n )\n\n for ix, search_hit_ixs in search_hits.items():\n if search_hit_ixs:\n if tree.vos[ix][\"value\"] is not None:\n for search_hit_ix in search_hit_ixs:\n self.vos[search_hit_ix][\"value\"] = tree.vos[ix][\n \"value\"\n ]\n else:\n to_delete |= search_hit_ixs\n else:\n not_matched.add(ix)\n if to_delete:\n # Iterate in reverse so that indices point to the correct\n # value. If iterating ascending then the values will be shifted\n # towards the front of the list as items are removed.\n for ix in sorted(to_delete, reverse=True):\n del self.vos[ix]\n\n if not_matched:\n for ix in not_matched:\n if tree.vos[ix][\"value\"] is not None:\n self.vos.append(tree.vos[ix])\n new_values.add(len(self.vos) - 1)\n\n # It's faster to just re-build from scratch if values are deleted.\n if to_delete:\n self.new_values = None\n self.needs_build = True\n else:\n self.new_values = new_values\n self.needs_build = True\n\n return self.vos",
"def combine(sv, nod, O, oldnatA, oldnatB, oldnatres):\r\n newnatA, newnatB, newnatres = set(), set(), set()\r\n oldsetA, oldsetB, oldsetres=set(oldnatA), set(oldnatB), set(oldnatres)\r\n \r\n for allowA, a1, allowB, a2, allowres in Allowed[O]: # test compatibility of hypotheses\r\n # simple operands without distributivity\r\n setA=set(allowA) & oldsetA \r\n setB=set(allowB) & oldsetB\r\n setres=set(allowres) & oldsetres \r\n if (setres and setA and (setB or O in Unary)): # hypothesis is valid\r\n newnatA.update(setA) # add to list of possible natures\r\n newnatB.update(setB)\r\n newnatres.update(setres)\r\n\r\n # left distributivity (add list as a possible nature) \r\n if not (O in Non_distributive1) and Lst[0] in oldnatA and Lst[0] in oldnatres: \r\n newnatA.add(Lst[0]) \r\n newnatB.update(setB) \r\n newnatres.add(Lst[0])\r\n\r\n # right distributivity (add list as a possible nature) \r\n if not (O in Non_distributive2) and not (O in Unary) \\\r\n and Lst[0] in oldnatB and Lst[0] in oldnatres: \r\n newnatA.update(setA)\r\n newnatB.add(Lst[0])\r\n newnatres.add(Lst[0])\r\n \r\n # check compatibility\r\n if not (newnatres and newnatA and (newnatB or O in Unary)): \r\n print(\"\\n\", Err_incomp_nat) # ***Error: incompatible nature *** \r\n print(O, oldnatA, oldnatB)\r\n if nod.once: print(\"condition must be an event:\", nod.name)\r\n raise ReferenceError\r\n \r\n return list(newnatA), list(newnatB), list(newnatres)",
"def filter(self, new_set):\n for old_set in self.itervalues():\n for feat in old_set.iterkeys():\n if feat not in new_set:\n del old_set[feat]\n return self",
"def _usable_word(self, filtered_words):\n usable = set()\n for word in filtered_words:\n counter = 0\n for x in range(0, len(self._to_word)):\n if word[x] == self._from_word[x]:\n counter += 1\n if counter == len(self._to_word) - 1:\n usable.add(word)\n return usable",
"def refine_tokens( self, tokens ):\n k = 1.75\n b = 0.75\n stop_words_file = \"stop_words.txt\"\n all_stopwords = list()\n refined_tokens_sources = dict()\n \n # collect all the stopwords\n with open( stop_words_file ) as file:\n lines = file.read()\n all_stopwords = lines.split( \"\\n\" )\n \n for source in tokens:\n refined_tokens = dict()\n files = dict()\n inverted_frequency = dict()\n file_id = -1\n total_file_length = 0\n for item in tokens[ source ]:\n file_id += 1\n file_tokens = tokens[ source ][ item ].split(\" \")\n if source in \"name_desc_edam_help\":\n file_tokens = utils._clean_tokens( file_tokens, all_stopwords )\n total_file_length += len( file_tokens )\n term_frequency = dict()\n for token in file_tokens:\n if token is not '':\n file_ids = list()\n if token not in inverted_frequency:\n file_ids.append( file_id )\n else:\n file_ids = inverted_frequency[ token ]\n if file_id not in file_ids:\n file_ids.append( file_id )\n inverted_frequency[ token ] = file_ids\n # for term frequency\n if token not in term_frequency:\n term_frequency[ token ] = 1\n else:\n term_frequency[ token ] += 1\n files[ item ] = term_frequency\n N = len( files )\n average_file_length = float( total_file_length ) / N\n # find BM25 score for each token of each tool. It helps to determine\n # how important each word is with respect to the tool and other tools\n for item in files:\n file_item = files[ item ]\n file_length = len( file_item )\n for token in file_item:\n tf = file_item[ token ]\n # normalize the term freq of token for each document\n tf = float( tf ) / file_length\n idf = np.log2( N / len( inverted_frequency[ token ] ) )\n alpha = ( 1 - b ) + ( float( b * file_length ) / average_file_length )\n tf_star = tf * float( ( k + 1 ) ) / ( k * alpha + tf )\n tf_idf = tf_star * idf\n file_item[ token ] = tf_idf\n # filter tokens based on the BM25 scores and stop words. Not all tokens are important\n for item in files:\n file_tokens = files[ item ]\n tokens_scores = [ ( token, score ) for ( token, score ) in file_tokens.items() ]\n sorted_tokens = sorted( tokens_scores, key=operator.itemgetter( 1 ), reverse=True )\n refined_tokens[ item ] = sorted_tokens\n tokens_file_name = 'tokens_' + source + '.txt'\n token_file_path = os.path.join( os.path.dirname( self.tools_data_path ) + '/' + tokens_file_name )\n with open( token_file_path, 'w' ) as file:\n file.write( json.dumps( refined_tokens ) )\n file.close()\n refined_tokens_sources[ source ] = refined_tokens\n return refined_tokens_sources",
"def get_word_edit(self, old_log_file, new_log_file):\n Gumtree.gumtree.setOldAndNewFile(old_log_file, new_log_file)\n edit_elements = list(Gumtree.gumtree.getWordEdit())\n edit_words = []\n edit_feature = []\n for edit_element in edit_elements:\n old_element = edit_element[0]\n new_element = edit_element[1]\n edit_words.append([old_element, new_element])\n edit_feature.append(abs(hash(new_element.lower()) - hash(old_element.lower())))\n\n return edit_words, edit_feature",
"def apply(self):\n next_one = super().apply()\n next_both = set()\n\n for tup in next_one:\n if (tup[1], tup[0]) in next_one:\n next_both.add(tup)\n\n return list(next_both)",
"def subword_mapping(self, non_match: list) -> tuple:\n out_standard = []\n # out_standard_vec = []\n for i in non_match:\n # Each non-matched word vector\n vec = self.subword_embed_calss.get_embedding(term=i)\n\n # All the vectors of the Knowledge base\n if vec is not None:\n # Calculate Cosine Distance\n score = cos_similarity(vec1=vec, vec2=self.synonym_vec)\n\n # [Sub-words] standard term Mapping\n standard_term = self.find_standard_term(score=score, is_final=False)\n\n out_standard.append(standard_term)\n else:\n print('There was no word vector for ', i)\n return out_standard"
] | [
"0.5270499",
"0.5263518",
"0.52220106",
"0.52068335",
"0.51452315",
"0.50743026",
"0.50405735",
"0.5016303",
"0.50064653",
"0.49099362",
"0.48987463",
"0.48954973",
"0.48942754",
"0.48571077",
"0.4847399",
"0.48385495",
"0.48297605",
"0.47905686",
"0.47897458",
"0.47897458",
"0.47889164",
"0.47877765",
"0.47730643",
"0.47702065",
"0.47519562",
"0.4743673",
"0.47428203",
"0.4728037",
"0.4723996",
"0.4720767"
] | 0.6873936 | 0 |
If self.t_go is None, it clusters the self.data membership matrix and returns a dict | def cluster(self, similarity=0.3, l_go_selective=False):
sw=util.StopWatch("GO_Cluster::cluster")
#K=stats.kappa_stat(self.data.values)
#T_edge=pd.DataFrame({'Gene_A':[],'Gene_B':[],'TYPE':[],'SCORE':[]})
M=self.data.values
n,m=M.shape
print("Matrix size: %d genes x %d GOs" % (n, m))
S_go=self.data.header()
out=self.t_go
if m==0:
self.DM=np.zeros(0)
if out is None:
return {}
return None
if m==1:
# only 1, no need to cluster
if out is None:
return {S_go[0]:1}
out['GROUP_ID']=1
out['FirstInGroupByEnrichment']=1
out['FirstInGroupByLogP']=1
if l_go_selective: out['FirstInGroupByGini']=1
self.DM=np.zeros(0)
return out
#T_edge=pd.DataFrame({'Gene_A':[S_go[0]],'Gene_B':[S_go[0]],'TYPE':['Direct'],'SCORE':[1.0]})
if self.DM is None:
self.DM=stats.kappa_stat(self.data.values, n_CPU=self.n_CPU)
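            # self.DM is a condensed (upper-triangular) vector of pairwise kappa similarities between the GO membership columns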
sw.check("Kappa done ...")
#import ms.msobject as mo
#mo.MSObject.dump_object(self.DM, s_name='untitled', s_cache_dir=".")
import scipy.cluster.hierarchy as clst
import fastcluster
Z=fastcluster.linkage(1.0-self.DM, method='average')
S=clst.fcluster(Z, 1-similarity, criterion='distance')
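        # cut the average-linkage tree at distance 1-similarity, so GO terms with kappa >= similarity fall into the same cluster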
c_grp={ x:S[i] for i,x in enumerate(S_go) }
if out is None:
return c_grp
out['GROUP_ID']=out.GO.apply(lambda x: c_grp[x])
self.similarity=similarity
if l_go_selective:
out.sort_values(['GROUP_ID','GiniIndex','LogP','Enrichment'], ascending=[True,False,True,False], inplace=True)
out['FirstInGroupByGini']=0
else:
out.sort_values(['GROUP_ID','LogP','Enrichment'], ascending=[True,True,False], inplace=True)
out['FirstInGroupByEnrichment']=0
out['FirstInGroupByLogP']=0
iB=iE=0
n=len(out)
out.index=list(range(n))
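        # walk the rows group by group: record the best LogP/Enrichment (and Gini) within each GROUP_ID block and flag its representative terms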
for i in range(1,n+1):
if i>=n or out.ix[i,'GROUP_ID']!=out.ix[i-1,'GROUP_ID']:
iE=i-1
out.ix[iB:iE,'BestLogPInGroup']=out.ix[iB:iE,'LogP'].min()
out.ix[iB:iE,'BestEnrichmentInGroup']=out.ix[iB:iE,'Enrichment'].max()
idx=out.ix[iB:iE,'LogP'].argmin()
out.ix[idx, 'FirstInGroupByLogP']=1
out.ix[iB, 'FirstInGroupByEnrichment']=1
if l_go_selective:
out.ix[iB:iE,'BestGiniInGroup']=out.ix[iB:iE,'GiniIndex'].max()
idx=out.ix[iB:iE,'GiniIndex'].argmax()
out.ix[idx, 'FirstInGroupByGini']=1
iB=i
if l_go_selective:
out.sort_values(['BestGiniInGroup','BestLogPInGroup','GROUP_ID','FirstInGroupByGini','GiniIndex','LogP','Enrichment'], ascending=[False,True,True,False,False,True,False], inplace=True)
out.index=list(range(n))
# out.to_csv('t0.csv', index=False)
# iteratively pick unique patterns
S_pattern=util.unique2(out._PATTERN_) # unique but preserve order
n_pattern=len(S_pattern)
iB=iE=0
i_pattern={k:(i+1) for i,k in enumerate(S_pattern)}
c_pattern={k:0 for k in S_pattern}
out['NEW_GROUP_ID']=0
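            # NEW_GROUP_ID interleaves groups across enrichment patterns: the k-th group showing a pattern gets k*n_pattern + pattern_rank, so sorting by it alternates patterns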
for i in range(1,n+1):
if i>=n or out.ix[i,'GROUP_ID']!=out.ix[i-1,'GROUP_ID']:
iE=i-1
s_pat=out.ix[iB, '_PATTERN_']
out.ix[iB:iE, 'NEW_GROUP_ID']=c_pattern[s_pat]*n_pattern+i_pattern[s_pat]
c_pattern[s_pat]+=1
iB=i
out.sort_values(['NEW_GROUP_ID'], inplace=True)
out.drop('NEW_GROUP_ID', axis=1, inplace=True)
else:
out.sort_values(['BestLogPInGroup','GROUP_ID','FirstInGroupByLogP','LogP','Enrichment'], ascending=[True,True,False,True,False], inplace=True)
        # relabel group ids so that they are ordered by statistical significance
c_order={}
cnt=1
for grp in out.GROUP_ID:
if grp not in c_order:
c_order[grp]=cnt
cnt+=1
out['GROUP_ID']=out['GROUP_ID'].apply(lambda x: c_order[x])
out['URL']=''
out.index=list(range(len(out)))
S_URL=out.URL.tolist()
for i in out.index:
if out.ix[i,'GO'].startswith('M'): #MsigDB
if re.search(r'\s\(.+\)$', out.ix[i,'Description']):
# Notice: description may contain ")" "GSE8515_IL1_VS_IL6_4H_STIM_)MAC_DN"
s_key=re.search(r'\s\(.+\)$', out.ix[i,'Description']).group()[2:-1]
                    S_URL[i]='http://www.broadinstitute.org/gsea/msigdb/geneset_page.jsp?geneSetName='+s_key
out['URL']=S_URL
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index",
"def index_nodes(self):\n out = {}\n\n #avg = np.mean(list(self.rtype_vectors.values()),axis=0)\n\n\n #for name, node in self.nodes.items():\n # tmp1 = [self.rtype_vectors[rtype]\n # for rtype, dest in node.outgoing_relations] or [NULL_VEC()]\n # tmp2 = [permute_rtype_vector(self.rtype_vectors[rtype])\n # for rtype, prev in node.incoming_relations] or [NULL_VEC()]\n\n # net = tmp1 + tmp2\n\n # #out[name] = np.asarray(net).mean(axis=0)\n # #out[name] = np.asarray(net).sum(axis=0)\n # v = np.asarray(net).sum(axis=0)\n # if v.any():\n # out[name] = v/max(v)#softmax(v/max(v))\n # else:\n # out[name] = v\n\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n ####normalize everything\n #for r,v in out.items():\n # if v.any():\n # #out[r] = v / sqrt(v.dot(v))\n # out[r] = softmax((v-avg)/maxm)\n\n\n\n # PCA method 0001701\n rmap = self.rtype_vectors\n data = np.zeros((len(self.nodes), JACCARD_DIMENSIONS), dtype=np.float)\n ix = 0\n for node in self.nodes.values():\n\n #compute weighted average of each relation type\n tmp = [rmap[rtype] for \n rtype, dest in node.outgoing_relations] + \\\n [permute_rtype_vector(rmap[rtype]) for \n rtype, prev in node.incoming_relations]\n\n v = np.asarray(tmp).mean(axis=0) if tmp else NULL_VEC()\n\n #normalize\n if v.any():\n data[ix] = v / sqrt(v.dot(v))\n else:\n data[ix] = v\n ix += 1\n\n #eliminate projection onto first 7 principal components\n d2 = data - PCA(data, 7)\n\n #order of nodes is preserved\n for i,v in enumerate(self.nodes):\n out[v] = softmax(d2[i])\n\n return out",
"def network(self, max_clusters=20, max_members=10, max_nodes=300, l_go_selective=False):\n if len(self.data)==0:\n return None\n if self.DM is None:\n util.error_msg('Please run cluster first!')\n S_node=GO_Cluster.sample_rows(self.t_go, max_clusters=max_clusters, max_members=max_members, max_nodes=max_nodes, l_go_selective=l_go_selective)\n T_node=self.t_go[self.t_go.GO.apply(lambda x: x in S_node)].copy()\n S_go=self.data.header()\n M=self.data.values\n n,m=M.shape\n S_node=set(T_node.GO)\n S_idx=[i for i,x in enumerate(S_go) if x in S_node ]\n S_name=[ S_go[i] for i in S_idx]\n T_node.rename2({'GO':'Gene'})\n s_name='GOCluster'\n if 'Name' in T_node.header():\n s_name=list(T_node.Name)[0]\n T_node.drop('Name', axis=1, inplace=True)\n if 'URL' in T_node.header():\n T_node.drop('URL', axis=1, inplace=True)\n\n c_has_neighbor={}\n data=[]\n c_cluster={ T_node.ix[i,'Gene']:T_node.ix[i,'GROUP_ID'] for i in T_node.index}\n n2=len(S_idx)\n for _i in range(n2):\n i=S_idx[_i]\n for _j in range(_i+1, n2):\n j=S_idx[_j]\n idx=i*(2*m-i-1)//2+(j-i)-1\n #print (_i, _j, n2, m, i, j, idx, S_name[_i], c_cluster[S_name[_i]], S_name[_j], c_cluster[S_name[_j]], K[idx])\n if self.DM[idx]>=self.similarity:\n data.append({'Gene_A':S_go[i], 'Gene_B':S_go[j], 'TYPE':'Direct', 'SCORE':self.DM[idx]})\n c_has_neighbor[S_go[i]]=True\n c_has_neighbor[S_go[j]]=True\n # keep singletons\n for i in S_idx:\n if S_go[i] not in c_has_neighbor:\n data.append({'Gene_A':S_go[i], 'Gene_B':S_go[i], 'TYPE':'Direct', 'SCORE':1.0})\n if len(data):\n T_edge=pd.DataFrame(data)\n T_node.index=list(range(len(T_node)))\n net=xgmml.Network(T_edge, T_node=T_node, name=s_name)\n return net",
"def get_cluster_dstructure(self, curs, mcl_id, splat_table, mcl_table):\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tcluster = self.get_basic_cluster_dstructure(curs, mcl_id, splat_table, mcl_table)\n\t\tif cluster:\t#not None\n\t\t\tcluster.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, cluster.vertex_set)\n\t\t\tcluster.go_no2information = self.get_information_of_go_functions(curs, cluster.go_no2association_genes, \\\n\t\t\t\tlen(cluster.vertex_set), no_of_total_genes)\n\t\t\tcluster.edge_cor_2d_list, cluster.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, cluster.edge_set)\n\t\t\t#graph = self.graph_from_node_edge_set(cluster.vertex_set, cluster.edge_set)\n\t\treturn cluster\n\t\t\n\t\t\"\"\"\n\t\tprint \"vertex_set\"\n\t\tprint cluster.vertex_set\n\t\tprint \"edge_set\"\n\t\tprint cluster.edge_set\n\t\trecurrence_list_2d = ['recurrence_array']+cluster.recurrence_array\n\t\trecurrence_list_2d_1 = ['recurrence_array_1']+cluster.recurrence_array\n\t\trecurrence_list_2d = [recurrence_list_2d, recurrence_list_2d_1]\n\t\tself.column_output('/tmp/yh/recurrence_array',recurrence_list_2d)\n\n\t\tprint cluster.splat_connectivity\n\t\tprint \"connectivity\"\n\t\tprint cluster.connectivity\n\t\tprint \"connectivity_original\"\n\t\tprint cluster.connectivity_original\n\t\tcor_list_2d = []\n\t\tsig_list_2d = []\n\t\tfor i in range(len(cluster.edge_set)):\n\t\t\tcor_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_cor_2d_list[i])\n\t\t\tsig_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_sig_2d_list[i])\n\t\tself.column_output('/tmp/yh/edge_cor_2d_list', cor_list_2d)\n\t\tself.column_output('/tmp/yh/edge_sig_2d_list', sig_list_2d)\n\n\t\tgo_no_list_2d = []\n\t\tfor go_no,information in cluster.go_no2information.iteritems():\n\t\t\tgo_no_list_2d.append(list(information)+[len(cluster.go_no2association_genes[go_no])])\n\t\t#self.column_output('/tmp/yh/go_no_list_2d', go_no_list_2d)\n\t\t\"\"\"",
"def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n (cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. 
\n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features",
"def get_cluster_info(self) -> Dict[str, Any]:\n pass",
"def __init__(self, S_all_gene, c_go=None, n_CPU=20):\n self.data=pd.DataFrame(data={\"Gene\":util.unique(S_all_gene)})\n self.data.set_index(\"Gene\", drop=True, inplace=True)\n self.t_go=None\n self.n_CPU=n_CPU if n_CPU>0 else 1\n sw=util.StopWatch(\"GO_Cluster\")\n if type(c_go) is not dict: # dataframe\n self.t_go=c_go\n s_col='GeneID_All' if 'GeneID_All' in self.t_go.header() else 'GeneID'\n c_go={ self.t_go.GO[i]:self.t_go[s_col][i].split('|') for i in self.t_go.index }\n\n L=util.split(list(c_go.keys()), self.n_CPU)\n def f(x):\n t, S_go = x[0], x[1]\n for k in S_go:\n for g in c_go[k]:\n t.ix[g, k]=1\n return t\n\n #mp=parallel.MP()\n #mp.start(f, n_CPU=len(L))\n L=[( self.data.copy(), x) for x in L]\n #out=mp.map(L)\n out=parallel.parmap(f, L, n_CPU=len(L))\n self.data=pd.concat(out, axis=1)\n #for k,v in c_go.items():\n sw.check('Done membership...')\n # for g in v:\n # self.data.ix[g, k]=1\n self.data.fillna(value=0, inplace=True)\n self.S_GO=self.data.header()\n self.DM=None # distance matrix\n self.similarity=None",
"def cluster(self):\n assert False",
"def __init__(self):\n self.tree = nx.Graph() \n self.orig_dist_matrix = pd.DataFrame()\n self.work_dist_matrix = pd.DataFrame() \n self.cluster_map = {} \n self.class_map = {}",
"def matrix_dist(self):\n matrix_dic = {}\n for clus in self.clusters:\n for other_clus in self.clusters:\n if clus.samples[0].s_id > other_clus.samples[0].s_id: # avoid duplicates\n matrix_dic[(clus.samples[0].s_id, other_clus.samples[0].s_id)] = clus.samples[0]\\\n .compute_euclidean_distance(other_clus.samples[0])\n return matrix_dic",
"def get_clusters(self, base_user, user_friends_graph, method=\"\") -> Dict: # TODO:\n if method==\"modularity\":\n community_data = greedy_modularity_communities(user_friends_graph)\n else:\n community_data = [item for item in label_propagation_communities(user_friends_graph)]\n d = {}\n for i in range(len(community_data)):\n if base_user in community_data[i]:\n li = list(community_data[i])\n li.pop(li.index(base_user))\n li.insert(0, base_user)\n d[i] = li\n else:\n d[i] = [base_user] + list(community_data[i])\n\n # Flatten dict into cluster list\n cluster_list = [d[k] for k in d]\n print(\"Number of clusters \" + str(len(cluster_list)))\n\n return cluster_list",
"def make_neighbor_db(data):\n acted_with = {}\n for i, j, _ in data:\n # the setdefault method lets us avoid checking for ourselves whether an\n # actor is aclready in the dictionary.\n # see https://docs.python.org/3/library/stdtypes.html#dict.setdefault\n acted_with.setdefault(i, set()).add(j)\n acted_with.setdefault(j, set()).add(i)\n return acted_with",
"def cluster_classification(weblog,classification_column_transaction,\\\n classification_column_diversity, session_data_threshold, cluster_type, classification_wanted_transaction, verbose = False):\n if verbose== True:\n start_time = timelib.time()\n print(\"\\n * Computing cluster matrices ...\") \n browsing_matrix = {}\n diversifying_matrix = {}\n # Selecting sessions from each cluster\n for cluster_id in session_data_threshold[cluster_type].unique():\n sessions_cluster = session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id\n divpat_log = weblog[weblog.session_id.isin(sessions_cluster)]\n # Filtering some requests\n divpat_log=divpat_log[divpat_log['requested_'+classification_column_transaction].isin(classification_wanted_transaction)]\n divpat_log=divpat_log[divpat_log['referrer_'+classification_column_transaction].isin(classification_wanted_transaction)]\n \n # Defining matrices\n diversity_columns=('referrer_'+classification_column_diversity,'requested_'+classification_column_diversity)\n browsing_matrix[cluster_id],_ = compute_browsing_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,labels=classification_wanted_transaction)\n diversifying_matrix[cluster_id],_ = compute_diversifying_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,\\\n diversity_columns,labels = classification_wanted_transaction)\n if verbose == True:\n print(\" Cluster matrices computed in %.1f seconds.\"%(timelib.time() - start_time))\n \n return browsing_matrix, diversifying_matrix;",
"def cluster_life_expectancy() -> Dict:\n return dict(model=None, score=None, clusters=None)",
"def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes",
"def matching_clusterization(self):\n result = []\n self.reclustering(self.groups.copy(deep=True), result)\n self.result = pd.DataFrame(result)\n return self.result.sort_values(by=['cluster_size'], ascending=False)",
"def makeCluster(self):\n for i in range(self.k):\n #vector of length total users, pick random number 1-5\n self.centroids.append(np.random.uniform(low=1,high=5,size=len(self.user)))\n memberList = []\n self.membership.append(memberList)\n self.centroids = np.round(self.centroids)\n\n for movie in self.dictionary.keys():\n #Finds the index of the closest centroid\n closest = np.argmin(self.calculateDistance(self.dictionary[movie]))\n newVector = []\n newVector.append(movie)\n #Add the movie to the list of members of the closest centroid\n self.membership[closest].append(newVector)\n self.recalculateCentroid(self.membership[closest], closest)",
"def get_results_for_init(self):\n return dict(init=self.centroids, n_clusters=self.centroids.shape[0])",
"def __init__(self):\n ## self.clusters[cluster] = list of coordinates\n self.clusters = {}\n ## self.centroids[cluster] = centroid\n self.centroids = {}",
"def _cluster(self):\n self._not_included = self.data\n self.leaves = []\n flag = int(rand() * len(self.data))\n flag = self._generate(flag)\n while len(self._not_included) > 0:\n flag = self._generate(flag)\n if flag == -1:\n break\n pass\n self._remember.append({\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n print(len(self._remember), {\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n return",
"def intents_clustering(self):\n self.phrs2intents = {}\n number_of_other = 10000;\n for i in range(len(self.data)):\n for ut in self.data[i]['utterances']:\n if ut['speaker'] == 'USER':\n if 'segments' in ut.keys():\n for seg in ut['segments']:\n if 'annotations' in seg.keys():\n for anno in seg['annotations']:\n name = anno['name']\n if ut['text'] not in self.phrs2intents.keys():\n self.phrs2intents[ ut['text'] ] = [name]\n elif name not in self.phrs2intents[ ut['text'] ]:\n self.phrs2intents[ ut['text'] ].append(name)\n else:\n if number_of_other > 0:\n self.phrs2intents[ ut['text'] ] = ['other']\n number_of_other -= 1\n self.X = np.array(list(self.phrs2intents.keys()))",
"def _collect_data(self):\n data = {\n \"K\": self.K,\n \"root\": self.root\n }\n return data",
"def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict",
"def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters",
"def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def prepare_data_matrix():\n # create matrix X and list of languages\n\n lds = {}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n #print(lds.keys())\n \n #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"efg\":6...},...}\n l=listOfTuples(lds) #list of strings\n #print(l[:100])\n languages = list(lds.keys()) # ['Slo', 'Mac', ]\n # which language represents row number i: languages[i]\n # which row does language s represent: languagues.index(s)\n X=np.zeros([len(languages),100])\n for i in range(len(languages)):\n #print(languages[i])\n count = 0\n for j in range(100):\n if l[j] in lds[languages[i]]:\n X[i,j]=lds[languages[i]][l[j]]\n count += 1\n # print(count)\n\n #print([sum(x) for x in X])\n \n return X, languages\n # X, languages = prepare_data_matrix()",
"def to_json(self):\n\n tcluster = {\"clusters\": [], \"matchings\": None}\n if self.matching is not None:\n tcluster[\"matchings\"] = self.matching\n elif self.matched is not None:\n tcluster[\"matchings\"] = self.matched\n\n for tid in self.get_observation_ids():\n ct = self.get_clustering_at(tid)\n partition = {\n \"tid\": tid,\n \"communities\": ct.named_communities,\n \"algorithm\": ct.method_name,\n \"params\": ct.method_parameters,\n \"overlap\": ct.overlap,\n \"coverage\": ct.node_coverage,\n }\n tcluster[\"clusters\"].append(partition)\n\n return json.dumps(tcluster)",
"def cluster_amazon_video_game_again() -> Dict:\n return dict(model=None, score=None, clusters=None)",
"def iter_node_map(self):\n return self.d_inv.keys()"
] | [
"0.6026801",
"0.59989464",
"0.5909006",
"0.58761305",
"0.5691569",
"0.56816846",
"0.567273",
"0.55920225",
"0.5523509",
"0.55081856",
"0.5492725",
"0.548897",
"0.5476533",
"0.54749143",
"0.5442817",
"0.54381925",
"0.5394746",
"0.53318834",
"0.53260607",
"0.5310842",
"0.5309781",
"0.5289219",
"0.52779096",
"0.52760714",
"0.5257309",
"0.52480793",
"0.52318966",
"0.5188025",
"0.51869124",
"0.51790035"
] | 0.6161217 | 0 |
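The cluster method above reduces to a simple pipeline: compute pairwise kappa between GO membership columns, convert to distances, build an average-linkage tree, and cut it at 1-similarity. Below is a minimal illustrative sketch of that pipeline on a toy matrix; it is not the project's code. The helper names kappa and cluster_go_columns and the toy data are assumptions, scipy's pdist and linkage stand in for the original stats.kappa_stat and fastcluster, and the group ranking and URL annotation steps are omitted.

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist

def kappa(u, v):
    # Cohen's kappa between two binary membership columns
    po = np.mean(u == v)                               # observed agreement
    p1, q1 = u.mean(), v.mean()                        # marginal membership rates
    pe = p1 * q1 + (1 - p1) * (1 - q1)                 # agreement expected by chance
    return 1.0 if pe == 1.0 else (po - pe) / (1.0 - pe)

def cluster_go_columns(M, go_names, similarity=0.3):
    # group the GO columns of a gene-by-GO 0/1 membership matrix
    if M.shape[1] <= 1:
        return {name: 1 for name in go_names}
    D = pdist(M.T, metric=lambda a, b: 1.0 - kappa(a, b))   # condensed distance vector
    Z = linkage(D, method='average')
    labels = fcluster(Z, 1.0 - similarity, criterion='distance')
    return {name: int(lab) for name, lab in zip(go_names, labels)}

M = np.array([[1, 1, 0],
              [1, 1, 0],
              [0, 0, 1],
              [1, 1, 1],
              [0, 0, 1],
              [1, 1, 0]])
print(cluster_go_columns(M, ['GO:A', 'GO:B', 'GO:C']))

With this toy input the first two GO columns are identical (kappa of 1) and share a group while the third stays on its own, which mirrors how the method above assigns GROUP_ID before the per-group ranking.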
Construct a GO network. Because too many nodes lead to useless networks, we impose an upper bound on the network size via max_nodes: we take 1 node from each cluster, then 2, then 3, until max_nodes is reached. | def network(self, max_clusters=20, max_members=10, max_nodes=300, l_go_selective=False):
if len(self.data)==0:
return None
if self.DM is None:
util.error_msg('Please run cluster first!')
S_node=GO_Cluster.sample_rows(self.t_go, max_clusters=max_clusters, max_members=max_members, max_nodes=max_nodes, l_go_selective=l_go_selective)
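        # sample_rows picks representative GO terms round-robin (1 per cluster, then 2, then 3, ...) until max_nodes is reached, as described in the docstring above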
T_node=self.t_go[self.t_go.GO.apply(lambda x: x in S_node)].copy()
S_go=self.data.header()
M=self.data.values
n,m=M.shape
S_node=set(T_node.GO)
S_idx=[i for i,x in enumerate(S_go) if x in S_node ]
S_name=[ S_go[i] for i in S_idx]
T_node.rename2({'GO':'Gene'})
s_name='GOCluster'
if 'Name' in T_node.header():
s_name=list(T_node.Name)[0]
T_node.drop('Name', axis=1, inplace=True)
if 'URL' in T_node.header():
T_node.drop('URL', axis=1, inplace=True)
c_has_neighbor={}
data=[]
c_cluster={ T_node.ix[i,'Gene']:T_node.ix[i,'GROUP_ID'] for i in T_node.index}
n2=len(S_idx)
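        # connect every pair of selected GO terms whose kappa similarity reaches the clustering threshold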
for _i in range(n2):
i=S_idx[_i]
for _j in range(_i+1, n2):
j=S_idx[_j]
idx=i*(2*m-i-1)//2+(j-i)-1
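                # map the column pair (i,j), i<j, to its position in the condensed upper-triangular vector self.DM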
#print (_i, _j, n2, m, i, j, idx, S_name[_i], c_cluster[S_name[_i]], S_name[_j], c_cluster[S_name[_j]], K[idx])
if self.DM[idx]>=self.similarity:
data.append({'Gene_A':S_go[i], 'Gene_B':S_go[j], 'TYPE':'Direct', 'SCORE':self.DM[idx]})
c_has_neighbor[S_go[i]]=True
c_has_neighbor[S_go[j]]=True
# keep singletons
for i in S_idx:
if S_go[i] not in c_has_neighbor:
data.append({'Gene_A':S_go[i], 'Gene_B':S_go[i], 'TYPE':'Direct', 'SCORE':1.0})
        # build the edge table; fall back to an empty frame so T_edge is always defined for xgmml.Network below
        T_edge=pd.DataFrame(data) if len(data) else pd.DataFrame({'Gene_A':[],'Gene_B':[],'TYPE':[],'SCORE':[]})
T_node.index=list(range(len(T_node)))
net=xgmml.Network(T_edge, T_node=T_node, name=s_name)
return net | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __createNetwork__(self, amount_nodes, amount_links):\n random.seed()\n numOfNodes = 0\n linksPerIteration = (amount_links-3)/(amount_nodes-3) if amount_nodes > 3 else 1\n #generate n nodes\n while numOfNodes < amount_nodes:\n node = Node(numOfNodes)\n self.appendNode(node)\n numOfNodes += 1\n #make first three nodes fully connected\n if numOfNodes == 2:\n self.__connectNode__(numOfNodes, 1)\n if numOfNodes == 3:\n self.__connectNode__(numOfNodes, 2)\n #link following nodes\n if numOfNodes > 3:\n self.__connectNode__(numOfNodes, linksPerIteration)",
"def generate_graph(size, number_of_clusters, minimal_size):\n base_list = list(range(size))\n result_list = []\n random.shuffle(base_list)\n for i in range(number_of_clusters - 1):\n size = random.randint(minimal_size, len(base_list) - (number_of_clusters - i - 1) * minimal_size)\n cluster = []\n for n in range(size):\n actual = random.choice(base_list)\n base_list.remove(actual)\n cluster.append(actual)\n result_list.append(strongly_connect(cluster))\n result_list.append(strongly_connect(base_list))\n\n while len(result_list) < 5:\n result_list.append([])\n\n print(sorted([len(i) for i in result_list], reverse=True)[:5])\n\n return weak_connect_graph(result_list)",
"def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)",
"def create_network(self, community_detection, wt_steps, n_clust, network_from, neighbors, top):\n \n if network_from == 'top_n':\n sort_by_scores = []\n\n for pair, score in scores_update.items():\n sort_by_scores.append([pair[0], pair[1], score[2]])\n top_n = sorted(sort_by_scores, reverse=False, key=lambda x: x[2])[:top]\n\n # Convert from distance to similarity for edge\n for score in top_n: \n c = 1/(1 + score[2])\n score[2] = c\n\n flat = [tuple(pair) for pair in top_n]\n\n elif network_from == 'knn': \n flat = []\n projection_knn = nearest_neighbors(neighbors=neighbors)\n\n for projection, knn in projection_knn.items():\n for n in knn:\n flat.append((projection, n[0], abs(n[3]))) # p1, p2, score\n\n clusters = {}\n g = Graph.TupleList(flat, weights=True)\n\n if community_detection == 'walktrap':\n try:\n wt = Graph.community_walktrap(g, weights='weight', steps=wt_steps)\n cluster_dendrogram = wt.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n elif community_detection == 'betweenness':\n try:\n ebs = Graph.community_edge_betweenness(g, weights='weight', directed=True)\n cluster_dendrogram = ebs.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n\n for community, projection in enumerate(cluster_dendrogram.subgraphs()):\n clusters[community] = projection.vs['name']\n\n #convert node IDs back to ints\n for cluster, nodes in clusters.items():\n clusters[cluster] = sorted([int(node) for node in nodes])\n \n remove_outliers(clusters)\n\n clustered = []\n for cluster, nodes in clusters.items():\n for n in nodes:\n clustered.append(n)\n\n clusters['singles'] = [] # Add singles to clusters if not in top n scores\n clusters['removed'] = []\n \n for node in projection_2D:\n if node not in clustered and node not in drop:\n clusters['singles'].append(node)\n elif node in drop:\n clusters['removed'].append(node)\n \n G = nx.Graph()\n\n for pair in flat:\n G.add_edge(int(pair[0]), int(pair[1]), weight=pair[2])\n\n #if you want to see directionality in the networkx plot\n #G = nx.MultiDiGraph(G)\n\n #adds singles if not in top n scores\n for node_key in projection_2D:\n if node_key not in G.nodes:\n G.add_node(node_key)\n\n return flat, clusters, G",
"def make_parallel(self, n):\n if self._use_naive_parallel_network:\n return alf.networks.NaiveParallelNetwork(self, n)\n else:\n return super().make_parallel(n, True)",
"def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict",
"def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict",
"def create_network(num_nodes=8, num_assets=1, channels_per_node=3, transport_class=None):\n # pylint: disable=too-many-locals\n\n # TODO: check if the loopback interfaces exists\n\n random.seed(1337)\n\n if channels_per_node > num_nodes:\n raise ValueError(\"Can't create more channels than nodes\")\n\n client_hosts = ['127.0.0.10', '127.0.0.11']\n\n # if num_nodes it is not even\n half_of_nodes = int(ceil(num_nodes / 2))\n\n # globals\n discovery = PredictiveDiscovery((\n (host, half_of_nodes, INITIAL_PORT)\n for host in client_hosts\n ))\n\n # The mock needs to be atomic since all app's will use the same instance,\n # for the real application the syncronization is done by the JSON-RPC\n # server\n blockchain_service = BlockChainServiceMock()\n\n # Each app instance is a Node in the network\n apps = []\n for host in client_hosts:\n for idx in range(half_of_nodes):\n port = INITIAL_PORT + idx\n\n app = mk_app(\n blockchain_service,\n discovery,\n transport_class or UDPTransport,\n port=port,\n host=host,\n )\n\n apps.append(app)\n\n for i in range(num_assets):\n asset_address = sha3('asset:%d' % i)[:20]\n blockchain_service.new_channel_manager_contract(asset_address=asset_address)\n\n asset_list = blockchain_service.asset_addresses\n assert len(asset_list) == num_assets\n\n create_network_channels(blockchain_service, asset_list, apps, channels_per_node)\n\n for app in apps:\n for asset_address in asset_list:\n app.raiden.setup_asset(asset_address, app.config['reveal_timeout'])\n\n return apps",
"def create_dedicated_clusters(ws,number_of_clusters, number_of_nodes, idle_time_out):\n clusters = {}\n for i in range (0,number_of_clusters):\n dig = '{0}{1}'.format(''.join(random.sample(string.digits, 2)),''.join(random.sample(string.ascii_letters, 2)))\n cluster_name = 'NC6-D{1}-{0}'.format(dig,number_of_nodes)\n try:\n compute_target = ComputeTarget(workspace=ws, name=cluster_name) \n except ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(vm_size=vmsize,\n max_nodes=number_of_nodes, \n idle_seconds_before_scaledown=idle_time_out)\n compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n compute_target.wait_for_completion(show_output=True)\n clusters[i] = compute_target\n return clusters",
"def __init__(self, node_number, all_nodes):\n self.self_host = 'localhost'\n self.base_port = 5000\n self.self_port = self.base_port + node_number\n self.other_nodes = [(self.self_host, p)\n for p in range(self.base_port + 1, self.base_port + all_nodes + 1)\n if p != self.self_port]\n self.other_nodes_len = all_nodes - 1\n self.current_node = 0",
"def __generate_central_nodes(self,k=3):\n if k < 3:\n k = 3\n \n self.__logger.info(\"CENTRAL_NODES: Try to seek {} nodes which are currently central\".format(k)) \n res = [n for n,_ in sorted(nx.betweenness_centrality(self.G).items(),key=itemgetter(1),reverse=True)[:4*k]]\n self.__logger.info(\"CENTRAL_NODES: Generated top {} central nodes (according to betweeness centrality)\".format(len(res)))\n \n self.__logger.info(\"CENTRAL_NODES: Sample {} items from the candidates as was requested\".format(k))\n tmp = list(res)\n random.shuffle(tmp)\n return tmp[0:k]",
"def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()",
"def to_networkx(self, max_vertices: int = 5000) -> nx.Graph:\n graph_nx = nx.Graph()\n for v in self._vertices.values():\n graph_nx.add_node(v.item, kind=v.kind)\n\n for u in v.neighbours:\n if graph_nx.number_of_nodes() < max_vertices:\n graph_nx.add_node(u.item, kind=u.kind)\n\n if u.item in graph_nx.nodes:\n graph_nx.add_edge(v.item, u.item)\n\n if graph_nx.number_of_nodes() >= max_vertices:\n break\n\n return graph_nx",
"def test_create_cluster_network(self):\n pass",
"def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )",
"def create_sequential_network(num_nodes, deposit, asset, transport_class=None):\n if num_nodes < 2:\n raise ValueError('cannot create a network with less than two nodes')\n\n host = '127.0.0.10'\n\n random.seed(42)\n\n discovery = PredictiveDiscovery((\n (host, num_nodes, INITIAL_PORT),\n ))\n\n blockchain_service = BlockChainServiceMock()\n blockchain_service.new_channel_manager_contract(asset_address=asset)\n\n apps = []\n for idx in range(num_nodes):\n port = INITIAL_PORT + idx\n\n app = mk_app(\n blockchain_service,\n discovery,\n transport_class or UDPTransport,\n port=port,\n host=host,\n )\n apps.append(app)\n\n for first, second in zip(apps[:-1], apps[1:]):\n netcontract_address = blockchain_service.new_netting_contract(\n asset,\n first.raiden.address,\n second.raiden.address,\n )\n\n for address in [first.raiden.address, second.raiden.address]:\n blockchain_service.deposit(\n asset,\n netcontract_address,\n address,\n deposit,\n )\n\n for app in apps:\n app.raiden.setup_asset(asset, app.config['reveal_timeout'])\n\n return apps",
"def generate_graph(number_of_nodes):\n cities = []\n size = int(math.sqrt(number_of_nodes))\n if size*size != number_of_nodes:\n raise ArgumentError(\"At the moment generate_graph() only takes perfect squares (3, 16, 25 etc.). Feel free to improve it.\")\n test = 0\n for position in range(0, number_of_nodes):\n city = City()\n city.x_position = (position) % size\n city.y_position = int(position / size)\n cities.append(city)\n\n for i_city in range(0, len(cities)):\n city = cities[i_city]\n x_pos = city.x_position\n y_pos = city.y_position\n\n if x_pos != 0:\n city.adjacent_cities.append(cities[i_city - 1])\n\n if x_pos != size-1:\n city.adjacent_cities.append(cities[i_city + 1])\n\n if y_pos != 0:\n city.adjacent_cities.append(cities[i_city - size])\n\n if y_pos != size-1:\n city.adjacent_cities.append(cities[i_city + size])\n\n return cities",
"def gen_nodes(modelfile, starting_genes):\n # read json file with final model variables\n shape, top_genes, weights, output_key, biases = read_json(modelfile)\n\n # initialize database\n database = db.Database()\n\n # create list to store all layers\n NN = []\n\n # get input probe sequences\n input_seqs_df = inputs.probes_df(top_genes)\n # each layer is a dictionary with keys as names of strands and values as a list of seqs\n l_0 = {}\n probe_seqs = []\n for probe in input_seqs_df[\"Probe Sequences\"]:\n index = 0\n size = database.size\n while database.size < size + 1:\n try:\n database.database_insert(Seq(probe[index]))\n index += 1\n # except block handles case that NONE of the probe sequences were accepted into the database\n # ***TEMPORARY FIX***\n except IndexError:\n index -= 1\n break\n probe_seqs.append(Seq(probe[index]))\n l_0[\"Probe Sequence\"] = probe_seqs\n print(\"Layer 0: \", l_0)\n NN.append(l_0)\n\n # add the tether and promotor to the database\n database.database_insert(starting_genes[\"Tether\"])\n database.database_insert(starting_genes[\"T7 Promoter\"])\n\n # generate all the sequences for every node in each layer\n for layer in range(1, len(shape)):\n # add the cage and tether sequences to the layer dictionary\n l_i = {}\n l_i[\"Cage Sense\"] = [starting_genes[\"Cage Sense\"]] * shape[layer]\n l_i[\"Cage Antisense\"] = [starting_genes[\"Cage Antisense\"]] * shape[layer]\n l_i[\"Tether\"] = [starting_genes[\"Tether\"]] * shape[layer]\n\n print(\"getting anchor strands\")\n tether_length = len(starting_genes[\"Tether\"])\n size = database.size\n # generate anchor strands until all of them have been accepted into the database\n while database.size < size + shape[layer]:\n anchor = oligo.oligo(tether_length)\n database.database_insert(anchor)\n anchor_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n print(\"getting transcription factors\")\n threshold_energy = 9 # variable that can be changed, pos integer, see gen_tf for description\n static_tf_seqs = []\n tf_seqs = []\n for anchor in anchor_seqs:\n static_tf, tf = gen_tf(anchor, starting_genes[\"Tether\"], threshold_energy)\n static_tf_seqs.append(static_tf)\n tf_seqs.append(tf)\n print(\"DONE\")\n\n print(\"getting outputs\")\n output_length = 25 # length of dna transcript from one node\n size = database.size\n while database.size < size + shape[layer]:\n output = oligo.oligo(output_length).sequence\n database.database_insert(output)\n transcript_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n # assemble longer strands in the node\n l_i[\"Static TF + Transcript Sense\"] = [static_tf_seqs[i] + starting_genes[\"T7 Promoter\"] + transcript_seqs[i]\n for i in range(shape[layer])]\n l_i[\"Transcript Antisense + Anchor\"] = [\n oligo.complement(transcript_seqs[i]) + oligo.complement(starting_genes[\"T7 Promoter\"]) + anchor_seqs[i] for\n i in range(shape[layer])]\n\n # intermediates are the strands that determine weights in toehold-mediated displacement\n print(\"getting intermediate\")\n toe_length = 20 # standard length for all toehold sequences\n # get the 2D matrix for this layer and round the values to one decimal place\n weight_matrix = np.array(weights[layer - 1])\n weight_matrix = np.round(weight_matrix, 1)\n intermediate_seqs = []\n tf_appendage_seqs = []\n for i in range(shape[layer - 1]):\n if layer == 1:\n output = NN[0][\"Probe Sequence\"][i]\n else:\n output = NN[layer - 1][\"Static TF + Transcript Sense\"][i][-output_length:]\n inters = []\n 
top_toe = output[:toe_length]\n b_dom = output[toe_length:]\n tf_appendage_seqs.append(b_dom)\n # get all the possible sequences for toehold weights between 0 and 1\n weight_dict = quant.find_quanta(top_toe)\n for j in range(shape[layer]):\n w = weight_matrix[j, i]\n tf = tf_seqs[j]\n a_star_tf = tf[:len(tf) // 2]\n if w < 0:\n # negative weights\n inters.append(a_star_tf + oligo.complement(b_dom) + weight_dict[w * -1])\n else:\n # positive weights\n inters.append(oligo.complement(a_star_tf) + oligo.complement(b_dom) + weight_dict[w])\n\n intermediate_seqs.append(inters)\n # each list in the nested list is for one node in the layer, get nodes row-wise\n l_i[\"Intermediate\"] = np.array(intermediate_seqs).T.tolist()\n print(\"DONE\")\n\n # TF and TF Inhibitor are products of toehold-mediated displacement for pos and neg weights, respectively\n full_tf_seqs_2D = []\n attack_seqs_2D = []\n for tf in tf_seqs:\n full_tf_seqs = []\n attack_seqs = []\n for appendage in tf_appendage_seqs:\n full_tf_seq = appendage + tf\n attack_seq = appendage + oligo.complement(tf[:len(tf) // 2])\n full_tf_seqs.append(full_tf_seq)\n attack_seqs.append(attack_seq)\n full_tf_seqs_2D.append(full_tf_seqs)\n attack_seqs_2D.append(attack_seqs)\n l_i[\"TF\"] = full_tf_seqs_2D\n l_i[\"TF Inhibitor\"] = attack_seqs_2D\n\n print(\"Layer {}: \".format(layer), l_i)\n # add the completed layer to the NN list\n NN.append(l_i)\n\n return NN",
"def network_topology(voxels, clusters, primaries, edges, mode='sphere'):\n # Define the arrays of node positions (barycenter of voxels in the cluster)\n pos = np.array([voxels[c].cpu().numpy().mean(0) for c in clusters])\n\n # Define the node features (label, color)\n n = len(clusters)\n node_labels = ['%d (%0.1f, %0.1f, %0.1f)' % (i, pos[i,0], pos[i,1], pos[i,2]) for i in range(n)]\n \n node_colors = ['#ff7f0e' if i in primaries else '#1f77b4' for i in range(n)]\n\n # Define the nodes and their connections\n graph_data = []\n edge_vertices = []\n if mode == 'sphere':\n # Define the node size\n logn = np.array([np.log(len(c)) for c in clusters])\n node_sizes = np.interp(logn, (logn.min(), logn.max()), (5, 50))\n \n # Define the nodes as sphere of radius proportional to the log of the cluster voxel content\n graph_data.append(go.Scatter3d(x = pos[:,0], y = pos[:,1], z = pos[:,2],\n name = 'clusters',\n mode = 'markers',\n marker = dict(\n symbol = 'circle',\n size = node_sizes,\n color = node_colors,\n colorscale = 'Viridis',\n line = dict(color='rgb(50,50,50)', width=0.5)\n ),\n text = node_labels,\n hoverinfo = 'text'\n ))\n\n # Define the edges center to center\n edge_vertices = np.concatenate([[pos[i], pos[j], [None, None, None]] for i, j in zip(edges[0], edges[1])])\n\n elif mode == 'hull':\n # For each cluster, add the convex hull of all its voxels\n graph_data += [go.Mesh3d(alphahull =10.0,\n name = '',\n x = voxels[c][:,0],\n y = voxels[c][:,1],\n z = voxels[c][:,2],\n color = node_colors[i],\n opacity = 0.3,\n text = node_labels[i],\n hoverinfo = 'text'\n ) for i, c in enumerate(clusters)]\n\n # Define the edges closest pixel to closest pixel\n import scipy as sp\n edge_vertices = []\n for i, j in zip(edges[0], edges[1]):\n vi, vj = voxels[clusters[i]], voxels[clusters[j]]\n d12 = sp.spatial.distance.cdist(vi, vj, 'euclidean')\n i1, i2 = np.unravel_index(np.argmin(d12), d12.shape)\n edge_vertices.append([vi[i1].cpu().numpy(), vj[i2].cpu().numpy(), [None, None, None]])\n \n edge_vertices = np.concatenate(edge_vertices)\n \n else:\n raise ValueError\n \n # Initialize a graph that contains the edges\n graph_data.append(go.Scatter3d(x = edge_vertices[:,0], y = edge_vertices[:,1], z = edge_vertices[:,2],\n mode = 'lines',\n name = 'edges',\n line = dict(\n color = 'rgba(50, 50, 50, 0.5)',\n width = 1\n ),\n hoverinfo = 'none'\n ))\n\n # Return\n return graph_data",
"def generate_gn_benchmark(self, zout) :\n\n pout = float(zout)/96.\n pin = (16.-pout*96.)/31.\n graph = nx.Graph()\n graph.add_nodes_from(range(128))\n for x in graph.nodes() :\n for y in graph.nodes() :\n if x < y :\n val = random.random()\n if x % 4 == y % 4 :\n #nodes belong to the same community\n if val < pin :\n graph.add_edge(x, y)\n\n else :\n if val < pout :\n graph.add_edge(x, y)\n return graph",
"def network_schematic(clusters, primaries, edges):\n # Define the node positions (primaries on the left, secondaries on the right)\n n = len(clusters)\n pos = np.array([[1.-float(i in primaries), i] for i in range(n)])\n\n # Define the node features (label, size, color)\n node_labels = [str(i) for i in range(n)]\n \n logn = np.array([np.log(len(c)) for c in clusters])\n node_sizes = np.interp(logn, (logn.min(), logn.max()), (5, 50))\n \n node_colors = ['#ff7f0e' if i in primaries else '#1f77b4' for i in range(n)]\n\n # Define the nodes as sphere of radius proportional to the log of the cluster voxel content\n graph_data = []\n graph_data.append(go.Scatter(\n x = pos[:,0],\n y = pos[:,1],\n mode = 'markers',\n name = 'clusters',\n marker = dict(\n color = node_colors,\n size = node_sizes,\n ),\n text = node_labels,\n hoverinfo = 'text'\n ))\n\n # Initialize the edges\n edge_vertices = np.concatenate([[pos[i], pos[j], [None, None]] for i, j in zip(edges[0], edges[1])])\n graph_data.append(go.Scatter(x = edge_vertices[:,0], y = edge_vertices[:,1],\n mode = 'lines',\n name = 'edges',\n line = dict(\n color = 'rgba(50, 50, 50, 0.5)',\n width = 1\n ),\n hoverinfo = 'none'\n ))\n\n return graph_data",
"def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph",
"def create_low_clusters(ws,number_of_clusters, number_of_nodes, idle_time_out):\n clusters = {}\n for i in range (0,number_of_clusters):\n dig = '{0}{1}'.format(''.join(random.sample(string.digits, 2)),''.join(random.sample(string.ascii_letters, 2)))\n cluster_name = 'NC6-L{1}-{0}'.format(dig,number_of_nodes)\n try:\n compute_target = ComputeTarget(workspace=ws, name=cluster_name) \n except ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(vm_size=vmsize,\n vm_priority = 'lowpriority',max_nodes=number_of_nodes, \n idle_seconds_before_scaledown=idle_time_out)\n compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n compute_target.wait_for_completion(show_output=True)\n clusters[i] = compute_target\n return clusters",
"def constructConnMatrices(nE=4000, nI=1000, n_clusters=50,\n pEE=.2, pEI=.5, pIE=.5, pII=.5, rEE=1.0,\n jEE_out=0.024, jEE_in=1.9, jEI=0.014, jIE=-0.045, jII=-0.057): # Need to add synaptic strengths\n # Structural connectivity (probabilities taken from Litwin-Kumar & Doiron, 2012)\n if rEE==1:\n sEE = np.random.binomial(1,pEE,size=(nE,nE))\n wEE = sEE*jEE_out\n else:\n neurons_per_clust = int(nE/float(n_clusters))\n pEE_out = (pEE*(nE-1))/(rEE*neurons_per_clust + (nE-1) - neurons_per_clust)\n pEE_in = rEE*pEE_out\n\n # First define all pEE_out connections\n sEE = np.random.binomial(1,pEE_out,size=(nE,nE))\n wEE = sEE * jEE_out\n # Now we will re-do clustered connections\n neuron_count = 0\n for clust in range(n_clusters):\n i = int(neuron_count)\n j = int(neuron_count + neurons_per_clust)\n sEE[i:j,i:j] = 0\n sEE[i:j,i:j] = np.random.binomial(1,pEE_in,size=(neurons_per_clust,neurons_per_clust))\n wEE[i:j,i:j] = sEE[i:j,i:j] * jEE_in\n neuron_count += neurons_per_clust\n\n # No self connections\n np.fill_diagonal(wEE,0)\n\n # Structural matrices\n sEI = np.random.binomial(1,pEI,size=(nE,nI))\n sIE = np.random.binomial(1,pIE,size=(nI,nE))\n sII = np.random.binomial(1,pII,size=(nI,nI))\n\n # Synaptic weights\n wEI = sEI * jEI\n wIE = sIE * jIE\n wII = sII * jII\n\n # No self connections\n np.fill_diagonal(wII,0)\n\n return wEE, wEI, wIE, wII",
"def sampled_clique(clusters,strategy):\n G = nx.Graph()\n sample = []\n #Sample 'size' nodes from a single cluster\n if strategy == \"rand\":\n size = len(clusters)\n while len(sample) < size:\n cluster = random.choice(clusters)\n if len(cluster) >= size:\n sample = random.sample(cluster,size)\n #Sample 1 choice from each cluster\n elif strategy == \"optim\":\n for _,cluster in clusters.items():\n if len(cluster) > 0:\n sample.append(random.choice(cluster))\n for n1 in sample:\n for n2 in sample:\n if n1 != n2:\n G.add_edge(n1,n2)\n return G",
"def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0",
"def mmn(modul_number, modul_size, kmin, kmax, g, c, offset):\n check_unique = 0 # Checks if inter mod. con. are unique\n check_con = 0 # Checks if network is connected\n while check_unique != modul_number*c/2 or check_con != 1:\n inter_nodes = np.zeros((modul_number, c))\n network = gt.Graph(directed=False)\n # Constructs disconnected modules and combines them in a network\n # in the graph tool format.\n for i in range(modul_number):\n module_network, inter_nodes[i] = configuration_model(\n g, kmin, kmax,\n modul_size, c, offset)\n # Assigns the nodes to the corresponding module.\n inter_nodes[i] += i*modul_size\n network = gt.generation.graph_union(network, module_network)\n\n inter_nodes = np.transpose(inter_nodes)\n for row in inter_nodes:\n np.random.shuffle(row)\n\n inter_links = inter_nodes.ravel().reshape((int(modul_number*c/2), 2))\n check_unique = len(np.unique(inter_links, axis=0))\n network.add_edge_list(inter_links)\n\n _, check_con = gt.topology.label_components(network)\n check_con = len(check_con)\n \n return network",
"def singleton_node_network(mols, std_edges):\n extra_mol = SmallMoleculeComponent(mol_from_smiles(\"CCC\"))\n all_mols = list(mols) + [extra_mol]\n return _NetworkTestContainer(\n network=LigandNetwork(edges=std_edges, nodes=all_mols),\n nodes=all_mols,\n edges=std_edges,\n n_nodes=4,\n n_edges=3,\n )",
"def neato_graph_from_corpus( corpus, max_nodes ) :\n\n O, row_dois, column_dois = cites_matrix( corpus )\n neato_cooccurrence_graph( O, column_dois )\n return None\n\n \n v = total_occurrences( O ) \n nv = v.astype( float32 ) / v.max()\n C = cooccurrence_matrix ( O )\n nC = normalized_cooccurrence_matrix( O )\n\n # now find our cutoff!\n # find the max number of cocites and start there\n cocite_cutoff = C.max()\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n # then reduce the number until we exceed max_nodes\n while num_nodes < max_nodes :\n cocite_cutoff = cocite_cutoff - 1\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n\n if num_nodes > max_nodes :\n cocite_cutoff = cocite_cutoff + 1\n \n C = C.copy()\n C[ C < cocite_cutoff ]= 0\n\n graph = pydot.Dot( graph_type = 'graph' )\n graph.set_overlap(\"false\")\n coords = zip(*(C >= cocite_cutoff).nonzero())\n\n # make a dict of all nodes which are mentioned in the coords\n nodes = {}\n index = 1\n for coord in set(chain.from_iterable(coords)) :\n if not nodes.has_key( coord ) :\n node = pydot.Node( str(coord) )\n if v != None :\n doi = column_dois[coord]\n node.set_label( str(index) )\n node.set_penwidth( nv[ coord ] )\n node.set_fixedsize(\"true\")\n node.set_width( 1.0 *nv[ coord ] )\n #node.set_shape(\"circle\")\n nodes[ coord ] = node\n graph.add_node( node )\n index = index + 1\n\n for coord in coords :\n \n edge = pydot.Edge( nodes[coord[0]], nodes[coord[1]] )\n edge.set_weight( nC[coord] )\n edge.set_penwidth( nC[coord]*5 )\n #edge.set_label( str(int(m[coord]) ))\n graph.add_edge(edge)\n\n \n legend = pydot.Node( \"legend\" )\n nodelist = nodes.items()\n nodelist.sort( lambda a,b : cmp(node_index(a[1].get_label()),node_index(b[1].get_label())) )\n legend.set_label( \"\\l\".join([x[1].get_label()+\":\"+column_dois[x[0]] for x in nodelist])+\"\\l\" )\n legend.set_shape(\"box\")\n graph.add_node(legend)\n\n print graph.to_string()\n #graph.write_dot('test.dot', prog='neato' )\n #graph.write_png('test.png', prog='neato' )\n #graph.write_pdf('test.pdf', prog='neato' )",
"def _create_graph(netlist):\n G = nx.Graph()\n for t in netlist:\n G.add_edges_from([(t.name, t.drain), (t.name, t.gate), (t.name, t.source)])\n return G"
] | [
"0.70511895",
"0.63774914",
"0.625035",
"0.61599034",
"0.59952337",
"0.5991076",
"0.5991076",
"0.59114605",
"0.5858617",
"0.5844498",
"0.5819003",
"0.58181596",
"0.5803368",
"0.58022726",
"0.57998455",
"0.579773",
"0.5786971",
"0.5779051",
"0.5770885",
"0.57656467",
"0.57399756",
"0.57258433",
"0.57190293",
"0.5717943",
"0.56979173",
"0.5666072",
"0.5652223",
"0.56518835",
"0.5651002",
"0.56459403"
] | 0.737439 | 0 |
Main method for plotting a confusion matrix of an input file. | def main():
parser = ArgumentParser(description='Run BoW experiments.')
parser.add_argument('input', help='csv file')
parser.add_argument('output', help='output path.', default=None)
args = vars(parser.parse_args())
data = np.genfromtxt(args['input'], delimiter=';', dtype=str)
predictions = data[1:, 0]
actual = data[1:, 1]
print(predictions, actual)
labels = sorted(set(actual))
cm = confusion_matrix(actual, predictions, labels=labels)
print(accuracy_score(actual, predictions))
fig = plot_confusion_matrix(
cm,
classes=labels,
normalize=True,
title='Confusion Matrix',
cmap='Blues')
save_fig(fig, args['output']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_confusion_matrix(conf_matrix_list, labels, cm_file_path):\n\n logging.info(\"run plot_confusion_matrix\")\n\n # construct plot figure with 36 subplots in a square grid\n fig, ax = plt.subplots(6, 6, figsize=(12, 7))\n\n # for each categories' name and confusion matrices\n for axes, cm, label in zip(ax.flatten(), conf_matrix_list, labels):\n\n #plot heatmap of single confusion matrix in list\n sns.heatmap(cm, annot=True, fmt='.2f', cbar=False, ax=axes)\n\n # label axis\n axes.set_ylabel('True label')\n axes.set_xlabel('Predicted label')\n\n # set title\n axes.set_title(label)\n\n # save plots in file\n fig.tight_layout()\n fig.savefig(cm_file_path)",
"def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()",
"def plot_cf_matrix(clf, X, y, label_names, dataset_name, clf_name, fname):\r\n _, ax = plt.subplots(figsize=(20,20))\r\n plot_confusion_matrix(clf, X, y, ax=ax, normalize='true', cmap='Blues', display_labels=label_names, values_format='.1%')\r\n plt.title('Confusion Matrix - %s - %s'%(dataset_name, clf_name))\r\n plt.savefig(fname, bbox_inches='tight')\r\n plt.close()",
"def plot_confusion_matrix(name, trained_predictor, X_test, y_test):\n\n fig, ax = plt.subplots()\n fig.tight_layout()\n cm = confusion_matrix(y_test, trained_predictor.predict(X_test), normalize=\"all\")\n ConfusionMatrixDisplay(cm, display_labels=[\"False\", \"True\"]).plot(\n ax=ax\n )\n plt.title(name)",
"def plot(self):\n plt.imshow(self.cm, interpolation='nearest', cmap=self.cmap)\n plt.title(self.title)\n plt.colorbar()\n tick_marks = np.arange(len(self.classes))\n plt.xticks(tick_marks, self.classes, rotation=45)\n plt.yticks(tick_marks, self.classes)\n \n if self.normalize:\n self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(self.cm)\n \n thresh = self.cm.max() / 2.\n for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):\n plt.text(j, i, self.cm[i, j], horizontalalignment=\"center\", color=\"white\" if self.cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted label')",
"def plot_cnf_matrix(self):\n\t\tplt.figure()\n\t\tanalysis.plot_cnf_matrix(self.y_pred, self.y_test)",
"def plot_confusion_matrix(cm, class_names):\n figure = plt.figure(figsize=(10, 10))\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n \n\n # Normalize the confusion matrix.\n #cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n #threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n #print(cm[i, j])\n color = \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure",
"def plot_single_confusion_matrix(CM, labels, title, fname, cmap=plt.cm.Blues):\n print(\"Confusion Matrix\")\n print(labels, \"\\n\", CM)\n\n plt.figure()\n plt.imshow(CM, interpolation=\"nearest\", cmap=cmap)\n plt.title(title, fontsize=10)\n plt.colorbar()\n\n ticks = np.arange(len(labels))\n plt.xticks(ticks, labels, rotation=45)\n plt.yticks(ticks, labels)\n\n for i, j in product(range(CM.shape[0]), range(CM.shape[1])):\n color = \"white\" if CM[i, j] > CM.max() * 0.5 else \"black\"\n plt.text(j, i, format(CM[i, j], \"d\"), horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel(\"True Labels\")\n plt.xlabel(\"Predicted Labels\")\n\n outpath = os.path.join(BASEDIR, \"img\", f\"confusion-matrix-{fname}.png\")\n plt.savefig(outpath)\n\n return None",
"def plot_confusion_matrix(cm, class_names, sentiment):\n\t##plotting unnormalized confusion matrix\n\tplt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n\tplt.title('confusion matrix of ' + sentiment + ' classification')\n\n\t#uncomment for actual labels \n\t# tick_marks = np.arange(len(class_names))\n\t# plt.xticks(tick_marks, class_names, rotation=90, fontsize=5)\n\t# plt.yticks(tick_marks, class_names, fontsize=5)\n\n\tplt.ylabel('True label')\n\tplt.xlabel('Predicted label')\n\n\tplt.show()",
"def plot_confusion_matrix(self, y_true, y_pred, title=None):\r\n\r\n if not title:\r\n title = 'confusion matrix'\r\n\r\n # Compute confusion matrix\r\n\r\n y_pred = np.array(y_pred)\r\n y_true = np.array(y_true)\r\n cm = confusion_matrix(y_true, y_pred)\r\n # Only use the labels that appear in the data\r\n classes = self.classes\r\n print('Confusion matrix')\r\n\r\n print(cm)\r\n fig2, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest')\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig2.tight_layout()\r\n file_loc = [str(self.training_path) +\r\n '\\\\checkpoints\\\\confusion_matrix.jpg'] # NEED TO FIX\r\n s = \"\"\r\n s = s.join(file_loc)\r\n conf_path = Path(s)\r\n plt.savefig(conf_path)\r\n plt.show()\r\n\r\n return ax",
"def plot_cnf_matrix(y_pred, y_test):\n\tprint(\"\\t\\tGenerating confusion matrix\")\n\n\tmatrix = confusion_matrix(y_test, y_pred)\n\tclasses = [\"0\", \"1\"]\n\n\tplt.imshow(matrix, interpolation=\"nearest\", cmap=plt.cm.Blues)\n\tplt.title(\"Confusion Matrix\")\n\tplt.colorbar()\n\n\ttick_marks = np.arange(len(classes))\n\n\tplt.xticks(tick_marks, classes, rotation=45)\n\tplt.yticks(tick_marks, classes)\n\n\tthresh = matrix.max() / 2.0\n\tfor i, j in itertools.product(range(matrix.shape[0]), range(matrix.shape[1])):\n\t\tplt.text(j, i, format(matrix[i, j], \"d\"), horizontalalignment=\"center\", color=\"white\" if matrix[i, j] > thresh else \"black\")\n\n\tplt.tight_layout()\n\tplt.ylabel(\"True Label\")\n\tplt.xlabel(\"Predicted Label\")\n\n\tfilename = \"\"\n\n\t#Save the image in the current directory\n\tif COUNTER == 0:\n\t\tfilename = \"/img/log_reg_confusion_matrix.png\"\n\telif COUNTER == 1:\n\t\tfilename = \"/img/rand_forest_confusion_matrix.png\"\n\telse:\n\t\tfilename = \"/img/gbm_confusion_matrix.png\"\n\n\tplt.savefig(PARENT_DIR + filename, bbox_inches='tight')\n\tincrement_counter()",
"def plot_confusion_matrix(y_targeted, y_predicted, instrument_classes, directory_name):\n cm = confusion_matrix(y_targeted, y_predicted)\n np.set_printoptions(precision=2)\n cm = cm / cm.astype(np.float).sum(axis=1)\n print(\"Normalized confusion matrix:\\n\", cm)\n\n fig = plt.figure(figsize=(10, 7))\n cmap = plt.cm.Blues\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.colorbar()\n tick_marks = np.arange(len(instrument_classes))\n plt.xticks(tick_marks, instrument_classes, rotation=45)\n plt.yticks(tick_marks, instrument_classes)\n\n fmt = '.2f'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.title(\"Confusion matrix\")\n plt.xlabel('Predicted label')\n plt.ylabel('True label')\n plt.tight_layout()\n fig.savefig('./' + directory_name + '/confusion_matrix')\n pass",
"def plotConfusionMatrix(y, pred, title, labels, outfile, cmap=plt.cm.Blues):\n \n cm = confusion_matrix(y, pred);\n ncm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n accuracy = accuracy_score(y, pred)\n \n fig = plt.figure(figsize=(10, 10))\n plt.imshow(ncm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)\n plt.title(title+\" Acc: \"+str(accuracy)+\")\")\n plt.colorbar()\n for i in range(0,len(labels)):\n for j in range(0,len(labels)):\n plt.text(j,i,cm[i,j],va='center',ha='center')\n tick_marks = np.arange(len(labels))\n plt.xticks(tick_marks, labels, rotation=45)\n plt.yticks(tick_marks, labels)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()",
"def _single_confusion_matrix_plot(self, confusion_array, palette, model_name):\n # source and cmap\n source = self._create_column_data_source(confusion_array)\n cmap = LinearColorMapper(palette=palette[::-1], low=0, high=max(confusion_array.ravel()))\n\n # figure\n p = default_figure(\n {\n \"title\": model_name,\n \"height\": 300,\n \"width\": 300,\n \"x_range\": self.labels,\n \"y_range\": self.labels[::-1]\n }\n )\n\n # Rectangles (HeatMap)\n p.rect(\n x=self._x,\n y=self._y,\n source=source,\n fill_color={\"field\": self._values, \"transform\": cmap},\n width=1,\n height=1,\n line_color=None,\n )\n\n # Numbers on Rectangles\n labels = LabelSet(\n x=self._x,\n y=self._y,\n text=self._values,\n source=source,\n render_mode=\"canvas\",\n x_offset=-7, # trying to center the numbers manually\n y_offset=-7,\n text_color=\"black\",\n text_font_size=\"11px\",\n )\n p.add_layout(labels)\n\n # plot specific styling\n p.yaxis.axis_label = \"Actual\"\n p.xaxis.axis_label = \"Predicted\"\n p.xaxis.major_label_orientation = -1.57 # in radians\n\n return p",
"def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues, file_name='cm_plot'):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.size\"] = FONT_SIZE\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n fmt = '.6f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label', fontsize=FONT_SIZE)\n plt.xlabel('Predicted label', fontsize=FONT_SIZE)\n plt.subplots_adjust(bottom=0.13)\n with PdfPages(file_name) as pdf:\n pdf.savefig()\n plt.close()",
"def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None",
"def plot_confusion_matrix(cm, class_names=None, title=\"Confusion Matrix\", cmap=plt.cm.Purples, fig_size=(8, 6)):\n\n class_names = [\"0\", \"1\"] if class_names is None else class_names\n df = pd.DataFrame(cm, index=class_names, columns=class_names)\n\n plt.figure(figsize=fig_size)\n with sns.axes_style(\"darkgrid\"):\n # sns.set_context(\"notebook\") # , font_scale = 1.1)\n sns.set_style({\"font.sans-serif\": [\"Segoe UI\", \"Calibri\", \"SF Pro Display\", \"Arial\", \"DejaVu Sans\", \"Sans\"]})\n hmap = sns.heatmap(df, annot=True, fmt=\"d\", cmap=cmap)\n hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha=\"right\")\n hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha=\"right\")\n\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n plt.title(title)\n plt.show()\n plt.close()",
"def plot_confusion_matrix(cm, class_names):\n figure = plt.figure(figsize=(8, 8))\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n # Normalize the confusion matrix.\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n color = \"white\" if cm[i, j] > threshold else \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure",
"def plot_confusion_matrix(confusion_mat, classes, figure_axis, title=None, cmap=plt.cm.Blues):\n\n # Compute confusion matrix\n cm = confusion_mat\n\n ax = figure_axis\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.grid(False)\n ax.figure.colorbar(im, ax=ax)\n\n if classes is not None:\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n ylabel='True Label',\n xlabel='Predicted Label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n else:\n plt.setp( ax.get_xticklabels(), visible=False)\n plt.setp( ax.get_yticklabels(), visible=False)\n plt.setp( ax.get_xticklines(), visible=False)\n plt.setp( ax.get_yticklines(), visible=False)\n\n ax.set(title=title)\n\n # Loop over data dimensions and create text annotations.\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, cm[i, j],\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")",
"def plot_confusion_matrix(data, save_figure_path=None):\n # plot confusion matrix in heatmap format\n fig, ax = plt.subplots(figsize=(9, 9))\n\n # set color scheme and style\n cmap = colors.LinearSegmentedColormap.from_list(\n \"nameofcolormap\", [\"w\", \"b\"], gamma=2.0\n )\n style.use(\"seaborn-paper\") # sets the size of the charts\n sns.set_style({\"xtick.bottom\": True}, {\"ytick.left\": True})\n\n # plot heatmap using seaborn\n ax = sns.heatmap(\n data,\n annot=True,\n linewidths=0.1,\n square=True,\n cmap=cmap,\n cbar_kws={\"shrink\": 0.7, \"ticks\": [0.0, 2.5, 5.0, 7.5, 10.0, 12.5]},\n linecolor=\"black\",\n ax=ax,\n fmt=\".2f\",\n annot_kws={\"size\": 14},\n cbar=True,\n )\n\n # add the column names as labels, set fontsize and set title\n fontsize = 14\n ax.set_yticklabels(data.columns, rotation=0, fontsize=fontsize)\n ax.set_xticklabels(data.columns, fontsize=fontsize)\n ax.axhline(y=0, color=\"k\", linewidth=2)\n ax.axhline(y=data.shape[1], color=\"k\", linewidth=2)\n ax.axvline(x=0, color=\"k\", linewidth=2)\n ax.axvline(x=data.shape[0], color=\"k\", linewidth=2)\n ax.set_title(\"Rank 1 (%) Error\", fontsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)",
"def plot_confusion_matrix(cm, classes, acc,\n cmap=plt.cm.Reds):\n\n plt.figure(figsize=(10, 8))\n title = 'Confusion Matrix (Accuracy: %0.3f%%)' % (acc * 100)\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, fontsize=16)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = 'd'\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if i == 0:\n plt.text(j, i + 0.2, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"black\")\n elif i == cm.shape[1] - 1:\n plt.text(j, i - 0.2, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\")\n else:\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"black\")\n\n plt.ylabel('True label', fontsize=18)\n plt.xlabel('Predicted label', fontsize=18)\n plt.tight_layout()\n plt.show()",
"def plot_confusion_matrix(self, confusion_matrix, class_names, normalize=False, cmap=\"gray\", path_to_save=\"./\", name=\"temp\"):\r\n # https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py\r\n if normalize:\r\n confusion_matrix = confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis]\r\n # print(\"Normalized confusion matrix\")\r\n else:\r\n pass\r\n # print('Confusion matrix, without normalization')\r\n # print(cm)\r\n plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)\r\n # plt.colorbar()\r\n tick_marks = np.arange(len(class_names))\r\n # plt.xticks(tick_marks, class_names, rotation=45)\r\n plt.xticks(tick_marks, class_names, rotation=0)\r\n plt.yticks(tick_marks, class_names)\r\n # tick_marks = np.arange(len(class_names) - 1)\r\n # plt.yticks(tick_marks, class_names[1:])\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = confusion_matrix.max() / 2.\r\n for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):\r\n plt.text(j, i, format(confusion_matrix[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if confusion_matrix[i, j] > thresh else \"black\")\r\n # plt.ylabel('True label')\r\n # plt.xlabel('Predicted label')\r\n plt.ylabel('true distortion type')\r\n plt.xlabel('predicted distortion type')\r\n plt.ylim([self.n_classes - 0.5, -0.5])\r\n plt.tight_layout()\r\n # plt.show()\r\n plt.savefig(path_to_save + name + \".png\")\r\n plt.clf()\r\n plt.close()",
"def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',saveas='cm', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n\n plt.figure() \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n foo_fig = plt.gcf() # 'get current figure'\n# foo_fig.savefig('confusion_matrix.eps', format='eps', dpi=1000) \n foo_fig.savefig(saveas, dpi=1000, bbox_inches='tight')\n plt.show()",
"def draw_confusion_matrix(confusion_matrix, class_names=['true', 'false'], figsize = (10,7), fontsize=21):\n df_cm = pd.DataFrame(\n confusion_matrix, index=class_names, columns=class_names, \n )\n fig = plt.figure(figsize=figsize)\n try:\n heatmap = sns.heatmap(df_cm, annot=True, fmt=\"d\")\n except ValueError:\n raise ValueError(\"Confusion matrix values must be integers.\")\n heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)\n heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n # return fig",
"def plot_confusion_matrix(cm, my_tags, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(my_tags))\n target_names = my_tags\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')",
"def plot_confusion_matrix(y_true, y_pred, cmx = None, to_include='', name_extension='', \n cmap=plt.cm.Blues, importances=None, parsed=None):\n if str(parsed) != 'None':\n cm = parsed\n\n else:\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)# if not cmx.any else cmx\n cm = np.array(cm)\n #normalize\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n if len(cm) == 5:\n classes = np.array(['SNIa', 'SNIbc','SNII', 'SNIIn', 'SLSNe'])\n order = [4,2,3,0,1]\n elif len(cm) == 4:\n classes = np.array(['SNIa', 'SNIbc','SNII', 'SNIIn'])\n order = [2,3,0,1]\n elif len(cm) == 3:\n classes = np.array(['SNIa', 'Ibc/SLS','II/IIn'])\n order = [1,2,0]\n elif len(cm) ==2:\n order = [1,0]\n classes = np.array(['SNIa','CC'])\n else:\n raise\n\n classes = classes[order]\n print('Confusion matrix:')\n print(cm)\n\n bal_score = str(balanced_score(cm))[:4]\n diag = str(diagonalishness(cm))\n info_str = \"bal_score:\" + bal_score \\\n + \" diag:\" + diag + '\\n' \\\n + str(to_include)\n \n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)\n\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n title=info_str,\n xticklabels=classes, yticklabels=classes,\n ylabel='True label',\n xlabel='Predicted label')\n ax.set_ylim(len(cm) - 0.5, -0.5)\n\n #ax.tick_params(axis='both', which='major', labelsize=10)\n \n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n font = {\n 'weight' : 'normal',\n 'size' : 20}\n plt.rc('font', **font)\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if True else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n #fig.tight_layout()\n \n plt.savefig(name_extension +'.png', bbox_inches = \"tight\")\n plt.grid(False)\n plt.show()\n plt.close()",
"def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues, ylabel='True label', xlabel='Predicted label', filename=None):\n np.set_printoptions(precision=2, suppress=True)\n\n if normalize:\n cm = cm.astype('float') / (cm.sum(axis=1)[:, np.newaxis] + np.finfo(np.float).eps)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)\n plt.title(title, fontsize=20)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90, fontsize=20)\n plt.yticks(tick_marks, classes, fontsize=20)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"lightgrey\" if cm[i, j] > thresh else \"black\",\n fontsize=12)\n\n plt.tight_layout()\n plt.ylabel(ylabel, fontsize=20)\n plt.xlabel(xlabel, fontsize=20)\n plt.ylim(top=-0.5)\n plt.ylim(bottom=len(classes)-0.5)",
"def plot_confusion_matrix(self, cm, class_names):\n figure = plt.figure(figsize=(8, 8))\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n # Normalize the confusion matrix.\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n color = \"white\" if cm[i, j] > threshold else \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure",
"def plot_confusion_matrix(cm, class_names):\n # normalize confusion matrix\n cm_normalized = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n figure = plt.figure(figsize=(8, 8))\n plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Normalized Confusion Matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n # Use white text if squares are dark; otherwise black.\n threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n color = \"white\" if cm[i, j] > threshold else \"black\"\n plt.text(j, i, cm_normalized[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure",
"def plot_confusion_matrix(cm,classes,title='Confusion matrix',cmap=plt.cm.Blues):\n x_classes=classes+ ['Recall']\n y_classes=classes+ ['Precision']\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title,fontname='Times New Roman',fontsize = 16, y=1.03)\n cbar=plt.colorbar(fraction=0.046)\n cbar.ax.tick_params(labelsize=14) \n tick_marks = np.arange(len(x_classes))\n plt.xticks(tick_marks, x_classes, fontsize = 13,fontname='Times New Roman')\n plt.yticks(tick_marks, y_classes, fontsize = 13,fontname='Times New Roman')\n plt.axhline(y=3.51,color='black')\n plt.axvline(x=3.51,color='black')\n\n fmt = '.2f' \n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=13,fontname='Times New Roman')\n plt.ylabel('True label',fontsize = 15,fontname='Times New Roman')\n plt.xlabel('Predicted label',fontsize = 15,labelpad=10,fontname='Times New Roman')\n plt.tight_layout()"
] | [
"0.6811695",
"0.66890585",
"0.6661385",
"0.6602234",
"0.6555866",
"0.65173465",
"0.65019476",
"0.6501667",
"0.6468689",
"0.6454575",
"0.64506686",
"0.6438366",
"0.64235675",
"0.6412188",
"0.63892514",
"0.63323826",
"0.63196456",
"0.6305887",
"0.62828386",
"0.62782794",
"0.6266773",
"0.62563014",
"0.62511706",
"0.624875",
"0.62444085",
"0.62412417",
"0.62402654",
"0.62307173",
"0.6227979",
"0.62272537"
] | 0.6841279 | 0 |
Define the ellipsoid used by the source, target, and definition CRS. Required to apply the offset to coordinates. | def setCrsEllipsoid(self, a, rf=None):
self.isgeographic = True
self.ellipsoid_a = a
if rf is None:
self.ellipsoid_b = a
else:
self.ellipsoid_b = a * (1.0 - 1.0 / rf)
self.a2 = self.ellipsoid_a * self.ellipsoid_a
self.b2 = self.ellipsoid_b * self.ellipsoid_b | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_projection(self):\n radius = 6370e3\n \n # Spherical latlon used by WRF\n self.latlon_sphere = pyproj.Proj(proj='latlong',\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n # Lambert Conformal Conic used by WRF\n self.lambert_grid = pyproj.Proj(proj='lcc',\n lat_1=self.truelats[0],\n lat_2=self.truelats[1],\n lat_0=self.ref_latlon[0],\n lon_0=self.stand_lon,\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n grid_size_i = (self.domain_size[0] - 2) * self.cell_size[0]\n grid_size_j = (self.domain_size[1] - 2) * self.cell_size[1]\n\n grid_center_i, grid_center_j = pyproj.transform(\n self.latlon_sphere, self.lambert_grid,\n self.ref_latlon[1], self.ref_latlon[0])\n \n self.offset_i = grid_center_i - grid_size_i * .5\n self.offset_j = grid_center_j - grid_size_j * .5",
"def construct_by_ellipse(a_xx, h_xy, b_yy, g_x, f_y, d, focal_length):\n gamma = - focal_length\n a = gamma**2 * a_xx\n b = gamma**2 * b_yy\n c = d\n d = gamma**2 * d\n f = -gamma*(f_y)\n g = -gamma*(g_x)\n h = gamma**2 * h_xy\n #Not needed\n u = gamma**2 * g_x\n v = gamma**2 * f_y\n w = -gamma*(d)\n return ConeCamera(a, b, c, f, g, h)",
"def newEllipsoid(**kwds):\n # invoke the foundry to get to the class with the implementation\n ellipsoid = core.ellipsoid()\n # instantiate one and return it\n return ellipsoid(**kwds)",
"def add_source_ellipse(self):\n\n # Add ellipse for source within positional uncertainty\n if self.plot_source:\n source_colour = 'springgreen' if self.stokes == 'v' else 'springgreen'\n pos = SkyCoord(ra=self.source.ra_deg_cont, dec=self.source.dec_deg_cont, unit='deg')\n self.sourcepos = EllipseSkyRegion(\n pos,\n width=self.source.maj_axis * u.arcsec,\n height= self.source.min_axis * u.arcsec,\n angle=(self.source.pos_ang + 90) * u.deg,\n ).to_pixel(self.wcs)\n self.sourcepos.plot(\n ax=self.ax,\n facecolor='none',\n edgecolor=source_colour,\n ls=':',\n lw=2,\n zorder=10,\n )\n \n # Add ellipse for other components in the FoV\n if self.plot_neighbours:\n neighbour_colour = 'k' if self.stokes == 'v' else 'rebeccapurple'\n for idx, neighbour in self.neighbours.iterrows():\n pos = SkyCoord(\n ra=neighbour.ra_deg_cont,\n dec=neighbour.dec_deg_cont,\n unit='deg',\n )\n n = EllipseSkyRegion(\n pos,\n width=neighbour.maj_axis * u.arcsec,\n height=neighbour.min_axis * u.arcsec,\n angle=(neighbour.pos_ang + 90) * u.deg,\n ).to_pixel(self.wcs)\n n.plot(\n ax=self.ax,\n facecolor='none',\n edgecolor=neighbour_colour,\n ls=':',\n lw=2,\n zorder=1,\n )",
"def r_ellipse(self,xc=None,yc=None):\n x = self.x\n y = self.y\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.rel = sqrt(self.cxx*(x-xc)**2 +\n\t\t self.cyy*(y-yc)**2 +\n\t\t self.cxy*(x-xc)*(y-yc)\n\t\t )",
"def createEllipsoid( position=(0,0,0), radius=(1,1,1), colour=(0.6,0.6,0.6), samplesY = 20, samplesXZ = 20, exponentBottom = 2, exponentTop = 2, exponentSide = 2 ):\r\n \r\n if exponentBottom < 2.0 or exponentTop < 2.0 or exponentSide < 2.0 :\r\n raise ValueError( 'Exponents for ellipsoid must all be under 2.0!' )\r\n \r\n position = PyUtils.toPoint3d(position)\r\n vertices = []\r\n for i in range(1,samplesY):\r\n thetaI = i*math.pi/float(samplesY)\r\n if i < samplesY / 2 : \r\n n = exponentTop\r\n else:\r\n n = exponentBottom\r\n cos = math.cos(thetaI) \r\n y = cos * radius[1]\r\n scaleXZ = math.pow( 1-math.pow(math.fabs(cos),n), 1.0/float(n) )\r\n for j in range(0,samplesXZ):\r\n thetaJ = j*2.0*math.pi/float(samplesXZ)\r\n n = exponentSide\r\n cos = math.cos(thetaJ)\r\n x = cos * scaleXZ * radius[0]\r\n z = math.pow( 1-math.pow(math.fabs(cos),n), 1.0/float(n) ) * math.copysign(1, math.sin(thetaJ)) * scaleXZ * radius[2]\r\n vertices.append( position + Vector3d(x,y,z) )\r\n vertices.append( position + Vector3d(0,radius[1],0) )\r\n vertices.append( position + Vector3d(0,-radius[1],0) ) \r\n\r\n faces = []\r\n for i in range(0,(samplesY-2)*samplesXZ,samplesXZ) :\r\n for j in range(0,samplesXZ) :\r\n faces.append( (i+j, i+(j+1)%samplesXZ, i+samplesXZ+(j+1)%samplesXZ, i+samplesXZ+j) ) \r\n\r\n for i in range(0,samplesXZ) :\r\n base = (samplesY-2)*samplesXZ\r\n faces.append( ((i+1)%samplesXZ, i, (samplesY-1)*samplesXZ) ) \r\n faces.append( (base+i, base+(i+1)%samplesXZ, (samplesY-1)*samplesXZ+1) ) \r\n\r\n \r\n return create( vertices, faces, colour )",
"def ellipse(self):\n f = self.img\n x = self.x\n y = self.y\n x2 = self.x2\n y2 = self.y2\n xy = self.xy\n self.a2 = (x2+y2) + sqrt(((x2-y2)/2.)**2 + xy**2)\n self.b2 = (x2+y2) - sqrt(((x2-y2)/2.)**2 + xy**2)\n self.a = sqrt(self.a2)\n self.b = sqrt(self.b2)\n tan2theta = 2* (xy/(x2-y2))\n self.theta = arctan(tan2theta)/2.\n denominator = sqrt(((x2-y2)/2)**2+xy**2)\n self.cxx = y2/denominator\n self.cyy = x2/denominator\n self.cxy = -2*xy/denominator",
"def __init__(\n self,\n newE0=None,\n newE1=None,\n newE2=None,\n newMaximumHorizontalProjection=None,\n newMaximumVerticalProjection=None,\n newEquivalentHorizontalRadius=None,\n ):\n\n # Required Keys\n if newE0 is not None:\n self.e0 = newE0\n else:\n self.e0 = processingformats.errorEllipseAxis.ErrorEllipseAxis()\n\n if newE1 is not None:\n self.e1 = newE1\n else:\n self.e1 = processingformats.errorEllipseAxis.ErrorEllipseAxis()\n\n if newE2 is not None:\n self.e2 = newE2\n else:\n self.e2 = processingformats.errorEllipseAxis.ErrorEllipseAxis()\n\n if newMaximumHorizontalProjection is not None:\n self.maximumHorizontalProjection = newMaximumHorizontalProjection\n\n if newMaximumVerticalProjection is not None:\n self.maximumVerticalProjection = newMaximumVerticalProjection\n\n if newEquivalentHorizontalRadius is not None:\n self.equivalentHorizontalRadius = newEquivalentHorizontalRadius",
"def set_earth(inclination, phases):\n cosi, sini = np.cos(inclination), np.sin(inclination)\n cosp = np.cos(2*np.pi*phases)\n sinp = np.sin(2*np.pi*phases)\n return CartesianRepresentation(sini*cosp, -sini*sinp, cosi)",
"def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))",
"def make_mage_ellipsoids(ids, coord_dict, coord_low_dict,\n coord_high_dict, color, ellipsoid_prefs=\\\n {\"smoothness\":2,\"alpha\":.25}):\n alpha = ellipsoid_prefs['alpha']\n nsubdivs = ellipsoid_prefs['smoothness']\n result = []\n coord_lines = []\n for id_ in sorted(ids):\n if id_ in coord_dict:\n center = coord_dict[id_][:3]\n dims = coord_high_dict[id_][:3] - coord_low_dict[id_][:3]\n\n faces = make_ellipsoid_faces(center, dims, nsubdivs=nsubdivs)\n for face in faces:\n result.append(\"@trianglelist color=%s alpha=%f master={points} nobutton\" %(color, alpha))\n for point in face:\n result.append(' '.join(map(str,point)))\n return result",
"def __init__(self, gridname=None, verbose=False):\n self.gridname = gridname\n g = re.match(r'(EASE2_[NST])([0-9\\.]+)km', gridname)\n if g is None:\n print(\"%s : error parsing gridname %s\" % (__name__, gridname),\n file=sys.stderr,\n flush=True)\n raise ValueError\n projection = g.group(1)\n resolution = g.group(2)\n\n # Check for typos in resolution\n if resolution not in resolutions:\n print(\"%s : unrecognized resolution %s\" % (__name__, resolution),\n file=sys.stderr,\n flush=True)\n raise ValueError\n\n # The geotransform information\n # is the set of GDAL affine transform parameters:\n # (map_UL_x, scale_x, b, map_UL_y, d, scale_y)\n if projection == \"EASE2_N\":\n # The geotransform is the set of GDAL affine transform parameters:\n # (map_UL_x, scale_x, b, map_UL_y, d, scale_y)\n self.proj4text = \"+proj=laea +lat_0=90 +lon_0=0 \" + \\\n \"+x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m\"\n self.map_UL_x = -9000000.\n self.map_UL_y = 9000000.\n self.b = 0.\n self.d = 0.\n self.scale_x = float(resolution) * m_per_km\n self.scale_y = -1 * float(resolution) * m_per_km\n\n elif projection == \"EASE2_S\":\n self.proj4text = \"+proj=laea +lat_0=-90 +lon_0=0 \" + \\\n \"+x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m\"\n self.map_UL_x = -9000000.\n self.map_UL_y = 9000000.\n self.b = 0.\n self.d = 0.\n self.scale_x = float(resolution) * m_per_km\n self.scale_y = -1 * float(resolution) * m_per_km\n\n elif projection == \"EASE2_T\":\n self.proj4text = \"+proj=cea +lat_0=0 +lon_0=0 +lat_ts=30 \" \\\n \"+x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m\"\n self.map_UL_x = -17367530.44\n self.map_UL_y = 6756820.20000\n self.b = 0.\n self.d = 0.\n base_resolution_m = 25025.26000\n factor = resolutions.index(resolution)\n self.scale_x = base_resolution_m / (2. ** factor)\n self.scale_y = -1 * base_resolution_m / (2. ** factor)\n\n else:\n print(\"%s : unrecognized projection %s\" % (__name__, projection),\n file=sys.stderr,\n flush=True)\n raise ValueError\n\n # Thanks to affine help pages at\n # https://github.com/sgillies/affine/blob/master/README.rst\n # http://www.perrygeo.com/python-affine-transforms.html\n geotransform = (self.map_UL_x + self.scale_x / 2.,\n self.scale_x,\n self.b,\n self.map_UL_y + self.scale_y / 2.,\n self.d,\n self.scale_y)\n self.fwd = Affine.from_gdal(*geotransform)\n\n # Initialize and save coordinate transformation\n # for this projection\n self.gridSpatialRef = osr.SpatialReference()\n self.gridSpatialRef.SetFromUserInput(self.proj4text)\n\n # Initialize and save coordinate transformation\n # for EPSG4326 (lat/lon)\n self.epsg4326SpatialRef = osr.SpatialReference()\n self.epsg4326SpatialRef.SetFromUserInput(self.epsg4326Proj4text)\n\n # Initialize and save the forward and reverse transformations\n self.projToGeog = osr.CoordinateTransformation(\n self.gridSpatialRef, self.epsg4326SpatialRef)\n self.geogToProj = osr.CoordinateTransformation(\n self.epsg4326SpatialRef, self.gridSpatialRef)\n\n if verbose:\n print(\"%s : initialized new Ease2Transform object\" % (__name__),\n file=sys.stderr,\n flush=True)",
"def __init__(self, x, y, z): \n\t\tself.x = x # x coordinate (EW distance from observatory center)\n\t\tself.y = y # y coordinate (NS distance from observatory center)\n\t\tself.z = z # z coordinate (altitude rel. to observatory center)",
"def eoa(self, *args):\n\n\t\t#Assume coordinate is in center of pixel.\n\t\t#Information on pixel standard is in this article.\n\t\t#http://www.aanda.org/component/article?access=bibcode&bibcode=&bibcode=2002A%2526A...395.1061GFUL\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tlonUL, latUL = self.heliographic(args[0], -.5, -.5)\n\t\t\tlonLL, latLL = self.heliographic(args[0], .5, -.5)\n\t\t\tlonLR, latLR = self.heliographic(args[0], .5, .5)\n\t\t\tlonUR, latUR = self.heliographic(args[0], -.5, .5)\n\t\telse:\n\t\t\tx = args[0]\n\t\t\ty = args[1]\n\t\t\tlonUL, latUL = self.heliographic(x - .5, y - .5)\n\t\t\tlonLL, latLL = self.heliographic(x + .5, y - .5)\n\t\t\tlonLR, latLR = self.heliographic(x + .5, y + .5)\n\t\t\tlonUR, latUR = self.heliographic(x - .5, y + .5)\n\n\t\t# Calculating unit vectors of pixel corners for solid angle.\n\t\tr1 = np.array([np.cos(np.deg2rad(latUL))*np.cos(np.deg2rad(lonUL)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latUL))*np.sin(np.deg2rad(lonUL)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latUL))])\n\n\t\tr2 = np.array([np.cos(np.deg2rad(latLL))*np.cos(np.deg2rad(lonLL)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latLL))*np.sin(np.deg2rad(lonLL)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latLL))])\n\n\t\tr3 = np.array([np.cos(np.deg2rad(latLR))*np.cos(np.deg2rad(lonLR)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latLR))*np.sin(np.deg2rad(lonLR)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latLR))])\n\n\t\tr4 = np.array([np.cos(np.deg2rad(latUR))*np.cos(np.deg2rad(lonUR)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latUR))*np.sin(np.deg2rad(lonUR)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latUR))])\n\n\t\t# Calculate solid angle of pixel based on a pyrimid shaped polygon.\n\t\t# See \n\t\tcross1 = np.cross(r1, r2, axis=0)\n\t\tcross2 = np.cross(r3, r4, axis=0)\n\t\tnumerator1 = dot(cross1, r3)\n\t\tnumerator2 = dot(cross2, r1)\n\t\tsolid_angle1 = 2*np.arctan2(numerator1,\n\t\t\t\t\t\t(dot(r1, r2) + dot(r2, r3) + dot(r3, r1) + 1))\n\t\tsolid_angle2 = 2*np.arctan2(numerator2, \n\t\t\t\t\t\t(dot(r3, r4) + dot(r4, r1) + dot(r3, r1) + 1))\n\t\tsolid_angle = solid_angle1 + solid_angle2\n\t\tr = 6.957e10 * u.cm\n\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tself.area = np.abs((r**2)*solid_angle)\n\t\t\tind = np.where(self.rg > self.rsun)\n\t\t\tself.area[ind] = np.nan\n\t\t\treturn self.area\n\t\telse:\n\t\t\treturn np.abs((r**2)*solid_angle)",
"def __init__(\r\n self,\r\n centre: Tuple[float, float] = (0.0, 0.0),\r\n ell_comps: Tuple[float, float] = (0.0, 0.0),\r\n einstein_radius: float = 1.0,\r\n core_radius: float = 0.01,\r\n ):\r\n super().__init__(\r\n centre=centre,\r\n ell_comps=ell_comps,\r\n einstein_radius=einstein_radius,\r\n slope=2.0,\r\n core_radius=core_radius,\r\n )",
"def ellipsoid(center, radii, rotation, scales=None, shape=None, minarea=0):\n center = np.array(center)\n radii = np.array(radii)\n rotation = np.array(rotation)\n assert center.shape == (3,)\n assert radii.shape == (3,)\n assert 0 < radii.max(), \"radii should contain at least one positive value\"\n assert rotation.shape == (3, 3)\n if scales is None:\n scales = (1.,) * 3\n scales = np.array(scales)\n assert scales.shape == (3,)\n\n scaled_center = center / scales\n\n # The upper_left_bottom and lower_right_top corners of the smallest cuboid\n # containing the ellipsoid.\n factor = np.array([\n [i, j, k] for k in (-1, 1) for j in (-1, 1) for i in (-1, 1)]).T\n while True:\n radii_rot = np.abs(\n np.diag(1. / scales).dot(rotation.dot(np.diag(radii).dot(factor)))\n ).max(axis=1)\n # In the original scikit-image code, ceil and floor were replaced.\n # https://github.com/scikit-image/scikit-image/blob/master/skimage/draw/draw.py#L127\n upper_left_bottom = np.floor(scaled_center - radii_rot).astype(int)\n lower_right_top = np.ceil(scaled_center + radii_rot).astype(int)\n\n if shape is not None:\n # Constrain upper_left and lower_ight by shape boundary.\n upper_left_bottom = np.maximum(\n upper_left_bottom, np.array([0, 0, 0]))\n lower_right_top = np.minimum(\n lower_right_top, np.array(shape[:3]) - 1)\n\n bounding_shape = lower_right_top - upper_left_bottom + 1\n\n d_lim, r_lim, c_lim = np.ogrid[0:float(bounding_shape[0]),\n 0:float(bounding_shape[1]),\n 0:float(bounding_shape[2])]\n d_org, r_org, c_org = scaled_center - upper_left_bottom\n d_rad, r_rad, c_rad = radii\n rotation_inv = np.linalg.inv(rotation)\n conversion_matrix = rotation_inv.dot(np.diag(scales))\n d, r, c = (d_lim - d_org), (r_lim - r_org), (c_lim - c_org)\n distances = (\n ((d * conversion_matrix[0, 0] +\n r * conversion_matrix[0, 1] +\n c * conversion_matrix[0, 2]) / d_rad) ** 2 +\n ((d * conversion_matrix[1, 0] +\n r * conversion_matrix[1, 1] +\n c * conversion_matrix[1, 2]) / r_rad) ** 2 +\n ((d * conversion_matrix[2, 0] +\n r * conversion_matrix[2, 1] +\n c * conversion_matrix[2, 2]) / c_rad) ** 2\n )\n if distances.size < minarea:\n old_radii = radii.copy()\n radii *= 1.1\n print('Increase radii from ({}) to ({})'.format(old_radii, radii))\n else:\n break\n distance_thresh = 1\n while True:\n dd, rr, cc = np.nonzero(distances < distance_thresh)\n if len(dd) < minarea:\n distance_thresh *= 1.1\n else:\n break\n dd.flags.writeable = True\n rr.flags.writeable = True\n cc.flags.writeable = True\n dd += upper_left_bottom[0]\n rr += upper_left_bottom[1]\n cc += upper_left_bottom[2]\n return dd, rr, cc",
"def _set_axis_len_and_dir(self):\n # Constant\n R = EQUATORIAL_EARTH_RADIUS\n\n # Compute the covariance matrixe for finding the axis direction\n nb_coord = np.sum(\n [self.sl_list[k].nb_points for k in range(self.nb_sl)]\n )\n merged_coord_list = np.zeros((nb_coord, 2))\n index = 0\n for sl_id in range(self.nb_sl):\n sl = self.sl_list[sl_id]\n merged_coord_list[index : index + sl.nb_points, :] = np.array(\n sl.coord_list\n )\n index += sl.nb_points\n merged_coord_list[:, 0] -= self.center[0]\n merged_coord_list[:, 1] -= self.center[1]\n cov_matrix = np.cov(merged_coord_list.T)\n self.axis_dir = np.linalg.eig(cov_matrix)[1]\n # Rotate and center the points so that the ellipse equation can be\n # written \"(x/a)**2 + (y/b)**2 = 1\"\n # angles = np.angle(self.axis_dir[:, 0] + 1j * self.axis_dir[:, 1])\n angles = np.angle(self.axis_dir[0, :] + 1j * self.axis_dir[1, :])\n angle = angles[0]\n\n rotation = np.array(\n [[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]\n )\n aligned_points = np.dot(rotation, merged_coord_list.T).T\n\n # Compute the regression square error for given (a,b) parameters\n def error(a, b):\n points_sq = aligned_points ** 2\n points_sq[:, 0] /= max(1.0 * a * a, 0.0001)\n points_sq[:, 1] /= max(1.0 * b * b, 0.0001)\n return np.sum((np.sqrt(np.sum(points_sq, axis=1)) - 1) ** 2)\n\n # Gradient of the square error\n def grad_error(a, b):\n points_sq = aligned_points ** 2\n x2 = np.array(points_sq[:, 0])\n y2 = np.array(points_sq[:, 1])\n sq_coeff = np.sqrt(x2 / (a * a) + y2 / (b * b))\n common_coeff = -2 * (1 - 1 / sq_coeff)\n grad_a = np.sum(common_coeff * x2) / (a ** 3)\n grad_b = np.sum(common_coeff * y2) / (b ** 3)\n return grad_a, grad_b\n\n # Gradient decente\n a, b = grad_desc(error, grad_error)\n self.axis_len = np.array([a, b])\n return\n\n ##### Optimisation of parameters assuming a gaussian modelisation #####\n # # self.axis_len = np.linalg.eig(cov_matrix)[0]\n # # Rx, Ry in meter\n # axis_len_meter = np.zeros((2,))\n # rotation_inv = np.array(\n # [[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]\n # )\n # point_rx = np.array([self.axis_len[0], 0])\n # point_ry = np.array([0, self.axis_len[1]])\n # point_rx = np.dot(rotation_inv, point_rx.T).T\n # point_ry = np.dot(rotation_inv, point_ry.T).T\n # point_rx_meter = convert_from_degree_to_meter(\n # point_rx, self.center[1], R\n # )\n # point_ry_meter = convert_from_degree_to_meter(\n # point_ry, self.center[1], R\n # )\n # axis_len_meter[0] = np.linalg.norm(point_rx_meter)\n # axis_len_meter[1] = np.linalg.norm(point_ry_meter)\n\n # # Esimate h0\n # h0 = estimate_sea_level_center(\n # self.date,\n # self.center,\n # aligned_points,\n # self.axis_len,\n # self.axis_dir,\n # self.angular_velocity,\n # angle,\n # stream_data_fname=\"../data/data.nc\",\n # R=EQUATORIAL_EARTH_RADIUS,\n # )\n\n # # Compute theorical u,v and get measured (interpolized) u,v\n # u_v_evaluated = compute_u_v(\n # aligned_points, angle, self.center, axis_len_meter, h0\n # )\n # u_v_measured = get_interpolized_u_v(\n # aligned_points,\n # self.date,\n # self.center,\n # angle,\n # stream_data_fname=\"../data/data.nc\",\n # )\n # # Computing initial error\n # L1init = compute_error(u_v_evaluated, u_v_measured)\n\n # # Gradient descent\n # step_sizex = axis_len_meter[0]\n # step_sizey = axis_len_meter[1]\n # n_iter = 10\n # for iter in range(n_iter):\n # grad_u, grad_v = compute_grad_u_v(\n # aligned_points, angle, self.center, axis_len_meter, h0\n # )\n # gradL1 = compute_gradL1(\n # u_v_evaluated, u_v_measured, 
grad_u, grad_v\n # )\n\n # axis_len_meter[0] -= step_sizex * gradL1[0]\n # axis_len_meter[1] -= step_sizey * gradL1[1]\n # # Updating Rx and Ry in degreee to recompute h0\n # point_rx_meter = (\n # point_rx_meter\n # * axis_len_meter[0]\n # / np.linalg.norm(point_rx_meter)\n # )\n # point_ry_meter = (\n # point_ry_meter\n # * axis_len_meter[1]\n # / np.linalg.norm(point_ry_meter)\n # )\n # point_rx_degree = convert_from_meter_to_degree(\n # point_rx_meter, self.center[1], R\n # )\n # point_ry_degree = convert_from_meter_to_degree(\n # point_ry_meter, self.center[1], R\n # )\n # self.axis_len[0] = np.linalg.norm(point_rx_degree)\n # self.axis_len[1] = np.linalg.norm(point_ry_degree)\n # h0 = estimate_sea_level_center(\n # self.date,\n # self.center,\n # aligned_points,\n # self.axis_len,\n # self.axis_dir,\n # self.angular_velocity,\n # angle,\n # stream_data_fname=\"../data/data.nc\",\n # R=EQUATORIAL_EARTH_RADIUS,\n # )\n # u_v_evaluated = compute_u_v(\n # aligned_points, angle, self.center, axis_len_meter, h0\n # )\n # L1 = compute_error(u_v_evaluated, u_v_measured)\n # print(\"L1 : \", L1)\n # print(\"optimizing: \", L1 <= L1init)\n\n # # Updating Rx,Ry in degree\n # point_rx_meter = (\n # point_rx_meter * axis_len_meter[0] / np.linalg.norm(point_rx_meter)\n # )\n # point_ry_meter = (\n # point_ry_meter * axis_len_meter[1] / np.linalg.norm(point_ry_meter)\n # )\n # point_rx_degree = convert_from_meter_to_degree(\n # point_rx_meter, self.center[1], R\n # )\n # point_ry_degree = convert_from_meter_to_degree(\n # point_ry_meter, self.center[1], R\n # )\n # self.axis_len[0] = np.linalg.norm(point_rx_degree)\n # self.axis_len[1] = np.linalg.norm(point_ry_degree)",
"def setup_localxyzs(self):\n self.localxyzs = [sp.vscl(1.0/v[2],v) for v in self.uvlclxyzs]\n self.fovsides = list()\n lastxyz = self.localxyzs[-1]\n for xyz in self.localxyzs:\n self.fovsides.append(FOVSIDE(xyz,lastxyz))\n lastxyz = xyz",
"def __init__(self, unit_vector_3d):\n \n self.unit_vector = unit_vector_3d\n transposed_uv = np.transpose(self.unit_vector)\n self.x = transposed_uv[0] \n self.y = transposed_uv[1] \n self.z = transposed_uv[2]\n self.d = SkyCoord(self.x, self.y, self.z, \n unit = 'mpc', \n representation_type = 'cartesian', \n frame = 'icrs')\n self.d.representation_type = 'spherical'\n self.lons = self.d.galactic.l.wrap_at(360 * u.deg).deg\n self.lats = self.d.galactic.b.wrap_at(180 * u.deg).deg",
"def coordinateCorrection(data, snap_center, obs_center, **kwargs):\n import astropy\n from astropy.coordinates import SkyCoord # High-level coordinates\n from astropy.coordinates import ICRS, Galactic, Galactocentric, FK4, FK5 # Low-level frames\n from astropy.coordinates import Angle, Latitude, Longitude # Angles\n from astropy.coordinates import CartesianDifferential\n import astropy.units as u\n\n pos_unit = u.pc\n if ('pos_unit' in kwargs.keys()): pos_unit = kwargs['pos_unit']\n vel_unit = u.pc/u.Myr\n if ('vel_unit' in kwargs.keys()): vel_unit = kwargs['vel_unit']\n\n parameters={'galcen_distance':8.0*u.kpc, 'z_sun':15.*u.pc, 'galcen_v_sun':CartesianDifferential([10.0,235.,7.]*u.km/u.s)} \n for key in parameters.keys():\n if key in kwargs.keys():\n parameter[key] = kwargs[key]\n\n obs_cg = obs_center.transform_to(Galactocentric(**parameters))\n obs_cg.representation_type = 'spherical'\n\n snap_cg = snap_center.transform_to(Galactocentric(**parameters))\n snap_cg.representation_type = 'spherical'\n\n data_g = data.transform_to(Galactocentric(**parameters))\n data_g.representation_type = 'spherical'\n \n dlon = obs_cg.lon - snap_cg.lon\n dlat = obs_cg.lat - snap_cg.lat\n lon_new = data_g.lon + dlon\n lat_new = data_g.lat + dlat\n\n sel = lat_new > 90*u.deg\n lat_new[sel] = 180*u.deg - lat_new[sel]\n lon_new[sel] = 180*u.deg + lon_new[sel]\n \n sel = lat_new < -90*u.deg\n lat_new[sel] = -180*u.deg - lat_new[sel]\n lon_new[sel] = 180*u.deg + lon_new[sel]\n\n sel = (lon_new > 360*u.deg) | (lon_new < -360*u.deg)\n lon_new[sel] = lon_new[sel] - (lon_new[sel].to(u.deg).value/360.0).astype(int)*360*u.deg\n\n ddis = obs_cg.distance - snap_cg.distance\n dpm_lon = obs_cg.pm_lon - snap_cg.pm_lon\n dpm_lat = obs_cg.pm_lat - snap_cg.pm_lat\n drv = obs_cg.radial_velocity - snap_cg.radial_velocity\n \n \n data_c = SkyCoord(lon = lon_new,\n lat = lat_new,\n distance = data_g.distance + ddis,\n pm_lon = data_g.pm_lon + dpm_lon,\n pm_lat = data_g.pm_lat + dpm_lat,\n radial_velocity = data_g.radial_velocity + drv,\n frame='galactocentric', representation_type='spherical', **parameters)\n \n return data_c",
"def define_projection(self, region):\n region = {\n \"start_longitude\": region[0],\n \"end_longitude\": region[1],\n \"start_latitude\": region[2],\n \"end_latitude\": region[3],\n }\n projection = \"LambertConformal\"\n plotextend = [\n region[\"start_longitude\"],\n region[\"end_longitude\"],\n region[\"start_latitude\"],\n region[\"end_latitude\"],\n ]\n if projection == \"LambertConformal\":\n # plotextend has to be a little larger so everything is on there\n plotextend = [\n plotextend[0] - 1.0,\n plotextend[1] + 1.0,\n plotextend[2] - 1.0,\n plotextend[3] + 1.0,\n ]\n # path to cut out is exact though\n lons = self.region_to_square(region, \"longitude\")\n lats = self.region_to_square(region, \"latitude\")\n path_ext = [[lon, lat] for lon, lat in zip(lons, lats)]\n path_ext = mpath.Path(path_ext).interpolated(20)\n # South Hemisfere\n if region[\"start_latitude\"] <= 0 and region[\"end_latitude\"] <= 0:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n cutoff=+30,\n standard_parallels=(-33, -45),\n )\n # North Hemisphere\n else:\n proj = ccrs.LambertConformal(\n central_longitude=np.sum(plotextend[:2]) / 2.0,\n central_latitude=np.sum(plotextend[2:]) / 2.0,\n )\n return proj, path_ext, plotextend",
"def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi",
"def __init__(\r\n self,\r\n centre: Tuple[float, float] = (0.0, 0.0),\r\n einstein_radius: float = 1.0,\r\n core_radius: float = 0.01,\r\n ):\r\n super().__init__(\r\n centre=centre,\r\n einstein_radius=einstein_radius,\r\n slope=2.0,\r\n core_radius=core_radius,\r\n )",
"def ellipsoidPair(N,srcdist=89.61e3+1.5e3,primalign=np.zeros(6),\\\n secalign=np.zeros(6),rrays=False,f=None,\\\n plist=[[0],[0],[0]],hlist=[[0],[0],[0]]):\n #Establish subannulus of rays\n r1 = conic.ellipsoidRad(srcdist,1.,220.,8400.,8500.)\n rays = sources.subannulus(220.,r1,100./220.,N,zhat=-1.)\n tran.pointTo(rays,0,0,srcdist,reverse=1.)\n## #Transform to node position\n## tran.transform(rays,220,0,0,0,0,0)\n## #Set up finite source distance\n## raydist = sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n## rays[4] = rays[1]/raydist\n## rays[5] = rays[2]/raydist\n## rays[6] = -sqrt(1.-rays[4]**2-rays[5]**2)\n\n #Place mirror pair\n coords = [tran.tr.identity_matrix()]*4\n prad = conic.ellipsoidRad(srcdist,1.,220.,8400.,8450.)\n tran.transform(rays,prad,0,50.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*primalign,coords=coords)\n tran.transform(rays,-prad,0,-8450.,0,0,0,\\\n coords=coords)\n surf.ellipsoidPrimaryLL(rays,220.,8400.,srcdist,1.,8500.,8400.,100./220,\\\n *plist)\n #Vignette any rays outside of active area\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8500.,\\\n rays[3]>8400.))\n## surf.ellipsoidPrimary(rays,220.,8400.,srcdist,1.)\n tran.reflect(rays)\n #Place secondary in primary's reference frame\n srad = conic.ehSecRad(srcdist,1.,220.,8400.,8350.)\n tran.transform(rays,srad,0,8350.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*secalign,coords=coords)\n tran.itransform(rays,srad,0,8350.,0,0,0,\\\n coords=coords)\n## surf.ellipsoidSecondary(rays,220.,8400.,srcdist,1.)\n surf.ellipsoidSecondaryLL(rays,220.,8400.,srcdist,1.,8400.,8300.,100./220,\\\n *hlist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8400.,\\\n rays[3]>8300.))\n ang = anal.grazeAngle(rays)\n tran.reflect(rays)\n\n #Go back to nominal node reference frame and down to focus\n rays = tran.applyT(rays,coords,inverse=True)\n\n if f is None:\n f = -surf.focusI(rays)\n print f\n else:\n tran.transform(rays,0,0,-f,0,0,0)\n surf.flat(rays)\n\n if rrays is True:\n return rays\n \n return anal.hpd(rays)/f * 180/np.pi * 60.**2",
"def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])",
"def create_ring(self):\n\t\tself.north_coords = numpy.add(self.center, self.north)\n\t\tself.northeast_coords = numpy.add(self.center, self.northeast)\n\t\tself.east_coords = numpy.add(self.center, self.east)\n\t\tself.southeast_coords = numpy.add(self.center, self.southeast)\n\t\tself.south_coords = numpy.add(self.center, self.south)\n\t\tself.southwest_coords = numpy.add(self.center, self.southwest)\n\t\tself.west_coords = numpy.add(self.center, self.west)\n\t\tself.northwest_coords = numpy.add(self.center, self.northwest)",
"def fieldCenter(self):\n if self.ra0 is None:\n self.ra0 = reduce(lambda x, y: x + y, [src.pos.ra for src in self.sources]) / len(\n self.sources) if self.sources else 0\n if self.dec0 is None:\n self.dec0 = reduce(lambda x, y: x + y, [src.pos.dec for src in self.sources]) / len(\n self.sources) if self.sources else 0\n return self.ra0, self.dec0",
"def __init__(self, n_pixels_u, n_pixels_v, detector_size_u, detector_size_v, source_to_detector_dist,\n source_to_object_dist, angular_inc=1, center_of_rot=0, **kwargs):\n\n self.n_pixels_u = n_pixels_u\n self.n_pixels_v = n_pixels_v\n\n self.detector_size_u = detector_size_u\n self.detector_size_v = detector_size_v\n self.source_to_detector_dist = source_to_detector_dist\n self.source_to_object_dist = source_to_object_dist\n self.angular_inc = angular_inc\n\n self.center_of_rot_u = center_of_rot\n\n # All values below are calculated\n\n self.projection_angs = np.arange(0., 360, self.angular_inc)\n self.n_projections = len(self.projection_angs)\n\n self.object_size_x = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_y = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_z = self.detector_size_v * self.source_to_object_dist / self.source_to_detector_dist\n\n self.voxel_size_x = self.object_size_x / self.n_pixels_u\n self.voxel_size_y = self.object_size_y / self.n_pixels_u\n self.voxel_size_z = self.object_size_z / self.n_pixels_v\n\n self.pixel_size_u = self.detector_size_u / self.n_pixels_u\n self.pixel_size_v = self.detector_size_v / self.n_pixels_v\n\n self.center_of_rot_y = self.center_of_rot_u * (\n self.source_to_object_dist / self.source_to_detector_dist) * self.pixel_size_u\n\n self.object_ys = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_y\n self.object_xs = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_x\n self.object_zs = (np.arange(self.n_pixels_v, dtype=np.float64) - self.n_pixels_v / 2.) * self.voxel_size_z\n\n self.detector_us = (np.arange(self.n_pixels_u,\n dtype=np.float64) - self.n_pixels_u / 2.) * self.pixel_size_u\n self.detector_vs = (np.arange(self.n_pixels_v,\n dtype=np.float64) - self.n_pixels_v / 2.) * self.pixel_size_v",
"def __init__(self, raster_path):\n self.raster_path = raster_path\n dataset = gdal.Open(raster_path)\n self.width = dataset.RasterXSize\n self.height = dataset.RasterYSize\n # Gets the gdal geo transformation tuples\n # gdal_version = gdal.__version__\n self._txf = dataset.GetGeoTransform()\n # self._inv_txf = gdal.InvGeoTransform(self._txf)[1]\n self._inv_txf = gdal.InvGeoTransform(self._txf)\n # Gets the transformation from lat/lon to coordinates\n wgs84_ref = osr.SpatialReference()\n wgs84_ref.ImportFromEPSG(4326) # WGS84\n sref = osr.SpatialReference()\n sref.ImportFromWkt(dataset.GetProjection())\n if int(osgeo.__version__[0]) >= 3:\n # Output order has changed in osgeo v3\n wgs84_ref.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n sref.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n self._transform = osr.CoordinateTransformation(wgs84_ref, sref)\n inv_transform = osr.CoordinateTransformation(sref, wgs84_ref)\n # Find a loose lat/lon bounding box for quick check without\n # having to do full coordinates transformation\n corners = []\n for x in [0, self.width]:\n for y in [0, self.height]:\n corners.append([self._txf[0] + self._txf[1] * x + self._txf[2] * y,\n self._txf[3] + self._txf[4] * x + self._txf[5] * y])\n self.max_lat = -100\n self.min_lat = 100\n self.max_lon = -500\n self.min_lon = 500\n for c in corners:\n p = inv_transform.TransformPoint(c[0], c[1])\n if p[0] > self.max_lon:\n self.max_lon = p[0]\n if p[0] < self.min_lon:\n self.min_lon = p[0]\n if p[1] > self.max_lat:\n self.max_lat = p[1]\n if p[1] < self.min_lat:\n self.min_lat = p[1]\n dataset = None",
"def make_e(self):\n self.img[1, 2:-1] = 1\n self.img[self.l_i / 2, 2:-1] = 1\n self.img[-2, 2:-1] = 1\n self.img[1:-1, 2] = 1\n self.img_name = 'E'"
] | [
"0.6040166",
"0.60394746",
"0.59396076",
"0.59256494",
"0.58521795",
"0.5771095",
"0.5674094",
"0.56357586",
"0.5605683",
"0.5517585",
"0.5476492",
"0.5439007",
"0.53797853",
"0.53455025",
"0.53209114",
"0.53010976",
"0.5291954",
"0.52865607",
"0.5256215",
"0.52353615",
"0.52344245",
"0.5227932",
"0.5227586",
"0.5212382",
"0.5211804",
"0.52018934",
"0.51951545",
"0.51791495",
"0.516317",
"0.5147333"
] | 0.65401864 | 0 |
Get an imdb (image database) by name. | def get_imdb(name):
if not __sets.has_key(name):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def imdb_info(title):\n try:\n if title in _imdb_cache:\n return _imdb_cache[title]\n except KeyError:\n pass\n i = imdb.IMDb()\n search_result = i.search_movie(title, results=1)\n if not search_result:\n return None\n result = search_result[0]\n i.update(result)\n _imdb_cache[title] = result\n return result",
"def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)",
"def get_ami_by_name ( ec2_conn, ami_name ) :\n amis = ec2_conn.get_all_images( filters = { \"name\": [ ami_name ] } )\n for ami in amis :\n return ami",
"def read_single_lmdb(image_id, lmdb_dir):\n # Open the LMDB environment\n env = lmdb.open(str(lmdb_dir + '/' + \"single_lmdb\"), readonly=True)\n # Start a new read transaction\n with env.begin() as txn:\n # Encode the key the same way as we stored it\n data = txn.get(image_id.encode(\"ascii\"))\n # Remember it's a CIFAR_Image object that is loaded\n data = pickle.loads(data)\n # Retrieve the relevant bits\n image = data[0]\n label = data[1]\n #image = cifar_image.get_image()\n #label = cifar_image.label\n #cv2.imshow(winname=\"test_image\", mat=image)\n #cv2.waitKey(0)\n env.close()\n #txn.drop(env)\n return image, label",
"def get_database(self, instance, name):\n return instance.get_database(name)",
"def get_image(name):\r\n return nova.images.find(name=name)",
"def get_image(id_num):\n return sqldb.get_image(id_num)",
"def get(cls, name):\n return cls.images[name]",
"def __getitem__(self, dbname):\n return Database(dbname=dbname, connection=self)",
"def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))",
"def queryImage(name):\n\n width, height = queryDimensions(name)\n header, rows = querySciDB(\"scan(%s)\" % name)\n\n return render.renderPng(width, height, rows)",
"def create_omdb_poster_get(omdb_id, base=\"http://img.omdbapi.com/\"):\n apikey = current_app.config.get(\"OMDB_API_KEY\", \"\").strip()\n if not apikey:\n raise ValueError(\"No OMDB API Key supplied in configuration!\")\n\n omdb_id = norm_imdbid(omdb_id)\n if not omdb_id:\n return None\n\n return requests.get(base, params={\n 'apikey': apikey,\n 'i': omdb_id\n })",
"def query_imdb(movie_title):\n base_url = \"http://omdbapi.com/?t=\" # Only submitting Title\n response = urllib.urlopen(base_url + movie_title)\n if response.getcode() == 200: # HTTP status is OK\n imdb_data = json.loads(response.read()) # Deserialize into dictionary\n return imdb_data\n else: # HTTP error\n return {\"Response\" : \"False\"}",
"def image(self, name=None):\n return self.find(self.images(), name=name)",
"def lookup_by_id(i_d):\n imdb_id = 0\n str_id = str(i_d)\n if str_id[0].isdigit():\n #contact the moviedb api for inmdb id\n res = requests.get(\n f\"https://api.themoviedb.org/3/movie/{i_d}/external_ids?api_key=28dda9f76d76f128b47831768bc9a103\")\n res.raise_for_status()\n mov = res.json()\n imdb_id = mov[\"imdb_id\"]\n else:\n imdb_id = i_d\n # Contact API\n try:\n response = requests.get(\n f\"http://www.omdbapi.com/?i={imdb_id}&apikey=ced7be9a\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # parse response\n try:\n movie = response.json()\n return {\n \"title\":movie[\"Title\"],\n \"id\":movie[\"imdbID\"],\n \"plot\":movie[\"Plot\"],\n \"year\":movie[\"Year\"],\n \"poster\":movie[\"Poster\"],\n \"gross\":movie[\"BoxOffice\"],\n \"rating\":movie[\"imdbRating\"],\n \"website\":movie[\"Website\"],\n \"director\":movie[\"Director\"],\n \"writer\":movie[\"Writer\"],\n \"genre\":movie[\"Genre\"],\n \"actors\":movie[\"Actors\"]\n }\n\n except (KeyError, TypeError, ValueError):\n return None",
"def find_image(image_name):\n imgs = pyrax.images\n image = imgs.list(name=image_name)[0]\n\n # print image.id\n return image.id",
"def get_by_name(self, name: str) -> Optional[\"Dataset\"]:\n raise NotImplementedError",
"def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)",
"def get(self, name_or_id):\n \n r = self.library.database.get_name(name_or_id)\n\n if not r[0]:\n r = self.library.database.get_id(name_or_id)\n \n return r",
"def get_movie_data_from_imdbcom(cls, imdbid):\n if cls.imdb_access == None:\n cls._setup_connection()\n\n movie = cls.imdb_access.get_movie(imdbid)\n return movie",
"def dataset_by_name(name):\n return _datasets[name.lower()]",
"def search(self, id):\n\n db = self.connection(\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"SELECT * FROM film WHERE id = %s;\"\n cur.execute(sql, (id,))\n return cur.fetchall()\n except:\n print(\"Cannot find the film!\")\n\n db.close()",
"def imdb_id(title):\n pass",
"def find_movie_from_api(imdb_id):\n url = \"http://www.omdbapi.com/?i=\" + imdb_id + \"&apikey=\" + API_KEY\n response = requests.request(\"GET\", url)\n data = json.loads(response.text)\n\n return data",
"def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def imdb_load_file(file_name):\n imdb_saved = open(file_name)\n imdb_save = json.loads(imdb_saved.read())\n Movie.__movies.append(\n Movie(\n imdb_save['title'],\n imdb_save['description'],\n imdb_save['image'],\n \"https://www.youtube.com/watch?v=\" + imdb_save['youtube_id'],\n imdb_save['genres'],\n imdb_save['released'] \n )\n )",
"def find_cinema_by_name(name):\n return Cinema.objects.filter(name=name).first()",
"def get_database(conn, name):\n\n if conn.hasDatabase(name) is False:\n return conn.createDatabase(name)\n\n return conn[name]",
"def getFilmBySimilarName(Name):\n try:\n connection = connect()\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"SELECT FilmID, Title FROM `films` WHERE TitlePP LIKE %s\"\"\", ('%{}%'.format(Name)))\n\n return cursor.fetchone()\n except Exception as e:\n print(\"Error getting film id by similar name, with name {}\".format(Name), str(e))\n finally:\n connection.close()",
"def get(cls, name):\n result = cls.query().filter(FileRecord.artist == name).first()\n\n if result and len(result) > 0:\n return cls(*result)\n else:\n return None"
] | [
"0.616333",
"0.6082698",
"0.60265845",
"0.60114175",
"0.58648026",
"0.58400637",
"0.58213335",
"0.57996124",
"0.5794334",
"0.5785268",
"0.5768992",
"0.5731461",
"0.5723369",
"0.57161194",
"0.56995326",
"0.5677104",
"0.5616365",
"0.55860436",
"0.555823",
"0.5479197",
"0.5463535",
"0.542041",
"0.53960365",
"0.5363736",
"0.5361187",
"0.5360004",
"0.5359468",
"0.5344016",
"0.53283036",
"0.5303233"
] | 0.7363138 | 1 |
Method to read the csv file | def read_csv_file(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_csv():",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def get_data(self, csv_file):\n pass",
"def loadCSV(input_file):",
"def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:",
"def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]",
"def read(self):\r\n\r\n self.data = []\r\n\r\n with open(self.filename + \".csv\", mode='r') as csv_file:\r\n reader = csv.DictReader(csv_file)\r\n for row in reader:\r\n self.data.append(row)",
"def readCSV(self):\n\n content = []\n with open(self.filename) as file:\n sn = csv.Sniffer()\n sn.preferred = [self.delimiter]\n try:\n dialect = sn.sniff(file.read(1024))\n except csv.Error:\n if not file.endswith(\"csv\"):\n self.delimiter = \"\\t\"\n file.seek(0)\n reader = csv.reader(file, delimiter=self.delimiter)\n dialect = reader.dialect\n file.seek(0)\n reader = csv.reader(file, dialect)\n rownr = 0\n\n for row in reader:\n\n if rownr == 0:\n header = row\n else:\n # print(row)\n content.append(row)\n rownr += 1\n\n file.close()\n\n return content.copy()",
"def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)",
"def read(self):\n with open(self.filename) as f:\n reader=csv.reader(f)\n for row in reader:\n self.data.appendleft(row)",
"def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)",
"def read_file():\r\n #with nos permite manejar el archivo dentro del bloque y despues cerrarlo\r\n with open('Entries.csv') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n return data",
"def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data",
"def read_csv(path, number_of_header_lines=0):\n # if not os.path.isfile(path):\n try:\n return genfromtxt(path, delimiter=', ', skip_header=number_of_header_lines)\n except:\n raise ValueError(\"File does not exist!\", path)",
"def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self",
"def readCSV(self, csvFileName):\n\tdata = []\n\twith open(csvFileName) as csvFile:\n\t\treader = csv.reader(csvFile)\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\treturn data",
"def read(self):\n \n self.df = pd.read_csv(self.path, encoding = \"ISO-8859-1\")",
"def reader(self):\n df = pd.read_csv(self.path)\n return df",
"def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data",
"def read_csv(self, filepath):\n try:\n self.df = pd.read_csv(filepath)\n return self\n except FileNotFoundError as e:\n raise OperationError(f\"File not found - {filepath}\") from e\n except ParserError as e:\n raise OperationError(f\"Fails to parse file - {e}\") from e",
"def readRecordFromFile():\n\twith open(gbl.sourceFile, newline='') as csvfile:\n\t\trowReader = csv.reader(csvfile, delimiter=gbl.csvDiscriminator, quotechar=gbl.csvQuotechar)\n\t\tfor row in rowReader:\n\t\t\tROWData.append(row)",
"def _read_csv(input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header",
"def read_csv(path):\n csv_data =[]\n \n with open(path, 'r') as csv_file:\n csv_read = csv.reader(csv_file, dialect='excel')\n for row in csv_read:\n csv_data.append(row)\n\n return(csv_data)",
"def read_csv_file(file_name):\n \n with open(file_name, newline='') as csv_file: # don't need to explicitly close the file now\n csv_table = []\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n csv_table.append(row)\n return csv_table",
"def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))",
"def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))",
"def readcsv(path, delimiter= ','):\n my_data = genfromtxt(path, delimiter= delimiter)\n return my_data",
"def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)",
"def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results",
"def read_csv_file(file_name, file_delimeter):\n \n with open(file_name, newline='') as csv_file: # don't need to explicitly close the file now\n csv_table = []\n csv_reader = csv.reader(csv_file, delimiter=file_delimeter)\n for row in csv_reader:\n csv_table.append(row)\n return csv_table"
] | [
"0.874578",
"0.8275162",
"0.7794639",
"0.7679718",
"0.76562405",
"0.7616285",
"0.7401663",
"0.7334804",
"0.7147184",
"0.70850873",
"0.70783514",
"0.7049833",
"0.6993045",
"0.6967052",
"0.6965624",
"0.6955517",
"0.69535315",
"0.6939918",
"0.6938868",
"0.6930313",
"0.6920754",
"0.69122034",
"0.6903964",
"0.68857294",
"0.68805856",
"0.68805856",
"0.686192",
"0.68491447",
"0.68458897",
"0.684218"
] | 0.898817 | 0 |
Method to check if the file exists | def is_file_exists(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_file_exist(self):\n return False",
"def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False",
"def file_exist() -> bool:\n pass",
"def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False",
"def file_exists(path):\n return os.path.exists(path)",
"def FileExists(file):\n return os.path.exists(file)",
"def file_exists(cls, path: Path) -> bool:\n return path.exists()",
"def check_if_file_exists(path):\n\n return os.path.exists(path)",
"def exist(self):\n return self.file_path.exists()",
"def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. Try again\")\n return False",
"def file_exists(filename):\n return os.path.exists(filename)",
"def file_exists(path: str) -> bool:\n\treturn os.path.isfile(path)",
"def file_exists(path):\n\n try:\n with open(path):\n return True\n except IOError:\n return False",
"def file_exists(self):\n return os.path.exists(self._fileName)",
"def file_exists(filename: str):\n if osp.exists(filename) is True:\n return True\n else:\n return False",
"def _does_file_exist(file_path):\n return os.path.exists(file_path) and os.path.getsize(file_path) > 0",
"def file_exists(path: str) -> bool:\n return os.path.isfile(path)",
"def exists_file(f):\n if os.path.exists(f):\n return True\n return False",
"def exists(self) -> bool:\n return self._file_exists()",
"def file_exists(file_path):\r\n return exists(file_path) and isfile(file_path)",
"def file_exists(file_path):\n\n return Path(file_path).is_file()",
"def file_exist(file_path):\n return os.path.isfile(file_path)",
"def file_exists(filename: str) -> bool:\n\n return os.path.exists(filename)",
"def file_exists(filename):\n return os.path.isfile(filename)",
"def exists(self):\n\n return os.path.exists(self[\"~filename\"])",
"def file_exists(file_path):\n\n if file_path is None:\n return False\n\n if not os.path.isfile(file_path):\n return False\n\n return True",
"def file_exists(file_path):\n\n if file_path is None:\n return False\n\n if not os.path.isfile(file_path):\n return False\n\n return True",
"def _check_file_exists(self, filename):\n if not os.path.exists(filename):\n print('\\n[-] ERROR: %s is not at the specified path! \\\n Please check the filepath and filename...' \n %filename)\n return False\n return True",
"def exists(self, path):",
"def has_file(path):\n return os.path.exists(path)"
] | [
"0.8750944",
"0.8677664",
"0.86291355",
"0.8440019",
"0.83819354",
"0.8322929",
"0.8270689",
"0.8260134",
"0.8172603",
"0.81232107",
"0.8122489",
"0.809497",
"0.80892795",
"0.80884355",
"0.79643077",
"0.79612887",
"0.7935783",
"0.792067",
"0.7898609",
"0.78921366",
"0.78911746",
"0.7877763",
"0.7872153",
"0.7867013",
"0.786574",
"0.7864893",
"0.7864893",
"0.7858483",
"0.7854266",
"0.78463346"
] | 0.9018266 | 0 |
print the statistics from the field triplen | def printLenStats(data):
print "statistics of training trips length: mean",
print data["triplen"].mean(), # Mean of values
print "std",
print data["triplen"].std(), # Unbiased standard deviation
print "var",
print data["triplen"].var(), # Unbiased variance
print "max",
print data["triplen"].max(),
print "min",
print data["triplen"].min() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_trigrams_count(self):\n for u_v in self.trigram_counts:\n for w in self.trigram_counts[u_v]:\n count=self.trigram_counts[u_v][w]\n print \"{2}\\tc({0} {1})\".format(u_v,w,count)",
"def print_metric(self):\r\n print(f'\\n\\n{self.sort} metric of size {self.n}')\r\n print(f'algorithm: {self.algo}')\r\n print(f'number of comparisons: {self.comps}')\r\n print(f'number of exchanges: {self.exs}')\r\n print(f'regression equation for comparisons: {self.comp_eq}')\r\n print(f'regression equation for exchanges: {self.ex_eq}')\r\n print(f'presorted data: {self.predata}')\r\n print(f'postsorted data: {self.postdata}')",
"def print_peak_data(sp) : \n for field in sp.fields : print(field, end=' ')\n print('')",
"def print_bm25_field_length_info(path_doc_length_info_bm25f):\n\n doc_length_info = load_pickle(path_doc_length_info_bm25f)\n \n n_terms_author_total = 0\n n_terms_sections_total = 0\n n_terms_title_total = 0\n n_terms_abstract_total = 0\n for cord_uid in doc_length_info.keys():\n \n info = doc_length_info[cord_uid]\n \n n_terms_author_total += info['author']\n n_terms_sections_total += info['sections']\n n_terms_title_total += info['title']\n n_terms_abstract_total += info['abstract']\n \n n_terms_total = n_terms_author_total + n_terms_sections_total + n_terms_title_total + n_terms_abstract_total\n \n print(len(doc_length_info))\n print(f\"n_terms_author_total = {n_terms_author_total}, average={n_terms_author_total/len(doc_length_info)}\")\n print(f\"n_terms_sections_total= {n_terms_sections_total}, average={n_terms_sections_total/len(doc_length_info)}\")\n print(f\"n_terms_title_total = {n_terms_title_total}, average={n_terms_title_total/len(doc_length_info)}\")\n print(f\"n_terms_abstract_total= {n_terms_abstract_total}, average={n_terms_abstract_total/len(doc_length_info)}\")\n print(f\"n_terms_total = {n_terms_total}, average={n_terms_total/len(doc_length_info)}\")",
"def display_results_line(stats):\n # Line output.\n template = ' %5d |%6.2f |%6.2f %6.2f %6.2f |%3d %3d %3d'\n\n num_bytes = stats['data_size']\n\n P_times = stats['P_times']\n val = [num_bytes]\n for p in P_times:\n val.append(p*1000.)\n\n val.append(stats['count_lost'])\n val.append(stats['count_timeout'])\n val.append(stats['count_corrupt'])\n val = tuple(val)\n\n print(template % val)",
"def print_strand_stats(strand_statistics):\n print(' Total\\tFor\\tRev\\tDif')\n for base, count in strand_statistics.items():\n print(f'{base}: {str(count[0])}\\t{str(count[1])}\\t{str(count[2])}\\t{str(count[3])}')",
"def print_metrics(self):\n # num times regular barcodes appear in a simulated doublet nearest neighbors, grouped by value\n # TODO: this list is 2 dimensional... need to extract dimensione with counts for the counter\n frequencies = [i[1] for i in self.num_times_knn]\n counter = collections.Counter(frequencies)\n print(\"##\\nNumber time barcoded in sim doub KNN: {}\".format(counter))\n\n # artificial fraction\n print(\"##\\nArtificial fraction: {}\".format(self.artificial_fraction))\n\n # num doublets\n print(\"##\\nNumber of doublets called: {}\".format(self.num_doublets))",
"def print_time_stats(self):\n walk_total = 0\n bus_total = 0\n for passenger in self.passengers:\n time = self._passenger_trip_time(passenger)\n walk_total += time[\"walk\"]\n bus_total += time[\"bus\"]\n av_bus_time = bus_total / self.total_passengers\n av_walk_time = walk_total / self.total_passengers\n\n print(f\"Average time on bus: {av_bus_time:.0f} min\")\n print(f\"Average walking time: {av_walk_time:.0f} min\")",
"def report(self):\n print('total 1', len(self.videoids1))\n print('total 2', len(self.videoids2))\n print('total of repeats in_1', len(self.videoids_dict_repeats1))\n print('total of repeats in_2', len(self.videoids_dict_repeats2))\n print('total in_1_missing_in_2', len(self.in_1_missing_in_2))\n print('total in_2_missing_in_1', len(self.in_2_missing_in_1))",
"def printData (data):\n print(str(len(data)) + '\\t' + str(data))",
"def print_data_info(my_data, src_field, trg_field):\n train_data = my_data[\"train\"]\n valid_data = my_data[\"val\"]\n test_data = my_data[\"test\"]\n\n print(\"Data set sizes (number of sentence pairs):\")\n print('train', len(train_data))\n print('valid', len(valid_data))\n print('test', len(test_data), \"\\n\")\n\n print(\"First training example:\")\n print(\"src:\", \" \".join(vars(train_data[0])['src']))\n print(\"trg:\", \" \".join(vars(train_data[0])['trg']), \"\\n\")\n\n print(\"Most common words (src):\")\n print(\"\\n\".join([\"%10s %10d\" % x for x in src_field.vocab.freqs.most_common(10)]), \"\\n\")\n print(\"Most common words (trg):\")\n print(\"\\n\".join([\"%10s %10d\" % x for x in trg_field.vocab.freqs.most_common(10)]), \"\\n\")\n\n print(\"First 10 words (src):\")\n print(\"\\n\".join(\n '%02d %s' % (i, t) for i, t in enumerate(src_field.vocab.itos[:10])), \"\\n\")\n print(\"First 10 words (trg):\")\n print(\"\\n\".join(\n '%02d %s' % (i, t) for i, t in enumerate(trg_field.vocab.itos[:10])), \"\\n\")\n\n print(\"Number of NL words (types):\", len(src_field.vocab))\n print(\"Number of AMR words (types):\", len(trg_field.vocab), \"\\n\")",
"def trip_duration_stats(df):\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n # TO DO: display total travel time\r\n total_time = df['Trip Duration'].sum()\r\n print('total trave time:',total_time,'seconds, or',total_time/3600,'hour')\r\n # TO DO: display mean travel time\r\n mean_time = df['Trip Duration'].mean()\r\n print('mean trave time:',mean_time,'seconds, or',mean_time/3600,'hour')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def summarize(self):\n txtStr = \"%s to %s, %d flight legs.\" %\\\n (self.origin, self.destination, self.nlegs)\n txtStr += \"\\nTakeoff at %s\\nLanding at %s\\n\" %\\\n (self.takeoff, self.landing)\n txtStr += \"Flight duration of %s including %s observing time\" %\\\n (str(self.flighttime), self.obstime)\n\n return txtStr",
"def pretty_print(self):\n output = \"Count: \"\n if self.soft:\n output += \"S\"\n output += str(self.count)\n if self.can_double:\n output += \", can double\"\n if self.can_split:\n output += \", can split\"\n print(output)",
"def trip_duration_stats(df, timing_off_flag):\n print('\\nCalculating Trip Duration...\\n')\n if not timing_off_flag:\n start_time = time.time()\n\n # Display total travel time.\n display_duration('Total duration of all trips:\\n',\n df['Trip Duration'].sum())\n\n # EXTENSION: display minimum travel time.\n display_duration('Shortest trip duration:\\n', df['Trip Duration'].min())\n\n # Display mean travel time.\n display_duration('Mean trip duration:\\n', df['Trip Duration'].mean())\n\n # EXTENSION: display median travel time.\n display_duration('Half of the trips took less than:\\n',\n df['Trip Duration'].median())\n\n # EXTENSION: display 90th percentile travel time.\n display_duration('90% of the trips took less than:\\n',\n df['Trip Duration'].quantile(0.9))\n\n # EXTENSION: display maximum travel time.\n display_duration('Longest trip duration:\\n', df['Trip Duration'].max())\n\n print('') # Blank line after final output improves format.\n if not timing_off_flag:\n print('This took {0:6f} seconds.'.format(time.time() - start_time))\n print('-' * 40)",
"def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display total travel time\r\n\r\n #displaying total travel time using sum() method\r\n print('\\nTotal travel duration is: ',df['Trip Duration'].sum())\r\n\r\n # TO DO: display mean travel time\r\n\r\n #displaying average travel time using mean() method\r\n print('\\nAverage travel duration is: ',df['Trip Duration'].mean())\r\n\r\n #extra statistics\r\n #what is the largest and smallest duration of travel time\r\n\r\n print('\\nLargest travel duration is: ',df['Trip Duration'].max())\r\n print('\\nSmallest travel duration is: ',df['Trip Duration'].min())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n print('total travel time : {}'.format(df['Trip Duration'].sum()))\n\n # TO DO: display mean travel time\n print('total travel time : {}'.format(df['Trip Duration'].mean()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def result(self):\n return (\"Precision@\" + str(self.length) + \": \"), (self.hit / self.test)",
"def stats(self):",
"def nice(self):\n print(self.getName(), \":\", self.getLen())",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n# TO DO: display total travel time\n print(\"Total travel time:\", df['Trip Duration'].sum())\n\n # TO DO: display mean travel time\n print(\"Total mean travel time: \", df['Trip Duration'].mean())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)",
"def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. ({count:,}/{total:,})\")",
"def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # display total travel time\r\n print('Total travel time: ')\r\n print(df['Trip Duration'].sum())\r\n\r\n # display mean travel time\r\n print('Average travel time: ')\r\n print(df['Trip Duration'].mean())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])",
"def print_details():\n\n print('\\n'\n 'SCORE: {0}\\n'\n 'COMPLEXITY: {1}\\n'\n .format(pwd_score, pwd_complex))\n\n print('Password as list: {0}\\n'.format(pwd_list))\n print('ns calculations: {0}\\n'.format(ns))\n print('Scores calculations: {0}\\n'.format(scores))\n print('Entropy: {0}\\n'.format(entropy))\n\n # store string lengths for table\n plength = {\n 'counts': 0,\n 'scores': 0,\n 'heading': 0\n }\n # loop value dicts to get lengths for table\n for k, v in ns.items():\n if len(str(v)) > plength['counts']:\n plength['counts'] = len(str(v))\n for k, v in scores.items():\n if len(str(v)) > plength['scores']:\n plength['scores'] = len(str(v))\n for k, v in stext.items():\n if len(v) > plength['heading']:\n plength['heading'] = len(v)\n\n # print table heading\n # t00, t11, t22 calculate indentation\n t00 = int(((plength['heading'] + 2 - 6) / 2)) * ' '\n t11 = int(((plength['counts'] + 1) / 2)) * ' '\n t22 = int(((plength['scores'] + 1) / 2)) * ' '\n print('{0}Metric{0}{1}Count{1}{2}Bonus'.format(t00, t11, t22))\n\n # print table content\n for k, v in stext.items():\n # get description\n t0 = stext[k]\n # indent count\n t1 = (plength['heading'] + plength['counts'] - len(stext[k]) - len(\n str(ns[k])) + 5) * ' '\n # get count\n t2 = ns[k]\n # indent score\n t3 = (plength['scores'] - len(str(scores[k])) + 5) * ' '\n # get score\n t4 = scores[k]\n print('{0}{1}{2}{3}{4}'.format(t0, t1, t2, t3, t4))",
"def trip_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_travel_time = time.strftime(\"%H:%M:%S\", time.gmtime(total_travel_time))\n print('Total travel time:\\t', total_travel_time)\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Mean travel time:\\t', mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_time = df['Trip Duration'].sum()\n print(\"The total travel time was:\",str(total_time))\n\n # TO DO: display mean travel time\n mean_time = df['Trip Duration'].mean()\n print(\"The average travel time was:\",str(mean_time))\n\n print('-'*40)",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_duration = df['Trip Duration'].sum()\n total_duration_h = get_human_readable_time(total_duration)\n print('Total travel duration: {}'.format(total_duration_h))\n\n # display mean travel time\n mean_duration = df['Trip Duration'].mean()\n mean_duration_h = get_human_readable_time(mean_duration)\n print('Average travel duration: {}'.format(mean_duration_h))\n\n # display longest trip\n max_duration = df['Trip Duration'].max()\n max_duration_h = get_human_readable_time(max_duration)\n print('Longest trip duration: {}'.format(max_duration_h))\n\n print('\\nThis took %s seconds.' % (time.time() - start_time))\n print('-'*40)",
"def display_trip_stats(self):\n\n self.trip_frame = stat_display_labels(\n self.stats_frame,\n \"Trip Stats\",\n [\"The total travel time was:\", \"The mean travel time was:\"],\n row=0,\n column=2,\n )\n self.trip_stats_data = tk.Label(self.trip_frame, justify=\"left\")\n self.trip_stats_data.grid(row=0, column=1)",
"def trip_duration_stats(data):\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n # display total travel time\n total_trip_time= data['Trip Duration'].sum()\n print('The Total Travel Time is {} Hours'. format(total_trip_time/3600))\n # display mean travel time\n avg_trip= data['Trip Duration'].mean()\n print('The Average Travel Time is {} Minutes'. format(avg_trip/60))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)"
] | [
"0.6339487",
"0.6272609",
"0.6054619",
"0.5972678",
"0.5954653",
"0.59238714",
"0.5839713",
"0.58144724",
"0.58086455",
"0.57821864",
"0.5774344",
"0.57310075",
"0.57287866",
"0.5718309",
"0.57071096",
"0.5701905",
"0.56911206",
"0.568461",
"0.56664026",
"0.5662793",
"0.56611216",
"0.5651066",
"0.5625517",
"0.5619321",
"0.5604127",
"0.5593166",
"0.55890983",
"0.558508",
"0.5573314",
"0.55694836"
] | 0.75883275 | 0 |
given the arrays, it computes the mean of the Haversine distances | def meanHaversineDistance(lat_sub, lon_sub, lat_real, lon_real):
return np.mean(HaversineDistance(lat_sub, lon_sub, lat_real, lon_real)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mean(self, words: [str]) -> numpy.ndarray:\n vecs = numpy.array([self[word] for word in words])\n return numpy.mean(vecs, axis=0)",
"def geo_mean(num_list):\n np_array = np.array(num_list)\n return np_array.prod() ** (1.0 / len(np_array))",
"def average_qa_distance(distances):\n\treturn np.average(np.median(distances,axis=0))",
"def mean(vals):",
"def average_distance(predictions, targets):\n total_distance = 0\n for prediction, target in zip(predictions, targets):\n total_distance += Levenshtein.distance(prediction, target)\n return total_distance / len(predictions)",
"def har_mean(array):\n return ((sum([1/x for x in array]))**(-1))*len(array)",
"def find_avg(centroids, short_cut=False, sim_scores=None):\n \n total_sim = 0.0\n total_comparisons = 0\n \n if short_cut:\n total_comparisons = len(sim_scores)\n \n for score in sim_scores:\n total_sim += score\n \n return (total_sim / total_comparisons)\n\n length = len(centroids)\n\n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n total_sim += similarity(centroids[i], centroids[j])\n total_comparisons += 1\n\n return (total_sim / total_comparisons)",
"def mean(numbers):\n return float(sum(numbers)) / float(len(numbers))",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def approximate(inp,w_len):\n\t\t\n\t\top = []\n\t\t\n\t\tfor i in range(0,len(inp),w_len):\n\t\t\n\t\t\top.append(np.mean(inp[i:i+w_len]))\n\t\t\t\n\t\treturn np.array(op)",
"def get_mean(numlist):\n return np.mean(numlist)",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def wo_mean(arr):\n\n return np.array(arr) - np.mean(arr, axis=0)",
"def get_avg_dists(state1_samids, state2_samids, distdict):\r\n # go through dmtx\r\n state1_avg_dists = []\r\n for sam1 in state1_samids:\r\n dists = []\r\n for sam2 in state2_samids:\r\n if sam1 == sam2:\r\n continue\r\n dists.append(distdict[sam1][sam2])\r\n state1_avg_dists.append(numpy.mean(dists))\r\n return state1_avg_dists",
"def mean(array: list) -> float:\n\n arr_sum = 0\n\n for element in array:\n arr_sum = arr_sum + element\n\n return arr_sum/len(array)",
"def avg(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n \n return (u + v) / 2.0",
"def point_avg(points):\n if len(points)==1:\n new_center= np.mean(points)\n else:\n new_center= [np.mean([x[y] for x in points]) for y in range(len(points[0]))]\n return new_center",
"def mean(values):\r\n return sum(values) / float(len(values))",
"def find_mean(values):\n return sum(values) / len(values)",
"def means(self):\n return -0.5 * self.nat1 / self.nat2[..., 0]",
"def updateCentroids(self, points, closest, centroids):\n return numpy.array([points[closest==k].mean(axis=0) for k in range(centroids.shape[0])])",
"def haversine_array(lon1, lat1, lon2, lat2):\n R = 6371.0 # radius of the earth in km\n\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = R * c\n return km",
"def word_average_list(self, docs):\n return np.vstack([self.word_average(sent) for sent in docs])",
"def word_average_list(self, docs):\n return np.vstack([self.word_average(sent) for sent in docs])",
"def harmonic_mean(numbers):\n return 2 * numbers[0] * numbers[1] / sum(numbers)",
"def mean(arr) -> float:\n return sum(arr) / len(arr)",
"def angMean(angs, weights):\n #assert len(angs) == len(weight)\n # normalization is unnecessary as we deal with just the angle\n return np.angle( np.sum( weights * np.exp(1j*np.array(angs)) ))# / ( len(angs) * sum(weight) ) )",
"def computeMeans(list_of_lists):\n # Find length of longest list\n longest = 0\n for lst in list_of_lists:\n if len(lst) > longest:\n longest = len(lst)\n # Get totals\n tots = [0]*(longest)\n for lst in list_of_lists:\n for i in range(longest):\n if i < len(lst):\n tots[i] += lst[i]\n else:\n tots[i] += lst[-1]\n # Convert tots to an array to make averaging across each index easier\n tots = pylab.array(tots)\n # Compute means\n means = tots/float(len(list_of_lists))\n return means",
"def computeMeans(list_of_lists):\n # Find length of longest list\n longest = 0\n for lst in list_of_lists:\n if len(lst) > longest:\n longest = len(lst)\n # Get totals\n tots = [0]*(longest)\n for lst in list_of_lists:\n for i in range(longest):\n if i < len(lst):\n tots[i] += lst[i]\n else:\n tots[i] += lst[-1]\n # Convert tots to an array to make averaging across each index easier\n tots = pylab.array(tots)\n # Compute means\n means = tots/float(len(list_of_lists))\n return means"
] | [
"0.6321584",
"0.6258387",
"0.6237396",
"0.6234049",
"0.61874163",
"0.61680406",
"0.612857",
"0.60996014",
"0.6094646",
"0.6094646",
"0.6063661",
"0.6013036",
"0.600364",
"0.59932184",
"0.594262",
"0.5918531",
"0.5912743",
"0.5897897",
"0.589348",
"0.58902544",
"0.58897704",
"0.58772856",
"0.58726",
"0.5868179",
"0.5868179",
"0.5863038",
"0.5841951",
"0.58387536",
"0.58309394",
"0.58309394"
] | 0.68384725 | 0 |
Gets the next element in the list. Returns Null on timeout, or raises Empty when finished. | def next_ele(self):
try:
ret = self._queue.get(block=True, timeout=0.5)
self._queue.task_done()
return ret
except queue.Empty:
if not self.isAlive():
raise
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None",
"def get_next_item(self, timeout=None):\n if self.current_item is not None:\n raise error_classes.UVMSequenceError(\"You must call item_done() before calling get_next_item again\")\n self.current_item = self.req_q.get(timeout=timeout)\n return self.current_item",
"def next(self):\n try:\n return self.queue.get()\n except Empty:\n raise StopIteration",
"def get_next(self):\n try:\n return self.the_input[self.index]\n except IndexError:\n return None",
"def next(self):\n while True: # waiting\n item = self.get_next_if_any()\n if item is not None: # feature: value None is filtered out\n return item\n\n if self.nomore: # if nothing else is coming\n break # stop waiting\n\n time.sleep(0.1) # wait before checking again\n\n raise StopIteration() # tell next worker nothing else is coming",
"def next(self):\n if self.pointer > len(self.queue) - 1:\n self.pointer = 0\n raise StopIteration\n val = self.queue[self.pointer]\n self.pointer += 1\n return val",
"def get_next_if_any(self):\n try:\n ret = self.work[deepcopy(self.i)]\n self.i += 1\n # print \"Trickling item\", self.i\n return ret\n except Exception:\n return None",
"def next(self):\n nxt = self.readentry()\n if nxt is None:\n raise StopIteration\n return nxt",
"async def locked_next():\n async with lock:\n try:\n next_value = await anext(iterator)\n except StopAsyncIteration:\n return None\n return next_value",
"def get_next_item(self, timeout=None):\n return self.export.get_next_item(timeout=timeout)",
"def get_next_activity(self):\n try:\n next_object = next(self)\n except StopIteration:\n raise IllegalState('no more elements available in this list')\n except Exception: # Need to specify exceptions here!\n raise OperationFailed()\n else:\n return next_object",
"def peek(self):\n if self.is_empty():\n return None\n list_length = len(self.list) - 1\n return self.list[list_length]",
"def get_next(self):\n\n # pop the next item off the front of the list\n item = self.r.lpop(self.joblist)\n\n # gotta decode the bytes\n ritem = item.decode('utf-8')\n\n # if nothing comes out of the list, then it's empty and return 0\n # otherwise return whatever is next\n if not item:\n return 0\n else:\n return ritem",
"def next_element(self):\n return self.extract_element()",
"def get_next(self, pos):\n if pos >= len(self._href_account_list) - 1:\n return None, None\n return self._get_at(pos + 1)",
"def next(self):\n if self.isquiet():\n raise QueueNoNext()\n\n # Delete old item\n qcurr = self.base + \".\" + str(self.curr)\n os.unlink(qcurr)\n\n # Next item\n self.curr += 1\n self._setcurr(self.curr)\n\n return self.head()",
"def _peek_next(self):\n return self.source[self.current + 1] if not self.current + 1 > len(self.source) else None",
"def peek(self):\n\n if self.is_empty():\n return None\n\n return self._list[-1]",
"def next(some_list, current_index):\n try:\n return some_list[int(current_index) + 1] # access the next element\n except:\n return '' # return empty string in case of exception",
"def get_next_objective(self):\n try:\n next_object = next(self)\n except StopIteration:\n raise IllegalState('no more elements available in this list')\n except Exception: # Need to specify exceptions here!\n raise OperationFailed()\n else:\n return next_object",
"def _next_from_pool(self) -> type:\n\n if self.current is None:\n try:\n self.current = next(self.pool)\n except StopIteration:\n self._next_worker = self._next_exhausted\n self._next_exhausted()\n try:\n return next(self.current)\n except StopIteration:\n self.current = None\n return self.__next__()",
"def peek(self):\n if self.is_empty():\n return None\n return self.list.head.data",
"def get_next(self):\n return self.next",
"def get_next(self):\n return self.next",
"def get_next_item(self):\n pass",
"async def next(self, default=NO_ITEM) -> typing.Any:\n try:\n return await self.__anext__()\n except StopAsyncIteration:\n if default == NO_ITEM:\n raise\n\n return default",
"def get_element(self, pos):\n curr = self.head\n count = 1\n\n while curr != None:\n if count == pos:\n return curr.data\n\n curr = curr.link\n count += 1\n return None",
"def first(self):\n try:\n return self.next()\n except StopIteration:\n return None",
"def get_next_event(self, timeout=None):\n ret = self.inq.Wait(timeout)\n return ret",
"def getNext(self):\n return self.__nextListNode"
] | [
"0.8053774",
"0.7185805",
"0.71605355",
"0.69760185",
"0.6794046",
"0.6730466",
"0.6636796",
"0.6606605",
"0.6502341",
"0.64677364",
"0.64234453",
"0.6394917",
"0.6389111",
"0.6373149",
"0.63690454",
"0.6351756",
"0.6346494",
"0.63144475",
"0.6292954",
"0.6280215",
"0.62769175",
"0.62535226",
"0.6243247",
"0.6243247",
"0.62365246",
"0.6212677",
"0.62040377",
"0.6200659",
"0.61985594",
"0.61918473"
] | 0.8092506 | 0 |
If testing was enabled, returns the cache of all loaded RedditElements. | def get_elements(self):
return self._testing_cache | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all(self):\n if not self._cache:\n self.load()\n\n return self._cache",
"def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []",
"def mine(self):\n collections = []\n # Getting HTML snapshot with selenium, storing a soup object in .data\n self.scrape()\n # Returns only the parts of the soup that surround each collection\n collection_elements = self.get_collection_elements()\n # Turns each soup element into a CollectionElement object\n collections = self.get_info_from_collections(collection_elements)\n # NOTE THE RETURN VALUE IS MERELY TO PASS TESTING< MUST BE CHANGED\n return self.data",
"def fillCache(self):\n items = self.source.getRecent()\n items.reverse() # make sure the most recent ones are added last to the cache\n for item in items:\n self.cache.append(item.title)",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache",
"async def get() -> list:\n if _cache is None:\n await _update()\n return _cache",
"def get_cache(self):\n return self.cache",
"def test_KanePage_cached(self):\n kane_page = KanePage(mocked=True)\n from_cache = kane_page.fetch_taplist(brewery=\"Kane\")\n assert not from_cache\n\n # 2nd read from cache!\n kane_page.ssml_taplist() # this puts it in the cache\n from_cache = kane_page.fetch_taplist(brewery=\"Kane\")\n assert from_cache",
"def div_html_list(self):\n return self.q(css='div.test').html",
"def test_suggested_topic_get_all(self):\n with mock.patch('suggestedtopics.models.cache') as mock_cache:\n with mock.patch('suggestedtopics.models.pickle') as mock_pickle:\n mock_cache. __contains__.return_value = True\n mock_pickle.load.return_value = True\n\n actual_suggested_topic = SuggestedTopics.get_all()",
"def _get_elements(self):\n return self._elements",
"def test_reader_content_caching(self):\r\n settings = get_settings(filenames={})\r\n settings['CACHE_PATH'] = self.temp_cache\r\n settings['READERS'] = {'asc': None}\r\n\r\n generator = ArticlesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n generator.generate_context()\r\n self.assertTrue(hasattr(generator.readers, '_cache'))\r\n\r\n generator = ArticlesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n readers = generator.readers.readers\r\n for reader in readers.values():\r\n reader.read = MagicMock()\r\n generator.generate_context()\r\n for reader in readers.values():\r\n reader.read.assert_called_count == 0",
"async def set_all_cache(self) -> dict:\n all_data = await self.storage.load_all()\n await self.cache.set_all(all_data)\n self.all_cached = True\n return all_data",
"def __iter__(self):\n self._fetch_all()\n return iter(self._result_cache)",
"def cache_all(self):\n if not self._cached_all:\n poss = range(len(self))\n uuids = self.vars['uuid']\n\n cls_names = self.variables['cls'][:]\n samples_idxss = self.variables['samples'][:]\n subchanges_idxss = self.variables['subchanges'][:]\n mover_idxs = self.variables['mover'][:]\n details_idxs = self.variables['details'][:]\n try:\n input_samples_vars = self.variables['input_samples']\n except KeyError:\n # BACKWARD COMPATIBILITY: REMOVE IN 2.0\n input_samples_idxss = [[] for _ in samples_idxss]\n else:\n input_samples_idxss = input_samples_vars[:]\n\n [self._add_empty_to_cache(*v) for v in zip(\n poss,\n uuids,\n cls_names,\n samples_idxss,\n input_samples_idxss,\n mover_idxs,\n details_idxs)]\n\n [self._load_partial_subchanges(c, s) for c, s in zip(\n self,\n subchanges_idxss)]\n\n self._cached_all = True",
"def getCacheContents(self):\n return self._cache",
"def __init__(self, testing=False):\n\t\tthreading.Thread.__init__(self)\n\t\tself.sources = []\n\t\tself._testing_cache = None if not testing else []\n\t\tself._c_lock = threading.Lock()\n\t\tself._total_count = 0\n\t\tself._queue = queue.Queue(maxsize=1000)\n\t\tself._keep_running = False\n\t\tself.daemon = True\n\t\tself.name = 'RedditElementLoader'",
"def __iter__(self):\n return iter(self._cached)",
"def cache_results(self):\n self.cache_manager.cache_results(\n self.parser,\n self.query,\n self.search_engine_name,\n self.scrape_method,\n self.page_number,\n db_lock=self.db_lock\n )",
"def test_find_all_hit_cache(self):\n # arrange\n movie_id_one = MovieId('123')\n movie_id_two = MovieId('789')\n character = character_random_with_movie_ids(MovieIds([movie_id_one, movie_id_two]))\n people = People([character])\n\n movie_one = movie_random_with_movie_id_and_people(movie_id_one, people)\n not_expected = Movies([movie_one])\n\n movie_two = movie_random_with_movie_id_and_people(movie_id_two, people)\n expected = Movies([movie_two])\n\n self.movie_repository_mock__find_all(not_expected)\n cache.set('test_blockbuster.movie_repository.find_all', expected, timeout=180)\n\n # act\n repository = self.cache_movie_repository_decorator()\n response = repository.find_all()\n\n # assert\n self.assertNotEqual(not_expected, response)\n self.assertEqual(expected, response)\n self.assertEqual(0, self.movie_repository_mock().find_all.call_count)",
"def test_reader_content_caching(self):\r\n settings = get_settings(filenames={})\r\n settings['CACHE_PATH'] = self.temp_cache\r\n settings['READERS'] = {'asc': None}\r\n\r\n generator = PagesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n generator.generate_context()\r\n self.assertTrue(hasattr(generator.readers, '_cache'))\r\n\r\n generator = PagesGenerator(\r\n context=settings.copy(), settings=settings,\r\n path=CONTENT_DIR, theme=settings['THEME'], output_path=None)\r\n readers = generator.readers.readers\r\n for reader in readers.values():\r\n reader.read = MagicMock()\r\n generator.generate_context()\r\n for reader in readers.values():\r\n reader.read.assert_called_count == 0",
"def get_images(self):\n if not hasattr(self, '_BasePublication__images_cache'):\n self.__images_cache = self.images.all()\n return self.__images_cache",
"def get_cache():\n if not Genre._cache:\n Genre._cache = ObjectCache(Genre().__class__)\n return Genre._cache",
"def dom_data(self):\n dom_providers = getAdapters((self.context, self.request, self.view), IDOMDataProvider)\n results = []\n for name, provider in dom_providers:\n results.append({'name': name or None,\n 'data': provider()})\n return results",
"def test_get_cached_js(self):\n self.assertEquals(len(api.get_cached_js()), 1)",
"def load_cache():\n return {}",
"def get_all_cached_instances(cls):\n return list(cls.__dbclass__.__instance_cache__.values())"
] | [
"0.6270382",
"0.56245005",
"0.5519018",
"0.54027367",
"0.54011166",
"0.54011166",
"0.54011166",
"0.54011166",
"0.5344163",
"0.5297615",
"0.528587",
"0.52725077",
"0.5247935",
"0.522117",
"0.5188726",
"0.51864684",
"0.5170986",
"0.51583016",
"0.5130199",
"0.51186985",
"0.50906247",
"0.5080106",
"0.5065788",
"0.5042249",
"0.5038909",
"0.5036547",
"0.50324196",
"0.5031761",
"0.5026634",
"0.5022434"
] | 0.70427525 | 0 |
This method should be called upon initialization to do the string processing as soon as an instance of MyPassportClass is created. It should take a list of strings `self.entry` and process each string into key-value pairs. With the way the attributes are defined in this class, I do not have to pass anything into this method, and it doesn't have to return anything. | def process_entry(self):
for line_item in self.entry:
pairs = line_item.split(' ')
for pair in pairs:
if ':' in pair:
key, value = pair.split(':')
if value.isdigit():
self.fields[key] = int(value)
else:
self.fields[key] = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, first_name, last_name, **user_info):\n self.first_name = first_name\n self.last_name = last_name\n\n # Similar to Exercise 8-13 user_profile-2.py except the dictionary is\n # an attribute of self.\n self.attributes = {'login_attempts': 0}\n for key,value in user_info.items():\n self.attributes[key] = value\n # self.key = value",
"def parse_entry(self, entry_string):\n entry_type, entry_string = self.pop_entry_type(entry_string)\n cite_key, entry_string = self.pop_key(entry_string)\n field_dict = dict(self.extract_fields(entry_string))\n field_dict[\"type\"] = entry_type\n self.force_field.citations[cite_key] = field_dict",
"def set_from_args(self, args):\n if args.type is not None:\n self.type_cls = _NAME_TO_ENTRY_TYPE_MAP[args.type]\n\n # Process options valid for all entries.\n if args.description is not None:\n self.description = self._normalize_argument(args.description)\n if args.notes is not None:\n self.notes = self._normalize_argument(args.notes)\n\n # Process entry-specific options.\n for field, value in args.properties.items():\n if field.is_protected:\n value = getpass.getpass(f\"Entry {field.name}: \")\n self._update_property(field, self._normalize_argument(value))\n\n # Finally, set the updated timestamp.\n self.updated = storepass.utils.get_current_datetime()",
"def __init__(self):\n self.__dict__.update(\n itertools.starmap(\n lambda key, value: (\n key[0].lower() + # upper case the first letter and add\n key.title() # title case all text\n .replace('_', '') # remove undersore\n [1:] # all text without the first char\n , value\n ) #lambda\n ,os.environ.items()\n ) #itertools.starmap\n ) #update",
"def from_dict(self, dict_entry, line_length=80):\r\n try:\r\n # Set the entry object's attributes to the corresponding\r\n # values in the dictionary entry. Type conversions need to\r\n # be done for non-string attributes.\r\n for key in dict_entry:\r\n dict_entry[key] = self._convert_dict_key(dict_entry[key])\r\n # end for\r\n # Go through the attributes and set them.\r\n if self._validate_dict_entry(dict_entry) or self.info is not None:\r\n try:\r\n for attr in self.FIELDNAMES:\r\n setattr(self, attr, dict_entry[attr])\r\n # end for\r\n return True\r\n except Exception as err:\r\n wl_resource.print_status(\r\n \"Error\", f\"Error creating entry: {err}\",\r\n line_length=line_length)\r\n # end try\r\n else:\r\n return False\r\n except Exception as err:\r\n _z_exc(\"logentry.py/from_dict\", err)\r\n # end try\r",
"def __init__(self, config_entry):\n self.config_entry = config_entry",
"def __init__(self, config_entry):\n self.config_entry = config_entry",
"def __init__(self, entryName, values, config, tokens, scope):\r\n self.entryName = entryName\r\n self.componentsLoader = ComponentsLoader(config)\r\n self.tokens = tokens\r\n self.scope = scope",
"def __init__(self):\n self.iteration_deep = 0\n self.max_iteration_deep = 2\n self.max_dict_key_length = 10\n self.max_string_length = 20\n self.dict_key_characters = string.ascii_lowercase + string.ascii_uppercase + \"_\"\n self.string_value_characters = (string.ascii_lowercase + string.ascii_uppercase +\n \"_\" + string.punctuation + \" \")",
"def __init__(self, org, course, run):\r\n for part in (org, course, run):\r\n LocationBase._check_location_part(part, INVALID_CHARS)\r\n super(SlashSeparatedCourseKey, self).__init__(org, course, run)",
"def __init__(self):\n self.keys = []\n self.hostnames = set()\n self.trust_anchors = []\n self.app_protocols = []",
"def __init__(self):\n try:\n a = list(map(lambda a : True if a == 'y' else False, [input(\"Include upper-case(y/n): \"), input(\"Include lower-case(y/n): \"), input(\"Include numbers(y/n): \"), input(\"Include symbols(y/n): \")]))\n self.upper = a[0]\n self.lower = a[1]\n self.numeric = a[2]\n self.symbol = a[3]\n self.length = int(input(\"Enter length of password : \"))\n except:\n print(\"Not a valid input..! \")\n sys.exit(-1)",
"def __init__(self, alphabet):\n self.alphabet = alphabet\n self.size = len(alphabet)\n # Map from char -> value\n self.mapping = dict((ch, i) for (i, ch) in enumerate(alphabet))\n if len(self.mapping) != self.size:\n raise Exception(\"Duplicate characters in alphabet string\")",
"def initialize(self, keys: List[str]):",
"def _initialize_attributes(self, string_as_file):\n for row in string_as_file:\n first = row[0]\n second = row[1]\n third = row[3]\n match first:\n case 'quadrat':\n self.quadrat = { 'id': second, 'comment': third }\n case 'waypoint':\n self.waypoint = { 'name': second, 'comment': third }",
"def Parse(self, attribute_to_args_map, base_fallthroughs_map,\n parsed_args=None, plural=False, allow_empty=False):\n raise NotImplementedError",
"def __init__(self, zip_code, house_number, house_addition=\"\"):\n self.zip_code = zip_code.replace(' ', '')\n self.house_number = house_number.strip()\n self.house_addition = house_addition.strip()",
"def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)",
"def __init__(self):\r\n super(ProfileParser, self).__init__([self.ProfileEntryHandler()])",
"def __init__(self, account, usernames, passwords):\n self.account = account\n self.usernames = usernames\n self.passwords = passwords",
"def __init__(self, mappings):\r\n for key, value in mappings.iteritems():\r\n setattr(self, key, value)",
"def __init__(self):\n self.char = \"\"\n self.d = {}\n self.end = False",
"def __init__(self, **kwargs):\n super().__init__(list)\n\n for name, value in kwargs.items():\n normalized_name = \"-\".join(name.split(\"_\")).lower()\n self.add(normalized_name, value)",
"def __init__(__self__, *,\n key: pulumi.Input[str],\n values: pulumi.Input[Sequence[pulumi.Input[str]]]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"values\", values)",
"def parse(raw_string, validate): \n # Go field by field.\n passport = Passport()\n\n if not validate:\n # Non-validation mode.\n passport.byr = Passport._find_field_value(raw_string, \"byr\")\n passport.iyr = Passport._find_field_value(raw_string, \"iyr\")\n passport.eyr = Passport._find_field_value(raw_string, \"eyr\")\n passport.hgt = Passport._find_field_value(raw_string, \"hgt\")\n passport.hcl = Passport._find_field_value(raw_string, \"hcl\")\n passport.ecl = Passport._find_field_value(raw_string, \"ecl\")\n passport.pid = Passport._find_field_value(raw_string, \"pid\")\n passport.cid = Passport._find_field_value(raw_string, \"cid\")\n return passport\n\n # Validation mode.\n # byr\n byr_value = Passport._find_field_value(raw_string, \"byr\")\n if len(byr_value) != 4:\n byr_value = \"\"\n try:\n byr_value = int(byr_value)\n if byr_value < 1920 or byr_value > 2002:\n byr_value = \"\"\n except Exception:\n byr_value = \"\"\n passport.byr = byr_value\n\n # iyr\n iyr_value = Passport._find_field_value(raw_string, \"iyr\")\n if len(iyr_value) != 4:\n iyr_value = \"\"\n try:\n iyr_value = int(iyr_value)\n if iyr_value < 2010 or iyr_value > 2020:\n iyr_value = \"\"\n except Exception:\n iyr_value = \"\"\n passport.iyr = iyr_value\n \n # eyr\n eyr_value = Passport._find_field_value(raw_string, \"eyr\")\n if len(eyr_value) != 4:\n eyr_value = \"\"\n try:\n eyr_value = int(eyr_value)\n if eyr_value < 2020 or eyr_value > 2030:\n eyr_value = \"\"\n except Exception:\n eyr_value = \"\"\n passport.eyr = eyr_value\n \n # hgt\n hgt_value = Passport._find_field_value(raw_string, \"hgt\")\n height_number = hgt_value[0:-2]\n height_units = hgt_value[-2:]\n try:\n height_number = int(height_number)\n if height_units == \"cm\":\n if height_number < 150 or height_number > 193:\n hgt_value = \"\"\n elif height_units == \"in\":\n if height_number < 59 or height_number > 76:\n hgt_value = \"\"\n else:\n hgt_value = \"\"\n except Exception:\n hgt_value = \"\"\n passport.hgt = hgt_value\n \n # hcl\n hcl_value = Passport._find_field_value(raw_string, \"hcl\")\n hcl_re_string = r\"#[0-9a-f]{6}\"\n hcl_re = re.compile(hcl_re_string)\n instances = hcl_re.findall(hcl_value)\n if len(instances) != 1:\n hcl_value = \"\"\n passport.hcl = hcl_value\n \n # ecl\n ecl_value = Passport._find_field_value(raw_string, \"ecl\")\n eye_colors = {\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"}\n if ecl_value not in eye_colors:\n ecl_value = \"\"\n passport.ecl = ecl_value\n \n # pid\n pid_value = Passport._find_field_value(raw_string, \"pid\")\n if len(pid_value) != 9:\n pid_value = \"\"\n try:\n int(pid_value)\n except Exception:\n pid_value = \"\"\n passport.pid = pid_value\n\n # cid is always okay\n passport.cid = Passport._find_field_value(raw_string, \"cid\")\n\n return passport",
"def __init__(self, string):\n self.logger = Config.get_logger(_LOGGER_NAME)\n fields = string.strip().split(',')\n if len(fields) != 4:\n self.logger.critical(f'line {string} has incorrect number of fields in proxy list file: {len(fields)}')\n else:\n self.query = fields[0]\n self.bid_price = fields[1]\n self.campaign_id = fields[2]\n self.query_group_id = fields[3]",
"def __init__(self, mapping):\n if len(mapping) != 26:\n raise ValueError('SubstitutionCipher requires a 26-letter mapping.')\n self.charsets = [\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',\n 'abcdefghijklmnopqrstuvwxyz'\n ]\n self.mappings = [\n ''.join([l.upper() for l in mapping]),\n ''.join([l.lower() for l in mapping])\n ]\n self._original = ''.join(ch for charset in self.charsets for ch in charset)\n self._shifted = ''.join(ch for mapping in self.mappings for ch in mapping)\n self._encoder = str.maketrans(self._original, self._shifted)\n self._decoder = str.maketrans(self._shifted, self._original)",
"def __init__(self, pair):\n\n if len(pair) != 2 or min([65 <= ord(char) <= 90 for char in pair]) is False:\n raise ValueError('Invalid PlugLead! PlugLead pair must an uppercase string of length 2')\n self.pair = pair",
"def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)",
"def __init__(__self__, *,\n key: pulumi.Input[str],\n value: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)"
] | [
"0.49940896",
"0.49859047",
"0.49709833",
"0.4953203",
"0.48999676",
"0.48774707",
"0.48774707",
"0.4860976",
"0.48323658",
"0.4814549",
"0.47757342",
"0.47644758",
"0.47550556",
"0.47471005",
"0.47459665",
"0.47355053",
"0.46886247",
"0.46850675",
"0.46790436",
"0.46672106",
"0.46636024",
"0.46468356",
"0.46408674",
"0.46304336",
"0.46226048",
"0.46100482",
"0.46038336",
"0.45914188",
"0.4590399",
"0.4590399"
] | 0.5435954 | 0 |
Handler for button event, redirects to handlers for specific buttons | def OnButton(self, event):
button = event.GetEventObject().GetName()
if button == "Button1":
self.OnButton1()
elif button == "Button2":
self.OnButton2()
elif button == "Button3":
self.OnExit(event) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def OnButton(self, event):\r\n \r\n button = event.GetInt()\r\n\r\n if button == AUI_BUTTON_LEFT or button == AUI_BUTTON_RIGHT:\r\n if button == AUI_BUTTON_LEFT:\r\n if self.GetTabOffset() > 0:\r\n \r\n self.SetTabOffset(self.GetTabOffset()-1)\r\n self.Refresh()\r\n self.Update()\r\n else:\r\n self.SetTabOffset(self.GetTabOffset()+1)\r\n self.Refresh()\r\n self.Update()\r\n \r\n elif button == AUI_BUTTON_WINDOWLIST:\r\n idx = self.GetArtProvider().ShowDropDown(self, self._pages, self.GetActivePage())\r\n \r\n if idx != -1:\r\n \r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_PAGE_CHANGING, self.GetId())\r\n e.SetSelection(idx)\r\n e.SetOldSelection(self.GetActivePage())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)\r\n \r\n else:\r\n event.Skip()",
"def OnButton(self, event):\n\n\n event_id = event.GetId()\n event_obj = event.GetEventObject()\n print(\"Button 1 Clicked:\")\n print(\"ID=%d\" % event_id)\n print(\"object=%s\" % event_obj.GetLabel())",
"def handle_mouse_click(self, button: Button) -> None:\n if button.name == 'BACK':\n self._clear_all_input()\n self.current_page -= 1\n self._focused_button = None\n if self.current_page == len(self.pages) - 2:\n self.current_page -= 1\n elif button.name == 'Show Graph':\n self._plot_graph()\n elif button.name == 'Multiple Regression':\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 2\n self._update_ghg_coefs()\n elif button.tag == 'normal' and self.current_page < len(self.pages) - 2:\n self._selection.handle_selection(self.current_page, button.name)\n self.current_page += 1\n elif isinstance(button, InputButton):\n self._focused_button = button",
"def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())",
"def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))",
"def ev_controllerbuttonup(self, event: tcod.event.ControllerButton) -> T | None:",
"def action_buttons(self):\n try:\n self.ui_Action.clicked.connect(self.action)\n except AttributeError:\n pass",
"def ev_controllerbuttondown(self, event: tcod.event.ControllerButton) -> T | None:",
"def cb_something_4(self, button): \n print(\"Do Something 4\")",
"def cb_something_1(self, button):\n print(\"Do Something 1\")",
"def handle_button(self, event, event_type):\n # 0 for left\n # 1 for right\n # 2 for middle/center\n # 3 for side\n mouse_button_number = self._get_mouse_button_number(event)\n\n # Identify buttons 3,4,5\n if event_type in (25, 26):\n event_type = event_type + (mouse_button_number * 0.1)\n\n # Add buttons to events\n event_type_string, event_code, value, scan = self.codes[event_type]\n if event_type_string == \"Key\":\n scan_event, key_event = self.emulate_press(\n event_code, scan, value, self.timeval)\n self.events.append(scan_event)\n self.events.append(key_event)\n\n # doubleclick/n-click of button\n click_state = self._get_click_state(event)\n\n repeat = self.emulate_repeat(click_state, self.timeval)\n self.events.append(repeat)",
"def _setup_events(self):\n # Bind all events from our buttons (including 'exit')\n self.Bind(wx.EVT_BUTTON, self._process_event)",
"def handle_remote_button(self, request):\n self._verify_auth_parameters(request)\n content = yield from request.content.read()\n parsed = dmap.parse(content, tag_definitions.lookup_tag)\n self.last_button_pressed = dmap.first(parsed, 'cmbe')\n return web.Response(status=200)",
"def cb_something_2(self, button):\n print(\"Do Something 2\")",
"def __on_click(self, evt):\n if evt.button() == Qt.LeftButton:\n return self._on_left_click(evt)\n if evt.button() == Qt.RightButton:\n return self._on_right_click(evt)",
"def cb_something_3(self, button):\n print(\"Do Something 3\")",
"def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:",
"def on_buttonBox_clicked(self, button):\n if button == self.findButton:\n self.__doSearch()\n elif button == self.stopButton:\n self.__stopSearch()",
"def OnButtonAboutOKButton(self, event):\r\n\t\tself.OnButtonOKButton()",
"def on_mouse_up(self, pos, mouse_button):\n for item in button.Button.all_buttons:\n if item.collidepoint(pos):\n self.buttons_clicked.append((item, mouse_button))\n item.on_click(mouse_button)",
"def on_buttonBox_clicked(self, button):\n if button == self.buttonBox.button(QDialogButtonBox.Save):\n self.on_saveButton_clicked()\n elif button == self.refreshButton:\n self.on_refreshButton_clicked()",
"def serve_handler(self, button):\n button_pos = list(button.to_window(*button.pos))\n button_pos[0] -= 100\n beer = Beer(lane=button.lane)\n button.lane.puck_area.add_widget(beer)\n button.lane.beers.append(beer)\n beer.pos = beer.to_local(button_pos[0], 15)",
"def eventHandler(self, event: pygame.event):\n # change selected color if this button's rectangle was clicked\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.rect.collidepoint(event.pos): # is mouse over button\n self.image = self._images[ButtonImages.CLICKING_IMAGE.value]\n self.beingClicked = True\n for func, *args in self.functionsToInvokeWhenClicked:\n func(*args)\n elif event.type == pygame.MOUSEBUTTONUP and self.beingClicked:\n if event.button == 1:\n self.beingClicked = False\n self.image = self._images[ButtonImages.DEFAULT_IMAGE.value]",
"def generate_buttons(self):\n raise Exception('Implement me!')",
"def setup_button_block(self):\n self.btn_contrast_invert.setText(\"Contrast Invert\")\n self.btn_display.setText(\"Display\")\n self.btn_display_hist.setText(\"Display HIST\")\n self.btn_display_color_hist.setText(\"Display Color HIST\")\n self.btn_compare.setText(\"Compare\")\n self.btn_equalize_hist.setText(\"Equalize Histogram\")\n self.btn_contrast_stretch.setText(\"Contrast Stretch\")\n self.btn_log_compress.setText(\"Log Compress\")\n self.btn_dload_jpeg.setText(\"Download JPEG\")\n self.btn_dload_tiff.setText(\"Download TIFF\")\n self.btn_dload_png.setText(\"Download PNG\")\n self.btn_upload.setText(\"Upload\")\n\n self.btn_upload.clicked.connect(self.btn_upload_callback)\n\n self.btn_display.clicked.connect(self.btn_display_callback)\n self.btn_contrast_invert.clicked.connect(\n self.btn_contrast_invert_callback)\n self.btn_display_hist.clicked.connect(self.btn_display_hist_callback)\n self.btn_display_color_hist.clicked.connect(\n self.btn_display_color_hist_callback)\n self.btn_compare.clicked.connect(self.btn_compare_callback)\n self.btn_equalize_hist.clicked.connect(self.btn_equalize_hist_callback)\n self.btn_contrast_stretch.clicked.connect(\n self.btn_contrast_stretch_callback)\n self.btn_log_compress.clicked.connect(self.btn_log_compress_callback)\n self.btn_dload_jpeg.clicked.connect(self.download_images_jpg)\n self.btn_dload_png.clicked.connect(self.download_images_png)\n self.btn_dload_tiff.clicked.connect(self.download_images_tiff)",
"def callback(self, event):\n button = event[\"button\"]\n\n cmd = self._callbacks.get(self._uuidstr(self.global_id, button), None)\n cmd = self._callbacks.get(self._uuidstr(event[\"name\"], button), cmd)\n cmd = self._callbacks.get(self._uuidstr(event[\"instance\"], button), cmd)\n\n if cmd is None:\n return\n if callable(cmd):\n cmd(event)\n else:\n bumblebee.util.execute(cmd, False)",
"def ev_mousebuttondown(self, event: MouseButtonDown) -> None:",
"def setup_buttons(self):\n confirm = self.centre.findChild(QPushButton, \"confirmBtn\")\n confirm.clicked.connect(partial(self.confirmed))\n cancel = self.centre.findChild(QPushButton, \"cancelBtn\")\n cancel.clicked.connect(partial(self.controller.show_selector_menu))",
"def button_clicked(self, button, button_idx):\n if self.turn % 2 == 0:\n if self.board[button_idx-1] == 0:\n self.place_move_x(button, button_idx-1)\n gameOver = self.check_x_won()\n if self.player_mode == 'pvc' and gameOver is None:\n self.play_cpu()\n else:\n if self.player_mode == 'pvp':\n if self.board[button_idx-1] == 0:\n self.place_move_o(button, button_idx-1)\n\n self.check_game()\n self.player_highlight()",
"def gui_button_event(self, evt):\n try:\n evName = self.gui_events[evt]\n except IndexError:\n evName = None\n\n if evName == \"OK\":\n self.save()\n self.gui_exit()\n elif evName == \"CANCEL\":\n self.callback = None\n self.gui_exit()\n elif evName == \"TEXPATH_ADD\":\n # browse and add texture search path\n Blender.Window.FileSelector(self.add_texture_path, \"Add Texture Search Path\")\n elif evName == \"TEXPATH_NEXT\":\n if self.texpathIndex < (len(self.config[\"IMPORT_TEXTURE_PATH\"])-1):\n self.texpathIndex += 1\n self.update_texpath_current()\n elif evName == \"TEXPATH_PREV\":\n if self.texpathIndex > 0:\n self.texpathIndex -= 1\n self.update_texpath_current()\n elif evName == \"TEXPATH_REMOVE\":\n if self.texpathIndex < len(self.config[\"IMPORT_TEXTURE_PATH\"]):\n del self.config[\"IMPORT_TEXTURE_PATH\"][self.texpathIndex]\n if self.texpathIndex > 0:\n self.texpathIndex -= 1\n self.update_texpath_current()\n\n elif evName == \"IMPORT_KEYFRAMEFILE_ADD\":\n kffile = self.config[\"IMPORT_KEYFRAMEFILE\"]\n if not kffile:\n kffile = Blender.sys.dirname(self.config[\"IMPORT_FILE\"])\n # browse and add keyframe file\n Blender.Window.FileSelector(\n self.select_keyframe_file, \"Select Keyframe File\", kffile)\n self.config[\"IMPORT_ANIMATION\"] = True\n elif evName == \"IMPORT_KEYFRAMEFILE_REMOVE\":\n self.config[\"IMPORT_KEYFRAMEFILE\"] = ''\n self.config[\"IMPORT_ANIMATION\"] = False\n\n elif evName == \"IMPORT_EGMFILE_ADD\":\n egmfile = self.config[\"IMPORT_EGMFILE\"]\n if not egmfile:\n egmfile = self.config[\"IMPORT_FILE\"][:-3] + \"egm\"\n # browse and add egm file\n Blender.Window.FileSelector(\n self.select_egm_file, \"Select FaceGen EGM File\", egmfile)\n elif evName == \"IMPORT_EGMFILE_REMOVE\":\n self.config[\"IMPORT_EGMFILE\"] = ''\n\n elif evName == \"IMPORT_REALIGN_BONES_1\":\n if self.config[\"IMPORT_REALIGN_BONES\"] == 1:\n self.config[\"IMPORT_REALIGN_BONES\"] = 0\n else:\n self.config[\"IMPORT_REALIGN_BONES\"] = 1\n elif evName == \"IMPORT_REALIGN_BONES_2\":\n if self.config[\"IMPORT_REALIGN_BONES\"] == 2:\n self.config[\"IMPORT_REALIGN_BONES\"] = 0\n else:\n self.config[\"IMPORT_REALIGN_BONES\"] = 2\n elif evName == \"IMPORT_ANIMATION\":\n self.config[\"IMPORT_ANIMATION\"] = not self.config[\"IMPORT_ANIMATION\"]\n elif evName == \"IMPORT_SKELETON_1\":\n if self.config[\"IMPORT_SKELETON\"] == 1:\n self.config[\"IMPORT_SKELETON\"] = 0\n else:\n self.config[\"IMPORT_SKELETON\"] = 1\n elif evName == \"IMPORT_SKELETON_2\":\n if self.config[\"IMPORT_SKELETON\"] == 2:\n self.config[\"IMPORT_SKELETON\"] = 0\n else:\n self.config[\"IMPORT_SKELETON\"] = 2\n elif evName == \"IMPORT_MERGESKELETONROOTS\":\n self.config[\"IMPORT_MERGESKELETONROOTS\"] = not self.config[\"IMPORT_MERGESKELETONROOTS\"]\n elif evName == \"IMPORT_SENDGEOMETRIESTOBINDPOS\":\n self.config[\"IMPORT_SENDGEOMETRIESTOBINDPOS\"] = not self.config[\"IMPORT_SENDGEOMETRIESTOBINDPOS\"]\n elif evName == \"IMPORT_SENDDETACHEDGEOMETRIESTONODEPOS\":\n self.config[\"IMPORT_SENDDETACHEDGEOMETRIESTONODEPOS\"] = not self.config[\"IMPORT_SENDDETACHEDGEOMETRIESTONODEPOS\"]\n elif evName == \"IMPORT_SENDBONESTOBINDPOS\":\n self.config[\"IMPORT_SENDBONESTOBINDPOS\"] = not self.config[\"IMPORT_SENDBONESTOBINDPOS\"]\n elif evName == \"IMPORT_APPLYSKINDEFORM\":\n self.config[\"IMPORT_APPLYSKINDEFORM\"] = not self.config[\"IMPORT_APPLYSKINDEFORM\"]\n elif evName == \"IMPORT_EXTRANODES\":\n self.config[\"IMPORT_EXTRANODES\"] = not self.config[\"IMPORT_EXTRANODES\"]\n elif evName == \"IMPORT_BONEPRIORITIES\":\n 
self.config[\"IMPORT_BONEPRIORITIES\"] = not self.config[\"IMPORT_BONEPRIORITIES\"]\n elif evName == \"IMPORT_EXPORTEMBEDDEDTEXTURES\":\n self.config[\"IMPORT_EXPORTEMBEDDEDTEXTURES\"] = not self.config[\"IMPORT_EXPORTEMBEDDEDTEXTURES\"]\n elif evName == \"IMPORT_COMBINESHAPES\":\n self.config[\"IMPORT_COMBINESHAPES\"] = not self.config[\"IMPORT_COMBINESHAPES\"]\n elif evName == \"IMPORT_EGMANIM\":\n self.config[\"IMPORT_EGMANIM\"] = not self.config[\"IMPORT_EGMANIM\"]\n elif evName == \"IMPORT_SETTINGS_DEFAULT\":\n self.config[\"IMPORT_ANIMATION\"] = True\n self.config[\"IMPORT_SKELETON\"] = 0\n self.config[\"IMPORT_EXPORTEMBEDDEDTEXTURES\"] = False\n self.config[\"IMPORT_COMBINESHAPES\"] = True\n self.config[\"IMPORT_REALIGN_BONES\"] = 1\n self.config[\"IMPORT_MERGESKELETONROOTS\"] = True\n self.config[\"IMPORT_SENDGEOMETRIESTOBINDPOS\"] = True\n self.config[\"IMPORT_SENDDETACHEDGEOMETRIESTONODEPOS\"] = True\n self.config[\"IMPORT_SENDBONESTOBINDPOS\"] = True\n self.config[\"IMPORT_APPLYSKINDEFORM\"] = False\n self.config[\"IMPORT_EXTRANODES\"] = True\n self.config[\"IMPORT_BONEPRIORITIES\"] = True\n self.config[\"IMPORT_EGMFILE\"] = ''\n self.config[\"IMPORT_EGMANIM\"] = True\n self.config[\"IMPORT_EGMANIMSCALE\"] = 1.0\n elif evName == \"IMPORT_SETTINGS_SKINNING\":\n self.config[\"IMPORT_ANIMATION\"] = True\n self.config[\"IMPORT_SKELETON\"] = 0\n self.config[\"IMPORT_EXPORTEMBEDDEDTEXTURES\"] = False\n self.config[\"IMPORT_COMBINESHAPES\"] = True\n self.config[\"IMPORT_REALIGN_BONES\"] = 1\n self.config[\"IMPORT_MERGESKELETONROOTS\"] = True\n self.config[\"IMPORT_SENDGEOMETRIESTOBINDPOS\"] = False\n self.config[\"IMPORT_SENDDETACHEDGEOMETRIESTONODEPOS\"] = False\n self.config[\"IMPORT_SENDBONESTOBINDPOS\"] = False\n self.config[\"IMPORT_APPLYSKINDEFORM\"] = True\n self.config[\"IMPORT_EXTRANODES\"] = True\n elif evName[:5] == \"GAME_\":\n self.config[\"EXPORT_VERSION\"] = evName[5:]\n # settings that usually make sense, fail-safe\n self.config[\"EXPORT_FORCEDDS\"] = True\n self.config[\"EXPORT_SMOOTHOBJECTSEAMS\"] = True\n self.config[\"EXPORT_STRIPIFY\"] = False\n self.config[\"EXPORT_STITCHSTRIPS\"] = False\n self.config[\"EXPORT_ANIMATION\"] = 1\n self.config[\"EXPORT_FLATTENSKIN\"] = False\n self.config[\"EXPORT_SKINPARTITION\"] = False\n self.config[\"EXPORT_BONESPERPARTITION\"] = 4\n self.config[\"EXPORT_PADBONES\"] = False\n self.config[\"EXPORT_OB_SOLID\"] = True\n self.config[\"EXPORT_MW_NIFXNIFKF\"] = False\n self.config[\"EXPORT_MW_BS_ANIMATION_NODE\"] = False\n self.config[\"EXPORT_EXTRA_SHADER_TEXTURES\"] = True\n # set default settings per game\n if self.config[\"EXPORT_VERSION\"] == \"Morrowind\":\n self.config[\"EXPORT_FORCEDDS\"] = False\n pass # fail-safe settings work\n if self.config[\"EXPORT_VERSION\"] == \"Freedom Force vs. 
the 3rd Reich\":\n self.config[\"EXPORT_SKINPARTITION\"] = True\n self.config[\"EXPORT_PADBONES\"] = True\n elif self.config[\"EXPORT_VERSION\"] == \"Civilization IV\":\n self.config[\"EXPORT_STRIPIFY\"] = True\n self.config[\"EXPORT_STITCHSTRIPS\"] = True\n self.config[\"EXPORT_BONESPERPARTITION\"] = 18\n self.config[\"EXPORT_SKINPARTITION\"] = True\n elif self.config[\"EXPORT_VERSION\"] in (\"Oblivion\", \"Fallout 3\"):\n self.config[\"EXPORT_STRIPIFY\"] = True\n self.config[\"EXPORT_STITCHSTRIPS\"] = True\n self.config[\"EXPORT_FLATTENSKIN\"] = True\n self.config[\"EXPORT_BONESPERPARTITION\"] = 18\n self.config[\"EXPORT_SKINPARTITION\"] = True\n # oblivion specific settings\n self.config[\"EXPORT_BHKLISTSHAPE\"] = False\n self.config[\"EXPORT_OB_MATERIAL\"] = 9 # wood\n self.config[\"EXPORT_OB_MALLEABLECONSTRAINT\"] = False\n # rigid body: static\n self.config[\"EXPORT_OB_BSXFLAGS\"] = 2\n self.config[\"EXPORT_OB_MASS\"] = 1000.0\n self.config[\"EXPORT_OB_MOTIONSYSTEM\"] = 7 # MO_SYS_FIXED\n self.config[\"EXPORT_OB_UNKNOWNBYTE1\"] = 1\n self.config[\"EXPORT_OB_UNKNOWNBYTE2\"] = 1\n self.config[\"EXPORT_OB_QUALITYTYPE\"] = 1 # MO_QUAL_FIXED\n self.config[\"EXPORT_OB_WIND\"] = 0\n self.config[\"EXPORT_OB_LAYER\"] = 1 # static\n # shader options\n self.config[\"EXPORT_FO3_SHADER_TYPE\"] = 1\n self.config[\"EXPORT_FO3_SF_ZBUF\"] = True\n self.config[\"EXPORT_FO3_SF_SMAP\"] = False\n self.config[\"EXPORT_FO3_SF_SFRU\"] = False\n self.config[\"EXPORT_FO3_SF_WINDOW_ENVMAP\"] = False\n self.config[\"EXPORT_FO3_SF_EMPT\"] = True\n self.config[\"EXPORT_FO3_SF_UN31\"] = True\n # body parts\n self.config[\"EXPORT_FO3_BODYPARTS\"] = True\n elif self.config[\"EXPORT_VERSION\"] == \"Empire Earth II\":\n self.config[\"EXPORT_FORCEDDS\"] = False\n self.config[\"EXPORT_SKINPARTITION\"] = False\n elif self.config[\"EXPORT_VERSION\"] == \"Bully SE\":\n self.config[\"EXPORT_FORCEDDS\"] = False\n self.config[\"EXPORT_STRIPIFY\"] = False\n self.config[\"EXPORT_STITCHSTRIPS\"] = False\n self.config[\"EXPORT_FLATTENSKIN\"] = False\n self.config[\"EXPORT_SKINPARTITION\"] = True\n self.config[\"EXPORT_PADBONES\"] = True\n self.config[\"EXPORT_BONESPERPARTITION\"] = 4\n elif evName[:8] == \"VERSION_\":\n self.config[\"EXPORT_VERSION\"] = evName[8:]\n elif evName == \"EXPORT_FLATTENSKIN\":\n self.config[\"EXPORT_FLATTENSKIN\"] = not self.config[\"EXPORT_FLATTENSKIN\"]\n if self.config[\"EXPORT_FLATTENSKIN\"]: # if skin is flattened\n self.config[\"EXPORT_ANIMATION\"] = 1 # force geometry only\n elif evName == \"EXPORT_FORCEDDS\":\n self.config[\"EXPORT_FORCEDDS\"] = not self.config[\"EXPORT_FORCEDDS\"]\n elif evName == \"EXPORT_STRIPIFY\":\n self.config[\"EXPORT_STRIPIFY\"] = not self.config[\"EXPORT_STRIPIFY\"]\n elif evName == \"EXPORT_STITCHSTRIPS\":\n self.config[\"EXPORT_STITCHSTRIPS\"] = not self.config[\"EXPORT_STITCHSTRIPS\"]\n elif evName == \"EXPORT_SMOOTHOBJECTSEAMS\":\n self.config[\"EXPORT_SMOOTHOBJECTSEAMS\"] = not self.config[\"EXPORT_SMOOTHOBJECTSEAMS\"]\n elif evName[:17] == \"EXPORT_ANIMATION_\":\n value = int(evName[17:])\n self.config[\"EXPORT_ANIMATION\"] = value\n if value == 0 or value == 2: # if animation is exported\n self.config[\"EXPORT_FLATTENSKIN\"] = False # disable flattening skin\n elif value == 1:\n # enable flattening skin for 'geometry only' exports\n # in oblivion and fallout 3\n if self.config[\"EXPORT_VERSION\"] in (\"Oblivion\", \"Fallout 3\"):\n self.config[\"EXPORT_FLATTENSKIN\"] = True\n elif evName == \"EXPORT_SKINPARTITION\":\n self.config[\"EXPORT_SKINPARTITION\"] = not 
self.config[\"EXPORT_SKINPARTITION\"]\n elif evName == \"EXPORT_PADBONES\":\n self.config[\"EXPORT_PADBONES\"] = not self.config[\"EXPORT_PADBONES\"]\n if self.config[\"EXPORT_PADBONES\"]: # bones are padded\n self.config[\"EXPORT_BONESPERPARTITION\"] = 4 # force 4 bones per partition\n elif evName == \"EXPORT_BHKLISTSHAPE\":\n self.config[\"EXPORT_BHKLISTSHAPE\"] = not self.config[\"EXPORT_BHKLISTSHAPE\"]\n elif evName == \"EXPORT_OB_MALLEABLECONSTRAINT\":\n self.config[\"EXPORT_OB_MALLEABLECONSTRAINT\"] = not self.config[\"EXPORT_OB_MALLEABLECONSTRAINT\"]\n elif evName == \"EXPORT_OB_COLLISION_DO_NOT_USE_BLENDER_PROPERTIES\":\n self.config[\"EXPORT_OB_COLLISION_DO_NOT_USE_BLENDER_PROPERTIES\"] = not self.config[\"EXPORT_OB_COLLISION_DO_NOT_USE_BLENDER_PROPERTIES\"]\n elif evName == \"EXPORT_OB_SOLID\":\n self.config[\"EXPORT_OB_SOLID\"] = True\n elif evName == \"EXPORT_OB_HOLLOW\":\n self.config[\"EXPORT_OB_SOLID\"] = False\n elif evName == \"EXPORT_OB_RIGIDBODY_STATIC\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 0 # stone\n self.config[\"EXPORT_OB_BSXFLAGS\"] = 2 # havok\n self.config[\"EXPORT_OB_MASS\"] = 10.0\n self.config[\"EXPORT_OB_MOTIONSYSTEM\"] = 7 # MO_SYS_FIXED\n self.config[\"EXPORT_OB_UNKNOWNBYTE1\"] = 1\n self.config[\"EXPORT_OB_UNKNOWNBYTE2\"] = 1\n self.config[\"EXPORT_OB_QUALITYTYPE\"] = 1 # MO_QUAL_FIXED\n self.config[\"EXPORT_OB_WIND\"] = 0\n self.config[\"EXPORT_OB_LAYER\"] = 1 # static\n self.config[\"EXPORT_OB_SOLID\"] = True\n self.config[\"EXPORT_OB_PRN\"] = \"NONE\"\n elif evName == \"EXPORT_OB_RIGIDBODY_ANIMATED\": # see fencedoor01.nif\n self.config[\"EXPORT_OB_MATERIAL\"] = 0 # stone\n self.config[\"EXPORT_OB_BSXFLAGS\"] = 11 # havok + anim + unknown\n self.config[\"EXPORT_OB_MASS\"] = 10.0\n self.config[\"EXPORT_OB_MOTIONSYSTEM\"] = 6 # MO_SYS_KEYFRAMED\n self.config[\"EXPORT_OB_UNKNOWNBYTE1\"] = 2\n self.config[\"EXPORT_OB_UNKNOWNBYTE2\"] = 2\n self.config[\"EXPORT_OB_QUALITYTYPE\"] = 2 # MO_QUAL_KEYFRAMED\n self.config[\"EXPORT_OB_WIND\"] = 0\n self.config[\"EXPORT_OB_LAYER\"] = 2 # OL_ANIM_STATIC\n self.config[\"EXPORT_OB_SOLID\"] = True\n self.config[\"EXPORT_OB_PRN\"] = \"NONE\"\n elif evName == \"EXPORT_OB_RIGIDBODY_CLUTTER\":\n self.config[\"EXPORT_OB_BSXFLAGS\"] = 3 # anim + havok\n self.config[\"EXPORT_OB_MASS\"] = 10.0 # typical\n self.config[\"EXPORT_OB_MOTIONSYSTEM\"] = 4 # MO_SYS_BOX\n self.config[\"EXPORT_OB_UNKNOWNBYTE1\"] = 2\n self.config[\"EXPORT_OB_UNKNOWNBYTE2\"] = 2\n self.config[\"EXPORT_OB_QUALITYTYPE\"] = 3 # MO_QUAL_DEBRIS\n self.config[\"EXPORT_OB_WIND\"] = 0\n self.config[\"EXPORT_OB_LAYER\"] = 4 # clutter\n self.config[\"EXPORT_OB_SOLID\"] = True\n self.config[\"EXPORT_OB_PRN\"] = \"NONE\"\n elif evName == \"EXPORT_OB_RIGIDBODY_WEAPON\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 5 # metal\n self.config[\"EXPORT_OB_BSXFLAGS\"] = 3 # anim + havok\n self.config[\"EXPORT_OB_MASS\"] = 25.0 # typical\n self.config[\"EXPORT_OB_MOTIONSYSTEM\"] = 4 # MO_SYS_BOX\n self.config[\"EXPORT_OB_UNKNOWNBYTE1\"] = 2\n self.config[\"EXPORT_OB_UNKNOWNBYTE2\"] = 2\n self.config[\"EXPORT_OB_QUALITYTYPE\"] = 3 # MO_QUAL_DEBRIS\n self.config[\"EXPORT_OB_WIND\"] = 0\n self.config[\"EXPORT_OB_LAYER\"] = 5 # weapin\n self.config[\"EXPORT_OB_SOLID\"] = True\n self.config[\"EXPORT_OB_PRN\"] = \"SIDE\"\n elif evName == \"EXPORT_OB_RIGIDBODY_CREATURE\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 7 # skin\n self.config[\"EXPORT_OB_BSXFLAGS\"] = 7 # anim + havok + skeleton\n self.config[\"EXPORT_OB_MASS\"] = 600.0 # single person's weight in Oblivion\n 
self.config[\"EXPORT_OB_MOTIONSYSTEM\"] = 6 # MO_SYS_KEYFRAMED\n self.config[\"EXPORT_OB_UNKNOWNBYTE1\"] = 2\n self.config[\"EXPORT_OB_UNKNOWNBYTE2\"] = 2\n self.config[\"EXPORT_OB_QUALITYTYPE\"] = 2 # MO_QUAL_KEYFRAMED\n self.config[\"EXPORT_OB_WIND\"] = 0\n self.config[\"EXPORT_OB_LAYER\"] = 8 # biped\n self.config[\"EXPORT_OB_SOLID\"] = True\n self.config[\"EXPORT_OB_PRN\"] = \"NONE\"\n elif evName == \"EXPORT_OB_MATERIAL_STONE\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 0\n elif evName == \"EXPORT_OB_MATERIAL_CLOTH\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 1\n elif evName == \"EXPORT_OB_MATERIAL_GLASS\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 3\n elif evName == \"EXPORT_OB_MATERIAL_METAL\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 5\n elif evName == \"EXPORT_OB_MATERIAL_SKIN\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 7\n elif evName == \"EXPORT_OB_MATERIAL_WOOD\":\n self.config[\"EXPORT_OB_MATERIAL\"] = 9\n elif evName[:14] == \"EXPORT_OB_PRN_\":\n self.config[\"EXPORT_OB_PRN\"] = evName[14:]\n elif evName == \"EXPORT_OPTIMIZE_MATERIALS\":\n self.config[\"EXPORT_OPTIMIZE_MATERIALS\"] = not self.config[\"EXPORT_OPTIMIZE_MATERIALS\"]\n elif evName == \"LOG_LEVEL_WARN\":\n self.update_log_level(evName, logging.WARNING)\n elif evName == \"LOG_LEVEL_INFO\":\n self.update_log_level(evName, logging.INFO)\n elif evName == \"LOG_LEVEL_DEBUG\":\n self.update_log_level(evName, logging.DEBUG)\n elif evName == \"EXPORT_FO3_FADENODE\":\n self.config[\"EXPORT_FO3_FADENODE\"] = not self.config[\"EXPORT_FO3_FADENODE\"]\n elif evName.startswith(\"EXPORT_FO3_SF_\"):\n self.config[evName] = not self.config[evName]\n elif evName == \"EXPORT_FO3_SHADER_TYPE_DEFAULT\":\n self.config[\"EXPORT_FO3_SHADER_TYPE\"] = 1\n elif evName == \"EXPORT_FO3_SHADER_TYPE_SKIN\":\n self.config[\"EXPORT_FO3_SHADER_TYPE\"] = 14\n elif evName == \"EXPORT_FO3_SHADER_OPTION_DEFAULT\":\n self.config[\"EXPORT_FO3_SHADER_TYPE\"] = 1\n self.config[\"EXPORT_FO3_SF_ZBUF\"] = True\n self.config[\"EXPORT_FO3_SF_SMAP\"] = False\n self.config[\"EXPORT_FO3_SF_SFRU\"] = False\n self.config[\"EXPORT_FO3_SF_WINDOW_ENVMAP\"] = False\n self.config[\"EXPORT_FO3_SF_EMPT\"] = True\n self.config[\"EXPORT_FO3_SF_UN31\"] = True\n elif evName == \"EXPORT_FO3_SHADER_OPTION_SKIN\":\n self.config[\"EXPORT_FO3_SHADER_TYPE\"] = 14\n self.config[\"EXPORT_FO3_SF_ZBUF\"] = True\n self.config[\"EXPORT_FO3_SF_SMAP\"] = True\n self.config[\"EXPORT_FO3_SF_SFRU\"] = False\n self.config[\"EXPORT_FO3_SF_WINDOW_ENVMAP\"] = True\n self.config[\"EXPORT_FO3_SF_EMPT\"] = True\n self.config[\"EXPORT_FO3_SF_UN31\"] = True\n elif evName == \"EXPORT_FO3_SHADER_OPTION_CLOTH\":\n self.config[\"EXPORT_FO3_SHADER_TYPE\"] = 1\n self.config[\"EXPORT_FO3_SF_ZBUF\"] = True\n self.config[\"EXPORT_FO3_SF_SMAP\"] = True\n self.config[\"EXPORT_FO3_SF_SFRU\"] = False\n self.config[\"EXPORT_FO3_SF_WINDOW_ENVMAP\"] = False\n self.config[\"EXPORT_FO3_SF_EMPT\"] = True\n self.config[\"EXPORT_FO3_SF_UN31\"] = True\n elif evName == \"EXPORT_FO3_BODYPARTS\":\n self.config[\"EXPORT_FO3_BODYPARTS\"] = not self.config[\"EXPORT_FO3_BODYPARTS\"]\n elif evName == \"EXPORT_MW_NIFXNIFKF\":\n self.config[\"EXPORT_MW_NIFXNIFKF\"] = not self.config[\"EXPORT_MW_NIFXNIFKF\"]\n elif evName == \"EXPORT_MW_BS_ANIMATION_NODE\":\n self.config[\"EXPORT_MW_BS_ANIMATION_NODE\"] = not self.config[\"EXPORT_MW_BS_ANIMATION_NODE\"]\n elif evName == \"EXPORT_EXTRA_SHADER_TEXTURES\":\n self.config[\"EXPORT_EXTRA_SHADER_TEXTURES\"] = not self.config[\"EXPORT_EXTRA_SHADER_TEXTURES\"]\n elif evName == 
\"EXPORT_ANIM_DO_NOT_USE_BLENDER_PROPERTIES\":\n self.config[\"EXPORT_ANIM_DO_NOT_USE_BLENDER_PROPERTIES\"] = not self.config[\"EXPORT_ANIM_DO_NOT_USE_BLENDER_PROPERTIES\"]\n Draw.Redraw(1)"
] | [
"0.7044635",
"0.6786346",
"0.6749938",
"0.6737374",
"0.64038026",
"0.6324577",
"0.6317142",
"0.62803805",
"0.6239058",
"0.62135565",
"0.6211251",
"0.62016696",
"0.61881936",
"0.61719483",
"0.61423606",
"0.60934865",
"0.60849035",
"0.6071122",
"0.60655963",
"0.60495496",
"0.6046886",
"0.60166305",
"0.59572715",
"0.5953304",
"0.59502083",
"0.59452593",
"0.59435254",
"0.59154177",
"0.5910431",
"0.589834"
] | 0.76285523 | 0 |
Shape the rounded corners for the window | def SetRoundShape(self):
w, h = self.GetSize()
self.SetShape(GetRoundShape(w, h, 10)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rounded_border_box(self):\n return self.rounded_box(0, 0, 0, 0)",
"def draw_window_pane():\n houseturtle.begin_fill()\n for y in range(4):\n houseturtle.pendown()\n houseturtle.forward(35)\n houseturtle.left(90)\n houseturtle.penup()\n houseturtle.end_fill()",
"def DrawRoundedRectangle(*args, **kwargs):\n return _gdi_.DC_DrawRoundedRectangle(*args, **kwargs)",
"def round_corner(self,radius, fill):\r\n corner = Image.new('RGBA', (radius, radius), (0, 0, 0, 0))\r\n draw = ImageDraw.Draw(corner)\r\n draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)\r\n return corner",
"def DrawRoundedRectangle(*args, **kwargs):\n return _gdi_.PseudoDC_DrawRoundedRectangle(*args, **kwargs)",
"def DrawRoundedRectangle(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawRoundedRectangle(*args, **kwargs)",
"def draw_round_rect(self, x, y, w, h, r, color=None, aa=False):\n self._draw_fast_hline(x + r, y, w - 2 * r, color, aa) # Top\n self._draw_fast_hline(x + r, y + h - 1, w - 2 * r, color, aa) # Bottom\n self._draw_fast_vline(x, y + r, h - 2 * r, color, aa) # Left\n self._draw_fast_vline(x + w - 1, y + r, h - 2 * r, color, aa) # Right\n # draw four corners\n self._draw_circle_helper(x + r, y + r, r, 1, color)\n self._draw_circle_helper(x + w - r - 1, y + r, r, 2, color)\n self._draw_circle_helper(x + w - r - 1, y + h - r - 1, r, 4, color)\n self._draw_circle_helper(x + r, y + h - r - 1, r, 8, color)",
"def DrawRoundedRectangleRect(*args, **kwargs):\n return _gdi_.DC_DrawRoundedRectangleRect(*args, **kwargs)",
"def DrawRoundedRectanglePointSize(*args, **kwargs):\n return _gdi_.DC_DrawRoundedRectanglePointSize(*args, **kwargs)",
"def get_corners_rect(shape, TLCorner=(0, 0)):\n\n h = shape[0]\n w = shape[1]\n\n corners = TLCorner + np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])\n\n return corners",
"def addRoundedRect(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def addRoundRect(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def DrawRoundedRectanglePointSize(*args, **kwargs):\n return _gdi_.PseudoDC_DrawRoundedRectanglePointSize(*args, **kwargs)",
"def DrawRoundedRectangleRect(*args, **kwargs):\n return _gdi_.PseudoDC_DrawRoundedRectangleRect(*args, **kwargs)",
"def _get_round_edges_bitmap(width: int, height: int, radius: int):\n mask_color = opts['gui']['attrs']['mask_color']\n background_color = opts['gui']['attrs']['background_color']\n bitmap = wx.Bitmap(width, height)\n dc = wx.MemoryDC(bitmap)\n dc.SetBrush(wx.Brush(mask_color))\n dc.DrawRectangle(0, 0, width, height)\n dc.SetBrush(wx.Brush(background_color))\n dc.SetPen(wx.Pen(background_color))\n dc.DrawRoundedRectangle(0, 0, width, height, radius)\n bitmap.SetMaskColour(mask_color)\n return bitmap",
"def draw(self, window):\n radius = SQUARE_SIZE // 2 - PADDING\n if self.stack_size == 2:\n x1, y1 = self.x - SQUARE_SIZE//8, self.y - SQUARE_SIZE//8\n x2, y2 = self.x + SQUARE_SIZE//8, self.y + SQUARE_SIZE//8\n pygame.draw.circle(window, BLACK, (x1, y1), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (x1, y1), radius)\n pygame.draw.circle(window, BLACK, (x2, y2), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (x2, y2), radius)\n else:\n pygame.draw.circle(window, BLACK, (self.x, self.y), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (self.x, self.y), radius)",
"def update_corners(self):\n self.upper_left = [self.x, self.y]\n self.upper_right = [self.x + self.width, self.y]\n self.lower_right = [self.x + self.width, self.y + self.height]\n self.lower_left = [self.x, self.y + self.height]\n\n self.upper_left_x = self.upper_left[0]\n self.upper_left_y = self.upper_left[1]\n self.upper_right_x = self.upper_right[0]\n self.upper_right_y = self.upper_right[1]\n self.lower_right_x = self.lower_right[0]\n self.lower_right_y = self.lower_right[1]\n self.lower_left_x = self.lower_left[0]\n self.lower_left_y = self.lower_left[1]\n\n return",
"def rounded_padding_box(self):\n return self.rounded_box(\n self.border_top_width,\n self.border_right_width,\n self.border_bottom_width,\n self.border_left_width)",
"def draw_rounded_rect(self, context, x, y, width, height, radius, lineWidth):\n from math import pi\n degrees = pi / 180\n\n context.set_line_width(lineWidth)\n context.set_source_rgba(0.5, 0.0, 0.0, 1.0) # Red\n\n # cr.new_sub_path()\n context.arc(x + width - radius, y + radius, radius, -90 * degrees, 0 * degrees)\n context.arc(x + width - radius, y + height - radius, radius, 0 * degrees, 90 * degrees)\n context.arc(x + radius, y + height - radius, radius, 90 * degrees, 180 * degrees)\n context.arc(x + radius, y + radius, radius, 180 * degrees, 270 * degrees)\n context.close_path()\n context.stroke_preserve()\n context.set_source_rgba(0.0, 0.5, 0.5, 1.0)\n # and use it to fill the path (that we had kept)\n context.fill()\n context.stroke()",
"def rounded_content_box(self):\n return self.rounded_box(\n self.border_top_width + self.padding_top,\n self.border_right_width + self.padding_right,\n self.border_bottom_width + self.padding_bottom,\n self.border_left_width + self.padding_left)",
"def round_rect(x, y, w, h, i):\n X, Y, W, H = int(x + 10), int(y + 10), int(w - 20), int(h - 20)\n\n pygame.draw.rect(gameDisplay, i, (x, Y, w, H))\n pygame.draw.rect(gameDisplay, i, (X, y, W, h))\n\n pygame.draw.circle(gameDisplay, i, (X, Y), 10)\n pygame.draw.circle(gameDisplay, i, (X + W, Y), 10)\n pygame.draw.circle(gameDisplay, i, (X, Y + H), 10)\n pygame.draw.circle(gameDisplay, i, (X + W, Y + H), 10)\n\n pygame.draw.rect(gameDisplay, i, (X, Y, W, H))",
"def round_rect(x,y,w,h, i):\n X,Y,W,H=int(x+10),int(y+10),int(w-20),int(h-20)\n\n pygame.draw.rect(gameDisplay, i, (x,Y, w, H))\n pygame.draw.rect(gameDisplay, i, (X,y, W, h))\n\n pygame.draw.circle(gameDisplay, i, (X,Y), 10)\n pygame.draw.circle(gameDisplay, i, (X+W,Y), 10)\n pygame.draw.circle(gameDisplay, i, (X,Y+H), 10)\n pygame.draw.circle(gameDisplay, i, (X+W,Y+H), 10)\n\n pygame.draw.rect(gameDisplay, i, (X,Y,W,H))",
"def rounded_box_path(stream, radii):\n x, y, w, h, tl, tr, br, bl = radii\n\n if all(0 in corner for corner in (tl, tr, br, bl)):\n # No radius, draw a rectangle\n stream.rectangle(x, y, w, h)\n return\n\n r = 0.45\n\n stream.move_to(x + tl[0], y)\n stream.line_to(x + w - tr[0], y)\n stream.curve_to(\n x + w - tr[0] * r, y, x + w, y + tr[1] * r, x + w, y + tr[1])\n stream.line_to(x + w, y + h - br[1])\n stream.curve_to(\n x + w, y + h - br[1] * r, x + w - br[0] * r, y + h, x + w - br[0],\n y + h)\n stream.line_to(x + bl[0], y + h)\n stream.curve_to(\n x + bl[0] * r, y + h, x, y + h - bl[1] * r, x, y + h - bl[1])\n stream.line_to(x, y + tl[1])\n stream.curve_to(\n x, y + tl[1] * r, x + tl[0] * r, y, x + tl[0], y)",
"def rounded_rectangle(src: np.array, top_left: tuple, bottom_right: tuple, cornerRadius: int = cornerRadius, color: tuple = (255,255,255), thickness: int = 1, lineType: int=cv2.LINE_AA) -> Any:\r\n # corners:\r\n # p1 - p2\r\n # | |\r\n # p4 - p3\r\n\r\n p1 = Point(top_left[0], top_left[1])\r\n p2 = Point(bottom_right[0], top_left[1])\r\n p3 = Point(bottom_right[0], bottom_right[1])\r\n p4 = Point(top_left[0], bottom_right[1])\r\n\r\n # Fill\r\n if thickness < 0:\r\n main_rect = [Point(p1.x + cornerRadius, p1.y), Point(p3.x - cornerRadius, p3.y)]\r\n left_rect = [Point(p1.x + cornerRadius, p1.y + cornerRadius), Point(p4.x, p4.y - cornerRadius)]\r\n right_rect = [Point(p2.x - cornerRadius, p2.y + cornerRadius), Point(p3.x, p3.y - cornerRadius)]\r\n\r\n [cv2.rectangle(src, rect[0].toTuple(), rect[1].toTuple(), color, thickness) for rect in [main_rect, left_rect, right_rect]]\r\n\r\n # Outline\r\n cv2.line(src, (p1.x+cornerRadius,p1.y), (p2.x-cornerRadius,p2.y), color, abs(thickness), lineType);\r\n cv2.line(src, (p2.x,p2.y+cornerRadius), (p3.x,p3.y-cornerRadius), color, abs(thickness), lineType);\r\n cv2.line(src, (p4.x+cornerRadius,p4.y), (p3.x-cornerRadius,p3.y), color, abs(thickness), lineType);\r\n cv2.line(src, (p1.x,p1.y+cornerRadius), (p4.x,p4.y-cornerRadius), color, abs(thickness), lineType);\r\n\r\n # Arc\r\n cv2.ellipse(src, (p1+Point(cornerRadius, cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 180.0, 0, 90, color, thickness, lineType);\r\n cv2.ellipse(src, (p2+Point(-cornerRadius, cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 270.0, 0, 90, color, thickness, lineType);\r\n cv2.ellipse(src, (p3+Point(-cornerRadius, -cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 0.0, 0, 90, color, thickness, lineType);\r\n cv2.ellipse(src, (p4+Point(cornerRadius, -cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 90.0, 0, 90, color, thickness, lineType);",
"def AddRoundedRectangle(*args, **kwargs):\n return _gdi_.GraphicsPath_AddRoundedRectangle(*args, **kwargs)",
"def corners(self):\n return self._corners",
"def GetCorners(self):\n ...",
"def getCorners(self):\n return self.corners",
"def _set_frame_shape(self) -> None:\n width, height = self.GetSize()\n self.SetShape(wx.Region(_get_round_edges_bitmap(width, height, 10)))",
"def _set_frame_shape(self) -> None:\n width, height = self.GetSize()\n self.SetShape(wx.Region(_get_round_edges_bitmap(width, height, 10)))"
] | [
"0.72191507",
"0.66822755",
"0.6522677",
"0.64682794",
"0.6444937",
"0.6425309",
"0.6359084",
"0.6307169",
"0.62756234",
"0.62698317",
"0.6259612",
"0.6259098",
"0.6244389",
"0.6243569",
"0.6236145",
"0.62334776",
"0.61594945",
"0.6116691",
"0.6098656",
"0.605642",
"0.6045871",
"0.6012235",
"0.5975431",
"0.5964994",
"0.592419",
"0.5903873",
"0.58463365",
"0.58279985",
"0.5813005",
"0.5813005"
] | 0.72325355 | 0 |
Loads icon files by name, returns wx.Bitmaps | def LoadIcon(filename):
# wx.Image.AddHandler(wx.PNGHandler) # This should work but it doesn't so...
wx.InitAllImageHandlers() # ...falling back to this instead
filename = "icons/" + filename + ".png"
image = wx.Image()
with open(filename, mode='rb') as file:
image.LoadFile(file, type=wx.BITMAP_TYPE_PNG)
return image.ConvertToBitmap() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_icon(fn):\n\n fn = os.path.join(os.path.dirname(__file__), fn)\n bmp = c4d.bitmaps.BaseBitmap()\n if bmp.InitWith(fn)[0] == c4d.IMAGERESULT_OK:\n return bmp\n return None",
"def xInitializeBitmaps(self, basedir):\n # ------------------------------------------------------------------------\n self.bitmaps = {}\n basedir = os.path.join(basedir, \"bitmaps\")\n if not os.path.isdir(basedir):\n return\n sizes = os.listdir(basedir)\n for size in sizes:\n sizedir = os.path.join(basedir, size)\n if not os.path.isdir(sizedir):\n continue\n self.bitmaps[size] = {}\n bitmaps = os.listdir(sizedir)\n for bitmap in bitmaps:\n if not bitmap.endswith(\".png\"):\n continue\n bitkey = os.path.splitext(bitmap)[0]\n bitfile = os.path.join(sizedir, bitmap)\n self.bitmaps[size][bitkey] = wx.Bitmap(\n bitfile, wx.BITMAP_TYPE_ANY)",
"def get_icons():\n ICONS = {\n \"http://files.heuritech.com/raw_files/surfrider/bottle.png\" : \".mot/resources/bottle.png\",\n \"http://files.heuritech.com/raw_files/surfrider/fragment.png\" : \".mot/resources/fragment.png\",\n \"http://files.heuritech.com/raw_files/surfrider/other.png\" : \".mot/resources/other.png\"\n }\n\n home = os.path.expanduser(\"~\")\n if not os.path.isdir(os.path.join(home, \".mot/\")):\n os.mkdir(os.path.join(home, \".mot/\"))\n if not os.path.isdir(os.path.join(home, \".mot/resources\")):\n os.mkdir(os.path.join(home, \".mot/resources\"))\n\n for k,v in ICONS.items():\n path = os.path.join(home, v)\n if not os.path.isfile(path):\n wget.download(k, path)\n print(\"\\ndownloaded to \", path)\n return [cv2.imread(filename,-1) for filename in [os.path.join(home, \".mot/resources/bottle.png\"),\n os.path.join(home, \".mot/resources/fragment.png\"),\n os.path.join(home, \".mot/resources/other.png\")]]",
"def load_sprites(dir=\"/home/robin/workspace/python/ipt/chess/sprites\"):\n arr = []\n chdir(dir)\n for i in range(12):\n img = mimg.imread(\"sprite_\"+\"{:0>2d}\".format(i)+\".png\")\n arr.append(img)\n return arr",
"def getIconImage(self, name: str) -> Any:\n # Return the image from the cache if possible.\n if name in self.iconimages:\n image = self.iconimages.get(name)\n return image\n try:\n iconsDir = g.os_path_join(g.app.loadDir, \"..\", \"Icons\")\n homeIconsDir = g.os_path_join(g.app.homeLeoDir, \"Icons\")\n for theDir in (homeIconsDir, iconsDir):\n fullname = g.finalize_join(theDir, name)\n if g.os_path_exists(fullname):\n if 0: # Not needed: use QTreeWidget.setIconsize.\n pixmap = QtGui.QPixmap()\n pixmap.load(fullname)\n image = QtGui.QIcon(pixmap)\n else:\n image = QtGui.QIcon(fullname)\n self.iconimages[name] = image\n return image\n # No image found.\n return None\n except Exception:\n g.es_print(\"exception loading:\", fullname)\n g.es_exception()\n return None",
"def image(name):\n\n # the path where all the images area\n if getattr(sys, 'frozen', False):\n # The application is frozen\n datadir = os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n datadir = os.path.dirname(__file__)\n return str(os.path.join(os.path.abspath(datadir), \"icons\", name))",
"def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)",
"def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)",
"def get_imlist(path):\n return [\n os.path.join(path, f) for f in os.listdir(path) if f.endswith('.bmp')\n ]",
"def get_icon_path(name):\r\n # get paths\r\n paths = os.environ.get(\"XBMLANGPATH\")\r\n\r\n # validate paths\r\n if not paths:\r\n return\r\n\r\n # loop paths\r\n for path in paths.split(os.pathsep):\r\n icon_path = os.path.join(path, name)\r\n if os.path.exists(icon_path):\r\n return icon_path.replace(\"\\\\\", \"/\")",
"def load_icons() -> str:\n return _read_text('icons-svg-inline.html')",
"def icons_from_folder(folder, resolution=None, col=None,\n cmap=None, border_type=None, border_width=2,\n make_square=False, circ_cut=None):\n icons = dict()\n for filename in os.listdir(folder):\n try:\n im = PIL.Image.open(filename)\n icons[filename] = Icon(\n image=im, col=col, resolution=resolution,\n cmap=cmap, border_type=border_type,\n border_width=border_width,\n make_square=make_square, circ_cut=circ_cut)\n except (FileNotFoundError, UnidentifiedImageError, IsADirectoryError,\n PermissionError):\n pass\n return icons",
"def load_images(path):\n images = []\n images_names = []\n \n for file_name in os.listdir(path):\n image_name = file_name\n images_names.append(image_name)\n images_names = sorted(images_names) #use sort to insure linux file sys behaves\n print(images_names) #check for proper order\n\n for file_name in images_names:\n image = pygame.image.load(path + os.sep + file_name).convert()\n images.append(image)\n return images",
"def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images",
"def load_images(symbol_dict):\n \n args = DeepScribe.get_command_line_args()\n\n if args.symbol is None:\n symb_query = \"*\"\n else:\n symb_query = unicodedata.normalize('NFC', args.symbol)\n \n query = args.directory + \"/\" + symb_query + \"_*.jpg\"\n count = 0\n\n for fn in tqdm(iglob(query), desc='filenames'):\n # find first occurence of \"_\" after directory name, which marks the start of the uuid\n fn = unicodedata.normalize('NFC', fn)\n separator_idx = fn.find(\"_\", len(args.directory)+1)\n extension_idx = fn.rfind(\".jpg\")\n name = fn[len(args.directory)+1 : separator_idx]\n name = name.upper().strip(' »«')\n uuid = fn[separator_idx+1 : extension_idx]\n\n # not using cv2.imread() in order to read unicode filenames\n img = cv2.imdecode(np.fromfile(fn, dtype=np.uint8),\n cv2.IMREAD_UNCHANGED)\n symb_img = Symbol_Image(name, uuid, img)\n\n if name not in excluded_readings:\n if name in symbol_dict:\n symbol_dict[name].append(symb_img)\n else:\n symbol_dict[name] = [symb_img]\n count += 1\n\n if args.limit != 'max':\n if count >= args.limit:\n break",
"def _get_icon(icon_name):\n theme = 'Adwaita'\n size = '256x256'\n path = f'/usr/share/icons/{theme}/{size}/mimetypes/{icon_name}.png'\n return path",
"def callback_LoadMap(fileName=None):\n loading_msg = 'Load Bioprocess:'\\\n '\\n(will check processes/ by default)'\n\n # get fileName from user\n if not fileName:\n fileName = sg.popup_get_text(loading_msg, 'File Loader')\n\n if fileName:\n # add default path and .json ext\n fileName = brf.default_path(fileName)\n jName, fileName = brf.get_file_ext('.json', fileName)\n # attempt to load in specified json\n try:\n with open(jName) as j:\n currentMods = json.load(j)\n except(FileNotFoundError):\n sg.popup('Error: File could not be opened')\n currentMods = None\n\n else:\n currentMods = 'cancel'\n\n return currentMods",
"def get_image(control):\n file = _icons.get(control.Id)\n if file:\n path = os.path.join(os.path.dirname(__file__), \"icons\", file)\n return pyxll.load_image(path)",
"def load(name):\n return []",
"def ExtractIconReps(icon_file_name):\n with open(icon_file_name, \"r\") as icon_file:\n icon_file_contents = icon_file.readlines()\n\n current_icon_size = REFERENCE_SIZE_DIP\n icon_sizes = []\n current_icon_representation = []\n icon_representations = {}\n for line in icon_file_contents:\n # Strip comments and empty lines.\n line = line.partition(CPP_COMMENT_DELIMITER)[0].strip()\n if not line:\n continue\n # Retrieve sizes specified by CANVAS_DIMENSIONS to ensure icons are added in\n # sorted order by size descending.\n if line.startswith(CANVAS_DIMENSIONS):\n sizes = re.findall(r\"\\d+\", line)\n if len(sizes) != 1:\n Error(\"Malformed {} line in {} - it should specify exactly one size.\"\n .format(CANVAS_DIMENSIONS, icon_file_name))\n icon_sizes.append(int(sizes[0]))\n\n # All icons except the first / default icon must start with\n # \"CANVAS_DIMENSIONS\", so rely on it here as a icon delimiter.\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n current_icon_representation = []\n current_icon_size = icon_sizes[-1]\n\n current_icon_representation.append(line)\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n\n if not icon_representations:\n Error(\"Didn't find any icons in {}.\".format(icon_file_name))\n\n if len(icon_representations) != len(icon_sizes):\n icon_sizes.insert(0, REFERENCE_SIZE_DIP)\n if sorted(icon_sizes, reverse=True) != icon_sizes:\n Error(\"The icons in {} should be sorted in descending order of size.\"\n .format(icon_file_name))\n return icon_representations",
"def _icons(self):",
"def __make_icon():\n icon = pygame.image.load(str(PurePath(\"res/Images/bird_wing_down.png\")))\n return icon",
"def createIcon(self, name):\n path = 'data/images/' + name\n icon = QtGui.QIcon(path)\n return icon",
"def GetIcon(old=False):\r\n\r\n # Imaris icons used with permission from Bitplane\r\n Icon8 = \"eJzsfQd4VNW2/yDSpCQhvTfSe++Z9J7pvfeSKZkkkw4kJKE3BVTs2EEBBRsqIkgRQXqV3nsHARFY/73PSRC83qved32+/3vs71vfnnrOKruv9VuHQulHGUhxdKSgOoBieJJC0VEolIAA8v189Plm9Fl8fO/7EAqlyZVCycsj34cXUCh+WRSKQtH7/RwKpWQMhdLZ2fv94H6US079KOHomo74uhTy898rUqX+d3/zW0UgMxF1TV2Lf421habS14TwxdIn/8w12MJarZjTOYNT+uWXZRmtDWaryC2/SOkbGFjyyO9oLHnvvZpGmevtg+3tHRSxLs+Nzz49WUwHYMScBlrCUQgIFCxOS5dvENdWD3j4/1y50bGyuHGr3KjQW+1Nz9iae4aq1Sd15tJbwKCsgGrKQuAEnYCkYVOgsqhpW2KsGbxctUfdXFRrEx2mrhJlL7/AjP7wp0LX9+8EuzffpHOEkQY67KJTPoQiyjzIp8wCcdx5yPf4FHJdXj1fY5x+LDGscU/a4DdAEbnvjjLv0F1e4r77XPfj4DSkHTKCFjbwI89APKUTwiltkOj9LPToATJit0LuqG+gKvXVm+PGrL6iit66xFJ88bKx+DLYkJw1aT+DC8UMcf4LvuClX7kTSLFDTPAH0GUEEDAvQn7+YcjP2QNF0XuhpnjH3SbBqUMN3KP7x8gBpjQAdCgBAgc/Db6DJp+S8HZPEBYCqJJvE7rLyziK/nsaWLRLUKe/BTPHAMxqhJ+n1QF0mwA6EX/TmwEyIz4H5/4Nd+r0C4pNE1PcaPFvzav2WwOVkfugKvkIVGScBWbeNZDT7oOJj3hmAqjLAfj5AKzsu5Cdsgc8Rhiu2fRrhfba18ymRn4xK24HMNzWASNgLzDiyeuUph1FdBhK0/dBUcouyI7eDrlR391Mj/xmZ6bPy1Zr7dzxje0Tlyi4n3zLSD4EEu+jIHTfBtX+26AgdDPkhq+HrJCvIDf04/crcqZ2CblCv9EbKP07vqA80kbLg3dx+PGnQDPqFIjd9kGF73eQF7QC8gI/gRy/uT3tX1D6SxTmqQZzPZ3LF/r+VjsWex16Vu52Clieu6DAfzXkBH4GuX7zbyV6a4l2qDPa3YSy2qliceswJov9yH+TAtqJOt15ul+h+0fNVN8PZlF936/Ndh43IMfn5X74O5ma7GsytfnPdK9/WQBI2oJo3I9oHMF36vfob1YiOoI/c0B1HKJmCuVe7//wOBOAKI/yx8aZx4Us5vqWB6/lyjGD/7vvrzO3hOgtzTadqWVxbaO9scZaX8cVCEP5QrEb/p7BFvxl9+ZLmieLZPYtUkXzSpm6/hWxyjTBam/gavX6QDaH68XmiUPpbMvgnHzef/zebH5DK5NrahfT8oN5GTue5uSdOcBOO3qt2G/+D4nJsmnmWm0gX8gdmUuVER2SWsj/U9c31TUn662NMou9lYrf29vGkvcV1CBq8OPLlAx25lYNHY3jbDQeM9G4iF/L0RiZ7vgyRIQqDhvNqsDEBPWLKSnij37vftW9duJKtKUcgWGh2mDqESlqms0NLR+0do5/9uHfChSqUlb+1dEWOxrvqwEk/qeBM3g7MPp9C6yki8APOAF+HgYIHSX6WiJteS46kgfJKRLjr+85ahSNqAMCyJpNH9NJz58M1GQ7KI1qEZsvVjI5UrZKb51R39pZam5oJn5HL/lm2uRpSNaUn4E/5Ac0365E9ClUUpZC2ZBvQZx6F+IGTYZIjxbITtPOYrGaIDpKDcF+unsB3tr1iF5D1IOoyd9LUxfsZWgtjJg5RVy4BBiRH0JF/Jy7JcHPb0x1nwmernoIDVa/KZLppIR+SmwB48YBGNHcIxi4E+iUL4j7VlAWQxnlPSgb9DVI0wCog96GdJc5kOgw7oxA3LSzumw0JEc1gLuTFjxG6ghycdRAvNNUqBj2BWhK1oA8ZxOwIpAOY5ddZHhugryhi8DN0QCZnm9CRqqqlpC9Eka3qgFEA/dCFeVjRO9BKWU+lFDeIdYORa4boSb+PuQ5LkNrh/mQ6fASFMR0fFxTMwM4lZNvJ4a3nHN+qgZGDNFC6rB5wHXaBqKIPWCkHbknSPkBBCkb73CjDt/njjoCVU+tg5GDzRDoMQ8yfGdvwveXi2GHPOgcIW8+5UXIo8wFKmUOZCHKpEwBdtEJqAm9Ablo3qL6fgX5bouB6v7mT3RG897pU9/+sGvylMkJHj23qU++BzyHH0DkehpquHvOKnKP3JLl7gZO/AEQJp4DSdxl4A49CF5PtsEAihUygr8CSsAT/bVCuI/1nonWSImUCYh6IIYyCSIpYyGEMgOmI9toMi9AVvRmyItAc2/w11Dg9ymURM47JuBMg5fmrtkxoWf5IXnQZpA7XkF2vHDLVnL7hib/JKjzzoAi+w4oqdfBhNYyipGXILT/ZBhIkUA8mvvjHcYzlVxAOl+B7tkJoyhNEECxgVf/WnAfrAGT9SeYhdY3dfrzkJ68E9LQOiQvZhNkh3wLeX4rgV38/AVJ1WJobth1t0wyOqXdtuJtC3fHJlP+1Tumsgv3mlHfbUdrKrz+apeivhVxFZIHvQSDKDLwc38J0p1ff0aK7l/R/2vwQTrxpNSAK0UF7pQWEEp/hmfQmmwiIi7zNOTlHYDMzN0EH9nx2yE7fCsUBG772cJZcdtasROaWBeW9LQcWVfXNmaOXXz0EL43XjPObAV4sRsAr9MMGQB5Tp8Q8j/1RBtkui38qqTk7jFp8g2IQ7YPH/gcpIfvBKMNYBwagxRZSDdF54BKPQ5FqB2Ulx+HqvKjUFV8ECqo+4GedhAkacd/tlUcuFlfdu6+hXXkfn35T9CO7t0mRnJrAcYiGoPadwuSf5IFXS9sDyE/pkzHdw7ohUsm9o15XPQbK6prc9C9E29AVdpJYt2ZmXoCMjJOQ2EhWjtWnwel6BIY5ZehRnIFGqS3YKIJ7o+V3L+OeL7ThfTdoybXslj3+N54bTqxFuDlyQBK+llwRDYegPQcOXjalSbrG3pV4tIU6ahDe9TBJ0HucwrYQUeAFovWeomIUg9BEV77ZpyEwqyzUJ5/EXj0q2BW3YZOZJvZ4wHemA3w+lR0ffR6TgfAM+1I343kPfHaeLSe1MNYJL+AcQ1Chs9E8hvAf2jr7Trde61NTS1Oz8LQ/mL6TD49cvEOvF6u8t8CFaP2QmXcD2i9ewAqUo5COeKhLPsMVOZeAnrhdRDTfwIz0lkXsu0kdL/xqB6L2hoeT+zo81pkBxMb2Z1OrrHxej4v+xJkBX0BQ580gJeT6bJF/XmbzWZzlGuZDh09LWFqizCRVTyuoTpo8S2697dAC9gOVSH7oToB83EIaCnHoTr9DFrHn4dqdC167jW0br8NnNw7iO4+IDYaT5lZPyG6hb6/Aczsa1CVdRG132OQFrUDgkeMBn9P7S6bYeU4odp/cJ3uY21X5ztPq7SGDKlaXC
Kv/uo63e0bIMh7G1QFovtH7Qdmwn5yH4HaRFXa6V5ezhJUmXkGyvH7zFNob3CS2COUpR8i9ggFqbshD/Wd5OidkB6zEXJ93oMoL/uYhpr1rxvkqxlaxVyDtd5e1NDYSNMZRrdWp+4DXsx+YDt/BxxXNIZ7bQW6/x6oDtsPtBhSHxVJP0BZ8j4oRuNsMdq34H1IfuJOyELjRHb0VsiO2AjZkWuAGrkCqOHLruWGfbIxP2zREmrgks4cvzfyiHWB9vOxNTVzJhu1z7w0pmfyvIaG2YtEjOU/lKFr8ZNQ/0JtUeS8BfhumxEP24HmtxvpYheUhu6EPNRX8yN2QE7EFsiI3ABZEWshI2wVUEOXQ0nswv2FoYtbKjOmM2TiPIex9ylPCNsog0QaltfDc3Zr9xh/e/O0bVZ751djx0+Zb7POaC9LRvsspFs8dmsiT4HU/TAIXHYD130H0Hw2Q5n/JigO2gDUkLWQHfoN5IZ8DTmjlgM14MO7haHzxtKry33YIu0yvojjKJHKwmVyhTONznBkMFguAonKTShVPbj/hNmd/7B2ocft+Zmdchbp/yCow86DyvM0iF0OAM9jJ3H/Ev9vgRqI5AxaDtnBX0JuENpX+70DWb6TU4l1B08czeDqppit9ZaWlpbIahrdmc5gOvOF0nB1/Ye/t3SiVPp/1ySIPQLq6EugCz0Haq8zIHE9DHy0f6P5fI/2cGshN/AroAZ/TuwD8/wWQqbPrAP4vzSWhMKT1gwWK01csaJ2FVpLbxJJVXlMNtNLLNN7/d69+wrXZ/c3htBLYAhCc5jrWXT/g2j/uIPYfxYErIScgC/IfaT/EqD6vAUZ3tMrUgN7iP/yJTqiVuntdsTDLb6kFkQSe+kfvXdW4BSiZrismCF02QtS16PAc0dt32szFKOxKc//S+Le2f6fIdkXQ7b3SzP+1fVWffvBE7gWitv+KAsUz8A8og7wljuXO78/udTt00PFnkjXPp+iffdSyEb3RX14LdoDE3JFBqr/8LX/TPEOePSsKd7b4pzo2TCy731a8KS/5L7/Vwo8VO4hOoLoNhXgijPMPtL/flYnxad/J4WCqe8Eoh/lV+cJKxFdQXQXkRl9P5r4fT+YRBkOtym+cJfi13GXQqXep6ALd8IjBZ9TxCNSUB6fUzwu//3l4fMeXNichn5yVfsTA3/1OxbnP3/m8V8tSr3tweuO8RODW8Z05lobmmgGq51usNiK1DpTCF8kdvwbWfynRSC1ELXB2hZssjU3SRT1i0Qy+yb0ekKtvdFstNbZVPqaHJ5AFMLlC8OFEulw/Pu/8vzrzxRx+cR+IllLk1TZ8LlKa5+h0oy9oFCNPi+StJxDcnxqa2qrs9ps4Wj95cficLxZPEEYVyh3r6DXEOfbf8UZ2h8pTF4NRWnofoLDtz/P4tUf5fBtb0vVent1NZfHiJ2h5ocvtNAjnh1bkGGZxBaLadZ6Q7hao/Xm8HhuLK4gpLhMTa9gGX7dPf6tYmlo+qfflVQV/sNnLAF5vs4R2iVMnuU1vkiZKJZmBHFiVk/n51w9ySuHG6xyuMOl/vwzM+DA7RSvrssJ8YoFMoU8RW80eLM4bJfCQnFOZqZi0uRX7X/6DPG3yujuScNRn6vUmxtmGmqb3rQ2tnSb6ptH4e/6zhhxweeMuHDFzU4snvV9tYLuRstcWsoruHSSKwRgor0yHe3Z6XyShFwAlvdBCPFowmePZ0tKRRUGg8E7PV2RmZQkh8xsSR6+XmoG8w/zis8kqxnk2lSq1Y+Qqkxipc7Srbc2LBLL9RPQ+7GI92UtnRO+b+0cn4V/1zqu55FrsAW1Y9Vqph+LerJFzId7UgOAHPHNQ1O7IO4qCEJPgSj8DCBbgLwYIPOp+eDloYPwEOk+JkeaWlFh1SObQEKC+FpBkcofX3PFAe0f4p9WTZ5XckXaODq97svSItv90nLtUYPZbJbINVy5WstDZEIyPN3QPm5DW9cEwh+kNpD+GRa/PkSqZ2fyi861GS0AFhvJoywM7XVG7APOgI1Ao6whfI60wZsBn00xnXaA7/A6CPEzQFgI/wtLbWN+WZkZIiN5kJAo3pGaaR7+W7x6eXEenMfi0ncmy2C2hHIrJx5lFsyC4vgpkJvUADyxpoUnEbMKikR1efkyvUQhSmHz9a9Z7O3z8H+QLMR/BVJ+rLB4R73JCjChG5/X3gPByFPAH7gV8f0NcXaKzzDJ89NlxBmA2Pc0RA2YClEeoyHUzQwZKfIepbp9a0aGHqKilBAdroFAH80hRG8FeGvrgnzVNFRHI/JD5OPvqfX28FD4RHo2+KX79YQKil9ZLCpaCMzEhVAe+gaUJExF+/3Ombm+z0CCTyf4e+oh0E93JSNDXMZkq6faR3d6YN5FdsoT5bkfC41GuP3qCwDq5DsgGHEE2P039vp4yfPmX85+PwZm1nVQBJ+HzCfmQYrrTEgY2QVRrg3nK6vqPtTruiApvgawLzjIxwA+bjrwcdc+ID8P8r2XmwYi3Nog0+VpKAl4FeQly0GcuQ5YYV9DdeRSYKS8dqDa44vrFa6fQdqwueDiWAOjPOohyqvpUimNS+WJdNFE28lcOJRRfXXZSy8h3jNQ/xxxDJiUDQ/p/EPCT12OiDw//hCY2TdAEXQJ8vqjPaLz65Dh9CwkO0y8nx3W/BGNbr9ur58FSZH1kB7TDK4O5Dm2u5P+ATkNV6Pfz4Yqhy+B7rAaNKWrQV2+DfgJO4ATsQn4KZ9fZwV9f5vhsx0Yrt9C9sD54DbcAjEecyDbaz7E+dd9om9mEn4jVjH42ixwy47GGonjKeAh3rHOsa4xz2W9597llLfQ63mI/8XAR31D53MJioaifabrIsh1eps4/04fMeNQeXndFxJJDxhUc6A0p/vn1JjWmy5DjeA8XA+4dkSUOewNYDtsAYbTNpDEbgcDa999df5hECXvBmHKlvuc2K232OGH7uPzvkq37VAx8AtwHVILjkMaIS3kK8hwf+lqcrognmj7lWCuUSPdoz05u98mgu9ixGsx5Q3inL6Q8ipRF1BehjzKc4TO8Vmm3vUyFLiuAqrXMsh1+xDyRs6HbKfXoChs5ntsXvOF9tZXwGqe9o7ZOuGVxIDOGx4D7eA6qBZwbADHAel55D7gex8GI2vfbT398BlZ5gkQpe8Dcc7G+7TwA/d4sWdAGHMaWF5HgDlgC/gOaifONr3c34TsoBV3E1y6iMFNyIQ1mkoA/vDDRFvp4xOf9edSZkEO5WnIRJRBmQGplAlQFr2XOEOV+J0BasA6oI5aA/kBK4Dq/QlhiwLvt4FV1fkRnzseXn118fMTpk1tstlmfBQ7rOcu9YnFwH1qO4hGHgKe2xlQ5xy+ZxZu2aWkHrwnyzwGquItgH3qjNhjIEv5ESRJiEJPgGTgAYjoPxUGUTQwkGKFxMjtkOL28ryRbhUDxGy4o0i9g9rN94SfAvOZTJmCeJ1E1Nh3EEvpQtQGMeh9bS0ao/A8lnyEOCunRn9PnFdh/0GR/5eI/yVQ7P/uZR6n84yU8yK88/baVVOnv/9uz5gPdvAcN9wXD
T0IYpcLoI08d9/KPn7MVHTnriH3CqiLd4Ei8wJIM26BLPsaKHKvgQHNPaq46yAefAKS+r2A9K+BIRQJBPl/Bum+H+yMd+pwwvOpNPIS0eYx79jfEE0ZjfafLRCGXocNbIPAJ+3gR6mHipJT8M7zAB2IfwX9KGQkbofMuG1Ajd8M1PD1hC3y/L+CfJ9l96vTXzwjYk+/rxG9B7Omb4EJXbvBpFm/VRGy6b1a6v7TpsKLl2oL793TZ94GU/FJMBSfJ3whtdUADWyAOtYdsNLugCHrZ5A5XoDM/gvgKYoB2UABro6TCD9N0shZfph/GZpXC1EfxXoOozRBMOLVv9dv4fyEAfUdOcRFfQvvL0C6R21n9jj0H/5xSE/fCSkJuyAlZhfkJm6F3KiNkBNGnuHl+qy4xac/c0XFfB8MwlWg02w/RROpQvTijyo7275YUlf70uvG4ovXLBWHThoqT99uYpK+FOzbwOf6k+pIakDy4LG6YMhnMAzxg/kf0q8RcpC9Y90mVfKRrOLYS6jdv0X4eTDPmNyQrCNRf3FAdXLEWpg5C2Ai9ps0AoxuuAU02gmgFu6HjIw9kJFKykHYI3IrZIZtgtyg76Ek+LurVsHHt83MtWAu23m/jX/x+9k9F8Z3ta96G63HehpaJ75mFx/da6cD4FgbzDuOt8F+mRfGA8xqBRiL1jCahFtQPHI9OFPsBP+DEH/x7osg3nlmJ7caTglysK9oFfhQzOCK2pgL+o0XRQcRw9+E0sKz0NGFrmsCwDqqEf8ELNo5KCk5AUVFxxAdgfz8g5CdvQ9yMndDXsYOKEnbAUXxu6E8fB/wknfeaOBsvmMrPwoNpVfvNkmvH7RJj16p0yzfWcc4d6NDSvp0sO6xLwn7NzBh3WP/Vgsa17F81YEHwfvJzl7+NRA69BVIdHpmgbBq5zo2soEk/TZUDd8ICYPnQ47HeijJ+RH4SCdNSB/Yb2RCMvLRZ2V55yAv+wTi9wTk5Z2A0tKTUFFxAqqqTgCt8jgwEJ+M4sPAzjuE1hlHQJx1ErQFp360lO6/UVdyDkxlh8Ba/cPtumK4P4YLD3xSmH88rvX5h1rlpG8K+6rmtKPxvegcBA2ZifqvghhHfftPh3iHmesVghefZVbDUSaSky0mCa+ZpUrS12JHfcqQhNagKZehKv0UUNOOQxaitLSTkIV4y8s7g2Q4S9hEwLkACuF5RBdBw70IOs4lqGFfhyb+PehUw+1m3o1zDYybF7vEcK8b8ysl/XGYd8wr5hkT5h/7lbCPCcdevT4T6VFzG+Kc30b8qwj9u6KxJWbYlCMG+QvPNIp3MQTUqwewH0iD48VKkb7TEEXfAWnUReDGHQd60mEoTz0EZelHCN8bliMbyZObeQYKcs+hsekC8OiXQSm6hsaZm2CvuQ1tlp+hG9nvmQ6yPT/fQbbpWah9TG/obS+9NN7y24T9nM+h/7YiWdICv+4dgwzgiPpCxPDuy2bFO8998Nl0d3XBfC9e0Moevt/396Qe+0DouR/4vvtRu0OE2jH2l5UnIEo5CMVIDuwDKkFzZkHmKcJ/V0o9D1VFaA6vvA5a0U2o092FLsTn0+jeLyP9vfUsqcdXe/15z6M+OmsM6UfFfXZSrwy4zWO/HvYvNitJakXvNYK7kI/GOY/BbQT/TgNMEOzQfq1G8d7H85d0jDSbrSPq33AbxOTp/dnZ896lh356tcrvG6jyWQ+0gK1QHbwHKiJ+IPxL5Yn7oCyZlKE8/QSS4xTh/6vIuQjVeVeAWXwdRLTbYBTeg0bs40RtYEYHOe7i+LxpraReuyy/+AWb5GR7rRc/6h/UofFTitoDreAGZCX9AMnur6D2b0BrKT34j7SdsciWffPKR8XD0VbHEe3HHQ0WrfuceV3RQo69jJH68puVwUuu0bxXQbXPBsJnSMO+y8h9pJ8s8QDhs6tEbakMtSXSV3cBaDmXgUG9Csy8G8AtugnSsrtE/ImWSfKFSf8Qf3j9oizt3TMVkjGDnBxM94CVfQdo2TeRjS9CXNx+yEVtyPOpemI96ONm2GpRffPFmAk0J6PR6KBSK510Zo5rz6SuOKVGGy9VaUtE/Do+PemVg1WenwDN81vCz1QZsBsqRiEZovdBVTxqW0kHoTrlKFT2+h7L088RcmC/ZnX2FWDn3kB83AR2zi3CJ8rO+Ykg7AfF7xloDia//5FYm2OfKP4fIwuNGdkXCP9lbuoxiEb3S0fr61SX54h9RYCXbn6tbv1io7HDR64UDjdqXg5orf3k88k9S5+XqxWRaM9ZJlXLK5TC1/YxgpDuXZYBw301ML3RftJ/B1QH7SP8lnSkl6r4A0Qfr0o+RvjX8ViFfaiVmeeAlnGekKePqrLOE35eXFdmne31s54m/dCoPZZnkDGYJWn7oTB9L+Sn7oas5J0Qi9ZtaWi9hf00ce5jIMTbwrQZN71qrXk5Nr2M0t+qWDGzRj9dM258N725rbtIqlKXoT20iV/+9U1B8gFgem4D+siVwHZbA3SP74Hhs5OI/aSPOgCMKFIOZsLBXjmOkDYh4kGP9vp8j0BJ6hHS95t2kOCvJHUfqvegMWEnFCIeC5J29PpptxOxJZlR3xH+08zwb9Da5GvIDfkSrXuXHsnxfGFCketLA2yGzVNt+rWzjfJvF6gVk0QqnYpqb2krHdfdzZBqBJU61YuLaeg+7GS0No89ADwkA3PkGsJ3zCZk2E74j2lBBwhbYF82Hqsq4/eifr4XSpL2QCnal2A/cn4q5g+tlXp5zER7l/TorYjP71G7+A7xuAbxuArywpcjWgb5YUvOU0MXLy8IfW9Ofvi8hqKYlxjUgA/9qf7vuhR6v9Yfr/9N2i/LDcqvNmiNDUyldDLa2nd1t3dPfK574rOL1aquTn7ZunMVaT8ADfHAS0B7pJDDIHTfAQLnjcBz3QQcj81A9+n1PwehNUMw9kHvRvuYPVAYuRtyEVGjdqC6T58bCV4f1ik1fNmd/NCPz+eHLVpVEvN2R0XaZI5SmTB8PQztJ5tBGSBqoQzmyYpcpXqxw6/PM6zWl31sdXO+ra+f851ONnfT2AmT1nZOnLZ2dNeUdWrhkiPlaTvulSP70pH+sd9aEXOciPmVuuwGgctO4LrtJNoVtgOOAa4I3AZFwdugOGQL5OJ1XNhGxOc6QrcZYashO3QlagPYx/sJ5I9avKUo8p1p9KrRO9i0mljMj8qe7qwwMLxEEp4D9m3TaAwHOoM5ki+SjqqxtvT/Nf/N49qeaOuaoLA3T9pgbRz9SeOY7neR/sc0NE0tqozfuRjzzkZ9kJVyEjiRB0AVcxYU/icJv7PM+QcQ9vLP9t4GlX7fQ5n/RigK/BYKgtdCVsgqRF8T/JI8Y7/0R4DsfzQ/+PVUellNApNT5s3iG7awhcpavlDwlEimSKqpqXHn8HhOdCbTCcngxGJxPAUiVTCxX3zIR/97pTR401ga6m9cNB6yE08AN/IQaCLPgCb4NCjcT4LC5RAIXfegPrGDiKOo9NsApWhPWRDwDeQFrYRstMfAhP35uUHLcBwBUH1fO53tM8EJX5/D
k3mxudwQBkc3mcHVLdIZzXKtXh9hq6v3ZrLZ2Lc/ksXhufMEkki+pO5Px8yW+yyPq4zc+yMn6TiwY9C+L+o4aCMuEvwr3c8S/m+R216Cf+wDx/zjGIC+GASC70AyFiA/APul50Gmz4zuvuuzePYneCJVAlcgTq6oYkQ2NI9b09TW2SGVy2M5XJ4Pg8Vx4wlEQUKpNoD8vfLPikCh+a17CcdLKNAeRxN9DfShF0AXcAYUaP+N+cex6zh+oNp7E5T4rSfi0HEMQ27glwTvOYh3HEuA4yiovs9fS/Mdl0JclyUhrk/nKZ+QKK2lIoV1vkBm/Umpa7htsDQtkkhlmSwOx5cvVoaJlaZBf5rxhwrHZ+c2TcR5MIZeAS3ay2l9z/bGPhwGvvsPRPsvR+uLEqT7vhgIkvfPUJv5mIhFoPq8gXT/9Oe/vnZfTITGaC9W6e3bJErbHbGiFiRy+0GewJqv0NT7/7t8ZweR8RL5LnMcWe4bl+E2j3nHbUfudoKI3cCxI7jtlKF1Ho6hyAlY/oD3B7r3eQ+yvV4+nuHT5YyvFxto+s37sdg6B4HULEL0NFdkmSGStlD/Xd77Sl/MR5nLOwNKXBfUsUauA5kLmstc8BnOXkL31Wg9UeS7ljh7yO6NASGxDB8T8RhZPvNOZXo/G4KvkxjY/Jv34Qh+idPomjjpAaKgklP6p+JDfqvgmJEsv+nE62ERmcMKnF9/nub8+Qm625obVe5r7xd7fQMFPiuA6reMiF2h+mO9f/Qz1XfRJdRnl2Z6Tyb0/lfFkvzRMipA/OD10JSEoenu0wtz3F+xZnu8MT3H88152d5vv0X1fvu1bJ+3JuZ6vy7P9pwd0vf75KDOv4Plfyg4bibO3/7oh09RKHHedQNSPMYOTPBsfgRnkzXqWUpC4D/3xz0uj8vfWchooT9Q+5P1XQeyvnqLrG/2uwyAZpgLlMtwl06h3Kbo4eppHLcUDEevUyhHKJSOlUPJWCYKmkU7cd3/H2sKftk3aj5ae/wan/VbJa+3VvTWfdEnfZ7ijk6ybugkr1vbWxOTI5plcPSkA5D8UFG9DnH1NZYbcf1ab32ZrB1W47qT4nAU1ysp/j/h+goOwkL1bUoHqbfuP6TX8F7eSXYCfl/Qx+VxeVwel8flDxW1ttlDpW5xLS0tH0Ityn3i7+bnrypqIxmzaGvpcmvp6JHZ2zon1Ta1P2eub5pQU1vfbaqt60a1ViiWJvBFYn+ZQvOnchb8Ty+mulY3vbl5hqW+cQJPZPuWw68/gmy/oLbRbkCf4bjHOoPFxkayj+IJRKFcvjAevQ7E/2Xz/uvxan9H4YnJ8xSputms0NiX6kxmrlLb+oFW1wEa3VgQy9pBIGoBsbxxdUPzmK66+oZok6U2mMPj+TDZXG8WmxOIdBErkKie+ptF+dNFrCBhukJJ8xtsbiOwuPUgkTf9JBDVL6moNrTQWeoytYkZbGop81capYkCiVmpqbGxFRpdjNlqDZbK5V4MJsudyWL7MDni1Aq6IfFvFulPF76k+Xk6qxZYHPMLErkhmydk+sjtqb60XCuNFbnwWWbI12vYwV/vKPdfsC8jeNyapBTpVKFUXiRVqtPUWn2wVqf3ojOYrgwWx6ugSFFbXKZO/7tl+qMFyU5jcKwnJXJVjEAkCRDURXmxEl4x8JL2HBXk3AMxG4DLJeM9FZUAsugLUOz4IYQFGSAhXrFEIJbky5SaUTq9wYvGYLiM6UodkpYuh9JKtc9fFUOs1JOx2Wh8dq21N9OM1sY5WnPD14bapm+s9pb51oYmDf6+ps7+L6+DC4tn26fUKAL5QrEvm2qK46cf2iFkAjClpP++WoZI9AsRcS7xNyBv6Pvg52OA8BD5z8VlQpFcqR6F2oGnrSl5cGKCGtLSxFfwxiS/WPgfk5svIWN0DVaLt7m+XqOtsbQg2VeKpAqLVG1ZqNCau2rqWla0d01c39LZs2rMuPGDjbW/fc6GC5tfN1ksUSeL9eXu/NgVAi72maoBpGYgYi7EfCQr9vvmAAjSAETlSCdCADX6XOp5GiKGjIcgfyOEBEmgrELA1GgNgYWF+oL4WBXExkogI1O68j8lu0KrIGq51kSvZuo3FhVrgM3Vf6k2Wtolco1GpjI3CcSyAqlKazVY7e81je36DLWPf5kHgSuwzKppLXcXpu2xs+kAlmYArRERklGcheQNOw9C15NE7C3/qd3AG7ILBJEXgMklcxGUDPwQXBwNEOyvhVGBwmsSmTTVaGwvyc01QFyMGBEfsnMVlv+UDth8y/TqykaoLOyAgvQmyMtTXTbW1tZKFCquSCbLUqgNdolCZxTJFEy0XhlvsbdvaxrTI/uH6wgMOHeSWGUShklyjtpxfPfkZwCMWtJ3rgi/BLyRx4A7ZAewn9xAxO5WUpYTsaQMylKgFZNjAfup78F3cAuEB9gg0EsDoaN4q7Rmnq9WN+5OUoKQiEGOixcC6g+5D98/MJDxp2VnMcY2ckqnA6dgLlSmzIaCyEmQm4DnK90ksVqaX0mXN2ZmCuoyMqT11TRxkUDMjqpiaN5D65lNv6lLgUmrLtkqwbK/9g5AvR619SRkc++zwBm2FzhPfv8gZplGWfZI7C8t+gjRBwQjDkHkk9Mg0rkDIryaINhDB5npIhuNYXtPremAqEgFREfJITlRAQHeagjy1W5E9Hywv9rk66HLCQtSjUqKl/2DL/DXhVY8PlRQ8iqICxcAK+19qAx/C0pC50JxwmSgVZtsecGTr2T4ToZY/3bw89JBoI8OIsO0X2mM7OBqhvr99q7u6F9fk8doseE2v+gjgNZaZPf4OyByPgvcwTuB+cR6MsYcyd0Xr41jQvticcvdfwAl0ptw5BlIp7wK8cOmQarveIgc2YZjuH/kiWo0YnE78HgtEBGugfgYPcRHGR/EPWPy99Q+oADvB/QjoquIrvl6aX6O9miBJJceYOe+BbKSL0CUvQLYUZ8DI2QJVES+A7ScmUcrfd7bXe6xGArd34WU4TPBw8mI4zkgyKsGIkbplklkjDCu2KR5WHaOqiqZUXhv2/OvAsyciNpy/F2QIVk4OL9Hv7VI9uVEHGZf3DSW+5E4ZLedhPw4X0Q++ixjxIuQ4f0MJDhMhjjHsZDoXzenvKr2qN0+DYryGyA+wgIZCQ0wytcIXi66f0nuLhqIcG6HYocFUOqwCNijPgZ15degLlsHwoRNwAlbC6zw5cBJXgzMyKXbcSxKtccaKB/xKSQPfg48HCxIz3rIcJsDca7jIClJqmRwVI90OF7+qgYl6usL3gZQonFdiOwuGLgd2JQ1D9mclLv6odjryt68I6Ve+0CP4xEdr0BJ/08ge8Q7kOvxKqSPeA5SRkyDxBFdeC1lKymzwYQJLxOyp0Y1Q1Z824N8KH0x5K4Ov9ROw7WQPGIOMB2+BrrTKqA7rAVNxRrQ0baCIn8HcGN2ASdiC3DjVoMg/eNjdO9dwPTdRfieKhxWQebAt8B1mBW8nGqJHAZUr/cgxrP5qEghyXhkLCmHHTifTC1a28i9zoFgwC5gob6O5e6TubyXsOw4jru
8N5a7FLX3ioSjUIPkVz51DUoGL4NcpyVAdXsHcpxehwyHF1B7eBqyPLoWMti2LRzOWJg08XXIThgDpVndkBo9Gvri0PtoJHqP87NQh72DZP8O2E6bgDlyK4hjtkENcw/oKw+DLH0fCBL2AD9xO4hyVt9iBR68xRh1CDiIqr33Q5njOih98lNwHlIHTk/VgBeyA8b6p7u+BGmZ8gftXxF7JJiPxi6cm0IVfgP4g/cBk7IScC66PvuSsj5KOKaazP/yNFTkA9ShOVAz5CLkI1vlu38JVPclkOvyHhGDn+X4KqI5wKDZaqvpzdDSNBc62l8HLr17p0rd+UqM9xjwQHy6DrGA0yATDB2khayn5gNrxGYiFp/jshvY7vtBj2Q3cQ7dUuedAmnqIUT7QErdCJWR627wo48BJ+Y4CBBxgo5BpdMWYPb/FnwGtcIIdM0hFBnEor5C9fsCYr1aHoD6+cXQLVeTsY0YZ0CnrOvFczwao98nL6ZcgmYRlIe+xzGdzej/yuE4Z986yPNdBXleX0Cex8dE7DuO4c91fh0KAuZu44oa1lRVdMJzs5fcmjh1krZr8rTp9fVPfxL6VBf4PTkaPJ+0Q/7AxcAbthX4jnuB73IIcJ4/ZfY+MIv2XTBU79+tyD4F8rSjIM3eC+KC9ffLAw8TsZjChIvAj7sIvEgkx8h9wHtiJ4Q+OQmGDTDCIIoKXEfOhfTwDZDiOhv65OdWw7dKtLZVRN0C0eCdhN2xvA/H+fdRNmUGZFGmoTF+GpHrB8fSUwP3wOhWgMYEAI73Scjy/w7ykZ4LRn0DhUErIN/3M6It5LstRHp5G+h5k+bhPsBlTLq09OPlMydNnVHXOWHK0w31z30fQZkARU8sBd6gTSBEc67E+ShI3M+CLPgkWCWHbus5G76X5+wHefYxkGQeQGPgBiTveeBhbEDcYZCk/QSylFuon1wGntspEPbfD7H9n4HhT9b0xhcb0BpkL2R7fAj+Xpo899goCprzbmtxrH3gaeAg22N7Y1wDljENyYgpuZdSKD1EviGcZzGOMhrRWML2OC67NgKAl7gPMsO2QF7MFqBGfEvkXsTxKoU45sP7E8j3/gAKPBfcEIrbt3JZE+4rBPN+/uzTzcunTntt8tjOee+Pa5q/Fc832A4YryNxOweywLNQU3Lktl12bKO14C4Ycn4ERS6SvWIzGgMuoXbwI4jTb4A44yLIsq6DDq3N1Sk/gSL4LEgGHIYMJM+IAaZe+RXg6/s5ZAeuhRjncYTjG8dYa1HbFTkdJeZ1bPckZNcEShchZzSSEcfvRw3oRmubTghD74MpdghAuqQm7YFXXgEYh/Rnr/wJeEUHiFxDWX3Yg+gNkB1Cxr0QbcFnGaGHkpA3NwuF44FPfwZ00kWw6P0fYNL4tTBlwnZot6+5pPLec1s9/Cqo/C6BMebqvQbetTNWNMZgTII+5zLoy/aCLv8iaLPvgibvPhhL74Gp/GfQl14n8lpivII68hqIhxyH3H6LwOlJa6/8KnBzngmpkdsheeScPUT7R7yrqWjOG76HGOtSkN1jevNoYqwFlhVT0IBmCBhYD76DasF7iB4i3d6C1g6A958H6EFz/4R6tF5iHIKstB2QjOYlrIeCpC1E/iecOzI3GOkBxy/5f4Xmxi+AWTx9p5Q/E3jVL4BW+jnMnb0H2pvQOpO55WtVR1KMkbd4Rkft6m02/v5dpty7YMm+AwaMHSk7gsaq88R4hWUlckZJf4lLt7FuQC3ShS7pJxA4nYXiAV+C+4CG3hxKZB8oyNqFc0lBn/1VOIZ+6C6i38eh9h1CyF1P5LbCeA0P9B+Me3DqrwWnIXIY5T4ZlAaA1V+heYNN5jLqbr4FXNYRyELXTkvaBUlxiKJ2ATVlC5HbJidyA4HhyAleDTl+30CB96ofRfzJlzSc90BT/TnoJOtBLt+xt1pGcRWK5cF1in0dXR0ffdDa0fFMDWv79+aCG2Bh7tpXU33yJsYN4Nh8HJPfh+vAMdYYX4BzKeHP9On3ib5T9tQ6CBg8+iH5ZWjOXQE5rkvAx1OehOXHMcI8hz1ofHuOsHtfbq0+uZ0pOnCkaGA4RQQRaE6XorXSZ58hmZG+cc6tWWjNaNFfBhrtGOTn74fMzL0EhiU1cTehhz48Do7RzQrbiNoCGiMDN0JZ6JpzVukHP9s4K8FUsR6s5XtgjOjS/u6W5YUvTL/11djR82dZG9rG1TW3Tmzp7nm3QXDqgK2clL1TTRLOGYXjzXEc/Qu9+aTGoz1rfSmZP6wMrRvCnprwkPwqCHZ7FXLclkO4c7OOWQ0/Y9yowOsEMZfhHLDevVgXLD+pA7Q+Q//LGbUbMH723XdJfAHGG3WakOzq66jdniXyfRUXH4bCwoNApe6HbDQ/ZWTsgszUnZCZuBOoydshNw7HfO6EgpBdUBT4A3Djvz/TqlgPjZztUFdyCOxFV6CVe/ndsdabJ22KrbsbWibOa7DN/66Ocek2xjWMRn2tkUvKjjE3GDeAbY51gGscc4+/w20Ejxd0n2OQitZgOIdZn/wO/bvRnLQKYhw7p3JK761nIznkqbeBMXADIb9Lr90xZsYb1amOS4GL48xt6D4zSR7syA56RKLKq8AoO4dkPolsf5zAzpSUHIPS0qNIF0fR54ehKO8A5Of8AIVIH8Wof5Sk7SJid3GsOxfNW4rcrWfsrN3QUHIKGisvg63syqV6ybV7tYITgPo/1KJxrZVL4jpwX8fyYxtPNJHtHuM5MGGMQV/utAY+mddMnHIacka+B08+ZH9MGGMY6zB+saj0qy+Zved5krSfgeN2DK29lkG64xdQHnqIwBTjXGumenR/pO8WpNf6fLxPuAfM7CtQlXcGcjJOojb/C/YG66Ks7BRUVJwCZvVJghhVJ4BVeRRYaPxiEzicI8DJOQrCnNMgR2O6ofTk2bqyvTfqSi6AsegImMoOgLlyzw3Uju/idoblb34Ik4NxUWPkv2BbWnqxLhirgHEuU1C/eGEc4ptxE3Ldv3qAUcOyD0B15rDFEDNiwhq1ctx0QcH9vX1YdqwL3B6I9wqkF9TX9bg9Cck1njEFyZ50C+ipF6A84zTkp5+AjBQSu5OOXmdmnkJt/ySUFp6G6nKcfw7toVnnkH7Pg1xwjiAFeq1An6kYF0DHuAIW1k3CpkiuH+2sKyfrq8+faay6e74T3RMTxnp19OJ6msQk9cmLCWM0MGHbE+2id0x48xmyf2b7bwLvgS0kxo2QX4Pm8+ex/D+oxbPfeOPZJYnCrOu38FzYh1fC5yBaKbmubyxGfTwJ6SHmJ5DGXwJO4ikCD4Bj54szjhIYBIz7wXn3SOzPKchBdi0pOAcVaK5i09HahH8ZtLJrUIPGijr9dbDrf4QG7S1o0twhsCYYj4TxVAQGCPE/DbXrycZfaLzxoTZvJMd8jKfBbb77oX6AazwG4HHwlenoOkgP2Wg+jneY2TsG6og5MKj/RIge0XVGK35twYdLF4c26Tt8hYE/rNfEXgJ9ElpPoPWsKQrVYT+CPuQiSMNOgjDqMHDiDqJ+ux8qk9AeK/kAlO
B8f+lHiJx/WAc47x817TSBf8rLPgeVheeBVnIJ+MjOcgGaw9S3wF5zB1pr7xE4oqkdZE5AzOs8RC9NJHFEuO1iXNSzY3r10krqCM9xfbgibGdMvx4DCD2g76ei/+F75KG9ItVjce8YYIDB/fTg2b8Voh3GXTFK3130/uK3AsePH++6GYY+UR3yGofj+/lhofdGELhtJ3Kocbz3AMtvL5FTD+NbME6HwE0l7iVyDZKYo8O97YHUQQHOv5h1FoqyzkNZ3gWoQuM6xlApeTfBrPwJ6eA+9DSTOKrnppI5ETGWCrdZjKfCuRdf6MVT9eVIxPI/jKnqyyOOMW2tvbiqvr5A4JbQez3qy9m4jfqvBteBNkL+YQO04IzWcWGOo68YZe9tXbR0rmdXd4+rXq8frlRo0MpzaL+q7EksVsrbO+ghnwMd7ecqvdYBzXcjVAdsJbAMlUgPVVG9uCtEGOtT3osfK0XtoBjpoDTrNCK0BsFYndxLQMu/SmCwBFU3kR7ugEVK4tcmINs+PY7Es2F6tpvEZM3A81kzicnqriX7Mm7zWNbWXlxWHzbLJvwFn4VzpGP8kwzNT6yS25CSchKtPbZAwsgpBEbLaSiS/6kaCHZuOFMjW3r6/a8sjh2d41z0euMIjUYzQqXWjDSaa/2fmTspiVM9hl8d9+ZuZvgywHitSu9vkT42EZgtjD2rCt0H1TGkHnCfwHqoSCWxWzinIsb/YBwQ1gHO/1hFRfMl9Tqw0HqOV3ITRBV3QIHP2hH/Nilps0b1o/Z8WEYL91EZMcZL3Zu7A5MkvxfnlXsfGLl30P2uQ3LyCYgK20P0AZzH0nWYGVwcdeDratppkX91/LmFeQ5tbW3OOoPBQa3VOmp1emdLXUPg5ClTsxRqXbzaIs2i5Y+pr4h860KV36dQ5fENVHl/B1V+m6EycAeJXQvZT+QA/QW/dgjp4RiB/erDsJHYqIuEHjCWjZZD6gLjv3A+TH7+PUBzEZF3E5O4F7eGqU8uEZUkjGPjZ9/vxbLdJfBsffkzMdFyrgM99wqUof6XmHgEQpGdstE+DJ9ND+1nIs5e/T10X1nVa/c2tRu96uobnLRarYNKo3FU6UQjLQ1i7/GTu5Pkam20UqMvkaq0hSqrqICVN2tTlc9CqEbrRxL39j2Bj6kK3EOMDRUR+4mc+3iMpCUc7M29eYzIv1mZeuqh3JskDq4q6zKBa8M5PzHOrY/IPKA3H8jT9578rhcPh/a7+L+MrKskJo643kVCzwT+DM/FqD+mIJuEh6M1eQRae6N9m49LDXG+7Oelm1Or27DRUjMjBuP85ArlCImCMbzJ8mXtGPunt6ZP+fQZiZIfjOSvkKu0RVK1qlKlmrZZkLIfSl0/AxoijJtjeG5E68ytqE/sJMbIPrxWnx4qe3FneM6s7NUDxtDRkB4whq7igS4uovePEv6u4kFN4usw9u5hbB3GepYS/Q2NPemHoDDtIBSiMR9j7HLQ2jsezX8kxm4DEcue7DoVgj0tEOpryrfqtyyz1bxRKpXJh4tFiqEW3ccJtZqle+taavImTJkmtDdOSVdoDZWoHZRKFUa+XPDGBVHWYeCi9lTpsJbADrJcVwPLfSOwfDZDte8uQgcYt0YLP0DkPsV6oPXh75KOEnog87GeJMYIXBNyPEwEXvA4IVdZxtEHspHzzEEiR2sfRg/nas3vxelhDBy1FwOH953paP+N5cZ4PYzVIvA3Hm9Aiuf4z/D+t65m6wsN1k8bBXz5cLl44giTfPVeTY0q12C25nd0ja/u7JlQjeSvQuNAlckyZjYzZ9M1Bprz+Gj+4/nugWoHZHuX5cB2XUvqwGsrcQbNQP0BY4zpwQeBhXRAjz2A9HCA0AMeI/uIwB33Uh/2sDiVlK04dS+JQXwY44f2UTkJv2ARH8b5ZUSuI/LHkvi5VZAduoLI7UpgE4M/gbzAJfep/gu+zvN5mXgASkxADcWq39Reb/z2oEm1Rm6Uf7dLqbaUa/Q1xbqamtKm1rbyiZMnM5RaPVeqljP0irc20NAcx0D84LMuccxBEPrsBPbIDcBxXkXmvUU6YHpuITCYGL/IxDoYRWIY8bqhHM0VFYjK4vdCaTyJY8RUiPZDmLCMVGRHanxfXtydj8iJ7YjlJHF3qyCrV8b8iM+hMOIjKAxdfL4gZNGqgrB3XywIeb0lP3Quvyp7LMuHYv7NuCyL/oNko2rDGZ180dMCeWGiQjaaY7a1WVvGTuhsHzdx1tSZz38qFXfU64yjWwTl392uykDrvoRdwEX9WdSLexR47Qahy/cE9pHruhHY7ptITKr3DgL/iNtCOcZpo/1zScgeAgOZH0HiIAsJHOQuYk+cFYXsGrWtV86ND2R92KYEZi/0C8gLXYqus+hIUcT7L1akzuopjp+SaeqiDJyJ1nAYF0kX5A4lMIZ0potQogz9Z75Es3zb0FrT29+ZDW+sbe+avFRVvehgS2fPyo6JUxeO6Zn8WcfEyR/o5K/vsuqXnMd5fjHusBDxWxG7l5BfHn0MZL7HQez+A4idtxGEn5vCQYTzD/fhJ/E8WRa0E4pHbUc62IHstB2oYdsgF5+X4nOR8A2QHfEtpBH4T1LePkwlbr84Z29R2Ad3q7NeOFmVPaMlmjKWeBaK1k4bwuULY9RqtTNPIMDYyhE0BsMByT6CzmSOZLLZvkqtxe2fyY9Lc2f3802jJ2yoNb+4R6cd/0lr58T1HROmruuaPGPNmPGTN5gMs1ajdn+2ALXHCtQvmWjs5iD786IPgDr2DGhCjoPM/SjIXX8g8h5j7CVeN+M2QOSB9t0CZf5boDxgM5QGbYaiUZshL2Qjou+I3PDZoWRbxvISuMzQh/IUByyG/OA3VxfHTZeJbBQnFt9wnieUcfkisSfmXSiVh3J4PG+z2ezG4nAckMxYBw5YB0wW200gViX9K9lbO7spYub0J9q7J75qb53wfW1T+yd1zWMWNo/tnt46bqKuuaM7utRpW24lsj2WnY3mIQ7O9Zx0HFhofFfHnAdt6AlQeZ0isacu+x7ogOm+g8Cf4jVCBVo7l/p/R2BQ84LWIPqGkDVn1AqCsJ2xzBiLmhP0MYGLzPN7Y1Ou60uOQnVlEJPNDWawGP5MrnETm69fyBOKymQKxRMiqSLbYq31QutWFwaT1YdNdUSvnVlsjpdApAr7V/Lj0jRm3O/9hFKK2isDzVPM9HPATD4FvPhjBG5VEXUatCGnQeOLyO3EA+yqyG03YX+m12ZivYwxoDgPdWHAGuI8HD8TCM9FfTWWG2NZSRzuAsj2mbsX3zc9fDQFTvlQOHxJGrJvCJOjH8/gGEAkUchkSg2LzeWHd47rCkRt3xnLjLGtWAccnsCbKxBG8cW1w35XuD9QKgPXfkpH8xUr5TSwEo4DPwbthYMPgy4S57w+Q8j/ADvquh+4D8mPbY8xpBiHWUhgeFc8wMH2yY4xjRjHS+BJfeZCqk9nYmJQA3HvaoaYgmzuxhFI0lkcRgCLp9HSWeLqnskzlvZMmEA119ano7buits70oELxscKRJJgZPt/iHP4d0pByIuUa
q9Po8qCdyDZD6P5/CBwIg6DOvoKaMMvgXbUGdD6/YKdxfhNnP8at//qPvmR7fPxMzFwDqPAL39lcxLPie2OscBZ3lOP/BYfPLEhlidUVXG4bD++qMpZb2wFW+PYgwqNrkosU+agcTAAjXfuXD7fVyiWxir1DSNY/D+Wq/CPlGq/NfNxm5fHngN93C3QR10FY/gFIue4xvvsP8qPMaCE/N8h+5P4bQJDHPQFSYF9dl9KYHGxfxDbPs2n558+RE6issUrtLYFfKllL19iAYzJNde3XZUqLdPEclkym8MNQP3ET6KoifqPCU7Bz9UisYZojXtWGXX+vj7yGoE71oWcJ+RXeWLs7qkH2Ok++fHYV+5D4qcfYJCDHsUhE7b3fZ+wfbb3M7f/FR86SwOlrFQ7WFNjf0ult9+WKG0gVdeBSI5qZfMaDlsbK1c3VP8nZe8rOUEzKXGe+uFsjw1XVEGnQBN8lpC9D3dNYpdJ7DXGjuN1YHkvBpiUffkjGGwCU9tr+xyf+YhehnSfKb/LO0dI4m15oppMocyyVCAx3+eIzMATWdG42PQiJ/eb/jJ1w1+hAkp64AQKJfnJJ2mun2zBYz2WG5PM9SRhe/5Dbb/KcxNh+2Ikf77/14/ITsj/wPaLiJz8md6z5/4ZXphcEmv/9qKX+ptsrcH34CiB6OOL/71nMP7REuVPxp1mu82poY1cfl88cieIMPYbjfsE9tudtD0+IynqlR0/AyDb/yHZ/fGzCD4icODZ3u+iMe/ZeX8p039hyXKd3VLkvOAKA+2BmG4bgIb2QJWe30Gp1zoo9PmGeA4Slh3L3EdYdmz3LJ95V5HdiTYfE6j5vVv9jy6RPvXJme6zp+S4v7IF77FzPRdAntdCNK4thlzvD1C9kMxZ4PPW9Rzv197J8X62Ev8vN+Sdv5v1/0jpHzTikfc+ATTHBO/2+BSvnoxUz8npqI7uF0158CzRx/j1x+VxeVwel7+vwJ8tVzrIurv3fR75/q4/+fb2FSpR/7zS4Wv0ruMWTnMAN/wvUfrPBJg58wKl30y477AFLfInwv1Bp69QKJ0ddwe7HyHTFTisJNnCdQCqB3WS73GNn4uBsQH9emuc26APK/BIZocne+sRv3r/r0peb63orf9ZnoeOI731FbIeTdb92nvrZvL7QR8QdT94mbggFYi/DwKyTgNYhQW9jhSF6n69df/7qD6C6w6iHnQbKRVddxBOjnEb55FAF7iLazLHA6nylRTSAlf6/WlbAvk8H8ziSsrjPBGPy+PyuDwuj8vj8rg8Lo/L/75CY3J/9zcs7v+8Zwf+/16MltZH3ksUZgetsTFCoxubqtCag5gsrhuLw3NiCwW/i/9+XH6/9OVtwkVrau7XNKZnRH1zV2LLmEmjG9o65tma2l6utTfPNDc0Taqx1k8x19mnGGsbzAq1rlwolScLJDJ/gVgyWCRVPLbHnyxCec2D1/bRXfF1TWPM1sbRL9TUt32m1DW8oTHauoyWWoveXFuHyKY1Wep0Jms9et1Z1zLGJBBJInE+LZ5QlMQTiMKQPVz6rscR/Ofyp/xvLHItmV7EVN88WGdumac2NMw3WurscnVtF4tru8Lm1d0WSppvGmpGf9U8ekxdc1u7ob65zWKpb7QZrXVGHO+M2n0w0n8whyfAFIYoBn0WK9cbCd/F/695vf7K0pczTKS39VPqm0sUmqbD2pq6Vm2NRarS2yxiWfNBjX4smC09BOlrxhG5xDT60d/UN47tHt3Z09bR1c2rrWuIUGv1gUKJ1A/NBz695M/icEJRX0hC45GzwdZF3KuS8efzvv9vLAKZiUJnNRKvxYr26QJpC3CFDYeEkrr5CnXLKoWm7WeZog2E4uaTDKZtbVW1eX55lWluJc30XDXDMo/Osizgi2ufVRsb2k21dVSzrT7caqsPVqg0Pkwux4PB4ngS+czY3CAuTxjL4Oge5I8orhD/U77+LxScL66SXke8FkqbP+Pw7cBg2wCNNUjfjSCUNHzL4Zm7OTxdKYenyuaKlGkihTRerudHK4zcaJWJH6c0ylMVGl2eXGMsVWh1VLlam4LGoUicU89ktvjy+AJ3Bovl1msHPzaXG15Rrev+Hdb+TxXU5pewOQ1AY1pvsnjWzWJZrVkik4UIxFx/kUwwii/mR3Np5kxe8bjiyuQZtvLIF18uD3/1/cKwZz/NDe/5OCPWOi+fqtRX0qR0gURVotRos8QyZaxGZwg2WSy+QpEYx9a5IvJANvAtKxflFJRoZlbQDcScUFIl/btV8N9euCILpbHtaZwzr4XJrQM6y/Q5R2Dgy5VCH76Q7y+Q8kcJR7v50NPHcllRn73Ai9y5nZd4AMQp50GM8zYgkiVeAnHYYaB7fgdU9xcgOrD2YkyM8ousLGmrSivPkikUiag/jML9QCKVudEZTBc6k+leTeP6Z+coPigoUkzAvKRl/fkcWn9VMf0qz15hQdEgmdqWZahtkeutjQZrY5vJ3NBSVdfYGtj3G3P9vxcLgXQfwuKh8YZXM04iV0WKpVJ/NE8G8pSV/vQiTSY7bPVqadrFGwSmigmA8f7MXpwrrvFzHuWIpGn3QBZ8nMibFOZhh9BRujtRkbJDpWVCtVIji5fJFcGW2lpfdH1sA2c2j+acmqbqTEuTQmGxsh3z8nc9I/3h0pcLEZfG0eMK6lrae9Ba8CVEH6oM5rlaU+MqtaF2bm1j62KLvfUTa2PLzLqmjiH4938kP+KvC0dQv5HJM82Wq9SjRGjPxBFX+3ArayOYCZ81CtJOE7p9GE+NicihKPsll2Lf88Mxxg3neikZugQC3WshOAg/h1sOaWnCaWgPkIZsEITHIoFQ5MrmVjsmJ6smJieqIDVVdBuNRaWYn/9kfsU/WvryMNaYrP30JsNAg9XOR/vKaSpdTaPR1rwC7Xfmo7ZZj/b5cwVSw2yDpekjY13r+tZx478ZO2HKTvvozu/r28bG4Gv8q/yMfYXFJ5/zwhU11rF45rVypSJYIpH5suRpbrzEVwt5qUfW4GchY73j/I18PQBX2ZvDkvtL/k5MuD88/Ax3aRoQMYwZQ14CX3cThCAbhAbLITFR8LLerIhWqrRBVqvVp6pa5JGYoF4QH6eG+HgJpKeLDhSValwxXyJZ/V+m618XkUxJsY8l72esNUeL5caxLJ5uAY2lellTY5uqtzS8jtYVTUj/BqnSNEWssIwRStVGZAu0DzV3tHROWDW6e9Km9nETV9uaurA7mKK3/D7/QukYZxbXulMql0cq1Vo/tinEiR3zCYOXffMGnU3qXWkGIl8GxtLj3Jk4RyDONSJOuUc8w1iAnwGceJPAcxLjEvodzrcgj7kMtKdWQuCwNvD1NMKoQC2RUzM+nveS0aIMR3NyoExuDUtN1qyPw7k14xUQGyeA7BzZGsxbWhbzr1T5g4JzcBZXGYjXSJciOlu/rqyy5kZRkeFeQaEKkC1eNNTaZwslcrZUqVYgOxj5InUesgUXzWl1cq1pGtojLbQ1j363tWvit62d45f90XuzBXVGrtCk1eg0AXI13Y0Tu1XPy7sPXCmZ
p6O2iXzWqpxPPrtUnILad/QlEAaeJnJ4cp0OA3/EARANO0A841oQeQmYTLJvKLJwLsN9kDjwOXBxMBI5Tf19VMgO4p8yMwU2NA9H6Y0NUXSa7f+xdx7wUVbZ348oivQSSO8hvZBAeu89md57n0mZ9E4SaqiiiGJBcVFUbKhYwAKKoCBdQUCkI00ExEL3vOfeZyYZ0N119++u//d9uZ89+zyGlGd+59xzz33mmfPdRHq7RkUqISJCBNHRYsjIVk//jwn+O6NMIB0glldPL6+ogZLCZijJ6YCclCZISzFCaYXh88q6pvlCibRIodbJ8brTJApVAdbaTeiLqXKVsRH9YtBXWqc1dvR83NDRvaVp8rTJ/+xv4t5qMF9snSGRG2JqG3We4qTDzdwSoH1xOqYDNKD2pKcY4XIS3QnHWux+mmrOHfoVcO7bQfuIkr6SpN8V6btF2PS8CedpfiJMT9KPK2/gShg3uBZ83DAP+ZrA10NJ+qoeYXOlhXKVPtxinrK8qKgaoiIktLdoeLgQYmKl1zNzNZn2a/X2/s/MhfLyJqfyquwBLFbzQ6zSdmAV9AIrex4UJs2G7JgpkBrTgD7Qg9ZSt0wslRdIlepslUFfpDEYlCqDlitTChPFCkEcXyyOx71+tlRZvUyhrT/QNHnKe209vb5/7++SXq0ccc1Ynriqp3Ea31OWultPtNdaAB5+DKClBcCE2muw3lFF/ER15I86SvvVsu/dTnvildPehmuh2NbLlPS2JMYe/AWUlDB5ShH+A5QMXAcB904Fv5GNEO5fD/6Yi/w85LgmC9dYrNpoqbSpoap6+rX0dBWEh/Kp/mQexMVJ94YEaH93Y/zv9Hq9fbDLuhktWD18XvE8EOYtBn7Ok8BKfgKKJyyCnND5kBU2FTImNUN+oeFdlUGWJJSrBZlZsikJ8bIHcb3624Qo1dMJkxS9FWxpplwjiOQLxfGFpZoVUnXdubaeacp/9Pf5EitXpFAUaQs3iSUYq+ZG+JX0jO2ZjDHPZ3poysd/B5KxqP2w/cC9bxdw796Csf4J1d3eQ7a/lyrTT5UcWbg+yMnviL2G82QbRN/9MAQM7oIoz04I826EwHFV4OemhPg4aQtPpMuWyNq/r7RMxRqI9JZVow+UuE5IcE7owM9TR3rM7grw0T6EJsDzQH8vrYu3q2Ekng8N9tcNCh2v/bfubwvKFjiLCh8FSe4yEOW8ALyUF6BiwvNQHPIM04s27EHIjZ0OxQXVz7Jym7UpAd03EnynQoxvF4T7NECgrwF8vYwQ4GOEqHD92tJySZJUyQ8pKVUuVenrn+uaMev+2/+m/bNMPLFlgTxjXZ6gCH4iuX7FSoBZM5h8I4+9AUr/0yAddRwEg/YBf+A2YN31Ge1fW+EQ77drb+/5yEq/wfT0ngj05xPxayH39UKU83SIC5gCoWPaIGhcDQS5686yWLLMgsLqnaS3r1I1GchaHB6qhcgIrEkn6SA8yIQ11C39bIlPSD/bXXj8GO01tMfQZqB1o0+a0KrwXIemsZkWv2b29lLW+3lqOkM96npC3ZtnC3KW7pbnvgKK3DdBkv4W8CauAlboG1Ac+BIUBS2DkojHoChxDnDyWmpzvZeczXNbCtloKa6L8LVMBbcxRvAYawYvV8oLhuBAw8ZyliiewxMl8SXm5egD55au3y4FUlXTEC63YTo36/o6KdY4q1YDLH4E5wBZN2Ougtz3NIiGHwHRvbtpz2TSQ9auvV13xx7Cjn2ES52WASvhe6o/7UN7/0HIdHoJou99ACaOmA8p/nMgevRUiByNc2FMA0wIMC7lCaqai0rroLFhLpSX10NEqB7CQo0wIdIMSXGVEOhpor0u7GbvN+xo9r7DtvObeLyBdt1uHi7am6EubZA4Zj4kjV4IhcFPoe5vgabgfZDnrANRwsfADv8QWEHv0V7E5WEvQsXEpcDJn/5imefrx0tc8bWiFbq8ChnDl0HE0FkwbqQFfWDGv2mCAI9K2o8hPFj/ttbIDuSK9Z0SVZXb7807nsiglmTv7Cb14rMrUP83AGqkTL9mqdcZqj3pW0zyDelhS3oX23PN7boTK7qlt+0zUDbpLK2BtEkAksFHoAC/P/7ex/C1L4ZU74UwafQ8mDBiBu1rHOnc+GMp7rtKyqzHBIJ26O5aCLlZdRAZaoaY8GqIi7bCxHArZVKTXsbk+K+an3MtpI9cAvkjX6JWNPo1kKWuBl3JOtoLWJH1GdYNWDuEYA0xfh2wQ94DdvQbwEl/+gwr4N3dpFcLi/RrcVsHhaPWQM6QFRB1/3wYN8wKrqMt6G+MlXE4t13nQJBbA+5zFBYOT1ImUVjiHXVnC5m26cKKyVMw51+ePI3pAdhhxrU2BmivQSbuv6C1DNND973bcs3r6IvXbukpXIrxbe+nXOS0BNgJF5i9AuovHXQCv3c1JA96BtJHk88GPg7JoxdB/IgFMGlEL0SPmAKT3Frek+vN6oIiK5jNM9AHj0JaYj1MCK2DuIhGSI5thmDfKtpj2dHI/L/9a66jTX3HsaP06ONZUDDsDXpPpGTEGigd8T7wg9aCoewT0JV+DqqCbaBI2QGCcFzfQrai9p8CJ2ItcOPfvMmNffMA22PLTbbHNiBG7m8Vj1oHuUNexbp6EbgOrYVxo8wQMLYFMse9AxkeyyFx7EMw3qv6OIvHSyhl6bi/F//inIPfiLGuf3Y50zuL6KT0P0v7pdq1v71XeYUt1stsepc69K8mx2Jbj+MCp0ehPOsmyHEfZsD8o7rvW/TNWtqLOANjL90VfTDqKUgc/hj1Qfyw2bS3dU58jbWcU/tJbn4tdLQvhMb6hZAQ3QSTQlsgIbIN0id1Ur1Jr2vHPte3m/NwE7WRQzWQNHwJlA5bB+UjPgbWyA3AHvUZcMdsBmPZRtCX7QB96W7QZH8FsoQvQRj2FfDDvgB+xFbgT1qP+8k3vmP77PqB48X07SL9SEpdd0LpqI2QNfhNSB34Nxh7fwPNP6Pus0K09+uQNn4NZLi9DLHOs25MiFDhHlqXd7v2ypijUXw2XKhrBXjyYQAL6TEVdB6Uw0/QfM9oz/RKJ7rbY72vT7ytfzbps1psM6J7IeZ90kub5J+iPKbvnAnXX+XAb6F04MeQNvQ1yHR+HTJdXoCMMc9CyoiltK92woiH0RfzMDdNOyiUVdaVlNddLSltgQfmLweVbA4kRbRDZlw35CRNof227frajfTath/7zQyZQ1+CiuGYT0ZuAs7IrcAbtQNr6F24L9wCxopdYK44APrCQ0B6EUsmfQXS6L0gjPkSBDE7QJy+9iY3eNcJts9hIMbxPQIVXgcx/vdC8ejPIW/wB5B29wpwHlSPOdECg5xqYNS97RBP+l3447+NWwaRXg3ry9ja3Nv152fcnEzu6cyYwfQB05B+fKOwxrxvL+q+qZ/HgHrb84o9tzhaLu1hvoz2Oib9y/Mx7xAruPtdKCrC9UQJUBWO8X/vSSgZvB7z75s4R1dD1rjXIG3MCsgY9QKkjnwGkkc8SXu8J41YcD07fPJkFq92Z3FJI+i1c+GpJW+CXDQ
P8pOnA7twJvDKeo/EBnXCuCFWcB5SSfuPkT7nowabHMwCKUNeANbwz6nuRHPOmC9p/2+R71eYd1B77kGorDgOuuxjIE08BNJJX1MfyBK/AFnGZzd5MRtPc4OPAi/sEAgiDgE//DCwAo9AmcdBKBuzA8oGb4Lcu1aB272t4OXM9IMjFuT5Iu09kub5DkwcN/NmVqauxFF7ecL3wzlF8CnpFT61DeMzA3MP5nymX/rnttryNao3ietb+6Uv64txYtn4NaK9Y0/xTMw9+RgfZF0nPc+qAn8FFeb/nJGfQrrzash1+xAyXFZB+rjXIcP5FcgctQLSRj1Le6snj3wC0lwf/ILHtWrY3CYoL+2Ers4n4LllH4BCOP+QXNbzvKmqsU0umrNx/MhO8LyvCVww/sYNqoEx9zE91wcP1EPG4FeAN3QrcIbvRO1R9zF7gT92P/BcvwFV6m6o5B8Ei/DANUPR8YvqtG9BEX8ExHEHQZG4D2Tp20GU/OkveX5bbpAe7KQfOS/yW2r8sGPA8TkOrLF7gD1kF2WHeN/TA96jm1F7He3J6Tx0OkSHbILkwPWQPO4ZCPWq63bUX5B5w5eN+cZoZHrBKcN+oTmfN2An1Z7kGLvWpG+73UhcZ9uM6EyO6bS/+dNUc3sfd9L3Oi/iG1Crmf6iVd7XQDTsOOSO2wRp7h9BjudHkOmxBrJc36E+IH3dyVwg/e3TcG3OGLcEimNndfFE9Wsq2B1QXtwDCxe8Ci+9tHpVVW2toqVrSsechQtXSNmLj4wf2E17vXsNbAH3gegLtJz73gDekG3AHf4FCEbtAT7pozL2MAhcj4EY49fM+QaqJN9cM4s/XGbMPQvK1BOgjD8GkoSDIEvZC4p8XHuj99ws9MV4xJgXTThHjfSEF0VgTR6CPhh7kLJMSF0Yctd88B7Zrz85+vl9AnGkR5r7WxAxrvOYo/7CtMtaUhdaMP71WUD3WJJ799E6h9F+GdU20+kxh5h+jGpMtLVrbe8rn2zrLZ9s6ytPei+XZP8Kk1sAJlfg/PL4Gbhjj6Lmm2nvRto725v0EH8P1+HVdC6QfEQYA2nOz9OeXjk+T90QcFtqecL266zyqSDizv1lxSurHl/+wgvtTe2Tq6b0zl04ZdbsZ0Slj54IHjAd/O/qhoABUyHz7pV0ry0YvBv4w/eBZDTp4XIcpONOg9DlDOjzDpL+8z9WyTe+ZSg+clWXeRYUySdAnnQU5CkHcB+wGeRpXwIX456NOacsCPNS7CWQTfyRsegLIAvHXOF6kt5zFDhthRjUwXdEC+3HyvRlVYCz80KICPmK9umLG/MIuHiK+3pY8HPgFSHGfRXGpjbqBsidTwD3rh003xPtida/17PfbkkOvfvtfe1J//54p5m0r30a1saEAbIA69qWFACd70Uo8T4Emb5bIT3gM8gK3wg5YRsgN/gTyAtcC9mEp+HxNsb9m5BO9jXuL0GW9/OQH7hkr1TdtJrD7QIeaxYY1Yt3r1v/yfznV7zS1do1tba7d+7DU2c9+Do/e9G5aLwmcg+Kc/dm4A7aCaIhe0E84jB9baTvv8z7NGgmHAGr5NANq2rTx7qivZcUaftBmryPak8YAMq8HaDO2kO5B4QFIIw5BayoYyCYdA7kiddAlXCV3msnTAByL4zkDInTbnz9SyBgeGef/tQHA3QQFP417ROYgnEV4GKlkHvCCRCVwCnCyTBhbKpDL4Bk2H5aZ9pZGHaNE20a23WOczgnWsdSm97XX58wBCKdWmnd0z0VYF4dQGM06h91Dsoj90N60HZIDd0GGZE4L7G+zgjcAFkBH0MOxgjxQ5Yf5iS3VZQtkOX5CuR4vHC1LG3uGpG46zKfPQNErEXQWPXcibUff/7U8hdend7ZM6exa/qD8+YtXvK2KvadC+SeFNmnC+7fC9Khh0A26lvKHSBaEYZAJfvgzWbNvi317O+PVGdfA1PqL0D4E/K0PaDJ+RK0RbtAHvcDrgWXqEkSL4IsHnNP/Gn8vhtgSLuJe/nroIr9BeRBZyljQ3HXQYzT5yF4aM+t+uO5m9sKiMV6Ns39A8Io+KYv/1Qw76GQ+5ok94ju20Xre5JziPZEZ0d9Y/tYDbNQ3zmo80za2z6SMg06IQw1D3NqhkCnKkjEGFfiuvLcU0w/+7qJJMedAVbyV3092bJit1MfZIZupkyDfrYDzgW/9yET64Zsj1WQ6Ub88NI3Qn7HlxLRTBCw5oGY9Qx0NL0N77y179TTSz491N25+kLv9LUnFj2+9nAj993T4vt33ST3OqQjUPMx34PG9yzo/c+DPvHklWbVsT1NnF++rcHcaEq9Aqa0H8GY8R3mpMOUg6NJPQ+apKuYj66AKu0qqNOvgDrjZ1BnXqRWXcLwEAyJN2g/ePFo9ME9R3BtfBNCh02z5X+VzXQwbHAnTJiwjzIiEkY+9r2zmyCA6s9l3ksh92WkY8/QmofUOiSvk/gmuhNt7UY0JhaOcR529zQIvaeLWhCue4ED2sHXqR68Ufvg+xdDTgHAI4sZjkA7zoOW8l+hknMWWFn7sX7fBYkRTP/8nLhtlKfgOBfSAj6mfsjyZdaGTI936XwoCH52jVDUfV0imEc5E1L2Cmivfw9WvnoAHn14B3S1b4IZU/ZAc+POa5XydSd0XgeuaYZdAIPnedD5/ADG0B9v1pZfvFRT+OvV6sxfoSrlOhhTLoMp/SKYsr4Fc/l20KO+OoxxY8ZNMOQwDJlKrJ+rS3+FmrLrYC76GSzFTG9v4gNdzGVQeeJeddARKBzwAUwYOgdjXndLb/7776mB4ODPICloK6SOfvFykHM9h+hPOQVicl/yJkhHH6NruJ1VYtee0ZvhVgTT2GYsAGvdgHvJsdHGsKgGr0EWcLmrCdLjT0DHFICN6wB65YzNrPkVapXfQXnBfkhLQv1xb0NYDtQHSdsgjfBN0AepoUz/OjIXSE6y93khfkhzWQOs3HnvyySzQMx5AATli0DOeRNz0VpYuuRrWDB3PzRY94CIv+cXLndVrXFaRW6lcPniuorNX3XJTx1sFPx0qSrz2k0L5g8z0ZhqfwksuaehsvwrMOd9D+YsRnMr1oUN3P7+8H294MU3oZ5zBWrKr0EDfo8F85Aa55VwyFEov2cTxA5daOsL79gbH/cCASshAed90tiVEDG6q8euP43/2GsgHnWQ7nPTcM0lXBhHXgjRneGjMOwMwpLwwnMP9CvRnLBDxg1W4f5bTnkIpK/+J58w/b5J7iEsmQe68TUZzmIN+Q1k3MbTmBSxmzI10ifugFTCFgnfQvkiJCel+a+n/cWIH9I9PyDsnYt81qxjavl8EJU/CtKSV0EpWA1Wy+cwpetrqDFhjclZ3yXXRXiKZcowkXhqamfD3vdnLnhwTWv3zCUmwYYPqrOv3KzGvGPO+BGqcs+Bhb3/RlXJGaix8UdIbBPNaT9+bT+Pg5yTPvzE2iS/Uv1NyTdAH/wDCHGNKR+0HeIHP0F7Effrz/jA1+MR2r812fU9cg/qhbFe3Hvt+msmXgfRqAO0bkjBeofhlbTbNGeYJXbtPWwMC8
JzGOVkgMF3aWHEvSrce0poX1VyH+mNVQAvYu7pYjO950kf8lmdGG9qzP+lhyA9/SvKsIiLQ4vp90PKhC/Qf6Qn6Xbaq5P0ryR8k4xA3K+RPmdoGZ7roTDy1cNi9kPXazWvgKrobdCXvA9a9segFO8CieSjbqHZzY0rKfYWSRQBWtkTqV2VF75u71r47ORpc5dbrJ2tRtbXp2ryL0FVyalfqpQrV5jLDp8h2rdw+3Un/c/tfd/JOTmS/v+PTmGOpG86YWLUkXsrEb+AxB1ja9iXkDxkOYy5p/Y2/XUwakg37Zub7PExTBq1YLO/m3E4n82sv8pJv4LU+RDVn9TwYVi/EGYGw0ypoubhwA4h7JQx+DtHol+H4d8ZQdbb4L2UJ7r0OYB3X2U4IjOUTM/3RWit1h9BIT0DJSWHITv7axtHZC/Ex++mc2HShK8gLnIPtaz4nZAUuQuSw9AXIVshLehzyldJ898EGYRF5bENeGkv7DMKX4V65ftgKfsQTCUbMU9vwxy9//Jk9alPZjcdNtXU1gRPa9tWNX/yD0fbOh+a19Te+1BNQ2dvY0fPrIaWBUvrWrsfqheeOVaVx7BFyB6R8GeI3j0OHIJHHFgsxIgPHkL9p+qZnzPE3gBF8BmowL112pCV4Hp/02/ifyAa6fua4oH7sZGLzgU5N48jbEay/yL1jwz3EWUD1tLcP96BV3Mrs4XhthCGCdF/lJMI/O+dQ/spk/fpn3wGYNXLDFOC5J35DQwrobXmChhV54HPPgVFRYRhcpAybdLTGT8kY01E/ECYLmQ+EB8Qvk0S7bf8BaSjHzJDdmLduhMy/HdibbQLcr32XBZnrj7epPwQ2tSfYSxvgcqCL6Gu4CAQ3gdhXLRWrsiZ33N2xeLeq1ebW+f0VNbVttU0Nkxv7Gx/vLH++d3NotOX7PyX2op+5hHxAWEBkBgnvASiN8mlJI9S7TuY10bin8wZcl9XjfsxjttxyBmxBgIx1hn9+9cAwooJ8XgZUjw3wMSRj0H46C5vXgFcIPfe1DgPVGGngXP/Fqx7FlFWEol3won5rQ8M1JwJb2TMGijIYXgii5cAPPO4jedSxhxJv/tazRXQSy9Qlk5x8QkoKGB4Orm5RyhHJjPzG8rVScM9UAru+dMwLxHeFPFFKumZPPFLyIxhjPQUzsR5ljN+P+T5fw2l4w9c1uav/75VsRW6DV9AXdF+qM8/Bo24jrYW34AG8Z7mVtPFfS2681Cl+nx/Y9Nj7zQ2PPtZnXbjmdqK729MlTLMn6lqRn+y3hJNyX6FcHiI/oRRscDByNcoq8HM5Cl7HUTWcylqmOW8HiaMmmtj8ziuwSpwHTQP95dYg454CoLHNqXxMn58lXJ6hMyzmmKXw5Dv9BqN/3G2eO+Pe+bck9SXAx4EVuxZ+gyW0oLXtZDJMSSOWrFWaylj7mNrWZgXKy5AaRFqn38SNSccG4bnY2f6EJ4PwzdiGEeE65ON8yIT50UO+iML50Yu+iQP/VGIRvpaF6OVTvgauFG475+497yh8NOfCOunmX0YGvJOQVP+eWguvAIN5Wc316ku/lIjOQtW0RGolRyh7B9L/kXKvrEzYIgPSL1D9CQslFnVjM2xsUDm2Hggdj4GyU9kDSb1EKmPiN/I79AnX4QcN4zhUU9SHs7tOWgwakdYhgnDnoXx4xqNosK188kaQJ7PIc+oySf9BIIxBynv1A9rnlGUkaWhmofgPjhh1CooizlM38slzx6aG5n71jOamHlItCdMN1UqridZPwMv9zyU5JyBrLRTmGsYllBGxnHM/yeoL/LyvqV8qVvtKJQUHsG982EoxTxVgj4pQZ+U4RwpTTsAZclfQ3nCAeDEHwJB3FHKQlIknzhvLvziu7rCr2/U499rKj4PjUUXwFJ4EuvKs1DNOgu1nG8wxg9cr827fqOdzeRtuw9I3mkVM/qT+Cd53W5Ea1L3ONY+jpwe8jPEV4QZVl16DbJ9v4LUUSuwBjLfFv/MnoDck0kZthzjv2WWnPvkI4KC767YGUXkGRFRGu4HYi6AOOA4cNyPACfoGPDiLgDhdrNtzHIxzr1q1L61jrmvTPhNhOdTlYTax98AUdKPwCLMhLQzkJ18ElITT9g4RowPiC+ycL+Tm3sS58BJ1PwUlJaepMayGaf8BPDReJTxdBx46Bd+wVEQ4rzhZx8DYeYJEGWeBnXWd6DJwr1V9qUr1SUHD1YX7blmxd9Zg/415hyAyrz9YC7efrWm+MQ5cp0dXKY2IHPVzn8i2hPrkN/KgeqQ36q1nQXVaDsndSmZG2R9JnwdsmaX4j433fkdcBtU/xv9yZyIHPw0JA9+HkLGti3RiBYtVpauXYFz4EaF7Xlmko/sz9Lyb+NGcbXMs5/0frWWmXck5psymPe2FJOuAD/+Iu0nXZJ8mnKkMtEIR4pYPO7LkpK+xTz/LfrhJOb+k5hzTqEPTlOuVEXpaaioOAMC3CeLeIxJbabgngEZriFy/HdZ2RlQlqHupefAVPYD7oV+gXr2TejAa23h3Thcyz24t7Jsyx5L6da9taUnjqLu3xPNKdsNdZ7qwJ6ieyvx7zOofk97Ev+UzWdmtCfrA6mJlmON/RCu1ez0Q7hX/wTChk+/hc/G6K8Dv3vmQdr9LxD+8bta4VPPWmtbuNrc02vIc/zkWVmitf05cvuz5Wy1jVkvZ97HImtOUwmjO2FYkXunipgfgR97DirizlAeB8PrOU45VsQIz4uwrFISvrWxzU6iH05hPjqFazj+TMEZ9MF3wKn4DvU/BxLh9yCXnAed/AI1k+IiNYvsIpilP0AVWo3sJ2hQXIFm1Q2aH0juJprMwWMvajQLvzZTzayvJD/b11q7kVxDeG6k3iFm554RjVtsRmqIDv2t+wFidF1oYPYChC9FOEtP4BooKsb57reZ8vEG9t2H09gYWQbM6a2QOug5CBrTtsUgXP5KbW2XYNWqVROkcXvXCzJujXtyJO+bk2cPCcunns0wCOtTcL2Kxror8iqooy6BNPo74JJ7tBOPUxYIYQXloxGGVk7CUchMYozcl7D7gDDF0ghrDdeGNMxThCtWUvAd7o/PA491HmS8i6AW/0D5Ymbtj2DV/wx1xp+h3ngZmkxXoNV4FdpM16DD8ivlYhGuEmFKkXqRsLVIfU5qxAV1DHeMGPGL3exrrN3s625vZb/GdrOvvY5m15/UooTvtHQB5iH0g5z7PaQEf0nfRxo+oMp2D1RHGU3kPsQw3Mcm3ruE6L/fIF7xVq21V/DayjejH1o01U3it2OpIurkVVX8NcrvJPdCyHpak8LEOXn/tir4Mlhwv60L/Q6UYSdBEI65OfowsKIPQnnsN5RTRBhmDHvkMDXCc8u1+YDko4yEb6llJdmZZqex5jmL84Bwzc4BG30gwLpJxrsEWoxxs+YX1B51r0TNrdehq/ZXyiDrbWH2F+T1k9h7ei6z3yO5mNTqv8c7o2zHFpt/Wpg6084/s3PAbuegOdrtfiEcrXltDFuLPDui5v8IibiXzHBfTTnRZA0g+g+26T90QA199ixoTMtxs/TVd
+us8/mvvPZqxAMPPuDWtd3pXq7v6xKRx2efyTy+BBXuJ9QuZ8Dodga07qdB63EKlJ4nQepzHIR+R2mvfHbQIagIZThRhJ/GsKLwfNLXlAOTH9/PUcu1+YGsC4SnlolzIDMRtUcf5KaehXz0QVHW91CG9Tur8CLwMLfLeD+BXvIL1Givog+u417Opn0H874Oibkn5vez1kgeJqw1wp1bOpvJDfZ9q33/ROaJna1pr+n/EYOO5B1inTYumz0fkSP5GmGY9VgZ7pmY8xPu5/dChu8GCB8+py/3D77bDEPuMcCwu6vps5eBzs3fmiVvvldb9aDwhRUvhs6dP9/NarUON7YV3c/JmOJSPP4pVqn/K8e4gWuB7/k5CMZuowwWgdsu4Hp+ASyvL21sJobDQthtRSEMt6wEawDCsbPz/Ox+IDnJ7od8rIUIzy0f1wGiP+XaoQ9yU89RH5Ri7VqW9wOwi37Edfdn0Apx3imvoh9uQgvqQjh386cwnFjCunv6IUZ/R94d8QGZE3YfLHbQ3669o+72GJ/qwEK0156OHLxG+W+NcNbIc8qc4p9gIr7u5OBtkOLyN1vc61B7Cwy7Tw+jBpnos8eBzvUnLNJ311UZFpmWPbcsaO7cuW61tbUj9Hr9UJVaM9RYKxhM7pHmx8yVl0Yt28uZ8OZVVtAHUOaxDipwD1fq8Rll5xFeWrEfYYXtoSycEhtLsDiyn6NH5kPBxIOUKWj3AeE/ER4Uw9RDs/mAsM2K076HgvTzUJR5ESpyfgBuAcPXU3CugFZ0DarkN6GB1OVWhrNH/EB4i3QuzGbssd5+9h7JDaQ2eaCjP9Zn2OJ8ajUTv4RX2aHvr/NpXS2/lcfnyOQjzDpihFlHPouiwnpEXHgDY+cHiI09BLFhu+l77p7DG+kcGDqwEkber4fR95vBc0gT+I6r2V8pe29jpfbJuU8984T/zFmzXKn+BuNwrVZLTaXUj6rtFo9pn9kYpmItbCqb+Ld3KyJX/kr94PsRlLmvp34gLIpSnx1Q7PuFzQ/7oDCI8UNRxNd9OYlwHQnDjjANyRpNGG6FlMnFzAPig0JcC+w+IHzDMvRBWeYlKM++BPy8n4FfdBkU5ddAy71OOYzkvWviiy4St6jpNBLLTf1G8jPReppN6y5bHunQ/5YHeDv30K4xYQOS92iJETagnQ9IP3eVxzD0uFk38TqvQF7aBcrGiwzdS58vmDR2Pq69Bhh0txXGDNPT58HcR1jBx6VyU7Vs/SdV2mcfeOTxh7xnzJjhQvQ3Go0jtDo9sZEanW6MyVLlMbmrO2rOvLnJal11LKeoQ1kS/fgXZeErgRPyAX0GtQzr3b75YPeDP86F8TgHQhg/2DmCRegHO0uw0MbRs3MVCVOQ7BsYlt5ZKLXxFUvSLwIrA3MR+oGd+RPlLPJyfgZhwWXKWpSX/kqfkyfPa1cK+7mLtbflCLu2jvrauYt2fe38RUeNidmZhXZeIa0V0xlWITf1BpSnXaZcxrSk7yAm5ihlEyYFfQ4Z454Hz1HV9N6D60j7c6pm8HE1vVKp3Pheje6lh+Y9ONOze8qUcdU11pF6or9eT/LQKJwLzpXVVq/unikTeufMTdUZTeFKjT7V0CBNKc2Y3FEY/NS3rLDXfy31ehdKXNZBufsGKMO1guQkyqfCvbh9bSB+IPw+skaTezekViKstjKcC2VxR6kPmH3Dt5QBbPeBnWlI/FCWepHyFwnjsSL9EuU8sjJwTmRcBUEmw3q0cx7lBYzZ9XPUkhjhWkptfMvbWZD2I+FBEuOm9uvMvY0LSfiJFek/0WsqxOvMiD8NEyYcghB8vQnhu+h7p9HOM2n+HzvYAh7jdPQzAn5uxoeqVJter9K+8XjP9C73jo4O56qqqhE6g2EkiX3Uf7TBaBpbWV3tPXnKlOjZc+Ym643mUKVan6rSGfI1FlWGRG4oLo2ft6Q0bNm5Io83oMzlfcrRJM8FUz947aJMUTIXivy/pjxNMhfKMCf18SRtc4GwFMvjjtL5QNiBpYkMT7FvLqT0MxXtrE3GH5eAi6+f6MBJvQLctKvUCHuTWtr135hdR0ZL5shOuU517df2KrCSr9zKqLTxKctsfErKyrYxKknOJPV0Ml5/DL6u8VgTkjWAvH+X6vwM+I+uh/txP+DjrqOfz/DzNNTUaD5/tlq75rnWtnb3hsam0UaTeTjqPlKj1Y3U6o2jNQbWKHMtz7Wza3LErDmzEwwmS4hCo0sj+ss1+lw8z1MZdKUS4eTF7JS/XS90W0H5leU4F8pcPwWWx2Yo99xO/UDmgp31a+f2kZzkyPV0ZFraWY52rmWRzRcMq5LxQ0nyedvr/8HG7fyJGsdm7Nv4n/0c0P5z+/fRn7exP/v5nwwDtAL/DvO3vqdxQIyZm6cZFi2uXYQrSfabKTino6MZNijRPzlkE2S6vUGegcaYNxPd6Wdj/L30E2r0Wx626j9cVV3d4VldXU3ifjjWPiPUavVImSZ/eFvN6sqO+o+gt2ftqTmzX2CZLOZAzD/phJtJ2KEKta5AodWVq9XTVitLP7pUEbwLipzfglI0wlJluWykc4E8K0+Zed67qR/KyFwg/ERbTqqIPtA3F8pjmblAchLxQ3nc8T4/lCac6mOMUl4o4YfafEE0+qNWcoue39t8eY6ZY3bOKv0bjvr28ztp7ZZE9pWHUfNDWE8foGzS9IS9kDLpK4gO3wsRIbtR/530eQLKCBr7NwhzbQd/dwsEeJpOBnmZnav12+bUGdZ/ajZO8SGcWqVSOVQmUwyVKMqH1pvfFTSa37/ZYH18UVf3vKoF81c+0NAwJVSu1qZg/BegD/JwDhTLtcoKo2nuIWHexu/EKQeBE7QTSkZ9DGXO71CGZ8W4DcBx3QQst21Yt2I+8vmSMiwpzzTgQB/X1c40tc8F4ocSG9uU+IBYcVy/H+y+KE0408e/Labs11v/m1hR3/w51acpWeuJ2dmohTbeax8X1cZGZfaPDEM0x4GPmmXjo9otM+5LyhCdFPEF1X1ixHZIiNhK+YtpAesosyVp3CKIdOmGCI+mmaG+VQOsxh21DZZP9xt1swIVCsVwsUQ6WCJWDtHLl46zaj46btEvbq5prM9tau8omT1vgXjajMdZUqUyAfUvUmkNhQqdtlSpbKo1mRdeKE/86rIk9RDwI/cA1xXje/inUD5mTR9Hlev6OVS4bqU8WcIRpQxN330MT9XGEmWFfUPnAvED5aqiDwhftgLrOeIHMieK0A/EGPbwCRuL+ffNrqejpvm2e1NkT27X1q6vnc+abeOz5sbt6WO0Ztg4tJl9nNbd9D1q8qwAYXsy9jnaJkgO/6yP8Un4nmmBH0Cm9+uQ4fY3SHSfty/c00T3VrXmbZzGys3XDbqF4SKJZDiLlzpIwNcPtmo/W25SrnzNaKnOIsxaa0NT/ozZc9lz5j8gwLjPUOuMJeiDErlWVWoyzXpNWv7eyXLccwvx9XGi9oEA8wp/LMb4CMz/oz8AtvNHfSxb6gP3nb/xgX0u2H1A2LZMXrLPhYO2dfpQ3x6ikPKwD9n21rdan6Zo
lOubsJ+anX9LjOhL3lcjlon6ZsUSrTGOY7+4hfdLjD6PYWf+OuhLmKJ2jirhxhI+LmFtEn4s/fxLwNs3Mnxfv5Tptfxshsfjfc+fE05urXlLRH3lDrBWLmUPHTr0Lr3sedcq5a63zYr3NuD6m4X1ZoahsiqrurYhb+qMWeyHHlms0BmrytV6YwXGf7lCaxJVVz9yAPdVv5Tg9Qtx7efZOMJ8zC+8cV9AxcidwB39EQic1zMs3XFbcS7YffAF/RwPyUcs368pX5mwhQljmYU5qSzC7oP+vbTd8ifuxz11v+Wg5cX160vyA9GX8HftOYKwlHNsHF76fjJqbH+Pn2Hxbu/j8TpyalNCP0Fd1/exTIm2mcHv4/E9yBr/LmSMfwPSA1+/kRnw6vHMgJe3ZQcv/zTT96WnMn1fnJbl/Zw+3XtJSrJP931EdwJ5mOjH9EPB/O9ZadgNdaaPTlfrNiyyKHedMSpeeUytqyzUGoy5OqOlQG+uLDZXWQvbJ3cVPfjwIq6xsrIC45/0muBaKnvmSNnvHyMxWIqvjx13gDJ8RZg/xKidEHXlu+4EjvMGyjIWkKPz530+IDyzCpsPyFyo8NnPzAX0Qwn6gazP5J5SAdZKheH7KR83H41wjonlxeztt9ivqGWjZcbsoRrfyjveTZ95ZFjA22/jAW+ieZronBT8CdXZzsklWmeErIa88LchN+pNyI14BXKCVxzNCnr+lZzxz07LDn5SmBv+SGR+zOzAgviewOIsazCfW5ky3Onzf8oHqTauG1Wl37LLoPrqqlm5+aRG8ViTSq/MU2rNeUqNNU9jtOSaqzt4tU2d5q5ps6Y+uGjJkurqBY8rNdVKld4krzQ//m556r6bJTjXCUO4YAKZAyeAj3tcUfRhkAQdAqE3+gJ9IEbdRWM208+72X3Ac91OfcC9xQfM2sywlfdRtnIB7qOJ5eOenlhOeL9l4npDjDwbQczOlibG6Hx7TG9yyB23M4k/pDkjc/zbkBW86qecsNfO5oW/cDgvaslruWGPVRXE9MYRRgkx2VSne8W1zvezpEmDy1ilwwiflvTx4guE4yVKg/c/056MGuOz91mUa6bqxKuWyYXztTr1gsq61m6rjvficovpwVfr26dvmDJr7jud02at6p45953eBQ+/W1/38Cd69ZO91ZYlbyoF73xXkvj1TTQojCN6YG0z6Wug/chijoMi4hDI/I+AxOMASFz2gGTMjj6z308l84Nyrck9VVwTSI3K3FfdAwVoRQF70AeM5QTvgVys6zLRskIxfwTjehi6ixp5Rig9bDukhW6jOjPsckbrfv7zBhvDvF/vlIA1kIPxnR/5BuSGv7oPY/v5vODljflhz+TmBDzsrmuJ85CYEn2Uzf5DzXXc4XwhfzjqTHjQ1MrKK6gRPi6LzRknVqhj5BrjvXyJ6o+4wKmp5ZEGa82SA/V1Tx2ob1z4Uef0Oa8r8tZcrmub/GzL5Bnbe3rnvNndO/ftrhlz3mmbOuOV+qY5WwzyFafrKl8+V5ayC/Vn+NlZGIMF0biehXxF6xfphJMgizwO6tCjIPc8BnLXb0A+dg9Ix3xJTeS8m/KUyRrBd/mScrVJfUr2auTeBdk3l/h/AQVohFFLGNvZWNtSC95BGdvpwdtstoXuc5JDPrXZBhtL/ZM+BnU6YVDbuNupgQyfNnv8SsiPXvFNSU7veXZZUwu7rDrLw+nYALs26mr23RKZIobD5XnjvmiMRqMZzeXzGR41i+FREyY3ZTNzOM5sDtddIteEaQw1A/+R5o4DtZ3Y0NG9taG1d+vk3hnv1BiWfakRPba3afLkF5q7Z36Fcf/h1NkPfDxl1vyN+L0b26fOXG/Uzn+5PG3H6iJc65hncnANxRzEwbqahXsmLq6/wqhDoIg6DZqwY6D2PwZKN4ZhK3f+CmRj9oDMpj+fcL0p19TG9bb5oJSyvXdCvt8O9MFOyAnYAbnjt+F6hxa0FY15NpSxjTat+/VOsdUh/QzsdykDG2uRm7guns8a/9TK4oyOTLHJcxxXbNzNFRpfEoglRUKJ1MuujUShul8kkyXx+AJXq9XqgjX6aNT4Fh44McKHxq+78IVif4nK4PeP9P690dbdW9PaM/P7linTvtQIn9pmrGx4ora1a31z1/RtaPs7p8/a3zVzzhbMQ8tbpvRU8hM/ep/cSy6YxDwTVY77Qy7uR7m4N+JPOoH6H6AccWXUOdCEfgsa/xOgdvsW5C6HQen8DSic9/XxxKkPXPr1J/critFKvLdCoc8Wanl+jGUHbIaMwE30eVzCV7fXJP3xve42zddQ3jrh0RIOc5b382uygh5tLsszJkib3cdyBKU+LA7Ljycyvcbhm84JJUoJ+iBbKJEPI7pIFerxfJE4ymg2u9bV1bkKxWKSY0aSXIM2ksQ9Mcqp5vLdBCJZmEhW5fKHde9h2p1WW2cPQG3TJ0+fvRpzzp6G9u736tu6XkKb0TZlhnnyjNkTMe5dWrqn0esq9/60tiR6L+Z8XDMx5itw787BfaZdf/6EI7SO1EScBW3Iacpy13qeBKXLccryVY49AIox+0GM+cg+Bxz1L/HaRt9TIHzzAt9PIQct128j5gysTQJu5bs7GtnnEM3tuhPueZbPq5DmvQywBpRneiwdRK5fIBLFol7jCf+dzWX7ofZPVfDMwBPpejH+y0iP8O6pvQOkSnW+UCT1be/o9DYajeO4PP4ozDOj7Dx025o7Gr/uzuHxPARSXcq/Gvtk/BFWvOPguL3vVYx5uAzrzQrcz3OSvgNewhkQTDpJOeqiqCMgwNpHEXYCdKFnQBd4EnTeJylLW0NY0s6H0Af7+5jyjvqXeTDa2/nqOVT/DZSzzTDmP6Ks9dSAD6kRzYn1x/tqW54hDGbCZn36Zprnwhpy3Qnjme2PRKoYzBVIk4gPSA/qCo6axeaboYJnPCpRqiTog1KVxlAkFEsnVlutvp2dnT5SmdwZv3cM6j+a6m/zAeGzY83jzROJgoWyysB/R/9/ZfBiPqfHUr/P9pF6vALrTaI9YdhzY/GcfE4Q6x7yOX11+Bn6nIQu4BQYfE6Bxu1UH89ainlIgj74jf42tjvlm/t8hvG/gervqP0tnHs7893PIeZ9X6PM+3SvpyDZ84FNjtfP4vDokSvQB6EPEjg8bhCbJ45kC4xPsPimszyRYZHOVGVVaU385tbWuBkzeidh7vdAjTHHcJ3RxqLuY1gsDvGFM+Z8L75Q6CuSadL+09o7jlKfD/lkDpD7M2yMe17sSRBMOAbCSOaZCDnWPcbwC6ANOgf6wNNUf/IsBeGp2+cAYar/Pf3zbfrn+XyC+ttjf+1vdLfHfKYDcz7d+0XKXce4h3ivbpoTYv372eFlLKkTl2+9iy/RZ3IEoigyBzh8USSHL0tn8SQp5tqWyc889+Inbe0duV3dPRm4D83GuscN54sLWWdJvifac3gCN7FcPl4oVsYIpWZ36l/Bf573nD3+Cacyj3UjSnw/3Ufu43NijlLjovbckEMgDjsCusgfwBB2HoyoP4l/o1+//tqxzDrA6L8PBLg36M/
Icon7 = "<base64-encoded embedded icon resource data omitted>"
C6aZzYa9qM5k2TEPZaPvLWN6suz9jXCz7rc+DamIO8Y/1U7118/DKEvJAL7xFBsI0OUWzF82ObIZgn0rmJcc/fK39eZd7vL6s1vkZ35wEMiTqh85fZ7zoy8Rf1pyqc7yvnYqFtAfKtzwqOeQ8VjmXoLnLkrOARyYcVY/aJ+BaXl59Wnpz5YsPHpp7FCNFhg2OZh9yo2IeYh3a4wX8DmoK/Qf+Ig/Jei5Fmm4NI2f8+nSLgzPRF4Ls58H0hHeGvFgreS4brtTQ412Qi4JsiBG8shvFykq6P8UIijDdTYF0smOfjNPhuzEP49grE7a1D4oEGpBzugcyjzcg42oTcExej4vwI1Lddgm5bRisXqvOxJPg9nY6w1wvgtyQbtpWiV1anInpPrXLC0u9jbMf5VDKcDyXqGhg1/m2GpXPl/xuyt3haAlJCZ6+tsn6MGmMDOF+CfhR50hsD/i57fjHybU8p7yJxIrE6+4jJjVoma1JhX4nuAZvl+krk+CEag9ZjfNpxDI46joFRh1TfDE34UTlG6/zXgL3/5NesJhaiHXB9LL7AW8ptGWeMQ6BV/FK3FY5s8WPjxAam+sJWIfezIhC/sRbVZ8coJvcRv9R2TwzClxQjY1tPJHzTFcFflSDtYE/Uib9Mjn5ysleKtSwRhFog0swRS53R1ge58pi3o1l5yl0PCCb9a4L60caVsq/vim2zvZsMr4+SEP59pdoW2pPKA0MVryqfT4nfedn7vyv2J/bE194tYLzjjtDvEzMvVb47cj/V+Xyp8i8x3kCp8Q4qHSuUk4pcnNTXhfZnZB0e1rUgD2uBsUCfK3G+gCKHyctabluBnv47taeceoZr2Tdqp+ihzTrvp8j+gvK40vdi3jdH9H+mcbvY4JsRI3DC4emGo28Q3HMTEHB/OvyfSIfvgnSU7xwk0rhVMQ/nflhvjVJcGvR6DoI/zEfUpkp0w0TR97eKv3QXBv8yW/T4cCS/X4nweakIGhsHvz4R8KuS3ysVLPlgknJfGreITG8Q+Y8IoV753JgffSNtj7E2HTF7u6C+daLaj24/jTP1T48A1gZM+a/2u9hr+4CQe+2XRhywzQ+Hz1XiX3s/iEqryN6+FX1Df9RcIOVOjo4Gv03KJ8FYTmPARtVD9FvJMV9oXyB793XF8Jm225VLvkDWosKxRPnpLw7fo3F/2luuaa17pcY7lWfY/qhya5mx58vU/yIPEh9dHhFwFfkhZXM90vb0QOiyIkStKkPjCcrhJsV/1T+NUE7x6NUVqDo6XGdtpD1XhbT5xUgZVYCoihT4xvjD5e8vuDYOQZZyhFrqEWKpRoSlSa5mBIj/5PyzyJ/+8mzzsnUPOGVrCdpnW5AAxycZCNpSjBzRW0MFS42SdSd3uZU+b5zrhGHx/Ld9Xvnnae3qO9X31sRDuubXRcH7Oj+El/VBueV91T3dnT8I/j+FfuGH0Cdkr3L2D4g6LDr8JEYmn8TguMMa26fu4Kx48lWRn5U5l8agdSg2XtK4QpXIumfgZn2e8dCm0C2K+csd7+ie5/4n5qTsGfehzCOMngg0chFgZMNl52wCDzgy/eB6NgXOjzIR9GUx4rZ1Qfbu3khfXo+sJ2sRfnUqwicnIra3nIHQBLgtsXJlIEBl3Ygwj0ZEefZFgo38MpPUxnONk4xL5W+OQsSyYuV8tk0Vv2B6OPwXiLxfFNv+lwT4vJqGoC+KEL6tAkkHGnXuyETBUXWHxsBvYYb6dNSPRifrYsPb+KvhMGYZdmO4cZGtTvBSNzkfvOrEN+5ty3PfKH7fNyELchAoZ1XjVNPC4X25L5ITZ6Ha41N0MTYpr/LI2HOas51ZbM4PYb6dNUDzBFdenmfmx8YlnNM5QqzrYXyN+SzG+InpmdPi+vQM3qzxIL7WHLZN183E+s8g23Yvsq13y5m5RePOnD2RKvgz1hiOIEP8JSMSdrtT1qAzrE5v2IrcsJX6wprihHewTZ53CE51w2UJh59F1sZShWjLYCR7zUCa97VI8Z6FZOtMpNquVr8ui7VdotvIdZNkTEO0MRAhRiWClxXA9XSycu0zTuFPO7ykCOR2Ic6P29IFQZ8XIuzLMhQc6Ie+rTNxcetl4qf1gI9gAuaGDOYirAYMw6DuglHgC6PaX/PrnIVjTAyH87Y4jQX6v5IF4w5zLoTtavE/ZsQg1/oAaiyfo96yF/28RN4i42cEOy4Q//d58W+fYz5+0FncM/gAZgz4GuPHfIxLR36t68N4NPMrxJCM55OnuSVip+a7aHeHJfwka7Af/SJ3KbcvY6LUQYWuZ1RfpV80H+mWucjoNA+ZHrci3XMukj1nIKbzYIR2qoZfp0TRHaGwW5wiby+9rJ28YLf5we2SPRqQAL/gLAQEFyEgqBiB/hUIdXdDmL0HQryrEeJRjeBO5aJzKhHXaRRSLroK6Z3nIbXzNYj2GgCfdzPgfDkF9gcS4LwzXudBMD7n+2IGgt/NU+5s8qJzbkLgOzlI/7IB5fsGofbISGRs7K4zmmxXiS6qDxBfQGRvkzUIl8cKf9j6BWndg3VEsGJX6+AgeLb4wbPJT2csdG5xw7slFD7F+XCV5SKwS3dkNl+HIVOewuR7H8JVTzyNyS/8Db3emIrSZX2Q9UkD0j9uRMbCoWga8yqmZkJ1En1bxtiYCxiZclz5NMmlOa6dV5jzF1j7Rt5N5h8vDpJzkrAUkX+rg/99qfAbnoOAGpFdXilCkrsiPEb0UEIp3HmpcNXGwtUkMhkQCffkaLivjIPvHJH5HUkIekD0w2NyiQxCxDaHLcxEyJPyfn+Oha/YU59rI+GSM24fFQyjORD2smC40mIRGCnrEdYDgUFlsC5KQtzfu6Dm4AiEv1ukeR274Bv7HbEw5H2M+wVr3hurPI2OhxM1vhq5qAQF21pQsnsA4tfUIIB7+s9xsM4Ig/egQHj19te8jPXyMFnTOAQ8nI6oF4uQtLgGqR90RebKBuSs6oG81U0o/qovyvYIJj7YH8U/9kTB+Sq574emHZei75brMHTZX9H//pfQ87o3MWDARgwu3oOh4ccxJrJNub/oXxFXEl+St3w0Oa/Tz+icC/KX0iebkt+m3M6sSyTPKX+2hTx6ReJPzLoJ8X/th7AreyB8QAsC+1bBOS1JcEkcAgWHh27MQvB60S/rsxC9owRR35Yg7KsChPw9D5FbShD5Nf+fj8hthYjfW46E/RWI2JGP6N2FiNiZJz5AKWJ3lCH+2wokfF2J2DXyHktyEcFZGncnik7pjy6t41CydyD8XslUvkiV/c3R5IyEcW2UWa9za4zG8oiXfGWNI94TzPt32a+bBfd+XovQpYUIWpyHqI/KkPpVA4q+74+6o+NEX11+ISbVD7M0vsFYavWJkSg9MhTZ+5qQvF8+1658xG+oR+mfb8MVzYdwV0/gGpHX1BhgmE8rhvudx+hI2cui90ennNI4G2VPfmHKlryTY+Q5Ps84G/mkOWuB/jA5eclFzXVgvShnEwwW29HTR2yyZR2qPJejyms5Ki5ahILOTyHb/mekxVyPhC4TENtvDKJ7DUNk94GIa
ByEkMZGBHUVnF/TgMDyWoRk90RoaW9EVoxEbM0URPUch6iR4xA79k+InTgZkbOGIfTWngh+WH7nFdH3S4sRul70+eZ8BInfHHWl+MyXx4M1U5q/IgadF20+TglXfjP6HZzDyjyL66+J6rOFvCny+qQaWSL/8v1D0HRiisbAic14MS5ed2Y88sVmxHxZg6A1hXB/ZPrs6ls8H4fOD4rNuT4RIYMHI6XiblT5ih0VpDQlTuSe1YYR8eK7xp/DqKTTWldFHmbmGSl3crZzP5PHelo7zzXlSj5vnXURd6A9P3ZKOR+ZHyMXJh8ZgyPnK3P0rI3gPAPaZc6BU9/CJj6e54PIttyHfMujyLH8BZmW2+T+EXnuHrEXc5BluUPuxaZabpbXbkWB5XHBcO+h0rIUjZ03Y7Bb/D/3UfSybkWZ5W3kejyEXMfDyA7inJVbkJI/FwnFl8I7xcH8CwzWSJE39MZo85oUbs59lecUC8m+d4mvxlk5xEjkKSvfPQh9z83AaNyoMabupyab8t5UDd/l2fB+MR6ef46E160R8LpNHu8WvMlY6hvitz8VD+e8IGTPnIEell/QbGlDo+ch9HadxZAwkVE2dEbupXlmrdvlpab8dHZMrsk73SFP1uiSS5czJ6a0c4qTr4z5Rubrp+Sd1frQy9trSTmPhOeGNoH2oH/0XvXLGBviGrBevcPPZl0EfYoc4z6tU2GtFh+LxX8oERzL14hrso175bXXFHsxz0COLH4Onk2ucbHxMoptgnu95Hc7L0R5J/EjOz0NY2yYOV92drvsuecHhsAmNpPnwC2+FvMNgS9mqe4PkMeoD0XHbKxH/u6Lkbu7BXFf1SJgdb7GpzyfiIHXPZGwXhdh1r3NkLPz13j4i20il6K/+Ir2F5MQtbUKrvEBCO/ahLrOWzX2WW/sRQ/jBPr4tOF6kdf9A834Jjl+GefnPp7Zvg6UH3UKL+qUGaUmn33HbBeuD88BOZ1Zc05bwFgoHzvOAOcDUG/RXlNevQQbMS5kyngh8u2PyRr8TR4f1Vko9NsYt6Ccyx1v6zwMxkEo+2xZH+ZwiG3pE1a6FmvujXMYNA/htwaVziXmPBV5L+aF6kOWmbJnTVrPQNgaAmDtGaD1Iz73J2neIPL9EtU1rG3wfSkDfuTLXpwL92vpMJ5KgNeTIu+nY+H9TBxsz4q/tiAJxiMJ5tnpJphogeir97OR+FVX1BwbifJdg5Cwohox22sR9noa/AOLUOb1rvi934r894j8j6OPE7ha5PTKZJNnkrFM5hs7ZmnrnJ28X+fMjMsQ3SSynlZgvsa9zvoTXpcWtck+P6Uchnxtcp6przrm1FCXcY/SbgyQc0A/meegznet5uW5v/OMh9pjTW+q7Ln3Nf4kr1POfI4y5XP0w3nlGY+j0HhBZ71xFixr3onROKOl3LFYsMIBjM/bAZvoeetV4fCaKvK6LhIBT2ciZlm51h1Rv4csLoD75XS4nkvW+jbFQqx7+VOY5jYNznR5NB7GohS41+Yg5huR7WeVsM+S9013wfG84LSvi5F79OILudKirX1hrJPnHy5Egn0q8r2fQKV9pfKo93KcRB9XKwYFtuG+PuZcF3IVs96c+TDG9i9rn11A2REDUfbU51N13hPt8wk9L6xLZy0u9zvzxDw/rKvumIvEc8TfMW14m+In5uxZU0SZMV7aMSOP+55ng1etz8r2uUCv6PPUN7yqXEvaZwYtknVZoj52vu1JXSfOYONZ5Jkos7+P4Ul7cUWXnYhcWoL4tTU6B4z5ftY3ZG/thYiPSuH7diYczyWZ8r0nxszndwkw62gp+1mR5uO08DPG7TE/2Belnvf5KAsRWyrVd3NOkTXoHoCY+wtRuKwZxSv6oHr7MAQvyYL9ylQkeV2KbK+79IyXGK+hi0POpGMnOF+z2fc47uwFvDDRnCdOzl1yjjPn1TE/iXv/kpxWlSHxJnn4yafNXIGp59t03zMnw7rnjlp05e1v11cdM6s6ZjCxboL5BeKnlojvVNacT8wYB/ld6/3Wq09NrnLabMYzOJ+ItY2sB2NsqovvKq0Ho09Ie8LcJmN8zDdz/xfbX0VD0CoMTvtUZ3ZynivjSXW/CNb6tBLudzJhPJMIG/G/4E6Nd86JNmuKhoSY85smhH1vDA8daFwdFW3cFKW1tNZXEkOst0fdKna3NfSbMhQdHYDIZwp0hrl3jfhbsTbYevvCMS8BWY47UeGxVM7o83rGyf1dbLyOCmMZqm1fYFD4Lxq/J6cxuUDJ9Uo7wHwLMT3lNqkd89DH4hwvcvhThpx9xDroSwuJ9U9ibn0bHh5uzoTgOWBvBteBuJTXzHZ7wfegPR6edFRtM/1mngGtFRWMxBoK1hb1jfpeZ0N0Fx3O+BPj3fwd/jxjTPw59tdwlgbXkrqrwvXehXlKpQ6z5qXQ+bRg86s1T1l8ZCCCVxeqDufce8X8l0eaPsADcabMc92HROcvc7yawr290stiCTcslijDclG64bIPsiW4brEPD11rvJTU5r0kGXE/1CF9fy8ELsuD84UU+L+cicTNZUjpNh21lo2ocTBe9ppiDMZ+Od+B/P91jq0Ym3RWuYPJwUy+3VVzzTXoyDXqbJXsNsU5Q9XGitxEfzBXw/pE7nXOIhka/7POHn56HDmczfVjDw1fp+z5yNwZ73mmuAb0I4hz+d6MadT7rdNcDeVHPcN4B2N6nGXOOY7MIfSV/U/sy5rriyN3aI31MPHzBsce0rnode4Nyh0/POmg6jDqpyq/N5C1rxnRm6vhXJIO68NiQ68RmU8OJx49ZUwM+0b2/znmdxyL02AfHvazxWIZKnZ4iLNfWJvhtms9HWMN3OP2a6LA+FrynkYYq9Jg+zAFxrspsL9s2g7rS5EIvKSqrcjyPCqNj5V/t9j2pvKOE58Vib0iF3pPv10622lkoshWdMmMkpN4bGSb1qNwrm/HnDdTRqcUu9C+Ue7ck8R7QxMOqI2jvr04Yo/87BldD/pjHTliriNtgdmrZ+o1nqdhiUfVh6M8NVfsuwG9Q7fr32Dussa9QmN+9C9oJzgrijXZY9NOaE0GbQuxwLD4Y+gdvBPdfDaiu98O3N3HrJdh3wf1VLFrAZwfCo55PVlrmLVmbmTofqN/8PVGmX8s65XtjybEe7+RuCFwWzHqD44XXykeHpaLtlp9bGe1tplrJThVZ2c9nwY/2eMBr2absQvOXJwRccSYG/W29fbQ+a7mxKczvOa3VXgvM2eXGCIrX9lHQbvQIL5XtWO1nIm1uDhsv8i/VXyv0+gfyXry7YpP7utnzo8griEOpf7hHuW+Z2yHtYjU2Tz7vUX/Mt7JvPvguP3685ddmDvYdmFuE/c8bS/9OK4p8Sj9AcbqqIu4xzmPxdzfregXvQvV7qVaQ8Tadv6tppCteib6iV7iPJ8xqceV635g1BH9XlXGWoxIOIbXppv1NAOi96DE9jby7A+pH6T6fUJYq9ESfLMR7XT9Q8ze2nlu2JqE77tpzQ99rJgXiuF3RQK8q3xhLXDDXuIP+0yd+WDWGgk+MkaFHjKq/aca
Fg/lBi6zjPfMsNz0Zan1XZ3t0uDagQGhpzA5VfZckuyVGPFHI48odvmTyHVMsvgANWZf0fQizrdpxaPDTRvAfiTu2WntNnik/A7nfVF+jIdypk+HXqJcuonNfHzkecWx7FtlnQrnfnXgVtperiX9N8aOWJ/C3+utMesv1c4Sv3OWHuckUebU8eb87JUa8za5xpnjPKXzmDg7jtzTNU6x2+6/Y1L2Wbw9y8TRXFvaumz7PToPVPb8N0aWb+pvZJ5wkZ/nGM869/3Oy6P3Z33WQ21E1qE+CNlSqnnl2K9rEXh/OsKeyIU1323W/tJ/nqT+21NGuDP0t+uYZEz7sNB4TmS/ET3d+9BfZH+pfN/xiUCL/ymMipf9STwoWHBwzFH0Cz+CyTlnVN/rbJVyMwdA3n3OU6AOIsaZVmD6mFwH2t5RKT+r7aV/wD3IHse4zrJvohZgXrczmkugf0B7Oz7znM7p49w50w84L+sh5y7pmMq+o9egVvwB6hvOHWTMgn0z3P/0mamPyh2L9DnONx4c8xOGx3P+7wF09fkMJdb3MTT2R+2hZf8Uvw/tN/Vtltg8oyVora3Ad753mvOGzoWOBV69/Tf5XhHXGvFoHlJX1yFvTwsyv+uF4JUFsL2WBOvCBFhfSNBZ4MFritSv1R6jS0V3dQ/cYM/0vcCf7WexdO5ksaRGOHotoD9fbaxBo88uNPsdwpCoXzCjoBUtQQfRzbEDfUOPYIz4iqNE5w+L+0XrfsaJLuC+pJ6gfaTM6YNR/tp/3e4Pd6wB663GiA6mLu+Yd894QJHjWZHj38U+/HRhNix95sntmJN2kjFR2pERin1+UfzJHIKJf1bL/Vb9W+MzT2kOgb0faosDv0JXeZ02tUL8W87IGhXfqrPhuji/QLltlWDdkzqXgnMMOKPvkuwTystNf9mYHPaLfXYU/O5NQezrZcj+qheKDg9AwuauCFyeC/srifB8KBLed0a02m6L/N52W9R645bodwX3v+41O+xu7+mhD4u/u92YHbXbbrJPevnNT5obcXP2JutA3x2OzNBz6V436pyuRp+derHGfHDUCTQH7kG5Vfwu5xb0CfpB9P4P6B9xCJdkCk6sbp+n0sWUF+VPvc9+pDvbL/pWxKKUC/2wsTpb+aw5gzTHnCHJfgDy8HP2Jn+mI25EH5jrxhwB7a0ZB9rTjhsPav6Ac+2p21nDyHjBZSVtque4z5mHZt8250BSz9W6VyHX9ojG7/pF7NO5jOR/7x6wVfTnSZ0fwT5Z6sCxKcdRZlumdQKMZQa+kaPzWZO/6YaQNTnwXBCITjf5wGuS/1nbiNC1jl6RT7orUwYH+BcnOi2+of+Y1/W5Jtlt9At+1RgfttTrrsgf03f2RLdT42FfGIWYnNEov2gxauxrdZZbpfEJ+oYdwNjkc7I/1un/ewf+gGGxZ8TenhK887POuuL+Hasz3Y5qDHNawVnFLsSPt/Qy+3t5BojnGX/riCdwf09r92upW5h7ZPyF+Jyzby/s/Vzzd3R+Wl6brhvnX9He8neIdVgfqn1LsgbU2fQrWGM6gnN4grfpHu4V8rV83nO6xpwNRl+L/i9nxdJ3ox6bWXpOa7c5I3CO7Kturs0osy5FqfVt5WJwLIhH50f80GmeG87h+Ugqm4rCmHuRY70LRR5PnSzxeG5rjfH+hgbfVcsqvN58p97vhdUXJz75RXPY+2+nuUbf7V1g/9AYGwqvGyO075e1qCmH6+B/WwnyOj2q88pq7J+i1HhP53kNiz0u+2Kj/P339bNwpiDjzCMTRQbx3Hein+KO6iwx1m9yv7GfcZr4U/Sd2MtCX4ry57lQvFdoxhE6YqI6VzGnTXUJZ3BzLWibiXtoS6ivKM8r2+Oi/L+Zq/xB9T/lzZkAfQRjEtfSn6VfzZltl2Sd1/+zhpQ+7cDoA7IeW9UOVIq+4zxa+mXkE+kRuAXTOOc0W/aDYIx653co8/pIMMEmlIy9Ex5/c8E2NQpRFcNQEfUCBobsx5Xys3c3cJbpx+savX5Uu/yoBZ5hlpJMR0b0HFe3xB/901MR7lsG37hoWHv7a/yUNe+JK2u0D8y5JB5xaRPb9/5nOsOvSnQj52rVOteK7JfI858L1v8efYL3Cgbdq/NpGOtnfxbnEeoMX7EBxO60fcwtUt4606/exNJzG36N7fPivuZadMSYua+pY2hXGeecnGueI8YmiIGmF7epH0C9Qtlz39OO9A79VmvWmU9WPRRs9q6O0vzmCcX9nAPJPpuLQwUv+W1HnetLNLi3oIf/dvmuH2lMq9axQWS+A3X271Ft3Sy+/Zdo9jiC5pZPEbMuDUXJj6CX6La7Zb/cmCV6M1owXazYwoR1CLe0/IXUwVZX51usyfZltv4BJ9x3JCD0zTz43BMH74m+sF0RBvf9yQh4KQsRHxQj4PVseL8UjoCBFSiwPCn6ZaX4VItQYryp+qfasVLOwSKd/9EcsFf2/QkMEFvVP4L2lziE2KXNxOvtcU6dKya6Y1jCUV2XjrlqHXPniIO43/nIi/ubZ4QxHuIizYnJGaAOoq7nrFL6YIxPj5e17fCZhsu+p82lz8b4J30H1q/0j/pBseX4zNN6LvpG7EWfkF1an1Ql36nG8ZnOSWXfGmeE8rxzdiZnFFYbn6GLwdla3wjO+A6D/M5hSqzs7SfnobD/zSiLuxsZOVORUz0GiXIGwsq7IKibyLc5Eu5hYfC9NBbOq2LgvD0efs9nIGdbM6qODUf40mIYD8Zpr6nfaxkIfC8X/m9kw/5SPBw3J2o8ocz7Pf0MrGFj7y6xgfmZV+p8w9GJZ/RcDo8RDJ4F5Sahb8S9SZlQfsSE1Bn0j8amn9W5LtPa+15Y939vOxalDmEPkuqnOtM+sza9Y8at+mkiX9as0F8weWvadN7z2IxTuubUQcRBrJerao/1sIaIupCfi2tLW1DjXC17faN+tyLBkfnGE2adnmAM1nFXG5/qviPe62LfhHq74A7nfvTyFt1qbUXNy7cg66YrMNByHp63+8D9cjRiNhQg43vRHTu6IHVnV6TvakTClq4I+aQIPm+mw+e1NPgvzkHIyiIEry7SugnH00l6+b+RhZDF+fB5KQ22JyOQkDIV5Z2WaO0aP1OVfbXonM91/9c4BYP6bsWQ6GM6a4ozRWeJLDhzilwx/I7ENB24nzFNxpQ7ciY8+9QbnKX+F85lGmDiIupxngvqJ+om1j7/uX1mE9fGjFm3aZyU54FYaGo7/mEfJJ+jruMasaZI+wN8VguG2ah2YUqe6C9Zqwbfr1X+Zfb3NO/CnkniyCrjYzTY94msv1e5VxmrdWY2fU321dZ775T1/BLpr06Be08w0nPuQYNlt8rPtTAVgSvzEfKZ+LUrcuBemgXXova4xMJEGHLZ30iGz6IM+C/J0bmlsRuqEfBmNtijFPJevvb/Wl+MQPDIepRYzDhmldrd9ahzfo1G9zfy2TfLfvoWvQJ3CgaTvS+6epLIX/3c/mZsh/uZfRf00znHjBiUOp3y43mgTzA0/qickVbVP5ztxv6LjhgaZU3dRJxEzhr6Ch0
xH43bFf3q604rNO9pI0xbc1axUoPfF1rH29X9mc5kIl8H55w3+m2Wa4ue40zjTsQYQ7Vng74Na1UZUyk13lYemwpjuWI76p9a20bUdFqH6JvGwnkkFt4PhCC8ugV5QY+Zcc07Y2B9Ol7ztdYXE2AT/4pyt72SBEPk7l6eheivq1FyZCAKd7Futw+ytvVCwHs58H09E27RP/bH5XfvDkda+FxUdF6qcTTKv87xDZr9D4g/clwxziDZ9/QRyZ9xXY05A455Rs6v5lxR+onT8ts01rlqHvDoSFOmzC9yfzPGQ118SbZpR4lDiUcZk+B5oU8wv/2Ra0kdxbW4sT32Nrs9dj01/1fMxJgnzwPPF/PF1c5VWnPNxylid6gX2Y9Q7/d3wTQf6cwy1q9R/qxnk8fP5HFcpnHH0Dzj4aFFxovXyhr8rdh49ZVi4/WlRV4vrEj3ufk1n+VZZ3035MA2RzDL1AB4c04r627vjYX9ZdEli9LgfD9dL5/lmfD9JAdB64sQ822t1lQXHh+AzN1NiFhbpjMQ7S8mw/5CkuYhvW8Lhn+vEpR1ekd1IPd9tf0zPX89/Xajf/iPGk+bmNGms936Rx4RfX0eS2abM2xXzAE+vhk6n/OK8jad28ecC2cUUr9QbowrEKfQV6I9pa7ia5Q7uZaosyhz6n5yZd3XfqY6OGyUO6jBfC/mwbiulDn1+wSxrex54pzMCvsyzRGWiE4fkbQfV8u6T8xolXP7ndbBxxvjEW40ytWAMKPutX+3/tV3bc6msI1lglkiYJ8WAZ87E3SepH1hEvw+zkXI30sQuL4QvqtFB4nsmctizYhrWabGSe2LU2F/OwWsy7X9LRbG7bJ2cnaMW6JOeY22vZvqvvZQpbcZ1+xq3yKY6xv08t+tvu6gyJ8FZ+4TjH8c49PaNDcxKO47wfCnL/R2vXOFyfvDvCH9dPqMjNlTlh1cTNMKW3XuOPH8n0QnU6fQJ6ZsyfnD9eI6MG7HteTcWT7H/AvfU+cydzPttzmT+5z4F/sVB43POK0xs0rHUuWRKLcvUh9gmpyV4XEnFDek2q5HpNGCQFvBOau3fTblavcx6j28LeX/Su6BvlHKYRP4Yd6Kgr39NI9onxwB/ztTdO8azyXC8UqKrIM539P4SxyMe2JhuyPG7G+8Ra55UewP+Nl2Y9Qe47qor+U9lhqXhN9pTArv12mOJSDZcllFgfezp2n32T9B29PDdRAjBWtpPCScs4x/QkvYfsH4xwR7nJXv9rXYhG0ix9Oqx9nbS53N2ZwLJ5l9Rey5vrXJ1DGM/VCPMwZJ+TNeQ14I7mniHuod7nXqHMr+lUvNvDHXkf4n35c9Y+QVMmcfQzEt60XpW9CnqnB8oPkqYvsq51LRNyvRJ+ggujg2Isd2P6JtAxDfeQSKHH853+j/ycq4wEGLPMM8EegOPzgocc1zPYO//LrOd/UWeY/Npc6XPmuO+HhJhePl52Mts0N8FiR9Ubx3ABzXm7F+17z2PUwdNDfajIXOijhmzIxYL68/aUwLn21MCR9sjAttMIYEZxg1AYFGJ6vtn9bXkh6VYly1h30lJWJ/KowPNc7Z3Uf2fuRZDIk6g/5hx2QPndEZi4Nj2U93DhOyGFcw+xUZg7yy4rzYzvPKcUV7zFgn4/20p3Pa9yz1BnE7fST2A4zLOKF2mbE0rf/RGrez+vPkalrYrsN4z7msjEGSu49nhhiXe17PosaMN2l+kHn1Usdb2pPEOaGVto9RYnsD8baxiDCaWgOzS1uD4ouQ7BoJX1ssvGydEGBLQzdjzcHu9n1Pd7F/Pb9rp6+mNFh21uZaHsryDU4p7FzieCR0YR6K9w+A7eYoM2/OfMtN0R+J7/qyMSNiiuzlcmNIaOB/0kthN1w5ogv3sX473ZgrWOAx1Z/0+xp8tqF34H70Djig85xHJ53BDV1MHEN/h3Eb4pZpBeeV46dG9lyNzyrlq6LNo24nxxX1Ee0s7e9l7XkS1hL0Cv5OzsN53e9cG+pz4pnBcT/qxTg/bQH3PvUa9z5l/9bl5joQN9EW0AYMjf9JdU8Xn081jlDMHhnZT8xNE9+zxiqiU48tzqGJl7teSTgX/EQKvMbaYRsaAPuEcBhjQuBZ5LndYrWMF5/Vz1Jq6dV5oNfztulBO4zhwfCcFITivf2RsqZOaw2VA6wp6Njv6R8S29Mz1hh2knXzacYc+ax/U14F4i/OF+WczVrHOlmHb9ESchgDo3+SfX9COfLGC+6mDBmjbwndLZh6reyx95BvfV5jxozBjE49LnI9r/KjDiGm4ZrxPAxPPKKzayfnHVO58qwwHsQ9zRh0/+h9giUPasyBMVTFqn3NGDBjwa9fZs5u5Yxn9hhMEPzZ6LdJ53RS7xcaz2o+mjNDyQsUZ4zZ429JSbXstsSGf1mOimPD4P9utmJ3nxdSNb9tZ119A+vAHW22YSEIfasAwYLNPa4JQcpn3VBxeCi8b5Q93xxEnpSbjEjnP8Uy/90r2hhUnGLMOp9vPG76tvSxFPOsVp+LcTadaxu8ByPiTmFmIf3bsyK3n+U6qf4OcTd7E1tCDmieekj8Aa2TYRyddRpNodvQT3x/2lbaAsqPXJLUHbOrWtVuNgRsEhtwVG0vX9fcWAV95ZNmTbT4TsSYlDHXh37Z07JeCy+B5mOZ4ye+4ezdrs6NojtXgHmiLOMuzY8kybmOMvptDTIKo/i9Pa8Nr3evyUH4lnK4F2XC9Vqq1ujYn5XracHsN0VpjQh7KVij6fN8KtzvZaKubQL8n82Ad1ff/UaCT5ffs+95yZ74gj1w3O9V9o8E52/Wi77exSGHtHZ2dNI59a844/qBgeYcVc4IJzee5pyyGHdolf+f1/1JHUEZjU49qrqYuSfWM0wUO0H9c2ezKT9iG50zLnt3WCLj9odVL/FnHhxq4k/qI/rNwxJ+VltDnTe70pzr/cgws6aLtRWcvX5J+nnxETep/8SzS13DGeSZxq3E9l8bhj2843t3vjfizeS9jSg43A9By/PhXCgY5qF42Bck6nq4X0wza3VmCb58PEVxY8qRHojdXAuvCUFnDYdR/HtlH2Z0bcqw3abz4ut8NqDWtVb1PWXfJ0Bsazp0jjnrZzlvmX7uVZWteFCwzfMTzZjAiKSTaocvK2xT/hL6WW+Iv/vCZHMNWM+mPUPk6wncJDbhiOoL6m+dhz3GxKYdPi/xDLFRB+5nnRZnvbNvbEjsMYxLPaufaX6jyR/BtWbMg8/18P0e5balqu/ZK5RqzNZ+oSRj2ka3ER3U8b07DXOPiF1bhWqMQZdzYxDzeSUcj8mevzEK9icTkba3J3J39IbzcXnu1mj4PpuG6lMjUdo6FPZnEmHt6v/y75W9jxGZnGa7cXcX1+eCbU7KPj+j+ZN611Y0+f2AMYltuKLEjCuQZ5Y+Lm3ulFyTC+DSglZczjxS9inlBdB4fIW5Z8lrwhqBBRNMXU+c2RS6VXFgmfGBYJ9T6iswLsFcNm0of4/2gLqHZ4P/J04lD+KYlDMYHn8CvUN2oTloHy6Tv03OUHJb8v0niB/SJ/iA4h
v2UqYYVyoHX4ZxExKNqa/aDO8L/F2W7vYRkS8WofHMJCQf6I6k/Q3I3dtH+xetY0O09iD1UE80YRpKtw6AzyMm/0z8oipEfFxK3o2fjCDH7+Ly9jOS3RnG/B2M7/ULPo5xyZwl3oaLg35Ek/8BDAj7BdNFxlfLGecakO+UZ/wv/UxeYMYnpxe0qd6gPaWciROJZygPYk3GH+hrMb/CujTWIjDXTRzYGPSF2lf6VLQHPC+0vR2xhcdHmmeDen1q3nkMjT2BAZGH0S9c7ErADlxTfVZzgKzp5Z5oDjygtqrIeEl74th7J/r+tGCKq377vT0G+d0Y81YpEtd1AbnAmAM3PknTvpW8jc3w6uMPW6rrLduatBsSt9cfLDkwEM5H5QxMDIV3lRve9X4/2OsD83/v3hed+FiZ8Z7qHeZQmoJ2qG/b5P+D6J3DYsNOYWxyKyZltGF2uSmHO8QePjzElA1nqhNPEqtQ9stvMP0jzeu2mDhxwcR2HMPaBtFNI5KPahyYmKjG9TEGRB1S7Mr1oq6hLWBdxBXkK+5u2pnrq029Mjz+F83rjEyQz5V6Rs8debTI6zRRzlaNbaP6K+x/DDO6MabwYohRkXZhz1ssnayTQ+/L+rwHMrb2gPXaCHKSvGebHFrpsSB6TtiXpcje3qw9bjaL5wj9nXs8wr0Wxm03HoxX7isj3Q3D4nXX75V9rDGijvz4rNdkzSDnmDMOSz3Eud69Aw+iX9hRDIw6prrnSrF3VzBemXYOs0rbFGewlv/ZCWYcgPJmjXKHb8q9T93z2W3Au1eaa3RrT7HT6acF529FtQ95+p9Apc87GJ7wS3s8+Zzgy6Oi106LfuPZknPQVewO8+uUf+xpraug/SHG+VN2m9qjCSnnxFZtFZ3/AYjfYowh8DfS3v/t95V/bvvlkW9mbuyB5C+7wXpFOKy1fvMuvF5r83e8k3Yu92ALAq5PRmeH19CO1wQDzbc/lQTXo8la12942576PbKXz1Ymn/Eo9SJxMbmqm4K3aw6oXjB4T//vFPMwxjkm+QymF5q47irmn9LPae0U9QplTf3MOmZe1EOM1XM9GG97Z5bJsbTiBhOf8NyQ+21myWnkWp9AtPcIBHgXosj3YbHbzDe2oTmUvJObFe8zRzwy6TgukfM3Ma1V7NNpTEg1z+JUsT+s8eof+jPq7NtE569WnUafUfDESS/DEnFBtkEe+faZkZsT1tQg/P1ieI4Pgq3cb/b/Y32ybJ7GG8nf5R/th5iF8jMp9vs7XvO6OeJB1o+Hv1UIa70/DE/ruP9e9uk14UbDCcb8iDcr7ctRZl+MHqHrcAe5rOW09gk+jO6+36IpcBdGJ5/WXMXMIsF8xW2q8xnDZxySsRyz5rhNcXpHPIG4hTqfdeaU/0dzTT1OffWc6KdxqbvEL92onIfZjtvh65WNUenfKZ5kvJ41amat4Tqtdesta8IaRtZw0caSy29w5Cmtfenm2Ka5z3JjidhcM5bpa8Rf0PcXldjH+MyNP5W8sQ5Br+TCa1Bgq+iQIf872dgWJt4fu6UGkStKYWsKOGy1WLQO3HgqYXPZj4MR8WIhvHJd3xgWi+d/I3uH4b44ymg5lWncpjaKNVv0TRj/pl9YHHCH2ICtGBrRJj7UQbV15OIkruwVsE/0TqvGXihbypp6mjJjPeH0IrNeh3k95mpZ70M/i/pn472mHebPPi3r0NvnGO4WTHOz2AXW2wR27oZe0Uswtxsx0kl9jnWy9BeIlSodH2gcoYf/VgyMPIqhYpcYi+pq36xx8XLjQ5SKf55lu7stolPjB5yP4hlh6+3d4r8q6JFM5T4MejWHuv6QEe2q+1fysT4eNypgVR4i1pXBdW0srN7WayzPePsELSs436dtOgLuS4NXnP2v/6ncnUZwidihFfHGON0jrImtda7XehfWDJm9aQ8ixXsespw3ab/I4IhTGt8nv+mwuKMYlXRWscqbM814C/czY8Ad9cus7xkSR9/0vMpeMUy9ic0/vcXEKFwzrRdLaMPlOW24tVH0tt/HiL/oevSP/Ub1F3ur+0bsVm5c6kTWV1SJfuTFmu9LC06gOuBlFFlf11wE84TldrElXs8iyqPfWa88+7O2McGf+t2ahNhlFYhbXwO3+E3egwJ3GrE+6f8nOXk9EFPuXpGNqE2CMRcXw5bms8djlN+18du7aq+t711J8Iq13/qfyD7EqGqONUae556nb8saCuZs+ZmZT6tzbxQbuFhjUrk2WQPbbOUN589099uKRv/NGBh9CBMz2zAo7kfcP7BNOQypV1bdZM5LoK6nb0SOpck559VPYxzmesHxfxZMs3Y+sPomcw2INclBOTCgFVUX7UO591q0hO4yc+3dTH44xu6ag7+TPb9W8UCD7yb5nOvFNm0UH/wIKlyva20a8+Xs/SnyWIgE6yVwXRKL4IWZSPysi+ZTA9/OBTncrX0Dl9r/DV7ti24IDnYty/gl6ttq5O5rgWtgGIyaABScGYgWzID/g7L/UxzP/ruyTzAm5os9OlNkLFR809W1QTEOzypr9QdG7dFcbUvoHp3ZQf78EqfoJPvz8n3Xif7Zj3Hpx0Uu52Wft2ltGPsppmeexLsz2vDWTPF1W1rx/IQ2/LV/m+CYo5iQcVrjEZNzzNkVN9S1iR/WhqXXtOGD2fI7l7XJ2pzHlIxWjI49j8miz//a1/R/72lpE1tzBt0Dtqi8Kx3LFKPW+36JevffxTf/AiXWxSiyca7L05onz/Caj5iQIfC9ORVhH+Url6//8xmM27RaJ4ZsELzyv9X1/+qyv5n8QcC6ApScFH3/WB5sCS407p6AoW3XIvh50WHZrnW2f3POtezpebRN7MFqcH2r8WPGFVjP0tXxFUal7hTMsQvd3d+jwf2N+JS70M39hdZekONxeMIB0ekHMa1oN27qvg9d/Zch1/NpdLeJXUjYjcm5+7SO/traH3BT42HRG7sEz4vNTj0gNnMnBsbs0h7EcRm7ZQ134+bGPaKXyJ8kfyv0a3ltk/yNbbi6eieurtoveJ545wfUuj4V2X+oOUPy/lQ5V+q+oY/CvDgxc4HarOsRdVFfuKbFw7UoAc6HkmCdFwnviSGMma23W7wD/lNdbXs8fr7z7TREr6sUeWfDu9IXaW92RfG3A+D3nKxrnf9Or3/B+/7H9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9X/9+p/wb6cWG/zz/RwdsGreW369b7P8+jOnLcsv3B/1+PV+p/fyCz8/J1rvffRXb9jZ8UanO5l/zBLNH2+/l1fm+PzmPvrC/Q2WX++jLNUX7r0sN1y4t4T85v4qYHnH/XT8+nyn39x7/Hrf2ePXn/fy6PjCct8J5y0X/t1w1PL/y7/pv956/ObjhP967/G4xdpx7/OJ5XTHffVyjwuf+YY53js77s9bzGXhvznV7WttoXgu/KoswIUfsXT6zZ/t9F9L4bfyw9EL38tD7qPbX/JYdvTCDnBE77ywKhHRN3ZsSUtJ519X6CrZWW3t96dlk5y/8Bkv3HvslA3T/rV8l
pfcgPY/FX13xYUNUx396/311XLf/mevRUXHn+10DBEdf6rTRwi5sDG8MeeC1ESUHW+v9/+T//0vFFslQA==\"\r\n \r\n if old == True:\r\n s = Icon7\r\n else:\r\n s = Icon8\r\n\r\n return s.decode('base64').decode('zlib')",
"def load_minimap(self):\n minimap_types = ['cover', 'fog']\n self.game_data[\"minimap\"] = {\"fog\": None, \"cover\": None}\n file_name = self.game_data[\"file_name\"].split(\".json\")[0]\n for minimap_type in minimap_types:\n file_name = f\"{file_name}-{minimap_type}.png\"\n self.game_data[\"minimap\"][minimap_type] = pg.image.load(\n path.join(self.saved_minimap, file_name)).convert_alpha()\n logger.info(\"Load the minimap %s\", file_name)",
"def get_icons(self):\n return self.data[\"icons\"]",
"def load_opacitymaps():\n\treturn load_builtin_data('opacitymaps')",
"def IconBundleFromFile(*args, **kwargs):\n val = _gdi_.new_IconBundleFromFile(*args, **kwargs)\n return val",
"def get_imlist_png(path):\n \n return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.png')]",
"def load_glyphs():\n ComponentConnection.H_CONNECTED = ImageTk.PhotoImage(Image.open(Path('data', 'engineering', 'h-connected.png')))\n ComponentConnection.V_CONNECTED = ImageTk.PhotoImage(Image.open(Path('data', 'engineering', 'v-connected.png')))\n ComponentConnection.H_DISCONNECTED = ImageTk.PhotoImage(\n Image.open(Path('data', 'engineering', 'h-disconnected.png')))\n ComponentConnection.V_DISCONNECTED = ImageTk.PhotoImage(\n Image.open(Path('data', 'engineering', 'v-disconnected.png')))"
] | [
"0.6490824",
"0.64318",
"0.6354683",
"0.62018186",
"0.61104596",
"0.59513116",
"0.58033264",
"0.57248694",
"0.57137376",
"0.56984043",
"0.5696082",
"0.56323403",
"0.56122166",
"0.5600569",
"0.55870014",
"0.54954994",
"0.54757214",
"0.54697824",
"0.5464561",
"0.5461193",
"0.5456313",
"0.5449581",
"0.54463613",
"0.5436708",
"0.5428587",
"0.5418593",
"0.54177624",
"0.54075086",
"0.53889555",
"0.5369275"
] | 0.7397227 | 0 |
Creates a virtual bitmap with round corners. | def GetRoundBitmap(w, h, r):
maskColor = wx.Colour(0, 0, 0)
shownColor = wx.Colour(5, 5, 5)
b = wx.Bitmap(w, h)
dc = wx.MemoryDC(b)
dc.SetBrush(wx.Brush(maskColor))
dc.DrawRectangle(0, 0, w, h)
dc.SetBrush(wx.Brush(shownColor))
dc.SetPen(wx.Pen(shownColor))
dc.DrawRoundedRectangle(0, 0, w, h, r)
dc.SelectObject(wx.NullBitmap)
b.SetMaskColour(maskColor)
return b | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_round_edges_bitmap(width: int, height: int, radius: int):\n mask_color = opts['gui']['attrs']['mask_color']\n background_color = opts['gui']['attrs']['background_color']\n bitmap = wx.Bitmap(width, height)\n dc = wx.MemoryDC(bitmap)\n dc.SetBrush(wx.Brush(mask_color))\n dc.DrawRectangle(0, 0, width, height)\n dc.SetBrush(wx.Brush(background_color))\n dc.SetPen(wx.Pen(background_color))\n dc.DrawRoundedRectangle(0, 0, width, height, radius)\n bitmap.SetMaskColour(mask_color)\n return bitmap",
"def round_corner(self,radius, fill):\r\n corner = Image.new('RGBA', (radius, radius), (0, 0, 0, 0))\r\n draw = ImageDraw.Draw(corner)\r\n draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)\r\n return corner",
"def imBox(self, width, height):\n img = Image.new(\"1\", (width, height))\n draw = ImageDraw.Draw(img)\n bgColor=255\n draw.rectangle((0,0) + img.size,fill=bgColor)\n return img",
"def pixelcode(self):\n\n maxX, maxY = self.size()\n result = bitmap((2*maxX, 2*maxY))\n for x in range(maxX):\n for y in range(maxY):\n pixel = self.get(x,y)\n result.set(2*x,2*y, pixel)\n result.set(2*x,2*y+1, not pixel)\n result.set(2*x+1,2*y, not pixel)\n result.set(2*x+1,2*y+1, pixel)\n return result",
"def round_corners(self, im, rad):\n circle = Image.new('L', (rad * 2, rad * 2), 0)\n draw = ImageDraw.Draw(circle)\n draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)\n alpha = Image.new('L', im.size, 255)\n w, h = im.size\n alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))\n alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))\n alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))\n alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))\n im.putalpha(alpha)\n return im",
"def get_image(self):\n image = Image.new('1', (8, 16))\n draw = ImageDraw.Draw(image)\n for x in xrange(8):\n for y in xrange(16):\n draw.point((x,y),self.get_pixel(x, y))\n return image",
"def round_rect(self, surface, rect, color, rad=20, border=0, inside=(0,0,0,0)):\n rect = pygame.Rect(rect)\n zeroed_rect = rect.copy()\n zeroed_rect.topleft = 0,0\n image = pygame.Surface(rect.size).convert_alpha()\n image.fill((0,0,0,0))\n self.render_region(image, zeroed_rect, color, rad)\n if border:\n zeroed_rect.inflate_ip(-2*border, -2*border)\n self.render_region(image, zeroed_rect, inside, rad)\n surface.blit(image, rect)",
"def rounded_border_box(self):\n return self.rounded_box(0, 0, 0, 0)",
"def SetRoundShape(self):\n w, h = self.GetSize()\n self.SetShape(GetRoundShape(w, h, 10))",
"def draw_round_rect(self, x, y, w, h, r, color=None, aa=False):\n self._draw_fast_hline(x + r, y, w - 2 * r, color, aa) # Top\n self._draw_fast_hline(x + r, y + h - 1, w - 2 * r, color, aa) # Bottom\n self._draw_fast_vline(x, y + r, h - 2 * r, color, aa) # Left\n self._draw_fast_vline(x + w - 1, y + r, h - 2 * r, color, aa) # Right\n # draw four corners\n self._draw_circle_helper(x + r, y + r, r, 1, color)\n self._draw_circle_helper(x + w - r - 1, y + r, r, 2, color)\n self._draw_circle_helper(x + w - r - 1, y + h - r - 1, r, 4, color)\n self._draw_circle_helper(x + r, y + h - r - 1, r, 8, color)",
"def draw_rounded_rectangle(\n img: PilImage, color: PilColor, arc_size: int = 20\n) -> PilImage:\n x0, y0, x1, y1 = img.getbbox()\n x1 -= 1\n y1 -= 1\n copy = img.copy()\n draw = ImageDraw.Draw(copy)\n arc_size_half = arc_size // 2\n draw.arc((x0, y0, arc_size, arc_size), start=180, end=270, fill=color)\n draw.arc((x1 - arc_size, y0, x1, arc_size), start=270, end=0, fill=color)\n draw.arc((x1 - arc_size, y1 - arc_size, x1, y1), start=0, end=90, fill=color)\n draw.arc((x0, y1 - arc_size, arc_size, y1), start=90, end=180, fill=color)\n draw.line((x0 + arc_size_half, y0, x1 - arc_size_half, y0), fill=color)\n draw.line((x1, arc_size_half, x1, y1 - arc_size_half), fill=color)\n draw.line((arc_size_half, y1, x1 - arc_size_half, y1), fill=color)\n draw.line((x0, arc_size_half, x0, y1 - arc_size_half), fill=color)\n return copy",
"def CreateSubBitmap(*args, **kwargs):\n return _gdi_.GraphicsRenderer_CreateSubBitmap(*args, **kwargs)",
"def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)",
"def DrawRoundedRectangle(*args, **kwargs):\n return _gdi_.PseudoDC_DrawRoundedRectangle(*args, **kwargs)",
"def DrawRoundedRectangle(*args, **kwargs):\n return _gdi_.DC_DrawRoundedRectangle(*args, **kwargs)",
"def draw_borders(img):\n ret = img.copy()\n ret[0, :] = GRAY # top\n ret[-1, :] = GRAY # bottom\n ret[:, 0] = GRAY # left\n ret[:, -1] = GRAY # right\n return ret",
"def CreateBitmap(self):\r\n\r\n memory = wx.MemoryDC()\r\n\r\n bitmap = wx.EmptyBitmap(self._total_w, self._total_h)\r\n memory.SelectObject(bitmap)\r\n\r\n if wx.Platform == '__WXMAC__':\r\n memory.SetBackground(wx.TRANSPARENT_BRUSH)\r\n else:\r\n memory.SetBackground(wx.Brush(self._backgroundColour))\r\n memory.SetBackgroundMode(wx.TRANSPARENT)\r\n memory.SetFont(self._font)\r\n memory.SetTextForeground(self._colour)\r\n memory.Clear()\r\n\r\n if self._itemimage:\r\n memory.DrawBitmap(self._itemimage, self._ximagepos, self._yimagepos, True)\r\n\r\n if self._itemcheck:\r\n memory.DrawBitmap(self._itemcheck, self._xcheckpos, self._ycheckpos, True)\r\n\r\n textrect = wx.Rect(self._xtextpos, self._ytextpos+self._extraH, self._textwidth, self._textheight)\r\n memory.DrawLabel(self._text, textrect)\r\n\r\n memory.SelectObject(wx.NullBitmap)\r\n \r\n # Gtk and Windows unfortunatly don't do so well with transparent\r\n # drawing so this hack corrects the image to have a transparent\r\n # background.\r\n if wx.Platform != '__WXMAC__':\r\n timg = bitmap.ConvertToImage()\r\n if not timg.HasAlpha():\r\n timg.InitAlpha()\r\n for y in xrange(timg.GetHeight()):\r\n for x in xrange(timg.GetWidth()):\r\n pix = wx.Colour(timg.GetRed(x, y),\r\n timg.GetGreen(x, y),\r\n timg.GetBlue(x, y))\r\n if pix == self._backgroundColour:\r\n timg.SetAlpha(x, y, 0)\r\n bitmap = timg.ConvertToBitmap()\r\n return bitmap",
"def createCornerPin():\n i = b.createNode('CornerPinMI')\n i['tile_color'].setValue(int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16))\n if cc:\n i = gU(i)\n return i",
"def DrawRoundedRectangle(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawRoundedRectangle(*args, **kwargs)",
"def RegionFromBitmap(*args, **kwargs):\n val = _gdi_.new_RegionFromBitmap(*args, **kwargs)\n return val",
"def rounded_rectangle(src: np.array, top_left: tuple, bottom_right: tuple, cornerRadius: int = cornerRadius, color: tuple = (255,255,255), thickness: int = 1, lineType: int=cv2.LINE_AA) -> Any:\r\n # corners:\r\n # p1 - p2\r\n # | |\r\n # p4 - p3\r\n\r\n p1 = Point(top_left[0], top_left[1])\r\n p2 = Point(bottom_right[0], top_left[1])\r\n p3 = Point(bottom_right[0], bottom_right[1])\r\n p4 = Point(top_left[0], bottom_right[1])\r\n\r\n # Fill\r\n if thickness < 0:\r\n main_rect = [Point(p1.x + cornerRadius, p1.y), Point(p3.x - cornerRadius, p3.y)]\r\n left_rect = [Point(p1.x + cornerRadius, p1.y + cornerRadius), Point(p4.x, p4.y - cornerRadius)]\r\n right_rect = [Point(p2.x - cornerRadius, p2.y + cornerRadius), Point(p3.x, p3.y - cornerRadius)]\r\n\r\n [cv2.rectangle(src, rect[0].toTuple(), rect[1].toTuple(), color, thickness) for rect in [main_rect, left_rect, right_rect]]\r\n\r\n # Outline\r\n cv2.line(src, (p1.x+cornerRadius,p1.y), (p2.x-cornerRadius,p2.y), color, abs(thickness), lineType);\r\n cv2.line(src, (p2.x,p2.y+cornerRadius), (p3.x,p3.y-cornerRadius), color, abs(thickness), lineType);\r\n cv2.line(src, (p4.x+cornerRadius,p4.y), (p3.x-cornerRadius,p3.y), color, abs(thickness), lineType);\r\n cv2.line(src, (p1.x,p1.y+cornerRadius), (p4.x,p4.y-cornerRadius), color, abs(thickness), lineType);\r\n\r\n # Arc\r\n cv2.ellipse(src, (p1+Point(cornerRadius, cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 180.0, 0, 90, color, thickness, lineType);\r\n cv2.ellipse(src, (p2+Point(-cornerRadius, cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 270.0, 0, 90, color, thickness, lineType);\r\n cv2.ellipse(src, (p3+Point(-cornerRadius, -cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 0.0, 0, 90, color, thickness, lineType);\r\n cv2.ellipse(src, (p4+Point(cornerRadius, -cornerRadius)).toTuple(), (cornerRadius, cornerRadius), 90.0, 0, 90, color, thickness, lineType);",
"def generate_image(self):\n\n if not has_pillow:\n raise RuntimeError(\"requires https://pypi.org/project/pillow/\")\n\n background = self.get_background()\n foreground = self.get_foreground()\n\n matrix = self.generate_matrix()\n\n image = Image.new(\"RGB\", (420, 420), background)\n draw = ImageDraw.Draw(image)\n\n for (i, row) in enumerate(matrix):\n for (j, bit) in enumerate(row):\n x = 35 + j * 70\n y = 35 + i * 70\n\n if bit:\n draw.rectangle((x, y, x + 70, y + 70), foreground)\n\n return image",
"def CreateBitmap(*args, **kwargs):\n return _gdi_.GraphicsRenderer_CreateBitmap(*args, **kwargs)",
"def drawRectangle(img, top_left, bottom_right, color = (0,0,255), thickness = 3):\n\tcv2.rectangle(img, top_left, bottom_right, color, thickness)",
"def create_ring(self):\n\t\tself.north_coords = numpy.add(self.center, self.north)\n\t\tself.northeast_coords = numpy.add(self.center, self.northeast)\n\t\tself.east_coords = numpy.add(self.center, self.east)\n\t\tself.southeast_coords = numpy.add(self.center, self.southeast)\n\t\tself.south_coords = numpy.add(self.center, self.south)\n\t\tself.southwest_coords = numpy.add(self.center, self.southwest)\n\t\tself.west_coords = numpy.add(self.center, self.west)\n\t\tself.northwest_coords = numpy.add(self.center, self.northwest)",
"def create_rect_mask(self):\n if (self.xc + self.roi_size//2 < self.pic_width and \n self.xc - self.roi_size//2 >= 0 and \n self.yc + self.roi_size//2 < self.pic_height and \n self.yc - self.roi_size//2 >= 0):\n self.mask = np.zeros((self.pic_width, self.pic_height))\n self.mask[self.xc - self.roi_size//2 : (\n self.xc + self.roi_size//2 + self.roi_size%2),\n self.yc - self.roi_size//2 : (\n self.yc + self.roi_size//2 + self.roi_size%2)\n ] = np.ones((self.roi_size, self.roi_size))",
"def DrawRoundedRectanglePointSize(*args, **kwargs):\n return _gdi_.PseudoDC_DrawRoundedRectanglePointSize(*args, **kwargs)",
"def alloc2img(self):\n\t\t#NOTE: self.allocation is relative to the window object, so we ignore X and Y\n\t\talloc = self.allocation\n\t\t# This is ripped from rect2img()\n\t\tx,y = self.widget2imgcoords(0, 0)\n\t\t# Doesn't check _w2i_matrix since widget2imgcoords() does that\n\t\tw,h = self._w2i_matrix.transform_distance(alloc.width, alloc.height)\n\t\treturn frect(x,y,w,h)",
"def CreateSubBitmap(*args, **kwargs):\n return _gdi_.GraphicsContext_CreateSubBitmap(*args, **kwargs)",
"def makePolygon(center, sides, radius, background, colorValue, colorsRGB):\n\n\tpoints = polygon(center, sides, radius)\n\tpointsList = [list(a) for a in points]\n\tp1 = np.array(pointsList)\n\timg = np.zeros((256, 256, 3), dtype='int32')\n\tif(background == \"white\"):\n\t\timg.fill(255)\n\telif(background == \"random\"):\n\t\tr = random.randint(200,245)\n\t\tb = random.randint(200,245)\n\t\tg = random.randint(200,245)\n\t\timg = np.full(img.shape, (r,b,g), dtype=np.uint8)\n\tcv2.fillPoly(img, pts =[p1], color = colorsRGB[colorValue])\n\treturn img"
] | [
"0.7063769",
"0.60604304",
"0.5830127",
"0.5667308",
"0.5437381",
"0.54086745",
"0.5393314",
"0.5353336",
"0.5332856",
"0.53235507",
"0.5276968",
"0.52623665",
"0.52610254",
"0.524472",
"0.52440923",
"0.5211178",
"0.51898247",
"0.5180378",
"0.5161632",
"0.5122108",
"0.5118235",
"0.5104066",
"0.50744236",
"0.5073064",
"0.50498945",
"0.50403726",
"0.503099",
"0.50284326",
"0.5016244",
"0.50134254"
] | 0.6869948 | 1 |
builds the two tkinter frames that are used as parents for the tkinter widgets that both control and display the RDM messages. | def build_frames(self):
self.cntrl_frame = tk.PanedWindow(self.root)
self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)
self.info_frame_1 = tk.PanedWindow(self.root)
self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_message_frame( self, parent, default_scroll = True ):\n# color = \"black\" # this may need a bit of rework -- looks like not used\n #iframe = Tk.Frame( parent, width=300, height=800,\n # bg =\"blue\", relief = Tk.RAISED, borderwidth=1, )\n iframe = self\n\n # bframe is for the buttons on the left\n bframe = Tk.Frame( iframe, bg = \"white\", width=30 )\n # width=300, height=800, bg =\"blue\", relief=RAISED, borderwidth=1, )\n bframe.grid( row=0, column=0, sticky = Tk.N + Tk.S )\n\n text0 = Tk.Text( iframe , width=50, height=20 )\n\n s_text0 = Tk.Scrollbar( iframe )\n s_text0.grid( row=0, column=2, sticky = Tk.N + Tk.S )\n\n s_text0.config( command=text0.yview )\n text0.config( yscrollcommand=s_text0.set )\n\n text0.grid( row=0, column=1, sticky = Tk.N + Tk.S + Tk.E + Tk.W )\n\n self.msg_text = text0\n\n iframe.grid_columnconfigure( 1, weight=1 )\n iframe.grid_rowconfigure( 0, weight=1 )\n\n # now into the button frame bframe\n\n # spacer\n s_frame = Tk.Frame( bframe, bg =\"green\", height=20 ) # width=30 )\n s_frame.grid( row=0, column=0 )\n row_ix = 0\n\n # ---- Clear button\n b_clear = Tk.Button( bframe , width=10, height=2, text = \"Clear\" )\n b_clear.bind( \"<Button-1>\", self.do_clear_button )\n if self.gui_style:\n self.gui_style.style_button( b_clear )\n b_clear.grid( row=row_ix, column=0 )\n\n self.button_widgets.append( b_clear )\n row_ix += 1\n\n # ---- Copy selection\n a_widget = Tk.Button( bframe , width=10, height=2, text = \"Cop Selection\",\n command = self.copy_selection)\n # b_temp.bind( \"<Button-1>\", self.doButtonText )\n if self.gui_style:\n self.gui_style.style_button( a_widget )\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n row_ix += 1\n\n #-----\n a_widget = Tk.Button( bframe , width=10, height=2, text = \"Copy All\" )\n a_widget.bind( \"<Button-1>\", self.do_copy_button )\n if self.gui_style:\n self.gui_style.style_button( a_widget )\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n row_ix += 1\n\n # -------------\n self.cb_scroll_var = Tk.IntVar() # for check box in reciev frame\n a_widget = Tk.Checkbutton( bframe,\n width = 7,\n height = 2,\n text = \"A Scroll\",\n variable = self.cb_scroll_var,\n command = self.do_auto_scroll )\n\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n\n row_ix += 1\n self.cb_scroll_var.set( default_scroll ) # was AppGlobal.parameters.default_scroll )\n\n return iframe",
"def build_frames(dialbox):\n #Buttons Frame\n dialbox.button_frame = tk.Frame(dialbox.master_frame)\n dialbox.button_frame.grid(row=3, column=1)\n #Output Frame\n dialbox.output_frame = tk.Frame(dialbox.master_frame)\n dialbox.output_frame.grid(row=4, column=0, columnspan=2)",
"def createWidgets(self):\n self.myFrame1 = Frame(self)\n self.myFrame2 = Frame(self)\n self.myFrame1.place(x = 100, y = 100)\n self.myFrame2.place(x = 200, y = 100)\n \n self.Label1 = Label(self.myFrame1, text = \"Este es el Frame 1\")\n self.Label2 = Label(self.myFrame2, text = \"Este es el Frame 2\")\n self.Label1.place(x = 100, y = 0)\n self.Label2.place(x = 100, y = 0)\n \n self.Button1 = Button(self, text = \"Dest 1\", command = exit)\n self.Button2 = Button(self, text = \"Dest 2\", command = exit)\n self.Button1.place(x = 200, y = 200)\n self.Button2.place(x = 300, y = 200)",
"def create_frame_email(self, frame_parent):\r\n frame = ttk.LabelFrame(frame_parent, text=\"Email Settings\")\r\n # master width control\r\n\r\n label_server_host = ttk.Label(frame, text=\"Relay Server Host:\")\r\n label_server_host.grid(row=0, column=0, sticky=\"W\")\r\n #\r\n self.entry_server_host = ttk.Entry(frame)\r\n self.entry_server_host.grid(row=0, column=1, sticky=\"WE\")\r\n\r\n label_server_port = ttk.Label(frame, text=\"Relay Server Port:\")\r\n label_server_port.grid(row=1, column=0, sticky=\"W\")\r\n #\r\n self.entry_server_port = ttk.Entry(frame)\r\n self.entry_server_port.grid(row=1, column=1, sticky=\"WE\")\r\n\r\n label_from = ttk.Label(frame, text=\"Email From:\")\r\n label_from.grid(row=2, column=0, sticky=\"W\")\r\n #\r\n self.entry_from = ttk.Entry(frame)\r\n self.entry_from.grid(row=2, column=1, sticky=\"WE\")\r\n\r\n label_recipients = ttk.Label(frame, text=\"Email Recipients (newline delimited):\")\r\n label_recipients.grid(row=3, column=0, columnspan=2, sticky=\"NW\")\r\n #\r\n self.text_recipients = ScrolledText.ScrolledText(frame, wrap=tk.WORD, height=5, width=1)\r\n self.text_recipients.grid(row=4, column=0, columnspan=2, sticky='WE')\r\n\r\n frame_ar_state = ttk.LabelFrame(frame, text=\"Access Restriction State\")\r\n frame_ar_state.grid(row=5, column=0, columnspan=2, sticky=\"W\")\r\n\r\n ar_modes = [\r\n # text, mode, column\r\n (\"Enabled\", True, 0),\r\n (\"Disabled\", False, 1)\r\n ]\r\n\r\n self.ar_status = tk.StringVar()\r\n self.ar_status.set(True)\r\n\r\n for (text, mode, column) in ar_modes:\r\n _rb = ttk.Radiobutton(frame_ar_state, text=text, variable=self.ar_status,\r\n value=mode, command=self.on_ar_status)\r\n _rb.grid(row=0, column=column,)\r\n #\r\n label_subject = ttk.Label(frame, text=\"Email Subject:\")\r\n label_subject.grid(row=6, column=0, sticky=\"W\")\r\n\r\n self.entry_disabled_subject = ttk.Entry(frame)\r\n #self.entry_disabled_subject.grid(row=6, column=1, sticky=\"WE\")\r\n\r\n label_body = ttk.Label(frame, text=\"Email Body:\")\r\n label_body.grid(row=7, column=0, columnspan=2, sticky=\"NW\") #\r\n\r\n self.text_disabled_body = ScrolledText.ScrolledText(frame, wrap=tk.WORD, height=5, width=1)\r\n #self.text_disabled_body.grid(row=8, column=0, columnspan=2, sticky='WE')\r\n\r\n #########\r\n\r\n self.entry_enabled_subject = ttk.Entry(frame)\r\n self.entry_enabled_subject.grid(row=6, column=1, sticky=\"WE\")\r\n\r\n self.text_enabled_body = ScrolledText.ScrolledText(frame, wrap=tk.WORD, height=5, width=1)\r\n self.text_enabled_body.grid(row=8, column=0, columnspan=2, sticky='WE')\r\n\r\n ######\r\n\r\n _width_control_1 = ttk.Label(frame, width=17)\r\n _width_control_1.grid(row=0, column=0)\r\n _width_control_1.lower() # hide it\r\n _width_control_2 = ttk.Label(frame, width=90)\r\n _width_control_2.grid(row=0, column=1)\r\n _width_control_2.lower() # hide it\r\n #\r\n frame_control = self.create_frame_control()\r\n frame_control.grid(row=1, column=1, sticky=\"SE\")\r\n return frame",
"def create_right_frame(self, master: Tk) -> None:\r\n\r\n def create_file_menu(master: Widget) -> None:\r\n \"\"\"Create the file menu with the parent MASTER.\"\"\"\r\n\r\n file_menu = Frame(master, bg=self.MAIN_BG)\r\n file_menu.pack(side=TOP,fill=X)\r\n\r\n new_board_button = Button(file_menu, text='New board', font=self.FONT_NORMAL,\r\n command=self.on_new_board)\r\n new_board_button.pack(side=TOP, anchor=W, pady=self.WIDGET_PAD)\r\n\r\n open_button = Button(file_menu, text='Open', font=self.FONT_NORMAL,\r\n command=self.on_open)\r\n open_button.pack(side=LEFT, padx=(0,self.WIDGET_PAD), pady=(0,self.WIDGET_PAD))\r\n\r\n save_button = Button(file_menu, text='Save', font=self.FONT_NORMAL,\r\n command=self.on_save)\r\n save_button.pack(side=LEFT, pady=(0,self.WIDGET_PAD))\r\n\r\n def create_edit_menu(master: Widget) -> None:\r\n \"\"\"Create the editing menu with the parent MASTER.\"\"\"\r\n\r\n def create_mode_buttons(master: Widget, mode_var: IntVar) -> None:\r\n \"\"\"Create mode buttons with the variable MODE_VAR and the parent MASTER.\"\"\"\r\n\r\n add = Radiobutton(master, text='Add', font=self.FONT_NORMAL,\r\n variable=mode_var, value=0)\r\n remove = Radiobutton(master, text='Remove', font=self.FONT_NORMAL,\r\n variable=mode_var, value=1)\r\n toggle = Radiobutton(master, text='Toggle', font=self.FONT_NORMAL,\r\n variable=mode_var, value=2)\r\n\r\n add.pack(anchor=W, padx=self.WIDGET_PAD, pady=(self.WIDGET_PAD,0))\r\n remove.pack(anchor=W, padx=self.WIDGET_PAD, pady=(self.WIDGET_PAD,0))\r\n toggle.pack(anchor=W, padx=self.WIDGET_PAD, pady=self.WIDGET_PAD)\r\n\r\n self.edit_menu = LabelFrame(master, text='Editing', font=self.FONT_SMALL,\r\n bg=self.MAIN_BG)\r\n self.edit_menu.pack(side=TOP, fill=X, pady=self.WIDGET_PAD)\r\n\r\n self.edit_mode = IntVar()\r\n self.edit_mode.set(0)\r\n\r\n create_mode_buttons(self.edit_menu, self.edit_mode)\r\n\r\n def create_settings_menu(master: Widget) -> None:\r\n \"\"\"Create settings menu with the parent MASTER.\"\"\"\r\n\r\n def create_speed_widgets(master: Widget) -> None:\r\n \"\"\"Create speed widgets with the parent MASTER.\"\"\"\r\n\r\n speed_label = Label(master, text='Speed:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n speed_label.grid(row=0, column=0, sticky=W, padx=self.WIDGET_PAD, \r\n pady=(self.WIDGET_PAD,0))\r\n\r\n self.speed_scale = Scale(\r\n master, from_=0, to=len(self.TIMES_PER_GEN)-1, resolution=1, orient=HORIZONTAL,\r\n bg=self.MAIN_BG, font=self.FONT_SMALL, command=self.on_speed_change)\r\n self.speed_scale.set(self.INITIAL_TIME_PER_GEN)\r\n self.speed_scale.grid(row=0, column=1, sticky=W+E, padx=(0,self.WIDGET_PAD),\r\n pady=(self.WIDGET_PAD,0))\r\n\r\n def create_zoom_widgets(master: Widget) -> None:\r\n \"\"\"Create zoom widgets with the parent MASTER.\"\"\"\r\n\r\n zoom_label = Label(master, text='Zoom:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n zoom_label.grid(row=1, column=0, sticky=W, padx=self.WIDGET_PAD,\r\n pady=(0,self.WIDGET_PAD*2))\r\n\r\n self.zoom_scale = Scale(\r\n master, from_=0, to=len(self.CELL_SIZES)-1, resolution=1, orient=HORIZONTAL,\r\n bg=self.MAIN_BG, font=self.FONT_SMALL, command=self.on_zoom_change)\r\n self.zoom_scale.set(self.INITIAL_ZOOM)\r\n self.zoom_scale.grid(row=1, column=1 ,sticky=W+E, padx=(0,self.WIDGET_PAD),\r\n pady=(0,self.WIDGET_PAD*2))\r\n\r\n def create_rule_widgets(master: Widget) -> None:\r\n \"\"\"Create rule widgets with the parent MASTER.\"\"\"\r\n\r\n rule_label = Label(master, text='Rule:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n rule_label.grid(row=2, column=0, 
sticky=W, padx=self.WIDGET_PAD,\r\n pady=(0,self.WIDGET_PAD))\r\n\r\n self.rule_entry = Entry(master, font=self.FONT_NORMAL)\r\n self.rule_entry.grid(row=2, column=1, sticky=W+E, padx=(0,self.WIDGET_PAD),\r\n pady=(0,self.WIDGET_PAD))\r\n self.rule_entry.insert(0, self.INITIAL_RULE)\r\n\r\n rule_button = Button(master, text='Set Rule', font=self.FONT_NORMAL, bg=self.MAIN_BG,\r\n command=self.on_set_rule)\r\n rule_button.grid(row=3, column=1, sticky=E, padx=(0,self.WIDGET_PAD),\r\n pady=(0,self.WIDGET_PAD))\r\n\r\n self.settings_menu = LabelFrame(master,text='Settings', font=self.FONT_SMALL,\r\n bg=self.MAIN_BG)\r\n self.settings_menu.pack(side=TOP, pady=self.WIDGET_PAD)\r\n\r\n create_speed_widgets(self.settings_menu)\r\n create_zoom_widgets(self.settings_menu)\r\n create_rule_widgets(self.settings_menu)\r\n\r\n right_frame = Frame(master, bg=self.MAIN_BG)\r\n right_frame.pack(side=RIGHT, fill=Y, padx = 20, pady=(0,20))\r\n\r\n create_file_menu(right_frame)\r\n create_settings_menu(right_frame)\r\n create_edit_menu(right_frame)\r\n TkState.disable(self.settings_menu.winfo_children())\r\n TkState.disable(self.edit_menu.winfo_children())\r\n\r\n quit_button = Button(right_frame, text='QUIT', fg='red', font=self.FONT_NORMAL,\r\n command=master.destroy)\r\n quit_button.pack(side=BOTTOM, anchor=E, ipadx=20, ipady=10)",
"def build_window(self):\n # Size config\n self.root.geometry('{}x{}'.format(800, 450))\n self.root.minsize(600, 400)\n\n # create all of the main containers\n self.left_frame = Frame(self.root, bg='red', width=150, height=450, pady=3)\n self.right_frame = Frame(self.root, bg='blue', width=650, height=450, pady=3)\n\n # layout all of the main containers\n self.root.grid_rowconfigure(0, weight=1)\n self.root.grid_columnconfigure(1, weight=1)\n\n self.left_frame.grid(row=0,column=0,sticky='ns')\n self.right_frame.grid(row=0,column=1,sticky='nswe')\n\n # create all of the left containers\n self.Username_Search_Frame = Frame(self.left_frame, bg='yellow', pady=3)\n self.Username_Search_Frame.grid_rowconfigure(0, weight=1)\n self.Username_Search_Frame.grid_columnconfigure(0, weight=1)\n self.Username_label = Label(self.Username_Search_Frame, text=self.client.username)\n self.Search_entry = Entry(self.Username_Search_Frame, text='Add people')\n self.Search_entry.bind('<Return>', self.add_event)\n self.Username_label.grid(row=0,column=0,sticky='nswe')\n self.Search_entry.grid(row=1,column=0,sticky='nswe')\n\n self.Show_Friend_request_Frame = Frame(self.left_frame, bg='red', pady=3)\n self.Show_button = Button(self.Show_Friend_request_Frame, text='Chats')\n self.Show_button.bind('<Button-1>', self.show_event)\n self.Show_button_label = Label(self.Show_Friend_request_Frame, text='Chats')\n\n self.Friend_request_button = Button(self.Show_Friend_request_Frame, text='Friend_request')\n self.Friend_request_button.bind('<Button-1>', self.Friend_request_event)\n self.Friend_request_button_label = Label(self.Show_Friend_request_Frame, text='Friend_request')\n\n self.Show_button_label.pack(side=LEFT, fill=BOTH, expand=YES)\n self.Friend_request_button.pack(side=LEFT, fill=BOTH, expand=YES)\n\n self.logins_list_Frame = Frame(self.left_frame, bg='green', pady=3)\n self.logins_list_Frame.grid_rowconfigure(0, weight=1)\n self.logins_list_Frame.grid_columnconfigure(0, weight=1)\n self.logins_list = Listbox(self.logins_list_Frame, selectmode=SINGLE, exportselection=False)\n self.logins_list.bind('<<ListboxSelect>>', self.selected_login_event)\n self.logins_list.pack(side=LEFT, fill=BOTH, expand=YES)\n\n self.friend_request_list = Listbox(self.logins_list_Frame, selectmode=SINGLE, exportselection=False)\n self.friend_request_list.bind('<<ListboxSelect>>', self.select_friend_request)\n #self.friend_request_list.pack(side=LEFT, fill=BOTH, expand=YES)\n\n self.Username_Search_Frame.grid(row=0,column=0,sticky='nswe')\n self.Show_Friend_request_Frame.grid(row=1,column=0,sticky='nswe')\n self.logins_list_Frame.grid(row=2,column=0,sticky='nswe')\n\n self.left_frame.grid_rowconfigure(2, weight=1)\n self.left_frame.grid_columnconfigure(0, weight=1)\n\n\n # create all of the right containers\n self.Target_name_frame = Frame(self.right_frame, bg='yellow', pady=3)\n self.Target_name_frame.grid_rowconfigure(0, weight=1)\n self.Target_name_frame.grid_columnconfigure(0, weight=1)\n self.Target = Label(self.Target_name_frame, text='Target_name')\n self.Target.grid(row=0,column=0,sticky='nswe')\n\n self.Message_box_frame = Frame(self.right_frame, bg='black', pady=3)\n self.message_list = Message_list(self.Message_box_frame)\n self.message_list.show()\n\n self.Entry_frame = Frame(self.right_frame, bg='grey', height=100, pady=3)\n self.Entry_frame.grid_rowconfigure(0, weight=1)\n self.Entry_frame.grid_columnconfigure(0, weight=1)\n self.Entry = Text(self.Entry_frame)\n self.Entry.bind('<Return>', self.send_entry_event)\n 
self.Entry.grid(row=0,column=0,sticky='nswe')\n\n self.Send_file_button = Button(self.right_frame, text='Send file')\n self.Send_file_button.bind('<Button-1>', self.send_file_event)\n self.Send_file_button.grid(row=3,column=0,sticky='nswe')\n\n self.Target_name_frame.grid(row=0,column=0,sticky='nswe')\n self.Message_box_frame.grid(row=1,column=0,sticky='nswe')\n self.Entry_frame.grid(row=2,column=0,sticky='nswe')\n \n\n self.right_frame.grid_rowconfigure(1, weight=1)\n self.right_frame.grid_columnconfigure(0, weight=1)\n self.right_frame.grid_rowconfigure(2, weight=4)\n self.right_frame.grid_columnconfigure(0, weight=1)\n\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing_event)",
"def extra_frame(self):\n\n self.extraframe = tk.Frame(self.extra_notebook, bg='white')\n self.extraframe.pack(anchor='center', expand=True, fill='y')\n # RoHS checker\n self.rohsframe = tk.Frame(self.extraframe, bg='#7093db')\n self.rohsframe.pack(pady=10, fill='x', expand=True)\n rohs = DoubleTextButton(self.rohsframe,\n text_main='RoHS Bill of Materials Comparison',\n text_sub='Output a delta report between two BOMS',\n command=lambda: self.raiseframe_extra(ROHSCompare))\n rohs.pack(fill='x', expand=True, side='right', padx=(4, 0))\n # Format Checker\n self.filterframe = tk.Frame(self.extraframe, bg='#7093db')\n self.filterframe.pack(pady=10, fill='x', expand=True)\n filtercheck = DoubleTextButton(self.filterframe,\n text_main='Format Checker',\n text_sub='Will output filtered CCL to check CCL format',\n command=lambda: self.raiseframe_extra(FilterCompare))\n filtercheck.pack(fill='x', expand=True, side='right', padx=(4, 0))\n # Illustration tool\n self.illtoolframe = tk.Frame(self.extraframe, bg='#7093db')\n self.illtoolframe.pack(pady=10, fill='x', expand=True)\n illustration_tool = DoubleTextButton(self.illtoolframe,\n text_main='Illustration Tool',\n text_sub='Used to insert and delete illustrations',\n command=lambda: self.raiseframe_extra(InsertDelIllustration))\n illustration_tool.pack(fill='x', expand=True, side='right', padx=(4, 0))",
"def create_left_frame(self, master: Tk) -> None:\r\n\r\n def create_animation_menu(master: Widget) -> None:\r\n \"\"\"Create the animation menu with the parent MASTER.\"\"\"\r\n\r\n def create_gen_labels(master: Widget) -> None:\r\n \"\"\"Create generation labels with the parent MASTER.\"\"\"\r\n\r\n gen_label = Label(master, text='Gen:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n gen_label.pack(side=LEFT)\r\n self.gen_number = Label(master, text=0, font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n self.gen_number.pack(side=LEFT)\r\n\r\n def create_rule_labels(master: Widget) -> None:\r\n \"\"\"Create rule labels with the parent MASTER.\"\"\"\r\n\r\n rule_label = Label(master, text='Rule:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n rule_label.pack(side=LEFT, padx=(50,0))\r\n self.rule_name = Label(master, text=self.INITIAL_RULE, font=self.FONT_NORMAL,\r\n bg=self.MAIN_BG)\r\n self.rule_name.pack(side=LEFT)\r\n\r\n def create_anim_buttons(master: Widget) -> None:\r\n \"\"\"Create animation buttons with the parent MASTER.\"\"\"\r\n\r\n self.reset_button = Button(master, text='Reset', font=self.FONT_NORMAL,\r\n command=self.on_reset)\r\n self.reset_button.pack(side=RIGHT)\r\n self.step_button = Button(master, text='Step', font=self.FONT_NORMAL,\r\n command=self.on_step)\r\n self.step_button.pack(side=RIGHT, padx=self.WIDGET_PAD)\r\n self.play_button = Button(master, text='Play', font=self.FONT_NORMAL,\r\n command=self.on_play)\r\n self.play_button.pack(side=RIGHT)\r\n\r\n animation_menu = Frame(master, bg=self.MAIN_BG, pady=self.WIDGET_PAD)\r\n animation_menu.pack(side=TOP, fill=X)\r\n\r\n create_gen_labels(animation_menu)\r\n create_rule_labels(animation_menu)\r\n create_anim_buttons(animation_menu)\r\n TkState.disable([self.play_button, self.step_button, self.reset_button])\r\n\r\n def create_board_canvas(master: Widget) -> None:\r\n \"\"\"Create board canvas with the parent MASTER.\"\"\"\r\n\r\n self.canvas = Canvas(master, bg='black')\r\n self.canvas.bind('<Configure>', self.on_canvas_resize)\r\n self.canvas.bind(\"<B1-Motion>\", self.on_canvas_click)\r\n self.canvas.bind(\"<Button-1>\", self.on_canvas_click)\r\n self.canvas.bind(\"<ButtonRelease-1>\", self.on_canvas_mouse_release)\r\n self.canvas.pack(fill=BOTH, expand = TRUE)\r\n\r\n left_frame = Frame(master, bg=self.MAIN_BG)\r\n left_frame.pack(fill=BOTH, expand=TRUE, padx=20, pady=(0,20))\r\n\r\n create_animation_menu(left_frame)\r\n board_frame = Frame(left_frame)\r\n board_frame.pack(fill=BOTH, expand=TRUE)\r\n create_board_canvas(board_frame)",
"def _create_rx_frame(self, rx_window):\n self.window = Frame(rx_window) # we create a special Frame on the main window for the rx frames\n self.window.grid(row=0, column=0)\n\n self.printRec = False\n\n self.logText = ScrolledText(self.window, width=70) # log text\n self.logText.grid(row=1, column=1)\n\n self.buttonStart = Checkbutton(self.window, text=\" Receive info \", command=self.change_receive, bg='bisque',\n cursor='hand2')\n self.buttonStart.grid(row=3, column=1)\n\n self.buttonClear = Button(self.window, text=\" Clear \", command=self.clear, cursor='hand2')\n self.buttonClear.grid(row=4, column=1)\n\n self.buttonConnect = Button(self.window, text=\" Set Com \", command=self.clear, cursor='hand2')\n self.buttonClear.grid(row=4, column=1)\n\n self.logText.insert(END, \"Detected lasers :\" + '\\n')",
"def create_right_left_containers(self) -> None:\n self.frame_left = tk.Frame(self, borderwidth=5, relief=tk.GROOVE)\n self.frame_left.grid(row=0, column=0)\n # self.window_left_info = None\n self.frame_right = tk.Frame(self, borderwidth=5, relief=tk.GROOVE)\n self.frame_right.grid(row=0, column=1)",
"def makeWidgets(self):\r\n self._frame = tk.Frame(self, relief=tk.RAISED, borderwidth=1)\r\n self._frame.pack(fill=tk.BOTH, expand=1)\r\n\r\n self.pack(fill=tk.BOTH, expand=1)\r\n\r\n self._frame._label1 = tk.Label(self._frame, text='----File Name----')\r\n self._frame._label1.pack(fill=tk.X, expand=tk.NO, pady=1, padx=2)\r\n self._frame._entry = tk.Entry(self._frame)\r\n self._frame._entry.pack(pady=2, padx=2)\r\n\r\n self._frame._label0 = tk.Label(self._frame, textvariable=self.timestr)\r\n self._setTime(self._elapsedtime)\r\n self._frame._label0.pack(fill=tk.X, expand=tk.NO, pady=3, padx=2)\r\n\r\n self._frame._label2 = tk.Label(self._frame, text='----Laps----')\r\n self._frame._label2.pack(fill=tk.X, expand=tk.NO, pady=4, padx=2)\r\n\r\n self._frame._scrollbar = tk.Scrollbar(self._frame, orient=tk.VERTICAL)\r\n self._frame._listbox = tk.Listbox(self._frame, selectmode=tk.EXTENDED, height=10,\r\n yscrollcommand=self._frame._scrollbar.set)\r\n self._frame._listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, pady=5, padx=2)\r\n self._frame._scrollbar.config(command=self._frame._listbox.yview)\r\n self._frame._scrollbar.pack(side=tk.RIGHT, fill=tk.Y)",
"def create_status_comm_frame(self):\n\n self.Comm_Status_Frame = tk.Frame(master=self)\n self.Comm_Status_Frame.config(highlightthickness=1, highlightcolor=\"black\", highlightbackground=\"black\")\n self.Comm_Status_Frame.pack(side = tk.LEFT, padx = 20, pady = 10, fill = tk.BOTH)\n\n status_lbl = tk.Label(master = self.Comm_Status_Frame, text = \"COMMUNICATION STATUS\", width = 25)\n status_lbl.pack(side = tk.TOP)\n\n self.Connect_frame = tk.Frame(master=self.Comm_Status_Frame)\n self.Connect_frame.pack(side = tk.TOP, padx = 20, pady = 10, fill = tk.BOTH)\n\n self.ip_var = tk.StringVar()\n self.port_var = tk.StringVar()\n\n GUI_MAP.create_entry_frame(master = self.Connect_frame, label_text = \"IP: \", label_target = self.ip_var)\n GUI_MAP.create_entry_frame(master = self.Connect_frame, label_text = \"PORT: \", label_target = self.port_var)\n \n self.ip_var.set(\"192.168.1.158\")\n self.port_var.set(\"2777\")\n\n connect_btn = tk.Button(master = self.Connect_frame, text =\"Connect\", command = self.connect_robot)\n connect_btn.pack()\n\n # create status labels\n self.robot_status_var = tk.StringVar()\n self.vizual_status_var = tk.StringVar()\n self.sensors_status_var = tk.StringVar()\n\n GUI_MAP.create_label_frame(master = self.Connect_frame, label_text = \"Robot: \", label_target = self.robot_status_var)\n GUI_MAP.create_label_frame(master = self.Connect_frame, label_text = \"Vizualization: \", label_target = self.vizual_status_var)\n GUI_MAP.create_label_frame(master = self.Connect_frame, label_text = \"Sensors: \", label_target = self.sensors_status_var)\n\n\n self.robot_status_var.set(\"Disconnected\")\n self.vizual_status_var.set(\"Disconnected\")\n self.sensors_status_var.set(\"Disconnected\")",
"def __init__(self,*args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n self.winfo_toplevel().title(\"ElogQP\")\n self.container = tk.Frame(self)\n self.container.pack(side=\"top\", fill=\"both\", expand=True)\n self.container.grid_rowconfigure(0, weight=1)\n self.container.grid_columnconfigure(0, weight=1)\n self.activeFrames = []\n for F in (Frames.frame_start.frame_start, Frames.frame_modules.frame_modules, Frames.frame_showError.frame_showError):\n self.createFrame(F, F.__name__)\n \n self.showFrame(\"frame_start\")",
"def _create_com_frame(self, right_frame):\n self.window = Frame(right_frame) # we create a special Frame on the main window for the tx frames\n self.window.grid(row=0, column=1)\n\n self.setTxt = Label(self.window, text=\"Communication set : \")\n self.setTxt.grid(row=1, column=0)\n\n self.comTxt = Label(self.window, text=\"Com : \")\n self.comTxt.grid(row=3, column=0, columnspan=1, sticky=W)\n\n self.entree_com = Entry(self.window, width=8)\n self.entree_com.grid(row=3, column=1, columnspan=4)\n\n self.baudRateTxt = Label(self.window, text=\"Baud rate : \")\n self.baudRateTxt.grid(row=4, column=0, columnspan=1, sticky=W)\n\n self.entree_baud = Entry(self.window, width=8)\n self.entree_baud.grid(row=4, column=1, columnspan=4)\n\n self.buttonSet = Button(self.window, text=\" Set parameters \", command=self.set_communication,\n bg='Green2', cursor='hand2')\n self.buttonSet.grid(row=5, column=1, sticky=W)",
"def createFrame (self,message):\n \n f = self.frame\n \n lab = Tk.Label(f,text=message)\n lab.pack(pady=10,side=\"left\")\n \n self.number_entry = t = Tk.Entry(f,width=20)\n t.pack(side=\"left\")",
"def create_status_robot_frame(self):\n\n self.Robot_Status_Frame = tk.Frame(master=self)\n self.Robot_Status_Frame.config(highlightthickness=1, highlightcolor=\"black\", highlightbackground=\"black\")\n self.Robot_Status_Frame.pack(side = tk.LEFT, padx = 20, pady = 10, fill = tk.BOTH)\n\n status_lbl = tk.Label(master = self.Robot_Status_Frame, text = \"ROBOT STATUS\", width = 15)\n status_lbl.pack(side = tk.TOP) \n\n self.lbl_pose_x = tk.StringVar()\n self.lbl_pose_y = tk.StringVar()\n self.lbl_angle = tk.StringVar()\n self.lbl_status = tk.StringVar()\n self.lbl_goto_x = tk.StringVar()\n self.lbl_goto_y = tk.StringVar()\n\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"X: \", label_target = self.lbl_pose_x)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Y: \", label_target = self.lbl_pose_y)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Angle: \", label_target = self.lbl_angle)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Status: \", label_target = self.lbl_status)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"\", label_target = None)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"next GOTO X: \", label_target = self.lbl_goto_x)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"next GOTO Y: \", label_target = self.lbl_goto_y)\n\n\n self.lbl_pose_x.set(\"N/A\")\n self.lbl_pose_y.set(\"N/A\")\n self.lbl_angle.set(\"N/A\")\n self.lbl_status.set(\"N/A\")\n self.lbl_goto_x.set(\"N/A\")\n self.lbl_goto_y.set(\"N/A\")",
"def receiveFrame(self):\n\t\treceiveFrame = Frame(self)\n\t\treceiveFrame.grid(column=2, columnspan=2, row=0, rowspan=6)\n\t\treceiveFrame.config(bg = \"white\")\n\n\t\treceiveLabel = Label(receiveFrame, text=\"Receive\", font=(\"Sans Serif\", 20, \"bold\"), fg=\"blue\", bg = \"white\")\n\t\tself.receiveText = Text(receiveFrame, width=67, height = 10, fg = \"blue\", highlightthickness = 2, highlightcolor = \"blue\", highlightbackground = \"light slate gray\")\n\n\t\treceiveLabel.pack(pady=\"10 0\")\n\t\tself.receiveText.pack(padx = 10, pady = 10)",
"def setUpFrame(self):\n #adds labels to the Board\n self.mineLabel = tk.Label(self, text=\"Mines: \"+str(self.numMines))\n self.mineLabel.grid(row=0, column=0, sticky=\"W\", columnspan=int((self.cols-2)/2))\n self.smileButton = tk.Label(self, image=self.images[1])\n self.smileButton.grid(row=0, column=int((self.cols-2)/2), sticky=\"WE\", columnspan=2)\n self.flagLabel = tk.Label(self, text=\"Flags: \"+str(self.numFlags))\n self.flagLabel.grid(row=0, column=int((self.cols-2)/2)+2, sticky=\"E\", columnspan=int((self.cols-1)/2))\n\n #left click listeners on smileButton\n self.smileButton.bind('<ButtonPress-1>', lambda event, num=0: self.changeSmile(num))\n self.smileButton.bind('<ButtonRelease-1>', self.replay)",
"def createFrame(self):\n \n self.outerFrame = f = Tk.Frame(self.frame)\n f.pack(expand=1,fill=\"both\")\n \n if self.label:\n labf = Tk.Frame(f)\n labf.pack(pady=2)\n lab = Tk.Label(labf,text=self.label)\n lab.pack()\n \n f2 = Tk.Frame(f)\n f2.pack(expand=1,fill=\"both\")\n \n self.box = box = Tk.Listbox(f2,height=20,width=30)\n box.pack(side=\"left\",expand=1,fill=\"both\")\n \n bar = Tk.Scrollbar(f2)\n bar.pack(side=\"left\", fill=\"y\")\n \n bar.config(command=box.yview)\n box.config(yscrollcommand=bar.set)",
"def __init__(self, master):\n tk.Frame.__init__(self, master)\n self.master = master\n\n # Set the UI \n self.welcome = tk.Label(self, text = \"Welcome!\", font=(\"Arial Bold\", 50)).grid(row = 0, ipady = 80)\n\n self.login_frame = tk.LabelFrame(self, width = 50)\n self.login_frame.grid(row = 1)\n\n label_username = tk.Label(self.login_frame, text = \"Username: \\t\", font=(\"Arial Bold\", 30)).grid(row = 1, column = 0, pady = 5)\n self.entry_username = tk.Entry(self.login_frame, width = 20, font=(\"Arial Bold\", 30))\n self.entry_username.grid(row = 1, column = 1, pady = 5)\n label_password = tk.Label(self.login_frame, text = \"Password: \\t\", font=(\"Arial Bold\", 30)).grid(row = 2, column = 0, pady = 5)\n self.entry_password = tk.Entry(self.login_frame, width = 20, font=(\"Arial Bold\", 30), show=\"*\")\n self.entry_password.grid(row = 2, column = 1, pady = 5)\n self.bt_login = tk.Button(self.login_frame, text = \"Login\", font=(\"Arial Bold\", 30), fg = \"red\", command = self.login_bt_pressed)\n self.bt_login.grid(row = 3, columnspan = 2, pady = 15)\n\n self.bt_login_face = tk.Button(self, width = 30, text = \"Login with facial recognition\", font=(\"Arial Bold\", 30), fg = \"red\", command=lambda: master.switch_frame(FacePage))\n self.bt_login_face.grid(row = 3, pady = 60)\n self.bt_login_qr = tk.Button(self, width = 30 , text = \"Engineer QR\", command=lambda: master.switch_frame(QrPage), font=(\"Arial Bold\", 30), fg = \"red\")\n self.bt_login_qr.grid(row = 4)",
"def createMessageFrame (self,message):\n \n label = Tk.Label(self.frame,text=message)\n label.pack(pady=10)",
"def createFrame(self,message):\n \n f = self.frame\n \n label = Tk.Label(f,text=message)\n label.pack(pady=10)\n \n self.id_entry = text = Tk.Entry(f,width=20)\n text.pack()",
"def create_widgets(self):\n self.pack(fill=tk.BOTH, expand=True)\n self.top_frame = tk.Frame(self)\n self.top_frame.pack(fill=tk.X, expand=False)\n\n # Create obstacle button\n self.create_obstacle_button = tk.Button(\n self.top_frame,\n text=self.OBSTACLE_CREATION_INACTIVE_LABEL,\n command=self._toggle_creation_mode_cb\n )\n self.create_obstacle_button.pack(side=tk.LEFT)\n\n # Load button\n self.load_button = tk.Button(\n self.top_frame,\n text=self.LOAD_BUTTON_LABEL,\n command=self._load_button_cb\n )\n self.load_button.pack(side=tk.LEFT)\n\n # Export button\n export_button = tk.Button(\n self.top_frame,\n text=self.EXPORT_BUTTON_LABEL,\n command=self._export_button_cb\n )\n export_button.pack(side=tk.RIGHT)\n\n # Main canvas\n self.canvas = tk.Canvas(self, background='white')\n self.canvas.config(width=self.CANVAS_WIDTH, height=self.CANVAS_HEIGHT)\n self.canvas.bind('<ButtonRelease-1>', self._draw_line)\n self.canvas.pack(fill=tk.BOTH, expand=True)\n self.canvas.focus_set()",
"def config_frames(self):\n self.root.grid_rowconfigure(1, weight=1)\n self.root.grid_columnconfigure(1, weight=1)\n\n self.top_frame = tkinter.Frame(self.root, pady=1)\n self.top_frame.grid(row=0, columnspan=2, sticky='nsew')",
"def __init__(self, master=None):\n # Initialise variables\n tk.Frame.__init__(self)\n self.frames = []\n self.labels = []\n self.entries = []\n self.user_values = {}\n self.header_values = {}\n self.summary_values = {\"EWA\": {}, \"Ave\": {}}\n self.row_buttons = []\n self.master.title(\"LoL Team Checker\")\n\n # Please check how to code this by PEP standards\n self.default_values = {'ln': [\"Summoner Name\", \"Champion Name\"],\n 'rn': [\"Games\", \"Win Rate\", \"Kills\",\n \"Deaths\", \"Assists\", \"CS\",\n \"Towers\", \"Gold\", \"KDA\",\n \"Prediction\"],\n 'li': {\"Names\": ['{s}'.format(s=\"Summoner\"\" \")\n + str(i) for i in range(1, 6)],\n \"Champs\": ['{s}'.format(s=\"Champ \")\n + str(i) for i in range(1, 6)]\n }, 'ri': ['-' if i == 9 else '0' for i in\n range(10) for j in range(5)],\n 'rv': [tk.StringVar() if i == 9 else\n tk.DoubleVar() for i in range(10)\n for j in range(5)]}\n\n # Create Frames\n self._create_left_name_frame(self.default_values['ln'])\n self._create_right_name_frame(self.default_values['rn'])\n self._create_left_info_frame(self.default_values['ln'])\n self._create_button_frame()\n self._create_right_info_frame(self.default_values['rn'])\n self._create_mid_region_frame() # mid, top, frame created by column\n self._create_left_summary_frame()\n self._create_mid_summary_frame()\n self._create_right_summary_frame()\n# configuration, not explicitly.\n # Configure frames\n# self.master.grid()\n top = self.winfo_toplevel()\n# top.grid(0, \"ew\")\n top.columnconfigure(0, weight=1) # , minsize=100)\n top.columnconfigure(1, weight=1) # , minsize=75)\n top.columnconfigure(2, weight=1) # , minsize=100)\n# top.rowconfigure(0, weight=1)\n top.rowconfigure(1, weight=1)\n top.rowconfigure(2, weight=2)\n top.rowconfigure(3, weight=2)\n# self.columnconfigure(0, weight=1)\n# self.columnconfigure(1, weight=1)\n# self.rowconfigure(0, weight=0)\n self.grid(sticky=\"ew\")",
"def build_window(self):\n # Size config\n self.window.geometry('750x500')\n self.window.minsize(600, 400)\n\n main_frame = tk.Frame(self.window)\n main_frame.pack(fill=\"both\")\n\n top_frame = tk.Frame(main_frame)\n top_frame.pack(side=\"top\", fill=\"x\")\n\n tk.Label(top_frame, text=\"Enter a URL to classify\").pack(side=\"top\")\n\n webpage_classifier_form_frame = tk.Frame(top_frame)\n webpage_classifier_form_frame.pack(side=\"top\")\n\n tk.Label(webpage_classifier_form_frame, text=\"URL\").pack(side=\"left\")\n url = tk.StringVar()\n self.url_entry = tk.Entry(webpage_classifier_form_frame, textvariable=url)\n self.url_entry.pack(side=\"right\")\n\n\n self.add_webpages_to_dataset_button = tk.Button(top_frame, text=\"Add Webpages to Dataset\")\n self.add_webpages_to_dataset_button.bind('<Button-1>', self.add_webpages_to_dataset)\n self.add_webpages_to_dataset_button.pack(side=\"bottom\")\n\n self.balance_dataset_button = tk.Button(top_frame, text=\"Balance Dataset\")\n self.balance_dataset_button.bind('<Button-1>', self.balance_dataset)\n self.balance_dataset_button.pack(side=\"bottom\")\n\n self.submit_button = tk.Button(top_frame, text=\"Scrape Site\")\n self.submit_button.bind('<Button-1>', self.scrape_site)\n self.submit_button.pack(side=\"bottom\")\n\n\n\n bottom_frame = tk.Frame(main_frame)\n bottom_frame.pack(side=\"bottom\", fill=\"x\")\n\n # ScrolledText widget for displaying messages\n self.messages_list = scrolledtext.ScrolledText(bottom_frame, wrap='word', font=self.font)\n self.messages_list.configure(state='disabled')\n self.messages_list.pack(fill=\"x\")",
"def initWidgets(self):\t\t\n\t\tdef initSheetFrame():\n\t\t\t# ...for sheet music display\n\t\t\tself.sheetFrame = Frame(borderwidth=1)\n\t\t\t\n\t\tdef initControlButtons():\n\t\t\t# Transcription control button container\n\t\t\tself.controlButtons = Frame(relief=SUNKEN, borderwidth=1)\n\t\t\t\n\t\t\t# Define buttons\n\t\t\tself.recordBtn = Button(self.controlButtons, text=\"Record\", \n\t\t\t\t\t\t\tcommand=self.record)\n\t\t\tself.pauseBtn = Button(self.controlButtons, text=\"Pause\", \n\t\t\t\t\t\t\tcommand=self.pause, state=DISABLED)\n\t\t\tself.stopBtn = Button(self.controlButtons, text=\"Stop\", \n\t\t\t\t\t\t\tcommand=self.stop, state=DISABLED)\n\t\t\tself.heavyFiltering = IntVar()\n\t\t\tfiltering = Checkbutton(self.controlButtons, \n\t\t\t\t\t\t\t\t\ttext=\"Smooth Input Audio\", \n\t\t\t\t\t\t\t\t\tvariable=self.heavyFiltering)\t\t\t\t\n\t\t\ttempoLabel = Label(self.controlButtons, text=\"Intended tempo: \")\n\t\t\tself.recordingLabel = Label(self.controlButtons, text=\"STOPPED\", \n\t\t\t\t\t\t\t\t\t\tfg=\"white\", bg=\"lightblue\")\n\t\t\tself.tempo = Entry(self.controlButtons)\n\t\t\tself.tempo.insert(0, \"60\")\n\t\t\t\n\t\t\t# Pack buttons\n\t\t\tself.recordBtn.pack(side=LEFT)\n\t\t\tself.pauseBtn.pack(side=LEFT)\n\t\t\tself.stopBtn.pack(side=LEFT)\n\t\t\tfiltering.pack(side=LEFT)\n\t\t\tself.recordingLabel.pack(side=LEFT)\n\t\t\ttempoLabel.pack(side=LEFT)\n\t\t\tself.tempo.pack(side=LEFT)\n\t\t\t\n\t\tdef initBottomButtons():\n\t\t\t# Application control button container\n\t\t\tself.bottomButtons = Frame(relief=SUNKEN, borderwidth=1)\n\t\t\t\n\t\t\t# Define buttons\n\t\t\tfileName = Label(self.bottomButtons, text=\"Current file: \")\n\t\t\tself.fileLoc = Label(self.bottomButtons, \n\t\t\t\t\t\t\t\t\ttextvariable=self.saveFileStr)\n\t\t\texport = Button(self.bottomButtons, text=\"Export\", \n\t\t\t\t\t\t\tcommand=self.export)\n\t\t\treset = Button(self.bottomButtons, text=\"Reset\", \n\t\t\t\t\t\t\tcommand=self.reset)\n\t\t\tquit = Button(self.bottomButtons, text=\"Quit\", \n\t\t\t\t\t\t\tcommand=self.quitIt)\n\t\t\n\t\t\t# Pack buttons\n\t\t\tfileName.pack(side=LEFT)\n\t\t\tself.fileLoc.pack(side=LEFT)\n\t\t\texport.pack(side=LEFT, **self.buttonOptions)\n\t\t\treset.pack(side=LEFT, **self.buttonOptions)\n\t\t\tquit.pack(side=LEFT, **self.buttonOptions)\n\t\t\t\n\t\tinitSheetFrame()\n\t\tinitControlButtons()\t\n\t\tinitBottomButtons()",
"def __init__(self, parent, store):\n super().__init__(parent, store)\n\n #Create components on frame 1 ==========================================\n self.frame1_label = tk.Label(self.frame1, text=\"Login with password\", font=(\"Arial\", 20, \"bold\"))\n self.password_label = tk.Label(self.frame1, text=\"Password: \")\n self.password_field = tk.StringVar()\n self.password_entry = tk.Entry(self.frame1, textvariable=self.password_field, show='*')\n self.login_btn_label = tk.StringVar()\n self.login_btn_label.set(\"Login\")\n self.login_btn = tk.Button(self.frame1,\n textvariable=self.login_btn_label,\n width=15,height=2,\n command = lambda : self.login(self.store[\"username\"], self.password_field.get()))\n\n self.back_btn = tk.Button(self.frame1,\n text=\"Back\",\n width=10, height=2,\n command = self.back)\n\n\n #Create components on frame 2 ==========================================\n self.frame2_label = tk.Label(self.frame2,\n font=(\"Arial\", 20, \"bold\"),\n text=\"Login with facial recognition\")\n self.open_cam_btn = tk.Button(self.frame2,\n text=\"Open camera\",\n relief = tk.RAISED if self.store[\"face_added\"] else tk.SUNKEN,\n state = \"normal\" if self.store[\"has_cam\"] else \"disabled\",\n command = self.loginWithFace)\n\n #Config row and col for parent, frame1, and frame2 =====================\n # Params: widget, hori_split, vert_split Or it means\n #Num rows and num cols: Ex: 1, 2 means 1 row and 2 cols for the big grid\n self.configureWidgetGrid(self.parent, 1, 2)\n self.configureWidgetGrid(self.frame1, 3, 2)\n self.configureWidgetGrid(self.frame2, 3, 1)\n\n #Place components on containers ========================================\n self.separator.grid(column=0, row=0, sticky='nse')\n #Display frame1 and frame 2 on parent using grid layout\n # Set sticky=\"nsew\" to make screen resizable\n self.frame1.grid(row = 0, column = 0, columnspan=1, rowspan=1, sticky=\"nsew\")\n self.frame2.grid(row = 0, column = 1, columnspan=1, rowspan=1, sticky=\"nsew\")\n #Add components to frame 1 grid\n self.password_label.grid(column=0, row=1, sticky=\"e\")\n self.password_entry.grid(column=1, row=1, sticky=\"w\")\n self.login_btn.grid(column=1, row=2, sticky=\"wn\")\n self.frame1_label.grid(column=0, row=0, columnspan=2)\n self.back_btn.grid(column=0, row=2, sticky=\"ws\", padx=20, pady=20)\n #Add components to frame 2 grid\n self.frame2_label.grid(column=0, row=0, columnspan=2)\n self.open_cam_btn.grid(column=0, row=0, sticky='s')\n\n #Only load the model if this person has registered face\n if self.store[\"face_added\"] == True and self.store[\"has_cam\"]:\n import os\n print(os.getcwd())\n self.recognizer, self.pca = loadModelAndPCA(cfg.models[\"MODEL_NAME\"],\n cfg.models[\"PCA_NAME\"])",
"def create_frame_ar(self, frame_parent):\r\n frame = ttk.LabelFrame(frame_parent, text=\"Access Restriction Settings\")\r\n # master width control\r\n ttk.Label(frame, width=70).grid()\r\n #\r\n label_ips = ttk.Label(frame, text=\"Exempted Hosts/IPs (newline delimited)\")\r\n label_ips.grid(row=0, column=0, sticky=\"NW\")\r\n #\r\n self.text_ips =ScrolledText.ScrolledText(frame, height=5, width=1)\r\n self.text_ips.grid(row=1, column=0, sticky='WE')\r\n #\r\n label_url = ttk.Label(frame, text=\"Access Restricted URL\")\r\n label_url.grid(row=2, column=0, sticky=\"WE\")\r\n #\r\n self.entry_url = ttk.Entry(frame)\r\n self.entry_url.grid(row=3, column=0, sticky=\"WE\")\r\n\r\n frame_control = self.create_frame_control()\r\n frame_control.grid(row=1, column=1, sticky=\"SE\")\r\n return frame",
"def create_widgets(self, root):\n\n self.widgets['Main Frame'] = Frame(self.widgets['Login Window'], borderwidth=20)\n self.widgets['Main Frame'].pack(expand=YES, fill=BOTH)\n\n Label(self.widgets['Main Frame'], text='Enter password:').pack(side=TOP, expand=YES)\n\n self.widgets['Input Frame'] = Frame(self.widgets['Main Frame'])\n self.widgets['Input Frame'].pack(side=TOP, expand=YES, fill=BOTH)\n\n self.widgets['Password Entry'] = Entry(self.widgets['Input Frame'], show=\"*\")\n self.widgets['Password Entry'].focus_set() # place cursor in entry\n self.widgets['Password Entry'].pack(side=LEFT, expand=YES)\n\n self.widgets['Enter Button'] = Button(self.widgets['Input Frame'], text=\"Enter\",\n command=lambda: self.verify_password_input(root))\n self.widgets['Enter Button'].pack(side=RIGHT, expand=YES)\n\n self.widgets['Login Window'].bind(\"<Return>\", lambda e: self.widgets['Enter Button'].invoke())\n\n self.widgets['Message'] = Label(self.widgets['Main Frame'], width=50, wraplength=300)"
] | [
"0.68194026",
"0.67628336",
"0.66859114",
"0.6685763",
"0.66459084",
"0.6627091",
"0.65815926",
"0.65467334",
"0.65451664",
"0.65284294",
"0.65139276",
"0.643256",
"0.6422164",
"0.6420944",
"0.6383805",
"0.6314913",
"0.62750316",
"0.62735295",
"0.626137",
"0.6218138",
"0.6210344",
"0.6187011",
"0.6160253",
"0.6142972",
"0.61259717",
"0.6112292",
"0.6079421",
"0.6079195",
"0.60786015",
"0.60779804"
] | 0.7488129 | 0 |
sets the int var self.universe to the value of i | def set_universe(self, i):
self.universe.set(i) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def i(self, i):\n\n self._i = i",
"def assignMoreVectors(self, i):\n return",
"def update_global_identifiers(self, universe_test):\n self.cellNum += 1\n self.surfaceNum += 1\n self.materialNum += 1\n if universe_test:\n self.universe += 1",
"def universe(self):\n return self._universe",
"def set_idx(self, i, other, tensor_value):\n for k, v in self.variables.items():\n if k not in other.variables:\n self.variables[k][i] *= 0\n\n for k, v in other.variables.items():\n if k not in self.variables:\n self.variables[k] = np.zeros(tensor_value.shape)\n self.variables[k][i] = other.variables[k]",
"def var(self,i): # TODO: change to property to access (read only?) X?\n return Var(i,self.dims[i])",
"def universe(self):\n try:\n return self.__universe\n except AttributeError:\n return None",
"def set_value_at(self, i, new_value=default_value):\n self.heap[i] = new_value",
"def vec_unit(n, i):\n return jnp.zeros(n).at[i].set(1)",
"def set(self, i: int, v: int) -> None:\n self.add(i, v - self.range_sum(i, i))",
"def subst_i(term):\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)",
"def create_universe():\n # Universe is a large list, planets and others things are separated using\n # type() and isinstance(object, ClassInfo)\n univers = []\n captain = Captain()\n captain.ship = Ship()\n captain.account = BankAccount(captain.name, [])\n planetes = create_planetes()\n for planete in planetes:\n if planete.homeworld:\n captain.homeworld = planete\n captain.location = planete\n if planete.tech_level > 5:\n planete.shipyard = populate_shipyard()\n\n univers.append(captain)\n univers.extend(planetes)\n\n return univers",
"def set_state(self, i, state):\n self.states[i] = state\n self.sanity_check()\n print self",
"def setVariableIndices(self, indicesOfVariables):\n for e in self.children:\n e.setVariableIndices(indicesOfVariables)",
"def updateValue(self,i,x):\n assert 0 <= i < len(self)\n self.__update_aux(0,0,len(self),i,x)",
"def assign_index(self):\n\n i = 0\n for word in self.words:\n self.index[word] = i\n i += 1",
"def setIInternal(self):\n # if the di vectors are defined this method populates the upper limit vector\n self.i = {}\n for label in self.di.keys():\n self.i[label] = []\n L = 0\n for l in self.di[label]:\n L += l\n self.i[label].append(L)",
"def from_vasp_index(i):\n return Orbital.all_orbitals[i]",
"def __setitem__(self,i,v):\n _items[i] = v",
"def __setitem__(self, i, v):\n raise TypeError(\"'Factorization' object does not support item assignment\")",
"def nom(self, i):\n pass",
"def __setitem__(self, i, val):\n\t\tif i < self.n:\n\t\t\tself.v[i] = val",
"def __setitem__(self, i, value):\n self._ar[i] = value",
"def variabilize(self):\n if self.nvars>=0:\n pass #already done\n else:\n varTab = syt.SymbolTable()\n def convertArgs(args):\n return map(lambda a: -varTab.getId(a) if isVariableAtom(a) else a, args)\n def convertGoal(g):\n return Goal(g.functor, convertArgs(g.args))\n if self.lhs: self.lhs = convertGoal(self.lhs)\n self.rhs = map(convertGoal, self.rhs)\n if self.features:\n self.features = map(convertGoal, self.features)\n if self.findall:\n self.findall = map(convertGoal, self.findall) \n self.variableList = varTab.getSymbolList()\n self.nvars = len(self.variableList)",
"def set_iload(self):\n k = self.istore[0]\n v = self.stencil.get_all_velocities()\n indices = self.istore[1:].copy()\n indices[1] += v[k].T[1]\n self.iload.append(np.concatenate([k[np.newaxis, :], indices]))",
"def world(self, value):\n self.worlds[self.world_index] = value",
"def set_iload(self):\n k = self.istore[0]\n v = self.stencil.get_all_velocities()\n indices = self.istore[1:].copy()\n indices[1] += v[k].T[2]\n self.iload.append(np.concatenate([k[np.newaxis, :], indices]))",
"def set_iload(self):\n k = self.istore[0]\n v = self.stencil.get_all_velocities()\n indices = self.istore[1:].copy()\n indices[0] += v[k].T[0]\n self.iload.append(np.concatenate([k[np.newaxis, :], indices]))",
"def update_agent_location_vector(self):\n\n for agent in self.agents:\n location = agent.getz()\n # print(location)\n if location[0] == 0:\n vectorized_agent_loc = location[1]\n elif location[0] == 1:\n vectorized_agent_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_agent_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_agent_loc = 12 + location[1]\n\n if agent.isBusy == False:\n # remove any location if it shows it as well\n self.agent_locations[0][vectorized_agent_loc] = 0\n continue\n else:\n self.agent_locations[0][vectorized_agent_loc] = 1\n if self.DEBUG:\n print('agent location vector is ', self.agent_locations)",
"def set_iload(self):\n k = self.istore[0]\n ksym = self.stencil.get_symmetric()[k][np.newaxis, :]\n v = self.stencil.get_all_velocities()\n indices = self.istore[1:] + v[k].T\n self.iload.append(np.concatenate([ksym, indices]))"
] | [
"0.59920424",
"0.5927336",
"0.57459134",
"0.56548715",
"0.5609323",
"0.5395382",
"0.5320092",
"0.5219125",
"0.51983535",
"0.5163157",
"0.5133494",
"0.5103622",
"0.5095128",
"0.50520754",
"0.50436985",
"0.5029962",
"0.499586",
"0.49659938",
"0.49072215",
"0.49047565",
"0.49040177",
"0.49018645",
"0.48934594",
"0.48614958",
"0.48595592",
"0.4854071",
"0.4844762",
"0.48412907",
"0.48312953",
"0.4812583"
] | 0.91754603 | 0 |
runs discovery for the current universe. | def discover(self):
self.ola_thread.run_discovery(self.universe.get(), self._upon_discover)
if self.auto_disc.get():
self.ola_thread.add_event(5000, self.discover)
else:
print "auto_disc is off" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discover(self):\n pass",
"async def discover(self):\n raise NotImplementedError(\"this is a base class\")",
"def detailed_discovery(categories):\n if len(categories) > 0:\n unlock_vault()\n\n for ip in discovery_info.get(\"ip_addresses\"):\n names = vault.get_names()\n if ip not in names:\n user = ask_username(ip)\n pwd = ask_password(ip)\n vault.add_secret(ip, user, pwd)\n\n for ip in discovery_info.get(\"ip_addresses\"):\n if ip not in discovery_info.get(\"visited_addresses\"):\n discovery_info[\"visited_addresses\"].append(ip)\n\n user = vault.show_username_by_name(ip)\n pwd = vault.show_secret_by_name(ip)\n\n new_ci = ConfigurationItem.ConfigurationItem()\n new_ci.add_ipv4_address(ip)\n ci = methods.ci_already_exists(new_ci)\n if ci == None:\n ci = new_ci\n\n ci_os = check_os(ci)\n\n if ci_os == \"mac\":\n print(blue + \">>> \" + reset +\n \"Discovery in the OS X machine with the address \" + str(ip) + \"...\\n\")\n os_x.run_os_x_discovery(ci, user, pwd, ip, categories)\n\n elif ci_os == \"windows\":\n print(blue + \">>> \" + reset +\n \"Discovery in the Windows machine with the address \" + str(ip) + \"...\\n\")\n windows.run_windows_discovery(\n ci, user, pwd, ip, categories)\n\n elif ci_os == \"linux\":\n print(blue + \">>> \" + reset +\n \"Discovery in the Linux machine with the address \" + str(ip) + \"...\\n\")\n linux.run_linux_discovery(ci, user, pwd, ip, categories)\n\n methods.add_ci(ci)",
"def device_discovery(endless):\r\n click.echo(\"start device discovery ...\")\r\n _device_discovery(endless)",
"def main():\n for dev in Discover.discover().values():\n print(dev)",
"async def discover(self):\n\n def get_discovered_servers(discovery):\n servers = discovery.all()\n discovery.stop()\n return servers\n\n discovery = RoonDiscovery(None)\n servers = await self._hass.async_add_executor_job(\n get_discovered_servers, discovery\n )\n _LOGGER.debug(\"Servers = %s\", servers)\n return servers",
"def walkDiscovery(self):\n myname = socket.getfqdn()\n self.log.debug(\"My hostname = %s\", myname)\n myip = None\n try:\n myip = getHostByName(myname)\n self.log.debug(\"My IP address = %s\", myip)\n except (socket.error, DNSNameError):\n raise SystemExit(\"Failed lookup of my IP for name %s\" % myname)\n configs = yield self.config().callRemote(\"getDeviceConfig\", [myname])\n me = configs[0] if configs else None\n if not me or self.options.remodel:\n me = yield self.discoverDevice(\n myip,\n devicepath=self.options.deviceclass,\n prodState=self.options.productionState,\n )\n if not me:\n raise SystemExit(\"SNMP discover of self '%s' failed\" % myname)\n if not myip:\n myip = me.manageIp\n if not myip:\n raise SystemExit(\"Can't find my IP for name %s\" % myname)\n\n yield self.discoverRouters(me, [myip])\n\n if self.options.routersonly:\n self.log.info(\"Only routers discovered, skipping ping sweep.\")\n defer.returnValue(None)\n\n ips = yield self.discoverIps(\n (yield self.config().callRemote(\"getSubNetworks\"))\n )\n if not self.options.nosnmp:\n defer.returnValue((yield self.discoverDevices(ips)))\n defer.returnValue(ips)",
"def run(self, registry):",
"def run(self):\n # Get the UUID so we can heartbeat to Ironic. Raises LookupNodeError\n # if there is an issue (uncaught, restart agent)\n self.started_at = _time()\n\n # Cached hw managers at runtime, not load time. See bug 1490008.\n hardware.load_managers()\n\n if not self.standalone:\n # Inspection should be started before call to lookup, otherwise\n # lookup will fail due to unknown MAC.\n uuid = inspector.inspect()\n\n content = self.api_client.lookup_node(\n hardware_info=hardware.dispatch_to_managers(\n 'list_hardware_info'),\n timeout=self.lookup_timeout,\n starting_interval=self.lookup_interval,\n node_uuid=uuid)\n\n self.node = content['node']\n self.heartbeat_timeout = content['heartbeat_timeout']\n\n wsgi = simple_server.make_server(\n self.listen_address[0],\n self.listen_address[1],\n self.api,\n server_class=simple_server.WSGIServer)\n\n if not self.standalone:\n # Don't start heartbeating until the server is listening\n self.heartbeater.start()\n\n try:\n wsgi.serve_forever()\n except BaseException:\n self.log.exception('shutting down')\n\n if not self.standalone:\n self.heartbeater.stop()",
"def discovery(self) -> Optional[pulumi.Input['DiscoveryArgs']]:\n return pulumi.get(self, \"discovery\")",
"def discovery():\n launch_training_on_all_splits(experiment='discovery', splits=DISCOVERY_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)",
"def GetUniverse(u):\n raise NotImplementedError",
"def main():\n config = Config()\n core = HostSearch()\n hosts = core.get_hosts(tags=['!nessus'], up=True)\n hosts = [host for host in hosts]\n host_ips = \",\".join([str(host.address) for host in hosts])\n\n url = config.get('nessus', 'host')\n access = config.get('nessus', 'access_key')\n secret = config.get('nessus', 'secret_key')\n template_name = config.get('nessus', 'template_name')\n\n nessus = Nessus(access, secret, url, template_name)\n\n scan_id = nessus.create_scan(host_ips)\n nessus.start_scan(scan_id)\n\n for host in hosts:\n host.add_tag('nessus')\n host.save()\n\n Logger().log(\"nessus\", \"Nessus scan started on {} hosts\".format(len(hosts)), {'scanned_hosts': len(hosts)})",
"def dynamic_discovery(self):\n while True:\n sleep(DYNAMIC_DISCOVERY_SLEEP)\n logging.debug(\"Starting Dynamic Discovery!\")\n self.datastream_discovery()",
"async def discover(self, timeout: int):",
"def discovery(self) -> pulumi.Output['outputs.DiscoveryResponse']:\n return pulumi.get(self, \"discovery\")",
"def main():\n config = _config()\n\n resolver = Resolver()\n resolver.nameservers = config['initial_nameservers']\n LOG.debug(\"Resolving namdservers %s\", config['nameservers'])\n nameservers = [resolver.address(_) for _ in config['nameservers']]\n\n resolver.nameservers = nameservers\n\n addresses = {}\n for domain in config['domains']:\n addresses[domain] = resolver.address(domain)\n LOG.debug(\"Found addresses: %s\", addresses)\n\n account = Account(**config['credentials'])\n client = Client(account)\n domains = client.get_domains()\n\n for domain, address in addresses.items():\n if domain not in domains:\n raise ValueError(\"%s not in client list of domains\" % domain)\n current = client.get_records(domain)[0]['data']\n if current != address:\n LOG.info('updating %s (%s -> %s)', domain, current, address)\n client.update_record_ip(address, domain, '@', 'A')\n else:\n LOG.info('Record up-to-date %s (%s)', domain, address)\n LOG.debug(\"complete\")",
"async def discover_catalog_hook(\n self,\n plugin_invoker: PluginInvoker,\n exec_args: Tuple[str, ...] = (),\n ):\n # Discover only in sync mode (i.e. no args)\n if exec_args:\n return\n\n try:\n await self.discover_catalog(plugin_invoker)\n except PluginLacksCapabilityError:\n pass",
"def run(self):\n print \"running presence detection\"\n r = rospy.Rate(10)\n time.sleep(2)\n\n while not rospy.is_shutdown():\n if self.running:\n self.find_new_people()\n self.follow_people()\n\n r.sleep()",
"def do_resolve(self,args):\n try:\n for solution in self.resolve_all(args):\n self.print_solution(solution)\n except:\n traceback.print_exc(file=sys.stdout)",
"def run_everything(self):\n try:\n if self.database == \"genome\":\n self.genome_deprecation()\n return\n\n record = self.ncbi_search(self.database, self.term)\n count = record[\"count\"]\n self.original_count = count\n\n self.main_organizer(count, record[\"qkey\"], record[\"webenv\"])\n except ProgramDone:\n return",
"def run(self):\n\n from dials.algorithms.refinement.refiner import phil_scope\n params = phil_scope.fetch(source=phil.parse('')).extract()\n\n # disable outlier rejection for speed of refiner construction\n params.refinement.reflections.outlier.algorithm='null'\n\n refiner = RefinerFactory.from_parameters_data_experiments(params,\n self._reflections, self._experiments)\n\n d1 = self._experiments[0].detector\n d2 = refiner.get_experiments()[0].detector\n\n assert d1.is_similar_to(d2)\n print \"OK\"\n return",
"def main():\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument(\"name\", nargs=\"+\",\n help=\"DNS name(s) to look up\")\n argument_parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n program_args = argument_parser.parse_args()\n fuckall = []\n for a_domain_name in program_args.name:\n if a_domain_name not in fuckall:\n print_results(collect_results(a_domain_name))\n fuckall.append(a_domain_name)",
"def discover(self):\n\n # Get the Huge Page configuration\n self.get_hugepages()\n\n # Get the device configuration\n self.get_devices_per_node()\n\n # Get the CPU configuration\n self.get_cpu()\n\n # Get the current grub cmdline\n self.get_grub()",
"def cli(ctx: Configuration, raw):\n from netdisco.discovery import NetworkDiscovery\n\n click.echo(\"Running discovery on network (might take a while)...\")\n netdiscovery = NetworkDiscovery()\n netdiscovery.scan()\n\n for device in netdiscovery.discover():\n info = netdiscovery.get_info(device)\n click.echo(\"{}:\\n{}\".format(device, format_output(ctx, info)))\n\n if raw:\n click.echo(\"Raw data:\")\n netdiscovery.print_raw_data()\n\n netdiscovery.stop()",
"async def discover(*args):\n # Since discovery needs to connect to all discovered bluetooth devices, and\n # only rules out devices after a timeout, it can potentially take a long\n # time. If there's already a discovery running, just skip this poll.\n if hass.data[DOMAIN][\"discovery\"].locked():\n return\n\n async with hass.data[DOMAIN][\"discovery\"]:\n bluetooth_devices = await hass.async_add_executor_job(\n pykulersky.discover_bluetooth_devices\n )\n\n # Filter out already connected lights\n new_devices = [\n device\n for device in bluetooth_devices\n if device[\"address\"] not in hass.data[DOMAIN][\"devices\"]\n ]\n\n for device in new_devices:\n light = pykulersky.Light(device[\"address\"], device[\"name\"])\n try:\n # If the connection fails, either this is not a Kuler Sky\n # light, or it's bluetooth connection is currently locked\n # by another device. If the vendor's app is connected to\n # the light when home assistant tries to connect, this\n # connection will fail.\n await hass.async_add_executor_job(check_light, light)\n except pykulersky.PykulerskyException:\n continue\n # The light has successfully connected\n hass.data[DOMAIN][\"devices\"].add(device[\"address\"])\n async_add_entities([KulerskyLight(light)], update_before_add=True)",
"async def run_discovery(self, plugin_invoker: PluginInvoker, catalog_path: Path):\n if not \"discover\" in plugin_invoker.capabilities:\n raise PluginLacksCapabilityError(\n f\"Extractor '{self.name}' does not support catalog discovery (the `discover` capability is not advertised)\"\n )\n\n try:\n with catalog_path.open(mode=\"wb\") as catalog:\n handle = await plugin_invoker.invoke_async(\n \"--discover\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n universal_newlines=False,\n )\n\n invoke_futures = [\n asyncio.ensure_future(_stream_redirect(handle.stdout, catalog)),\n asyncio.ensure_future(handle.wait()),\n ]\n\n if logger.isEnabledFor(logging.DEBUG) and handle.stderr:\n invoke_futures.append(\n _debug_logging_handler(self.name, plugin_invoker, handle.stderr)\n )\n\n done, _ = await asyncio.wait(\n invoke_futures,\n return_when=asyncio.ALL_COMPLETED,\n )\n failed = [future for future in done if future.exception() is not None]\n if failed:\n failed_future = failed.pop()\n raise failed_future.exception()\n exit_code = handle.returncode\n except Exception:\n catalog_path.unlink()\n raise\n\n if exit_code != 0:\n catalog_path.unlink()\n raise PluginExecutionError(\n f\"Catalog discovery failed: command {plugin_invoker.exec_args('--discover')} returned {exit_code}\"\n )",
"def main():\n\t#ps = PackageScanner()\n\t#packages = ps.getInstalledPackages()\n\t#print(packages)\n\t#ps.saveScanResults()\n\n\tan = Analyzer()\n\tan.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)\n\t#an.loadFromPackageCont(packages)\n\tan.analyze()\n\tan.saveAnalysisResults()",
"def main():\n rospy.init_node('dibujo_server', anonymous=True)\n\n ruta = Ruta()\n s = rospy.Service('dibujo', Dibujo, ruta.dibujo)\n print('========= Waiting for service ========')\n rospy.spin()",
"def discover(self):\n\t\t\n\t\tremote_address = None if self.remote_address is None else socket.gethostbyname(self.remote_address)\n\t\t\n\t\t# Find remote source\n\t\tif remote_address and remote_source:\n\t\t\tremote_source_info = avahi_browse(ZEROCONF_TYPE_PA_SOURCE, address=remote_address, device=remote_source, wait=5)\n\t\t\tif len(remote_source_info) == 0:\n\t\t\t\traise Exception('Source %s at %s not found.' % (remote_source, remote_address))\n\t\t\telse:\n\t\t\t\tself.remote_source_info = remote_source_info[0]\n\t\telse:\n\t\t\tself.remote_source_info = select_avahi_device('source', address=remote_address)\n\t\t\n\t\t# Find remote sink\n\t\tif remote_address and remote_sink:\n\t\t\tremote_sink_info = avahi_browse(ZEROCONF_TYPE_PA_SINK, address=remote_address, device=remote_sink, wait=5)\n\t\t\tif len(remote_sink_info) == 0:\n\t\t\t\traise Exception('Sink %s at %s not found.' % (remote_sink, remote_address))\n\t\t\telse:\n\t\t\t\tself.remote_sink_info = remote_sink_info[0]\n\t\telse:\n\t\t\tself.remote_sink_info = select_avahi_device('sink', address=remote_address)"
] | [
"0.64019483",
"0.6259512",
"0.58157563",
"0.57681084",
"0.5739528",
"0.5714246",
"0.56753683",
"0.564835",
"0.55920976",
"0.5574409",
"0.55319387",
"0.5502363",
"0.54179734",
"0.54072213",
"0.5403968",
"0.5373921",
"0.5297729",
"0.5259509",
"0.5221286",
"0.5197727",
"0.5170125",
"0.51644826",
"0.5163888",
"0.5155382",
"0.5133519",
"0.51325893",
"0.51087916",
"0.5094701",
"0.50669545",
"0.50649226"
] | 0.6932293 | 0 |
Command is called by id_box. sets the value of the device"s identify field based on the value of id_box. | def identify(self):
if self.cur_uid is None:
return
self.ola_thread.rdm_set(self.universe.get(), self.cur_uid, 0,
"IDENTIFY_DEVICE",
lambda b, s, uid = self.cur_uid:self._rdm_set_complete(uid, b, s),
[self.id_state.get()]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def identify(self):\n self.__send_short(self.MGMSG_MOD_IDENTIFY, 0x00, 0x00)",
"def test_id_set(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n css = mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n css.return_value = True\n csi = mocker.patch('pysds011.driver.SDS011.cmd_set_id')\n runner = CliRunner()\n result = runner.invoke(main, ['--id', 'cccc', 'id', 'abcd'])\n\n # Two calls to cmd_set_sleep:\n # the first use original address\n # at the second one, the sensor has a new address: so sleep\n # has the use the new one\n calls = [call(0, id=b'\\xcc\\xcc'), call(1, id=b'\\xab\\xcd')]\n css.assert_has_calls(calls, any_order=False)\n csi.assert_called_once_with(id=b'\\xcc\\xcc', new_id=b'\\xab\\xcd')\n assert result.exit_code == 0",
"def device_selected(self, uid):\n if uid == self.cur_uid:\n print \"Already Selected\"\n return\n # This line is going to return \"DEVICE_LABEL\" so you may as well skip it\n pid_key = \"DEVICE_LABEL\"\n self.dev_label.set(\"%s (%s)\"%(self._uid_dict[uid][pid_key][\"label\"], uid))\n self.ola_thread.rdm_get(self.universe.get(), uid, 0, \"IDENTIFY_DEVICE\", \n lambda b, s, uid = uid:self._get_identify_complete(uid, b, s))\n\n if \"SUPPORTED_PARAMETERS\" not in self._uid_dict[uid]:\n self.ola_thread.rdm_get(\n self.universe.get(), uid, 0, \"SUPPORTED_PARAMETERS\",\n lambda b, l, uid = uid:self._get_pids_complete(uid, b, l))\n else:\n self._notebook.Update()\n self.cur_uid = uid",
"async def identify(self):\n await self.send({\n \"op\": 2,\n \"d\" : {\n \"token\" : self.client.token,\n \"properties\": {\n \"$os\" : platform,\n \"$browser\": \"SpeedCord\",\n \"$device\" : \"SpeedCord\"\n },\n \"intents\" : self.client.intents,\n \"shard\" : (self.id, self.client.shard_count)\n }\n })",
"def script_set_device(self,udid=None):\n self.desired_caps['udid'] = udid;",
"def setDeviceID(self, id, unitCode=0):\n resp = self.XAPCommand('DID', id, unitCode=unitCode)\n return int(resp)",
"def fillQuickList():\n global quickList\n cmd = \"/sbin/blkid\"\n proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n for line in proc.stdout:\n line = line.replace(':', '').strip()\n propList = line.split()\n devName = label = uuid = fsType = ''\n devName = propList[0]\n for property in propList:\n if property.startswith('UUID'):\n uuid = property.replace('UUID=', '').replace('\"', '')\n quickList[devName] = uuid",
"def test_update_device_by_id(self):\n pass",
"def test_partially_update_device_by_id(self):\n pass",
"def _set_id(self, value):\n pass",
"def SetGPU(id):\n global option\n option['device_id'] = id",
"def do_id (self, line) :\n\t\tprint \"\tuid=%s(%s)\tgid=%s\"\t% (self.__image['meta']['UID'], self.__image['meta']['user'], self.__image['meta']['GID'] )",
"def _add_device(self, uid, succeeded, data):\n # TODO: Bug: on discover the label in the label in the device option menu \n # doesn't change and if you try to select the first device it tells \n # you that it is already selected\n if succeeded:\n self._uid_dict.setdefault(uid, {})[\"DEVICE_LABEL\"] = data\n self.device_menu[\"menu\"].add_command( label = \"%s (%s)\"%(\n self._uid_dict[uid][\"DEVICE_LABEL\"][\"label\"], uid), \n command = lambda:self.device_selected(uid))\n else:\n self._uid_dict.setdefault(uid, {})[\"DEVICE_LABEL\"] = {\"label\":\"\"}\n self.device_menu[\"menu\"].add_command( label = \"%s\" % uid, \n command = lambda:self.device_selected(uid))\n self._uid_dict[uid][\"index\"] = self.device_menu[\"menu\"].index(tk.END)",
"def device_id(self):\n data = fcntl.ioctl(self._fd, _EVIOCGID, '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n idbus, idvendor, idproduct, idversion = struct.unpack(\"hhhh\", data)\n return idbus, idvendor, idproduct, idversion",
"def updateDevice(self, *args):\r\n\r\n # Update the list of vision choices and the default vision choice\r\n self._appChoice[\"vision\"] = [choice[0] for choice in self._system[self._appString[\"device\"].get()]]\r\n self._appString[\"vision\"].set(self._appChoice[\"vision\"][0])\r\n\r\n # Delete the old choices fromt the option menu\r\n menu = self._appOption[\"vision\"][\"menu\"]\r\n menu.delete(0, \"end\")\r\n\r\n # Add the new list of choices to the option menu\r\n for string in self._appChoice[\"vision\"]:\r\n menu.add_command(label=string, command=lambda value=string: self._appString[\"vision\"].set(value))",
"def set_id(self, refobj, identifier):\n cmds.setAttr(\"%s.identifier\" %refobj, identifier)",
"def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")",
"def do_Device (self, line):",
"def test_update_device_by_id1(self):\n pass",
"def onButton(self):\n \n s = self.id_entry.get().strip()\n if len(s) < 3: # Require at least 3 characters in an id.\n return\n \n self.answer = g.app.leoID = s\n self.top.destroy() # terminates wait_window",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def test_partially_update_device_by_id1(self):\n pass",
"def _set_id(self):\n raise NotImplementedError()",
"def set_ID(self, x):\n x = str(x)\n if self.ID != x:\n self.ID = x",
"def test_id_is_set_without_original_id(mocker):\n runner = CliRunner()\n result = runner.invoke(main, ['id', 'abcd'])\n assert result.exit_code != 0",
"def id(self, val: str) -> None:\n\n self._id = val",
"def define_box_location(self):\n self.contents['Box_ID'] = np.ones(self.numatom) * self.num_box"
] | [
"0.6012484",
"0.56254315",
"0.5605983",
"0.54867625",
"0.54489803",
"0.5412264",
"0.54017085",
"0.5397051",
"0.5396992",
"0.53842187",
"0.5315069",
"0.5282258",
"0.52782863",
"0.526158",
"0.5250456",
"0.52348113",
"0.5213762",
"0.52123815",
"0.51809037",
"0.5173153",
"0.5172777",
"0.5172777",
"0.5172777",
"0.5172777",
"0.5103172",
"0.5086244",
"0.50836134",
"0.5075052",
"0.50038505",
"0.5003298"
] | 0.64114225 | 0 |
callback for the rdm_get in upon_discover. populates self.device_menu | def _add_device(self, uid, succeeded, data):
# TODO: Bug: on discover the label in the label in the device option menu
# doesn't change and if you try to select the first device it tells
# you that it is already selected
if succeeded:
self._uid_dict.setdefault(uid, {})["DEVICE_LABEL"] = data
self.device_menu["menu"].add_command( label = "%s (%s)"%(
self._uid_dict[uid]["DEVICE_LABEL"]["label"], uid),
command = lambda:self.device_selected(uid))
else:
self._uid_dict.setdefault(uid, {})["DEVICE_LABEL"] = {"label":""}
self.device_menu["menu"].add_command( label = "%s" % uid,
command = lambda:self.device_selected(uid))
self._uid_dict[uid]["index"] = self.device_menu["menu"].index(tk.END) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_menu_connect():\n\n ServerSockets.get_menuitems_by_category(True)",
"def device_infomation_submenu(dev, pdmssp=False):\n \n loop = True\n menu.clear()\n\n while loop:\n print(\"Device Information Menu\")\n print(\"=======================\")\n print(\"1. Show Device Info\")\n print(\"2. Show Device Stats\")\n print(\"3. Show Network Info\")\n print(\"4. Show Network Stats\")\n print(\"5. Show Line Info\")\n print(\"6. Show Running Config\")\n print(\"7. Show Session Stats\")\n print(\"8. Show Call Logs\")\n print(\"0. Exit\")\n choice = input(\"Enter your choice[0-7]: \")\n\n if choice == \"1\":\n ## Show device info\n menu.clear()\n print(f\"Device Info - {dev._qpaths_dict['deviceinfov2']}\")\n print(\"======================================\")\n res = dev.getDeviceInfoV2(pdmssp)\n print(\"\") \n if res != None: \n menu.display_dict(res[\"data\"][\"body\"][\"data\"]) if pdmssp else menu.display_dict(res[\"data\"])\n input(\"\\nPress Enter to continue...\")\n menu.clear()\n elif choice == \"2\":\n # Show device stats\n menu.clear()\n print(f\"Device Stats - {dev._qpaths_dict['devicestats']}\")\n print(\"========================================\")\n res = dev.getDeviceStats(pdmssp)\n print(\"\") \n if res != None:\n menu.display_dict(res[\"data\"][\"body\"][\"data\"]) if pdmssp else menu.display_dict(res[\"data\"])\n input(\"\\nPress Enter to continue...\")\n menu.clear()\n elif choice == \"3\":\n # Show network info\n menu.clear()\n print(f\"Network Info - {dev._qpaths_dict['network']}\")\n print(\"========================================\")\n res = dev.getNetwork(pdmssp)\n print(\"\") \n if res != None:\n menu.display_dict(res[\"data\"][\"body\"][\"data\"]) if pdmssp else menu.display_dict(res[\"data\"])\n input(\"\\nPress Enter to continue...\")\n menu.clear()\n elif choice == \"4\":\n # Show network stats\n menu.clear()\n print(f\"Network Stats - {dev._qpaths_dict['networkstats']}\")\n print(\"==========================================\")\n res = dev.getNetworkStats(pdmssp)\n print(\"\") \n if res != None:\n menu.display_dict(res[\"data\"][\"body\"][\"data\"]) if pdmssp else menu.display_dict(res[\"data\"])\n input(\"\\nPress Enter to continue...\")\n menu.clear()\n elif choice == \"5\":\n # Show line info\n menu.clear()\n print(f\"Line Info - {dev._qpaths_dict['lineinfov2']}\")\n print(\"=================================\") \n res = dev.getLineInfoV2(pdmssp)\n print(\"\") \n if res != None:\n menu.display_dict(res[\"data\"][\"body\"][\"data\"]) if pdmssp else menu.display_dict(res[\"data\"])\n input(\"\\nPress Enter to continue...\")\n menu.clear() \n elif choice == \"6\":\n # Show Running info\n menu.clear()\n print(f\"Running Config - {dev._qpaths_dict['runningConfig']}\")\n print(\"==================================================\") \n res = dev.getRunningConfig(pdmssp)\n print(\"\")\n if res != None:\n menu.display_dict(res[\"data\"][\"body\"][\"data\"]) if pdmssp else menu.display_dict(res[\"data\"])\n input(\"\\nPress Enter to continue...\")\n menu.clear()\n elif choice == \"7\":\n # Show session stats\n menu.clear()\n print(f\"Session Stats - {dev._qpaths_dict['sessionStats']}\")\n print(\"===============================================\") \n res = dev.getSessionStats(pdmssp)\n print(\"\")\n if res != None:\n menu.display_dict(res[\"data\"][\"body\"][\"data\"]) if pdmssp else menu.display_dict(res[\"data\"])\n input(\"\\nPress Enter to continue...\")\n menu.clear() \n elif choice == \"8\":\n # Show call logs \n menu.clear() \n calllogs_submenu(dev, pdmssp)\n menu.clear()\n elif choice == \"0\":\n # Exit menu\n loop = False\n else:\n 
print(f\"Invalid input '{choice}' >> Expecting [0-7].\")\n time.sleep(1)\n menu.clear()",
"def DebugMenuProviderMixin_on_menus_update(self):\n self._DebugMenuProviderMixin_clear_menu_actions() # clear the existing menu actions\n \n ## Update Drivers Menu:\n curr_drivers_items = list(self.connection_man.registered_available_drivers.keys())\n for a_driver_key in curr_drivers_items:\n self.activeMenuReference.active_drivers_menu.addAction(a_driver_key)\n ## Update Drivable Menu:\n curr_drivable_items = list(self.connection_man.registered_available_drivables.keys())\n for a_driveable_key in curr_drivable_items:\n self.activeMenuReference.active_drivables_menu.addAction(a_driveable_key)\n ## Update Connections Menu:\n curr_connections_descriptions = list([a_conn_ref.description for a_conn_ref in self.connection_man.active_connections.values()])\n for a_connection_key in curr_connections_descriptions:\n self.activeMenuReference.active_connections_menu.addAction(a_connection_key)",
"def _handler_discover(self, *args, **kwargs):\n next_state = None\n next_agent_state = None\n \n # Try to break in case we are in auto sample\n self._send_break() \n\n next_state = ProtocolState.COMMAND\n next_agent_state = ResourceAgentState.IDLE\n\n self._go_to_root_menu()\n \n return (next_state, next_agent_state)",
"def _on_message(self, hwnd: int, msg: int, wparam: int, lparam: int):\n if msg != win32con.WM_DEVICECHANGE:\n return 0\n event, description = self.WM_DEVICECHANGE_EVENTS[wparam]\n print(f'Received message: {event} = {description}')\n if event in ('DBT_DEVICEARRIVAL'):\n print('A device has been plugged in')\n self.on_change(self.list_drives())\n # elif event in ('DBT_DEVICEREMOVECOMPLETE'):\n # logger.info('A device has been plugged out')\n # self.on_change(self.list_drives())\n return 0",
"def device_selected(self, uid):\n if uid == self.cur_uid:\n print \"Already Selected\"\n return\n # This line is going to return \"DEVICE_LABEL\" so you may as well skip it\n pid_key = \"DEVICE_LABEL\"\n self.dev_label.set(\"%s (%s)\"%(self._uid_dict[uid][pid_key][\"label\"], uid))\n self.ola_thread.rdm_get(self.universe.get(), uid, 0, \"IDENTIFY_DEVICE\", \n lambda b, s, uid = uid:self._get_identify_complete(uid, b, s))\n\n if \"SUPPORTED_PARAMETERS\" not in self._uid_dict[uid]:\n self.ola_thread.rdm_get(\n self.universe.get(), uid, 0, \"SUPPORTED_PARAMETERS\",\n lambda b, l, uid = uid:self._get_pids_complete(uid, b, l))\n else:\n self._notebook.Update()\n self.cur_uid = uid",
"def on_menu_item(self, e):\n if e.Id == ids.RESTORE:\n wx.PostEvent(self.app.roster, ev.ShowRoster())\n elif e.Id == ids.HIDE:\n wx.PostEvent(self.app.roster, ev.HideRoster())\n elif e.Id == ids.EXIT:\n wx.PostEvent(self.app.roster, wx.MenuEvent(\n wx.wxEVT_COMMAND_MENU_SELECTED, wx.ID_EXIT))\n elif e.Id == ids.PREFERENCES:\n wx.PostEvent(self.app.roster, wx.MenuEvent(\n wx.wxEVT_COMMAND_MENU_SELECTED, ids.PREFERENCES))\n elif e.Id == ids.OFFLINE:\n wx.PostEvent(self.app, ev.ChangePresence(status='offline'))\n elif e.Id == ids.AWAY:\n wx.PostEvent(self.app, ev.ChangePresence(status='away'))\n elif e.Id == ids.ONLINE:\n wx.PostEvent(self.app, ev.ChangePresence(status='online'))",
"def _syncDisplayMenu(ned, menu):\n pass",
"def load_devices():",
"def device_discovered():\n event.set()",
"def _discover(self, event):\n entities = self._translate_event(event)\n if entities:\n return None\n\n selection = event[\"data\"].get(\"selection\")\n if not selection:\n return None\n\n return {\n \"items\": [{\n \"label\": self.label,\n \"variant\": self.variant,\n \"description\": self.description,\n \"actionIdentifier\": self.discover_identifier,\n \"icon\": self.icon,\n }]\n }",
"def DebugMenuProviderMixin_on_buildUI(self):\n self._DebugMenuProviderMixin_build_menus()\n self._DebugMenuProviderMixin_build_actions() # the actions actually depend on the existance of the menus for this dynamic menu case",
"def get_device(self):\n self.connect_button = 1\n self.device_name = self.deviceEntry.text()",
"def __init__(self):\r\n Device.__init__(self)\r\n\r\n self.menu.addAction(\"Restart\", self.restart)\r\n self.menu.addAction(\"Stop\", self.terminate)",
"def get_menus():\n\n pass",
"def _run(self):\n\n self.sendStatus(C.MSG_PROBE, 'Detecting Management Interface')\n\n data = self.getData()\n host = data.p.host\n for params in data.p.interfacesList:\n port = params['port']\n interfaceHref = params['interfaceHref']\n self.sendStatus(C.MSG_PROBE, 'Checking %s:%s' % (host, port))\n if self._queryService(host, port):\n self._sendResponse(data, interfaceHref, port)\n self.sendStatus(C.OK, 'Found management interface on %s:%s'\n % (host, port))\n return\n\n self._sendResponse(data)\n self.sendStatus(C.OK_1, 'No management interface discovered')",
"def MyMenuHandlerCallback(self, inMenuRef, inItemRef):\r\n if (self.DataRef != 0):\r\n \"\"\"\r\n We read the data ref, add the increment and set it again.\r\n This changes the nav frequency.\r\n \"\"\"\r\n XPLMSetDatai(self.DataRef, XPLMGetDatai(self.DataRef) + inItemRef)\r\n pass",
"def laser_cb(self, msg):\n #rospy.loginfo(\"Received new scan\")\n self.laser = msg",
"def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")",
"def select(self):\n if not self._selected:\n \tself._selected = True\n\t\tself.log(\"device {} is now selected\".format(self._secondary_address))",
"def Do(self):\n # type: (MenuContext) -> None\n raise NotImplementedError",
"def get_menu_items():\n\n pass",
"def load_device():",
"def _createDisplayMenu(ned, menu):\n pass",
"def selector(self):\n if self.selectedUnit:\n if not self.map.hasUnselectedUnitAt(self.pos):\n self.menu = Menu.Menu([], MENU_POSITION)\n #self.menuGroup.add(self.menu)\n self.selectedUnit.setNeighbors(self.map.getNeighbors(self.selectedUnit))\n if self.selectedUnit.hasUnfriendlyNeighbors():\n self.menu.add(Menu.MenuComponent(\" Attack\", self.startAttackMode))\n if self.selectedUnit.canCapture(self.pos):\n self.menu.add(Menu.MenuComponent(\" Capture\", lambda: self.capture(self.selectedUnit, self.pos)))\n self.menu.add(Menu.MenuComponent(\" Wait\", self.deselectUnit))\n self.menu.add(Menu.MenuComponent(\" Cancel\", self.cancelMove))\n self.menuMode = True\n else:\n self.selectSpace()",
"def manage():\r\n print('''\\n%s at %s acting as user %s\r\n\\nDevice Management Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Hardware Configuration Menu\r\n 2 - Rule and Port Group Configuration Menu\r\n 3 - App Configuration Menu\r\n 4 - Savepoint Configuration Menu\r\n 5 - User Management Menu\r\n 6 - Back\r\n 7 - Quit \\n\r\n Enter the number of the selection to check: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n manage()\r\n menus = {1: hardwareconfig,\r\n 2: ruleconfig,\r\n 3: appconfig,\r\n 4: saveconfig,\r\n 5: userconfig,\r\n 6: topmenu,\r\n 7: exit}\r\n try:\r\n select = menus[choice]\r\n select()\r\n except KeyError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n manage()",
"def on_hid_pnp(self, hid_event = None):\r\n # keep old reference for UI updates\r\n old_device = self.device\r\n\r\n if hid_event:\r\n print(\"Hey, a hid device just %s!\" % hid_event)\r\n \r\n if hid_event == \"connected\":\r\n # test if our device is available\r\n if self.device:\r\n # see, at this point we could detect multiple devices!\r\n # but... we only want just one\r\n pass\r\n else:\r\n self.test_for_connection()\r\n elif hid_event == \"disconnected\":\r\n # the hid object is automatically closed on disconnection we just\r\n # test if still is plugged (important as the object might be\r\n # closing)\r\n if self.device and not self.device.is_plugged():\r\n self.device = None\r\n print(\"you removed my hid device!\")\r\n else:\r\n # poll for devices\r\n self.test_for_connection()\r\n\r\n if old_device != self.device:\r\n # update ui\r\n pass",
"def discover(self):\n self.ola_thread.run_discovery(self.universe.get(), self._upon_discover)\n if self.auto_disc.get():\n self.ola_thread.add_event(5000, self.discover)\n else: \n print \"auto_disc is off\"",
"def do_Device (self, line):",
"def build_discovery_items(self):\n\n # discovery disk names\n self._lld_disk_names()"
] | [
"0.6013685",
"0.57967615",
"0.57610536",
"0.5615194",
"0.5563712",
"0.5537211",
"0.5413569",
"0.53911567",
"0.5368401",
"0.5362007",
"0.5357612",
"0.53087044",
"0.52785635",
"0.527515",
"0.5239037",
"0.5237308",
"0.52293265",
"0.5228126",
"0.52249765",
"0.5221598",
"0.51543856",
"0.514569",
"0.5137841",
"0.51367813",
"0.513656",
"0.5100266",
"0.5092275",
"0.5084877",
"0.502927",
"0.5023493"
] | 0.59731305 | 1 |
callback for the rdm_set in identify. | def _rdm_set_complete(self, uid, succeded, value):
print "value: %s" % value
print "rdm set complete" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def identify(self):\n if self.cur_uid is None:\n return\n self.ola_thread.rdm_set(self.universe.get(), self.cur_uid, 0, \n \"IDENTIFY_DEVICE\", \n lambda b, s, uid = self.cur_uid:self._rdm_set_complete(uid, b, s), \n [self.id_state.get()])",
"def _get_identify_complete(self, uid, succeeded, value):\n if succeeded: \n self.id_state.set(value[\"identify_state\"])",
"def do_it(did_list):\n d_to_return.callback(deferred_id)\n return process_did_list(did_list)",
"def do_it(did_list):\n d_to_return.callback(deferred_id)\n return process_did_list(did_list)",
"def setpointCallback(self,setpoint):\n if not self.setpoint_valid:\n rospy.loginfo(\"First setpoint received.\")\n self.setpoint_valid = True\n self.set_pose = setpoint\n if not self.enabled:\n rospy.logwarn(\"{}: PIDs not enabled, please call 'rosservice call {} true'\".format(rospy.get_name(),rospy.resolve_name('~enable')))\n rospy.loginfo('{}: Changed setpoint to: {}'.format(rospy.get_name(), setpoint.pose))",
"def after_each(self, dataset: pydicom.dataset.Dataset) -> None:",
"def _collect_set(self, pidset):",
"def guide_identify(self, sub, obj, t, r=None, v=None):\n pass",
"def set_modified_callback(self, callback):\n slw = super(RadioButtonListWalker, self)\n urwid.connect_signal(slw, 'modified', callback)\n return",
"def get_cmdset_callback(cmdset):\n string = self.format_output(obj, cmdset)\n self.msg(string.strip())",
"def onApply(self):\n qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)\n masterVolumeNode = slicer.vtkMRMLScalarVolumeNode()\n slicer.mrmlScene.AddNode(masterVolumeNode)\n slicer.vtkSlicerSegmentationsModuleLogic.CopyOrientedImageDataToVolumeNode(self.getClippedMasterImageData(),\n masterVolumeNode)\n try:\n self.logic.launchLiverSegmentation(masterVolumeNode, use_cuda=self.device.currentText == \"cuda\",\n modality=self.modality.currentText)\n\n self.scriptedEffect.saveStateForUndo()\n self.scriptedEffect.modifySelectedSegmentByLabelmap(\n slicer.vtkSlicerSegmentationsModuleLogic.CreateOrientedImageDataFromVolumeNode(masterVolumeNode),\n slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet)\n\n except Exception as e:\n qt.QApplication.restoreOverrideCursor()\n slicer.util.errorDisplay(str(e))\n\n finally:\n qt.QApplication.restoreOverrideCursor()\n slicer.mrmlScene.RemoveNode(masterVolumeNode)",
"def mycallback(arg):\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))",
"def dset_in_dmr(dname, doc, dapns):\n parts = PurePosixPath(dname).parts\n\n namespaces = {'dap': dapns}\n\n # Find the dataset's group node...\n xpath = '/dap:Dataset'\n for grp in parts[1:-1]:\n xpath += '/dap:Group[@name=\"{}\"]'.format(grp)\n grp_nodes = doc.xpath(xpath, namespaces=namespaces)\n if len(grp_nodes) != 1:\n raise ValueError('XPath \"%s\" found %d group nodes' %\n (xpath, len(grp_nodes)))\n\n # Find the dataset's node...\n xpath = ('(dap:Char | dap:Byte | dap:Int8 | dap:UInt8 | '\n 'dap:Int16 | dap:UInt16 | dap:Int32 | dap:UInt32 |'\n 'dap:Int64 | dap:UInt64 | dap:Float32 | dap:Float64 |'\n 'dap:String)[@name=\"{}\"]'\n .format(parts[-1]))\n dset_nodes = grp_nodes[0].xpath(xpath, namespaces=namespaces)\n if len(dset_nodes) != 1:\n raise ValueError('XPath \"%s\" found %d variable nodes' %\n (xpath, len(dset_nodes)))\n\n return dset_nodes[0]",
"def on_rfid(self):\n pass",
"def ml_run(self, run_id):\n raise NotImplementedError()",
"def callback(self):\n pass # pragma: no cover",
"def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))",
"def register_iden_progress_changed_callback(self, callback=None):\r\n return self._arm.register_iden_progress_changed_callback(callback=callback)",
"def before_each(self, dataset: pydicom.dataset.Dataset) -> None:",
"def RecordedMachineIdent(self): # real signature unknown; restored from __doc__\n pass",
"def pass_selection_dR(self, dR):\n dR_max = self.cut_dict[self.sample_name][\"dR_cut\"]\n return self.pass_selection_val(val=dR, val_max=dR_max)",
"def device_selected(self, uid):\n if uid == self.cur_uid:\n print \"Already Selected\"\n return\n # This line is going to return \"DEVICE_LABEL\" so you may as well skip it\n pid_key = \"DEVICE_LABEL\"\n self.dev_label.set(\"%s (%s)\"%(self._uid_dict[uid][pid_key][\"label\"], uid))\n self.ola_thread.rdm_get(self.universe.get(), uid, 0, \"IDENTIFY_DEVICE\", \n lambda b, s, uid = uid:self._get_identify_complete(uid, b, s))\n\n if \"SUPPORTED_PARAMETERS\" not in self._uid_dict[uid]:\n self.ola_thread.rdm_get(\n self.universe.get(), uid, 0, \"SUPPORTED_PARAMETERS\",\n lambda b, l, uid = uid:self._get_pids_complete(uid, b, l))\n else:\n self._notebook.Update()\n self.cur_uid = uid",
"def handle(self, rsm_ctx):\n pass",
"def setLayerPointRightSelectCallback(self, id, delta, callback):\n\n l = self.layer_mapping[id]\n l.right_callback_point_select = callback\n l.delta = delta",
"def perform_callback(self, *args, **kwargs):\n pass",
"def DM(self):",
"def on_meta_ids(self, ids):\n pass # pylint: disable=unnecessary-pass",
"def post_hook(self):\n self.mk_rg1()\n self.mk_rg2()\n self.mk_rg3()",
"def post_execute(self):",
"def snapShot_set(self, md_dat = None, sizeMethod = 'bb', mainHandleNormalizeScale=True):\n try:\n _str_func = 'snapShot_set'\n log.debug(cgmGEN.logString_start(_str_func))\n str_self = self.mNode\n \n if md_dat is None:\n md_dat = self.__snapShotDat\n \n\n\n md_ctrls = controls_get(self, define=True, form=True, prerig=True,asDict=True,getExtra=0)\n md_ctrls['base']=[self]\n \n log.debug(cgmGEN.logString_sub(_str_func, 'Gather Dat'))\n #_progressBar = CGMUI.doStartMayaProgressBar(stepMaxValue=len(ml_ctrls))\n _state = self.getEnumValueString('blockState')\n \n md_missing = []\n \n def matchControl(mDat,idx,datSet):\n def match(mObj):\n mDat['mCtrl'] = mObj\n mDat['strNew'] = mObj.mNode\n #md_dat[datSet][idx] = mDat#...force back\n ml_matched.append(mObj)\n ml_unmatched.remove(mObj)\n return mObj\n \n if datSet == 'base':\n return self\n \n mCtrl = mDat.get('mObj')\n if mCtrl.mNode:\n return match(mCtrl)\n \n #Str check\n mCandidate = cgmMeta.validateObjArg(mDat['str'],noneValid=True)\n if mCandidate:\n log.info(cgmGEN.logString_msg(_str_func, \"Str Validated | {0} == {1}\".format(mDat['nameBase'],mCandidate)))\n return match(mCandidate)\n \n for mObj in md_ctrls[datSet]:\n if mObj.getNameDict() == mDat['cgmTags']:\n log.info(cgmGEN.logString_msg(_str_func, \"cgmTag Validated | {0}\".format(mDat['cgmTags'])))\n return match(mObj)\n \n try:\n log.info(cgmGEN.logString_msg(_str_func, \"Index Validated | {0}\".format(idx)))\n mObj = match(md_ctrls[datSet][idx])\n return match(mObj)\n except:pass\n \n log.error(cgmGEN.logString_msg(_str_func, \"Missing: {0}\".format(mDat['nameBase'])))\n md_missing.append(mDat)\n return False\n \n for datSet in 'base','define','form','prerig':\n mDatSet = md_dat[datSet]\n log.info(cgmGEN.logString_msg(_str_func, \"{0}...\".format(datSet)))\n ml_matched = []\n ml_unmatched = copy.copy(md_ctrls[datSet])\n \n for ii in range(3):#3 loop to account for parentage\n log.info(cgmGEN.logString_sub(_str_func, 'Push: {0}'.format(ii)))\n for i,mDat in mDatSet.iteritems(): \n\n mCtrl = mDat.get('mCtrl')\n if not mCtrl:\n mCtrl = matchControl(mDat,i,datSet)\n \n if not mCtrl:\n log.error(cgmGEN.logString_msg(_str_func, \"Missing: {0} | pass: {1}\".format(mDat['nameBase'], ii)))\n continue\n \n str_short = mCtrl.mNode#mDat['strNew']\n log.info(cgmGEN.logString_msg(_str_func, \"{0} | {1} | {2}\".format(i,str_short,mDat)))\n \n _d = mDat\n log.debug(cgmGEN.logString_msg(_str_func, \"{0} | {1}\".format(str_short,_d)))\n \n #Scale...\n if datSet != 'base':\n _bb = mDat.get('bbSize')\n _scaleDone = False\n\n if _bb:\n if mainHandleNormalizeScale and mCtrl.getMayaAttr('cgmType') in ['blockHandle','formHandle']:\n _average = MATH.average(_bb)\n mc.scale(_average,_average,_average, str_short, absolute = True)\n _scaleDone = True\n #TRANS.scale_to_boundingBox(str_short,[_average,_average,_average],freeze=False)\n \n if not _scaleDone:\n if sizeMethod == 'bb' and _bb:\n TRANS.scale_to_boundingBox_relative(str_short,_bb,freeze=False)\n else:\n for ii,a in enumerate('xyz'):\n _a = 's'+ a\n _v = mDat.get(_a)\n if not mCtrl.isAttrConnected(_a) and _v:\n ATTR.set(str_short,_a,_v)\n else:\n log.debug(\"|{0}| >> connected scale: {1}\".format(_str_func,_a)) \n \n \"\"\"\n if not ATTR.is_locked(mCtrl.mNode,'scale'):\n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if 
_d.get('bbSize'):\n try:\n reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\"\"\" \n \n \n \n #Other...\n \n _pos = _d.get('pos')\n _noParent = _d['noParent']\n if _pos:\n try:mCtrl.p_position = _pos\n except:pass\n \n _orient = _d.get('orient')\n if _orient:\n mCtrl.p_orient = _orient\n\n #_worldScale = _d.get('worldScale')\n #if _worldScale and _noParent is not True:\n # mParent = mCtrl.p_parent\n # if mParent:\n # mCtrl.p_parent = False\n # mc.xform(mCtrl.mNode, scale = _worldScale, worldSpace = True, absolute = True)\n \n # if mParent:mCtrl.p_parent = mParent\n #else:\n\n if _state == datSet:\n break\n if ml_unmatched:\n log.warning(cgmGEN._str_subLine)\n log.info(cgmGEN.logString_msg(_str_func, \"{0} | Unmatched...\".format(datSet)))\n pprint.pprint(ml_unmatched)\n log.warning(cgmGEN._str_subLine)\n\n\n \n #rootShape_update(self)\n #pprint.pprint(vars())\n return True \n\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)\n finally:\n CGMUI.doEndMayaProgressBar()"
] | [
"0.5682611",
"0.5316892",
"0.52162796",
"0.52162796",
"0.5072463",
"0.50554055",
"0.4996405",
"0.49675164",
"0.49276254",
"0.49173698",
"0.48191282",
"0.4788057",
"0.47169998",
"0.4650642",
"0.4647563",
"0.4641984",
"0.46267664",
"0.46096426",
"0.46091643",
"0.4599053",
"0.4585009",
"0.45750168",
"0.4547638",
"0.454705",
"0.4540027",
"0.4530798",
"0.45034316",
"0.4502794",
"0.45022056",
"0.44967136"
] | 0.56141204 | 1 |
Capitalize the two DNA sequences so that the answers are not affected. function homology is used to find the interval with the highest similarity. I use double for loop, one for the position of a long sequence string, and the other for each word in the string. | def main():
long_sequence = input("Please give ne a DNA sequence to search: ")
short_sequence = input("What DNA sequence would you like to match? ")
# converts characters to uppercase
new_long_sequence = long_sequence.upper()
new_short_sequence = short_sequence.upper()
ans = homology(new_long_sequence, new_short_sequence)
print("The best match is " + ans) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def question_13(one_string: str, two_string: str) -> str:\n return one_string.capitalize() + two_string.capitalize()",
"def sequence(word1: str, word2: str) -> str:\r\n matrix = [[[0, [0, 0]] for x in range(len(word1) + 1)] for i in range(len(word2) + 1)]\r\n\r\n for i in range(1, len(word2) + 1):\r\n for j in range(1, len(word1) + 1):\r\n # compares every letter in\r\n if word2[i - 1] == word1[j - 1]:\r\n matrix[i][j][0] = 1 + matrix[i-1][j-1][0]\r\n matrix[i][j][1] = [i - 1, j - 1]\r\n else:\r\n if matrix[i - 1][j][0] > matrix[i][j - 1][0]:\r\n matrix[i][j][0] = matrix[i - 1][j][0]\r\n matrix[i][j][1] = [i - 1, j]\r\n else:\r\n matrix[i][j][0] = matrix[i][j - 1][0]\r\n matrix[i][j][1] = [i, j - 1]\r\n # the code below runs in order to extract the sequence. it starts at position (m,n)\r\n res = \"\"\r\n i = len(matrix) - 1\r\n j = len(matrix[0]) - 1\r\n while i and j != 0:\r\n if matrix[i][j][1] == [i - 1, j - 1]:\r\n res = word1[j - 1] + res\r\n i, j = matrix[i][j][1]\r\n return res",
"def match_word_sorted(code1, code2):\n list1 = code1.split(\" \")\n list2 = code2.split(\" \")\n set1 = set(list1)\n set2 = set(list2)\n common_words = set1 & set2\n try:\n common_words.remove(\"\")\n except:\n pass\n\n words_to_index = {}\n for word in common_words:\n in1 = list1.index(word)\n in2 = list2.index(word)\n words_to_index[word] = (in1, in2)\n sorted1 = OrderedDict(sorted(words_to_index.items(), key=lambda t: t[1][0])).keys()\n sorted2 = OrderedDict(sorted(words_to_index.items(), key=lambda t: t[1][1])).keys()\n\n a = Sequence(sorted1)\n b = Sequence(sorted2)\n v = Vocabulary()\n a_encoded = v.encodeSequence(a)\n b_encoded = v.encodeSequence(b)\n scoring = SimpleScoring(MATCH_SCORE, MISMATCH_SCORE)\n aligner = GlobalSequenceAligner(scoring, GAP_SCORE)\n score, encoders = aligner.align(a_encoded, b_encoded, backtrace=True)\n max_score = 0\n for i, encoded in enumerate(encoders):\n alignment = v.decodeSequenceAlignment(encoded)\n if alignment.score > max_score:\n max_score = alignment.score\n return max_score",
"def gibber(self): \n for x in self.consonants:\n if (x in self.sentence):\n \t self.sentence = self.sentence.replace(x, x+'o'+unicode(x).lower())",
"def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define 2x2 matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 0:\n result_str1 += '-'\n result_str2 += string2[j]\n j -=1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def one_away(w1, w2):\n\n if abs(len(w1) - len(w2) > 1):\n return False\n\n # i = 0\n # w1_d = {}\n # w2_d = {}\n\n # for i in w1:\n # w1_d[i] = w1.count(i)\n\n # for j in w2:\n # w2_d[j] = w2.count(j)\n\n # unmatched = set(w1_d.items())^set(w2_d.items())\n \n # if len(unmatched) > 2:\n # return False\n # return True\n \n if len(w2) > len(w1):\n w1, w2 = w2, w1\n\n # Keep track of number of wrong letters\n diff = 0\n\n # Loop over w1 with i and over w2 with j\n i = j = 0\n\n # while j < len(w2):\n\n # if w1[i] != w2[j]:\n\n # # We found a wrong letter\n # wrong += 1\n # # We'll move to the next char in the longer string.\n # i += 1\n # if wrong > 1:\n # return False\n\n # # If same length, move the next char in shorter.\n # # Otherwise, don't move in shorter string --- this\n # # will cover the case of a added letter.\n # if len(w1) == len(w2):\n # j += 1\n\n # else:\n # # Both letters match; move to next letter in both\n # i += 1\n # j += 1\n\n # return True\n\n # iterate over 1 word - shorter of the two, so there is no index out of range error\n # as i, j increments\n while j < len(w2):\n # if letter are different, add to diff variable\n if w1[i] != w2[j]:\n diff += 1\n # as soon as diff is more than 1, than it's fast fail\n if diff > 1:\n return False\n # two scenarios: if same length for both words, both go on check next \n # word\n if len(w1) == len(w2):\n i += 1\n j += 1\n \n else: #if one word is longer than the other, go on to next letter in \n # longer word, and see if it matches previous letter in shorter word\n # because this is a case where extra letter is added in the middle of long\n # word, but the rest should be the same as the shorter\n i += 1\n else:\n i += 1\n j += 1\n return True",
"def getSecondStrand(sequences):\n compDNA = []\n for dna in sequences:\n compDNAAux = dna.replace('A', 't')\n compDNAAux = compDNAAux.replace('T', 'a')\n compDNAAux = compDNAAux.replace('C', 'g')\n compDNAAux = compDNAAux.replace('G', 'c')\n compDNA.append(compDNAAux.upper())\n\n for i in range(0, len(compDNA)):\n compDNA[i] = compDNA[i][::-1]\n\n return compDNA",
"def laceStrings(s1, s2):\n # \n s3 = '' # new interlaced string\n i = 0 \n for letter in s1:\n s3 += letter\n if i < len(s2):\n s3 += s2[i]\n i+= 1\n while i < len(s2):\n s3 += s2[i]\n i+= 1 \n return s3\n # end of code ",
"def SuperString():\n input = f.LoadFile('\\\\rosalind_long.txt')\n [Labels, DNA] = f.FASTA(input)\n \n \n while len(DNA) > 2: # Repeat cycle until only one string left \n # Find pair of strings with greatest overlap\n ## Initialize overlap matrix\n overlap_matrix = []\n for i in range(len(DNA)):\n overlap_matrix.append([])\n for j in range(len(DNA)):\n overlap_matrix[i].append(0)\n \n ## Fill in with overlaps\n for i in DNA:\n for j in DNA: \n overlap_matrix[DNA.index(i)][DNA.index(j)] = Overlap(i,j)\n \n # Replace strings with max overlap with superstring\n ## Find index of max overlap + value\n max_overlap = MaxMatrix(overlap_matrix)\n ind1 = max_overlap[0]\n ind2 = max_overlap[1]\n ## Make superstring based on this info\n s = Combine(DNA[ind1],DNA[ind2])\n ## Remove shorter strings, add superstring\n x = copy.copy(DNA[ind1])\n y = copy.copy(DNA[ind2])\n DNA.remove(x)\n DNA.remove(y)\n DNA.append(s)\n \n superstring = Combine(DNA[0],DNA[1])\n f.ExportToFile('rosalind_long_output.txt', superstring) \n return",
"def DNA_strand(string):\n return string.translate(DNA)",
"def replace_word_candidate(self, word):\n capital_flag = word[0].isupper()\n word = word.lower()\n if capital_flag and word in self.teencode_dict:\n return self.replace_teencode(word).capitalize()\n elif word in self.teencode_dict:\n return self.replace_teencode(word)\n\n for couple in self.word_couples:\n for i in range(2):\n if couple[i] == word:\n if i == 0:\n if capital_flag:\n return couple[1].capitalize()\n else:\n return couple[1]\n else:\n if capital_flag:\n return couple[0].capitalize()\n else:\n return couple[0]",
"def translate(word1, key, word2):\n key = dict(zip(word1, key))\n return ''.join(key[sym] for sym in word2)",
"def _double_metaphone(st):\n vowels = ['A', 'E', 'I', 'O', 'U', 'Y']\n st = ''.join((c for c in unicodedata.normalize('NFD', st) if unicodedata.category(c) != 'Mn'))\n st = st.upper() # st is short for string. I usually prefer descriptive over short, but this var is used a lot!\n is_slavo_germanic = (st.find('W') > -1 or st.find('K') > -1 or st.find('CZ') > -1 or st.find('WITZ') > -1)\n length = len(st)\n first = 2\n st = '-' * first + st + '------' # so we can index beyond the begining and end of the input string\n last = first + length - 1\n pos = first # pos is short for position\n pri = sec = '' # primary and secondary metaphone codes\n # skip these silent letters when at start of word\n if st[first:first + 2] in [\"GN\", \"KN\", \"PN\", \"WR\", \"PS\"]:\n pos += 1\n # Initial 'X' is pronounced 'Z' e.g. 'Xavier'\n if st[first] == 'X':\n pri = sec = 'S' # 'Z' maps to 'S'\n pos += 1\n # main loop through chars in st\n while pos <= last:\n #print str(pos) + '\\t' + st[pos]\n ch = st[pos] # ch is short for character\n # nxt (short for next characters in metaphone code) is set to a tuple of the next characters in\n # the primary and secondary codes and how many characters to move forward in the string.\n # the secondary code letter is given only when it is different than the primary.\n # This is just a trick to make the code easier to write and read.\n nxt = (None, 1) # default action is to add nothing and move to next char\n if ch in vowels:\n nxt = (None, 1)\n if pos == first: # all init vowels now map to 'A'\n nxt = ('A', 1)\n elif ch == 'B':\n #\"-mb\", e.g\", \"dumb\", already skipped over... see 'M' below\n if st[pos + 1] == 'B':\n nxt = ('P', 2)\n else:\n nxt = ('P', 1)\n elif ch == 'C':\n # various germanic\n if pos > first + 1 and st[pos - 2] not in vowels and st[pos - 1:pos + 2] == 'ACH' and \\\n st[pos + 2] not in ['I'] and (st[pos + 2] not in ['E'] or st[pos - 2:pos + 4] in ['BACHER', 'MACHER']):\n nxt = ('K', 2)\n # special case 'CAESAR'\n elif pos == first and st[first:first + 6] == 'CAESAR':\n nxt = ('S', 2)\n elif st[pos:pos + 4] == 'CHIA': # italian 'chianti'\n nxt = ('K', 2)\n elif st[pos:pos + 2] == 'CH':\n # find 'michael'\n if pos > first and st[pos:pos + 4] == 'CHAE':\n nxt = ('K', 'X', 2)\n elif pos == first and (st[pos + 1:pos + 6] in ['HARAC', 'HARIS'] or \\\n st[pos + 1:pos + 4] in [\"HOR\", \"HYM\", \"HIA\", \"HEM\"]) and st[first:first + 5] != 'CHORE':\n nxt = ('K', 2)\n #germanic, greek, or otherwise 'ch' for 'kh' sound\n elif st[first:first + 4] in ['VAN ', 'VON '] or st[first:first + 3] == 'SCH' \\\n or st[pos - 2:pos + 4] in [\"ORCHES\", \"ARCHIT\", \"ORCHID\"] \\\n or st[pos + 2] in ['T', 'S'] \\\n or ((st[pos - 1] in [\"A\", \"O\", \"U\", \"E\"] or pos == first) \\\n and st[pos + 2] in [\"L\", \"R\", \"N\", \"M\", \"B\", \"H\", \"F\", \"V\", \"W\"]):\n nxt = ('K', 2)\n else:\n if pos > first:\n if st[first:first + 2] == 'MC':\n nxt = ('K', 2)\n else:\n nxt = ('X', 'K', 2)\n else:\n nxt = ('X', 2)\n # e.g, 'czerny'\n elif st[pos:pos + 2] == 'CZ' and st[pos - 2:pos + 2] != 'WICZ':\n nxt = ('S', 'X', 2)\n # e.g., 'focaccia'\n elif st[pos + 1:pos + 4] == 'CIA':\n nxt = ('X', 3)\n # double 'C', but not if e.g. 
'McClellan'\n elif st[pos:pos + 2] == 'CC' and not (pos == (first + 1) and st[first] == 'M'):\n #'bellocchio' but not 'bacchus'\n if st[pos + 2] in [\"I\", \"E\", \"H\"] and st[pos + 2:pos + 4] != 'HU':\n # 'accident', 'accede' 'succeed'\n if (pos == (first + 1) and st[first] == 'A') or \\\n st[pos - 1:pos + 4] in ['UCCEE', 'UCCES']:\n nxt = ('KS', 3)\n # 'bacci', 'bertucci', other italian\n else:\n nxt = ('X', 3)\n else:\n nxt = ('K', 2)\n elif st[pos:pos + 2] in [\"CK\", \"CG\", \"CQ\"]:\n nxt = ('K', 2)\n elif st[pos:pos + 2] in [\"CI\", \"CE\", \"CY\"]:\n # italian vs. english\n if st[pos:pos + 3] in [\"CIO\", \"CIE\", \"CIA\"]:\n nxt = ('S', 'X', 2)\n else:\n nxt = ('S', 2)\n else:\n # name sent in 'mac caffrey', 'mac gregor\n if st[pos + 1:pos + 3] in [\" C\", \" Q\", \" G\"]:\n nxt = ('K', 3)\n else:\n if st[pos + 1] in [\"C\", \"K\", \"Q\"] and st[pos + 1:pos + 3] not in [\"CE\", \"CI\"]:\n nxt = ('K', 2)\n else: # default for 'C'\n nxt = ('K', 1)\n elif ch == u'\\xc7': # will never get here with st.encode('ascii', 'replace') above\n # \\xc7 is UTF-8 encoding of Ç\n nxt = ('S', 1)\n elif ch == 'D':\n if st[pos:pos + 2] == 'DG':\n if st[pos + 2] in ['I', 'E', 'Y']: # e.g. 'edge'\n nxt = ('J', 3)\n else:\n nxt = ('TK', 2)\n elif st[pos:pos + 2] in ['DT', 'DD']:\n nxt = ('T', 2)\n else:\n nxt = ('T', 1)\n elif ch == 'F':\n if st[pos + 1] == 'F':\n nxt = ('F', 2)\n else:\n nxt = ('F', 1)\n elif ch == 'G':\n if st[pos + 1] == 'H':\n if pos > first and st[pos - 1] not in vowels:\n nxt = ('K', 2)\n elif pos < (first + 3):\n if pos == first: # 'ghislane', ghiradelli\n if st[pos + 2] == 'I':\n nxt = ('J', 2)\n else:\n nxt = ('K', 2)\n # Parker's rule (with some further refinements) - e.g., 'hugh'\n elif (pos > (first + 1) and st[pos - 2] in ['B', 'H', 'D']) \\\n or (pos > (first + 2) and st[pos - 3] in ['B', 'H', 'D']) \\\n or (pos > (first + 3) and st[pos - 3] in ['B', 'H']):\n nxt = (None, 2)\n else:\n # e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'\n if pos > (first + 2) and st[pos - 1] == 'U' \\\n and st[pos - 3] in [\"C\", \"G\", \"L\", \"R\", \"T\"]:\n nxt = ('F', 2)\n else:\n if pos > first and st[pos - 1] != 'I':\n nxt = ('K', 2)\n elif st[pos + 1] == 'N':\n if pos == (first + 1) and st[first] in vowels and not is_slavo_germanic:\n nxt = ('KN', 'N', 2)\n else:\n # not e.g. 
'cagney'\n if st[pos + 2:pos + 4] != 'EY' and st[pos + 1] != 'Y' and not is_slavo_germanic:\n nxt = ('N', 'KN', 2)\n else:\n nxt = ('KN', 2)\n # 'tagliaro'\n elif st[pos + 1:pos + 3] == 'LI' and not is_slavo_germanic:\n nxt = ('KL', 'L', 2)\n # -ges-,-gep-,-gel-, -gie- at beginning\n elif pos == first and (st[pos + 1] == 'Y' \\\n or st[pos + 1:pos + 3] in [\"ES\", \"EP\", \"EB\", \"EL\", \"EY\", \"IB\", \"IL\", \"IN\", \"IE\", \"EI\", \"ER\"]):\n nxt = ('K', 'J', 2)\n # -ger-, -gy-\n elif (st[pos + 1:pos + 3] == 'ER' or st[pos + 1] == 'Y') \\\n and st[first:first + 6] not in [\"DANGER\", \"RANGER\", \"MANGER\"] \\\n and st[pos - 1] not in ['E', 'I'] and st[pos - 1:pos + 2] not in ['RGY', 'OGY']:\n nxt = ('K', 'J', 2)\n # italian e.g, 'biaggi'\n elif st[pos + 1] in ['E', 'I', 'Y'] or st[pos - 1:pos + 3] in [\"AGGI\", \"OGGI\"]:\n # obvious germanic\n if st[first:first + 4] in ['VON ', 'VAN '] or st[first:first + 3] == 'SCH' \\\n or st[pos + 1:pos + 3] == 'ET':\n nxt = ('K', 2)\n else:\n # always soft if french ending\n if st[pos + 1:pos + 5] == 'IER ':\n nxt = ('J', 2)\n else:\n nxt = ('J', 'K', 2)\n elif st[pos + 1] == 'G':\n nxt = ('K', 2)\n else:\n nxt = ('K', 1)\n elif ch == 'H':\n # only keep if first & before vowel or btw. 2 vowels\n if (pos == first or st[pos - 1] in vowels) and st[pos + 1] in vowels:\n nxt = ('H', 2)\n else: # (also takes care of 'HH')\n nxt = (None, 1)\n elif ch == 'J':\n # obvious spanish, 'jose', 'san jacinto'\n if st[pos:pos + 4] == 'JOSE' or st[first:first + 4] == 'SAN ':\n if (pos == first and st[pos + 4] == ' ') or st[first:first + 4] == 'SAN ':\n nxt = ('H', )\n else:\n nxt = ('J', 'H')\n elif pos == first and st[pos:pos + 4] != 'JOSE':\n nxt = ('J', 'A') # Yankelovich/Jankelowicz\n else:\n # spanish pron. of e.g. 'bajador'\n if st[pos - 1] in vowels and not is_slavo_germanic \\\n and st[pos + 1] in ['A', 'O']:\n nxt = ('J', 'H')\n else:\n if pos == last:\n nxt = ('J', ' ')\n else:\n if st[pos + 1] not in [\"L\", \"T\", \"K\", \"S\", \"N\", \"M\", \"B\", \"Z\"] \\\n and st[pos - 1] not in [\"S\", \"K\", \"L\"]:\n nxt = ('J', )\n else:\n nxt = (None, )\n if st[pos + 1] == 'J':\n nxt = nxt + (2, )\n else:\n nxt = nxt + (1, )\n elif ch == 'K':\n if st[pos + 1] == 'K':\n nxt = ('K', 2)\n else:\n nxt = ('K', 1)\n elif ch == 'L':\n if st[pos + 1] == 'L':\n # spanish e.g. 'cabrillo', 'gallegos'\n if (pos == (last - 2) and st[pos - 1:pos + 3] in [\"ILLO\", \"ILLA\", \"ALLE\"]) \\\n or ((st[last - 1:last + 1] in [\"AS\", \"OS\"] or st[last] in [\"A\", \"O\"]) \\\n and st[pos - 1:pos + 3] == 'ALLE'):\n nxt = ('L', ' ', 2)\n else:\n nxt = ('L', 2)\n else:\n nxt = ('L', 1)\n elif ch == 'M':\n if (st[pos + 1:pos + 4] == 'UMB' \\\n and (pos + 1 == last or st[pos + 2:pos + 4] == 'ER')) \\\n or st[pos + 1] == 'M':\n nxt = ('M', 2)\n else:\n nxt = ('M', 1)\n elif ch == 'N':\n if st[pos + 1] == 'N':\n nxt = ('N', 2)\n else:\n nxt = ('N', 1)\n elif ch == u'\\xd1': # UTF-8 encoding of ト\n nxt = ('N', 1)\n elif ch == 'P':\n if st[pos + 1] == 'H':\n nxt = ('F', 2)\n elif st[pos + 1] in ['P', 'B']: # also account for \"campbell\", \"raspberry\"\n nxt = ('P', 2)\n else:\n nxt = ('P', 1)\n elif ch == 'Q':\n if st[pos + 1] == 'Q':\n nxt = ('K', 2)\n else:\n nxt = ('K', 1)\n elif ch == 'R':\n # french e.g. 
'rogier', but exclude 'hochmeier'\n if pos == last and not is_slavo_germanic \\\n and st[pos - 2:pos] == 'IE' and st[pos - 4:pos - 2] not in ['ME', 'MA']:\n nxt = ('', 'R')\n else:\n nxt = ('R', )\n if st[pos + 1] == 'R':\n nxt = nxt + (2, )\n else:\n nxt = nxt + (1, )\n elif ch == 'S':\n # special cases 'island', 'isle', 'carlisle', 'carlysle'\n if st[pos - 1:pos + 2] in ['ISL', 'YSL']:\n nxt = (None, 1)\n # special case 'sugar-'\n elif pos == first and st[first:first + 5] == 'SUGAR':\n nxt = ('X', 'S', 1)\n elif st[pos:pos + 2] == 'SH':\n # germanic\n if st[pos + 1:pos + 5] in [\"HEIM\", \"HOEK\", \"HOLM\", \"HOLZ\"]:\n nxt = ('S', 2)\n else:\n nxt = ('X', 2)\n # italian & armenian\n elif st[pos:pos + 3] in [\"SIO\", \"SIA\"] or st[pos:pos + 4] == 'SIAN':\n if not is_slavo_germanic:\n nxt = ('S', 'X', 3)\n else:\n nxt = ('S', 3)\n # german & anglicisations, e.g. 'smith' match 'schmidt', 'snider' match 'schneider'\n # also, -sz- in slavic language altho in hungarian it is pronounced 's'\n elif (pos == first and st[pos + 1] in [\"M\", \"N\", \"L\", \"W\"]) or st[pos + 1] == 'Z':\n nxt = ('S', 'X')\n if st[pos + 1] == 'Z':\n nxt = nxt + (2, )\n else:\n nxt = nxt + (1, )\n elif st[pos:pos + 2] == 'SC':\n # Schlesinger's rule\n if st[pos + 2] == 'H':\n # dutch origin, e.g. 'school', 'schooner'\n if st[pos + 3:pos + 5] in [\"OO\", \"ER\", \"EN\", \"UY\", \"ED\", \"EM\"]:\n # 'schermerhorn', 'schenker'\n if st[pos + 3:pos + 5] in ['ER', 'EN']:\n nxt = ('X', 'SK', 3)\n else:\n nxt = ('SK', 3)\n else:\n if pos == first and st[first + 3] not in vowels and st[first + 3] != 'W':\n nxt = ('X', 'S', 3)\n else:\n nxt = ('X', 3)\n elif st[pos + 2] in ['I', 'E', 'Y']:\n nxt = ('S', 3)\n else:\n nxt = ('SK', 3)\n # french e.g. 'resnais', 'artois'\n elif pos == last and st[pos - 2:pos] in ['AI', 'OI']:\n nxt = ('', 'S', 1)\n else:\n nxt = ('S', )\n if st[pos + 1] in ['S', 'Z']:\n nxt = nxt + (2, )\n else:\n nxt = nxt + (1, )\n elif ch == 'T':\n if st[pos:pos + 4] == 'TION':\n nxt = ('X', 3)\n elif st[pos:pos + 3] in ['TIA', 'TCH']:\n nxt = ('X', 3)\n elif st[pos:pos + 2] == 'TH' or st[pos:pos + 3] == 'TTH':\n # special case 'thomas', 'thames' or germanic\n if st[pos + 2:pos + 4] in ['OM', 'AM'] or st[first:first + 4] in ['VON ', 'VAN '] \\\n or st[first:first + 3] == 'SCH':\n nxt = ('T', 2)\n else:\n nxt = ('0', 'T', 2)\n elif st[pos + 1] in ['T', 'D']:\n nxt = ('T', 2)\n else:\n nxt = ('T', 1)\n elif ch == 'V':\n if st[pos + 1] == 'V':\n nxt = ('F', 2)\n else:\n nxt = ('F', 1)\n elif ch == 'W':\n # can also be in middle of word\n if st[pos:pos + 2] == 'WR':\n nxt = ('R', 2)\n elif pos == first and (st[pos + 1] in vowels or st[pos:pos + 2] == 'WH'):\n # Wasserman should match Vasserman\n if st[pos + 1] in vowels:\n nxt = ('A', 'F', 1)\n else:\n nxt = ('A', 1)\n # Arnow should match Arnoff\n elif (pos == last and st[pos - 1] in vowels) \\\n or st[pos - 1:pos + 4] in [\"EWSKI\", \"EWSKY\", \"OWSKI\", \"OWSKY\"] \\\n or st[first:first + 3] == 'SCH':\n nxt = ('', 'F', 1)\n # polish e.g. 'filipowicz'\n elif st[pos:pos + 4] in [\"WICZ\", \"WITZ\"]:\n nxt = ('TS', 'FX', 4)\n else: # default is to skip it\n nxt = (None, 1)\n elif ch == 'X':\n # french e.g. breaux\n nxt = (None, )\n if not(pos == last and (st[pos - 3:pos] in [\"IAU\", \"EAU\"] \\\n or st[pos - 2:pos] in ['AU', 'OU'])):\n nxt = ('KS', )\n if st[pos + 1] in ['C', 'X']:\n nxt = nxt + (2, )\n else:\n nxt = nxt + (1, )\n elif ch == 'Z':\n # chinese pinyin e.g. 
'zhao'\n if st[pos + 1] == 'H':\n nxt = ('J', )\n elif st[pos + 1:pos + 3] in [\"ZO\", \"ZI\", \"ZA\"] \\\n or (is_slavo_germanic and pos > first and st[pos - 1] != 'T'):\n nxt = ('S', 'TS')\n else:\n nxt = ('S', )\n if st[pos + 1] == 'Z' or st[pos + 1] == 'H':\n nxt = nxt + (2, )\n else:\n nxt = nxt + (1, )\n # ----------------------------------\n # --- end checking letters------\n # ----------------------------------\n #print str(nxt)\n if len(nxt) == 2:\n if nxt[0]:\n pri += nxt[0]\n sec += nxt[0]\n pos += nxt[1]\n elif len(nxt) == 3:\n if nxt[0]:\n pri += nxt[0]\n if nxt[1]:\n sec += nxt[1]\n pos += nxt[2]\n if pri == sec:\n return (pri, '')\n else:\n return (pri, sec)",
"def generateWord2(self, parameters=None):\n\n\t\t##Initial set-up\n\t\t#A syllable consists of an optional onset, a nucleus, and an optional coda\n\t\t#Sources:\n\t\t# http://en.wikipedia.org/wiki/English_phonology#Phonotactics\n\t\t# http://en.wiktionary.org/wiki/Appendix:English_pronunciation\n\t\tonsets = [\"ch\", \"pl\", \"bl\", \"cl\", \"gl\", \"pr\", \"br\", \"tr\", \"dr\", \"cr\", \"gr\", \"tw\", \"dw\", \"qu\", \"pu\",\n\t\t\t\t \"fl\", \"sl\", \"fr\", \"thr\", \"shr\", \"wh\", \"sw\",\n\t\t\t\t \"sp\", \"st\", \"sk\", \"sm\", \"sn\", \"sph\", \"spl\", \"spr\", \"str\", \"scr\", \"squ\", \"sm\"] #Plus the normal consonants\n\t\tnuclei = [\"ai\", \"ay\", \"ea\", \"ee\", \"y\", \"oa\", \"au\", \"oi\", \"oo\", \"ou\"] #Plus the normal vowels\n\t\tcodas = [\"ch\", \"lp\", \"lb\", \"lt\", \"ld\", \"lch\", \"lg\", \"lk\", \"rp\", \"rb\", \"rt\", \"rd\", \"rch\", \"rk\", \"lf\", \"lth\",\n\t\t\t\t \"lsh\", \"rf\", \"rth\", \"rs\", \"rsh\", \"lm\", \"ln\", \"rm\", \"rn\", \"rl\", \"mp\", \"nt\", \"nd\", \"nch\", \"nk\", \"mph\",\n\t\t\t\t \"mth\", \"nth\", \"ngth\", \"ft\", \"sp\", \"st\", \"sk\", \"fth\", \"pt\", \"ct\", \"kt\", \"pth\", \"ghth\", \"tz\", \"dth\",\n\t\t\t\t \"ks\", \"lpt\", \"lfth\", \"ltz\", \"lst\", \"lct\", \"lx\",\"rmth\", \"rpt\", \"rtz\", \"rst\", \"rct\",\"mpt\", \"dth\",\n\t\t\t\t \"nct\", \"nx\", \"xth\", \"xt\"] #Plus normal consonants\n\n\t\tsimpleLetterChance = 65 #percent, whether a single letter is chosen instead of an onset/nucleus/coda\n\t\tbasicLetterChance = 75 #percent, whether a simple consonant/vowel is chosen over a more rare one\n\n\t\t#Prevent unnecessary and ugly code repetition\n\n\t\t#Start the word\n\t\trepeats = 1\n\t\tif parameters and len(parameters) > 0:\n\t\t\trepeats = SharedFunctions.parseInt(parameters[0], 1, 1, 25)\n\n\t\twords = []\n\t\tfor i in xrange(0, repeats):\n\t\t\tsyllableCount = 2\n\t\t\tif random.randint(1, 100) <= 50:\n\t\t\t\tsyllableCount -= 1\n\t\t\tif random.randint(1, 100) <= 35:\n\t\t\t\tsyllableCount += 1\n\n\t\t\tword = u\"\"\n\t\t\tfor j in range(0, syllableCount):\n\t\t\t\t#In most cases, add an onset\n\t\t\t\tif random.randint(1, 100) <= 75:\n\t\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"consonant\", basicLetterChance)\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(onsets)\n\n\t\t\t\t#Nucleus!\n\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"vowel\", basicLetterChance)\n\t\t\t\telse:\n\t\t\t\t\tword += random.choice(nuclei)\n\n\t\t\t\t#Add a coda in most cases (Always add it if this is the last syllable of the word and it'd be too short otherwise)\n\t\t\t\tif (j == syllableCount - 1 and len(word) < 3) or random.randint(1, 100) <= 75:\n\t\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"consonant\", basicLetterChance)\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(codas)\n\n\t\t\tword = word[0].upper() + word[1:]\n\t\t\twords.append(word)\n\n\t\treturn u\", \".join(words)",
"def rhymes(self, a, b):\n D = self._pronunciations\n a = a.lower()\n b = b.lower()\n # print \"----------------------------------\"\n # print \"Rhyming \",a,b\n\n if a in D.keys() and b in D.keys():\n a = D[a]\n #print a\n b = D[b]\n #print b\n\n #stores syllables after the first consonant sound\n last_syl_a = []\n last_syl_b = []\n\n # for each pronunciation of the word\n for y in a:\n syl = []\n pos = 0\n for i in range(0, len(y)):\n #if vowel\n if y[i][-1].isdigit():\n pos = i\n break\n # append all syllables from first vowel\n for i in range(pos, len(y)):\n syl.append(y[i])\n\n\n last_syl_a.append(syl)\n # print(last_syl_a)\n\n # for each pronunciation of the word\n for y in b:\n syl = []\n pos = 0\n for i in range(0, len(y)):\n # if vowel\n if y[i][-1].isdigit():\n pos = i\n break\n # append all syllables after first consonant sound\n for i in range(pos, len(y)):\n syl.append(y[i])\n\n last_syl_b.append(syl)\n # print(last_syl_b)\n\n if any(i in last_syl_a for i in last_syl_b):\n # print \"Rhyming - Yes\"\n return True\n\n else:\n # print \"Checking if Shorter word is suffix of Longer word's pronunciation\"\n if len(last_syl_a[0]) > len(last_syl_b[0]):\n big = last_syl_a\n small = last_syl_b\n else:\n big = last_syl_b\n small = last_syl_a\n\n for i in big:\n for j in small:\n count = 0\n for k in range(0, len(j)):\n if j[-(k + 1)] == i[-(k + 1)]:\n count = count + 1\n if count == len(j) and count > 0:\n # print \"Rhyming - yes\", i,j\n return True\n\n return False\n else:\n # Either or Both words not in CMU Dictionary\n return False",
"def convert_ambigs(strings, alph):\n ms = alph.translator(False)\n for i in range(len(strings)):\n strings[i] = strings[i].translate(ms)\n return(strings)",
"def smoothie(s_1: str, s_2: str) -> str:\n assert isinstance(s_1, str), \"s1 needs to be a str\"\n assert isinstance(s_2, str), \"s2 needs to be a str\"\n glass: str = \"\"\n for i, j in zip(s_1, s_2[::-1]):\n glass += j + i\n return glass",
"def rhymes(self,a,b):\r\n \r\n a=a.lower()\r\n b=b.lower()\r\n if(a in self._words): ##check if A is in the dict\r\n checkA=1\r\n soundA=self._pronun[a]\r\n lenA=len(soundA)\r\n #print(soundA)\r\n else :\r\n return False\r\n if(b in self._words): ##check if B is in dict\r\n checkB=1\r\n soundB=self._pronun[b]\r\n lenB=len(soundB)\r\n #print(soundB)\r\n else:\r\n return False\r\n \r\n if((checkA==1) and (checkB==1)): ##if both in dict then move ahead\r\n #print(lenA,lenB)\r\n \r\n for countA in range(lenA):\r\n if soundA[countA][0][0] not in ['A','E','I','O','U']:\r\n soundA[countA]=soundA[countA][1:]\r\n\r\n for countA in range(lenA):\r\n soundA[countA]=''.join(soundA[countA])\r\n \r\n # print(soundA)\r\n \r\n\r\n for countB in range(lenB):\r\n if soundB[countB][0][0] not in ['A','E','I','O','U']:\r\n soundB[countB]=soundB[countB][1:]\r\n\r\n for countB in range(lenB):\r\n soundB[countB]=''.join(soundB[countB])\r\n\r\n #print(soundB)\r\n \r\n else:\r\n return False\r\n\r\n rhyme_count=0\r\n \r\n for countA in range(lenA):\r\n for countB in range(lenB):\r\n if((soundA[countA].endswith(soundB[countB]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n\r\n for countB in range(lenB):\r\n for countA in range(lenA):\r\n if((soundB[countB].endswith(soundA[countA]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n \r\n if(rhyme_count>0):\r\n #print('True') \r\n return True\r\n else:\r\n # print('False')\r\n return False",
"def sentence_similarity_asym(sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n pathsim = [synset.path_similarity(ss) for ss in synsets2]\n if len(pathsim) == 0:\n #print sentence1, sentence2\n pathsim = [0]\n best_score = max(pathsim)\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n if count == 0:\n return 0\n # Average the values\n score /= count\n return score",
"def stem(self, word):\n word = word.lower()\n\n step1_success = False\n\n # All acute accents are replaced by grave accents.\n word = (word.replace(u(\"\\xE1\"), u(\"\\xE0\"))\n .replace(u(\"\\xE9\"), u(\"\\xE8\"))\n .replace(u(\"\\xED\"), u(\"\\xEC\"))\n .replace(u(\"\\xF3\"), u(\"\\xF2\"))\n .replace(u(\"\\xFA\"), u(\"\\xF9\")))\n\n # Every occurrence of 'u' after 'q'\n # is put into upper case.\n for i in range(1, len(word)):\n if word[i - 1] == \"q\" and word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n\n # Every occurrence of 'u' and 'i'\n # between vowels is put into upper case.\n for i in range(1, len(word) - 1):\n if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:\n if word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n elif word[i] == \"i\":\n word = \"\".join((word[:i], \"I\", word[i + 1:]))\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if rv.endswith(suffix):\n if rv[-len(suffix) - 4:-len(suffix)] in (\"ando\", \"endo\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n elif (rv[-len(suffix) - 2:-len(suffix)] in\n (\"ar\", \"er\", \"ir\")):\n word = \"\".join((word[:-len(suffix)], \"e\"))\n r1 = \"\".join((r1[:-len(suffix)], \"e\"))\n r2 = \"\".join((r2[:-len(suffix)], \"e\"))\n rv = \"\".join((rv[:-len(suffix)], \"e\"))\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if word.endswith(suffix):\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2 .endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif (suffix in (\"amento\", \"amenti\",\n \"imento\", \"imenti\") and\n rv.endswith(suffix)):\n step1_success = True\n word = word[:-6]\n rv = rv[:-6]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\"azione\", \"azioni\", \"atore\", \"atori\"):\n word = word[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logie\"):\n word = word[:-2]\n rv = word[:-2]\n\n elif suffix in (\"uzione\", \"uzioni\",\n \"usione\", \"usioni\"):\n word = word[:-5]\n rv = rv[:-5]\n\n elif suffix in (\"enza\", \"enze\"):\n word = \"\".join((word[:-2], \"te\"))\n rv = \"\".join((rv[:-2], \"te\"))\n\n elif suffix == u(\"it\\xE0\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith((\"ic\", \"iv\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"ivo\", \"ivi\", \"iva\", \"ive\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 2: Verb suffixes\n if not step1_success:\n for suffix in self.__step2_suffixes:\n if rv.endswith(suffix):\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 3a\n if rv.endswith((\"a\", \"e\", \"i\", \"o\", u(\"\\xE0\"), u(\"\\xE8\"),\n u(\"\\xEC\"), 
u(\"\\xF2\"))):\n word = word[:-1]\n rv = rv[:-1]\n\n if rv.endswith(\"i\"):\n word = word[:-1]\n rv = rv[:-1]\n\n # STEP 3b\n if rv.endswith((\"ch\", \"gh\")):\n word = word[:-1]\n\n word = word.replace(\"I\", \"i\").replace(\"U\", \"u\")\n return word",
"def needleman_wunsch(\n seq1, seq2, match=1, mismatch=-1, gap_open=-5, gap_extend=-3, at_genome_start=False\n):\n alignments = pairwise2.align.globalms(\n seq1,\n seq2,\n match,\n mismatch,\n gap_open,\n gap_extend,\n )\n # Alignments is a list of tuples. Each tuple has length 5. Entries:\n # 0: seq1 alignment (ie with dashes for indels)\n # 1: seq2 alignemnt\n # 2: alignment score\n # 4, 5: don't know (not using them)\n if len(alignments) == 1:\n return alignments[0][0], alignments[0][1]\n\n if at_genome_start:\n best_pos = last_gap_end_in_string(alignments[0][1])\n else:\n best_pos = alignments[0][1].find(\"-\")\n\n best = alignments[0]\n\n for a in alignments[1:]:\n if at_genome_start:\n gap_pos = last_gap_end_in_string(a[1])\n else:\n gap_pos = a[1].find(\"-\")\n\n if gap_pos > best_pos:\n best = a\n best_pos = gap_pos\n\n return best[0], best[1]",
"def overlap(s1,s2):\n if len(s1) >= len(s2):\n longer = s1.lower()\n shorter = s2.lower()\n else:\n longer = s2.lower()\n shorter = s1.lower()\n s3 = shorter + longer # by default this is our combination \n if shorter in longer: # return if shorter is contained in longer\n return longer\n else:\n for i in rnage(1,len(shorter)):\n if shorter[i:] == longer[:len(shorter[i:])]:\n #checks if shorter is in beginning of longer\n s3 = shorter + longer[len(shorter[i:]):]\n break\n for i in reversed(range(len(shorter))):\n if shorter[:i] == longer[-len(shorter[:i]):]:\n # checks if shorter is in ending of longer\n if len(s3) > len(longer[:-len(shorter[:i])] + shorter:\n s3 = longer[:-len(shorter[:i])] + shorter\n break\n return s3",
"def main(argv):\n \n ### gets data from csv, sets variables\n seq1, seq2 = get_seqs('../data/seq.csv')\n \n \n # Assign the longer sequence to s1, and the shorter to s2\n l1, l2 = len(seq1), len(seq2)\n if l1 >= l2:\n s1, s2 = ((l2 - 1) * \".\" + seq1 + (l2 - 1) * \".\"), seq2\n #puts l2-1 \".\"s both sides of l1, allows alignment of all overlap combos\n else:\n s1, s2 = ((l1 - 1) * \".\" + seq2 + (l1 - 1) * \".\"), seq1\n l1, l2 = l2, l1 \n\n # writes alignment(s) with highest score into output file\n my_best_score = -1 #so 0 beats best score\n for i in range(l1 + l2 -1):\n score, matched, shift, end_shift = calculate_score(s1, s2, l1, l2, i)\n #assigns returns from calc_score function to these variables\n if score > my_best_score:\n my_best_score = score\n statement = \"This alignment occurs when the smaller strand (\" + \\\n str(l2) + \"nt in length) attaches from base \" + str(i - l2 + 2) + \\\n \" of the larger strand, with the highest score of \" + str(score) + \\\n \":\\n\"\n #statement explaining the alignment in detail\n best_comparison_highSP = (shift + matched + (l2 - 1) * \".\" + \"\\n\")\n best_comparison_lowSP = (shift + matched + end_shift + \"\\n\")\n best_s2, best_s1 = (shift + s2 + end_shift + \"\\n\"), (s1 + \"\\n\\n\\n\")\n #formats the matching, s1 and s2 lines to line-up neatly\n if i < l1 - 1:\n best_alignment = (str(statement) + str(best_comparison_lowSP) \\\n + str(best_s2) + str(best_s1))\n else:\n best_alignment = (str(statement) + str(best_comparison_highSP) \\\n + str(best_s2) + str(best_s1))\n # uses returned variables to write a statement about the alignment \n # giving its score and startpoint, and assigns 3 lines of alignment \n # (s1, s2 and matching bases) to a variable each for later printing\n f = open('../results/seqs_align.txt', 'w')\n f.write(best_alignment)\n f.close()\n print(\"Done!\")\n return None",
"def disambiguateWordsOld(self, word_list, tag_list):\n\t\t# print u\" \".join(word_list).encode('utf8');\n\t\t# print u\" \".join(tag_list).encode('utf8');\t\t\t\n\t\n\t\tif len(word_list)==0 or len(word_list)!=len(tag_list):\n\t\t\treturn word_list;\n\t\telse:\n\t\t\tnewwordlist=[];\n\t\t\twordtaglist=zip(word_list,tag_list);\n\t\t\t# print wordtaglist\n\t\t\tfor i in range(len(wordtaglist)):\n\t\t\t\tif i+1<=len(wordtaglist):\n\t\t\t\t\t# do tests with next word\n\t\t\t\t\t# إذا كانت الكلمة الحالية \"أن\" تكون \"أنْ\" حرف نصب إذا سبقت فعلا\n\t\t\t\t\t# وتكون أنّ، من أخوات إنّ إذا كان ما بعدها اسما\n\t\t\t\t\tif wordtaglist[i][0]==u'أن' and self.tagger.isVerbTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case1';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنْ','t');\n\t\t\t\t\telif wordtaglist[i][0]==u'أن' and self.tagger.isNounTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case 2';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنَّ','t');\n\t\t\t\tnewwordlist.append(wordtaglist[i][0]);\n\t\t\treturn newwordlist;",
"def compSeq(s1, s2, lineL=50):\n lineN = int(np.ceil(min(len(s1), len(s2))/lineL))\n count = 0\n samecount = 0\n outStr = ''\n for linei in range(lineN):\n if (linei+1) * lineL < min(len(s1), len(s2)):\n end = (linei+1) * lineL\n else:\n end = min(len(s1), len(s2))\n outStr += 'Pos %d - %d\\n' % (linei*lineL+1, end-1+1)\n for sitei in range(linei*lineL, end):\n outStr += s1[sitei]\n outStr += '\\n'\n for sitei in range(linei*lineL, end):\n out = ' ' if s1[sitei] == s2[sitei] else '|'\n outStr += out\n count += 1\n samecount += 1 if s1[sitei]==s2[sitei] else 0\n outStr += '\\n'\n for sitei in range(linei*lineL, end):\n out = '.' if s1[sitei] == s2[sitei] else s2[sitei]\n outStr += s2[sitei]\n outStr += '\\n\\n'\n outStr += 'Seq1 (%d) and Seq2 (%d) are %1.1f%% similar\\n\\n' % (len(s1), len(s2), 1e2*samecount/count)\n print(outStr)",
"def sentence_similarity(self,wnsimilarity,sentence1, sentence2,icneed=False):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n \n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets1:\n \n # Get the similarity value of the most similar word in the other sentence\n score_list=[]\n if icneed == True :\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss,self.brown_ic)\n score_list.append(temp)\n except:\n continue\n \n else:\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss)\n score_list.append(temp)\n except:\n continue\n \n \n score_list = np.array(score_list, dtype=np.float64)\n score_list = np.nan_to_num(score_list)\n# print(score_list)\n if len(score_list)>0:\n best_score = np.nanmax(score_list)\n else:\n best_score=0.0\n# print(best_score)\n# print(type(best_score))\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score =score + best_score\n# print(score)\n count = count+ 1\n \n \n# print(\"one sentence over\")\n # Average the values\n score /= count\n return score",
"def test_normalize_phrase(self):\n test_pairs = [\n [\"Commissioner v. Palin\", \"palin\"],\n [\"Commr v. Palin\", \"palin\"],\n [\"Comm'r v. Palin\", \"palin\"],\n [\n \"United States v. Learned Hand et. al.\",\n \"unitedstateslearnedhand\",\n ],\n [\"Baker, Plaintiff v. Palin, Defendant\", \"bakerpalin\"],\n ]\n for pair in test_pairs:\n self.assertEqual(\n normalize_phrase(harmonize(clean_string(pair[0]))), pair[1]\n )",
"def longest_common_substring_bottom_up(s1, s2):\n lcs_array = lcs(s1, s2)\n n = len(s1)\n m = len(s2)\n l = lcs_array[n][m]\n result = \"\"\n while l != 0:\n if s1[n - 1] == s2[m - 1]:\n result = s1[n - 1] + result\n l -= 1\n n -= 1\n m -= 1\n elif lcs_array[n][m - 1] >= lcs_array[n - 1][m]:\n m -= 1\n else:\n n -= 1\n return result",
"def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein"
] | [
"0.60838586",
"0.60141426",
"0.5906382",
"0.58889776",
"0.5880022",
"0.57611173",
"0.5745369",
"0.5738358",
"0.5728159",
"0.56908894",
"0.5626435",
"0.5607777",
"0.5582242",
"0.55685735",
"0.5559297",
"0.55515736",
"0.555132",
"0.55334914",
"0.550639",
"0.55015826",
"0.55010045",
"0.54980505",
"0.54920626",
"0.54840213",
"0.54830384",
"0.544423",
"0.54400027",
"0.5439455",
"0.5437852",
"0.54367346"
] | 0.62365746 | 0 |
Decorator to cache the given function's output. | def _memorize(func):
def _wrapper(self, *args, **kwargs):
"""Wrapper to cache the function's output.
"""
if self.use_cache:
cache = load_cache(self.cache_filename)
original_key = generate_hash(
self.__class__.__name__, func.__name__, args, kwargs)
cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()
cached_val = cache.get(cache_key)
if cached_val:
return cached_val
val = func(self, *args, **kwargs)
if self.use_cache:
cache.set(cache_key, val)
return val
return _wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cache_function(self, func):\n\n @wraps(func)\n def wrapper(*args):\n if self.__log:\n self.__logger.info(f\"Called {func.__name__} with {args}\")\n fileName = self.__build_file_name(func, args)\n\n if os.path.isfile(fileName):\n # Result is already stored in cache\n # Retrieve return value from cache\n return self.__read_cache(fileName)\n else:\n # Result is not stored in cache\n # Run function\n if len(args) > 0:\n returnVal = func(args)\n else:\n returnVal = func()\n\n # Store value in cache\n self.__write_cache(fileName, returnVal)\n\n # Give return value\n return returnVal\n\n return wrapper",
"def cache_result(func):\n\n @wraps(func)\n def with_cache(*args, **kwargs):\n \"\"\"\n Cached function\n \"\"\"\n key = '{}{}{}'.format(\n hash(func), hash(args), hash(frozenset(kwargs.items())))\n\n cached_result = cache.get(key)\n if cached_result is not None:\n return cached_result if cached_result != 'None' else None\n result = func(*args, **kwargs)\n cache.set(key, result if result is not None else 'None')\n\n return result\n\n return with_cache",
"def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache",
"def cache(func):\n\n def func_wrapper(self, hook=None, result_name=None):\n \"\"\"Wrapper to cache the result of a function.\"\"\"\n if self._cache is not None:\n c = self._cache.copy()\n c['cache'] = True\n return c\n else:\n ret = func(self, hook=hook, result_name=result_name)\n if not isinstance(ret, dict):\n raise TypeError( # pragma: no cover\n \"A dictionary was expected not '{0}'.\\nIssue with class '{1}'\"\n \"\".format(\n type(ret), type(self)))\n self._cache = ret\n ret = ret.copy()\n ret['cache'] = False\n return ret\n return func_wrapper",
"def cached():\n def decorator(fn): # define a decorator for a function \"fn\"\n cache_name = fn.func_name\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments\n if os.path.exists(cache_name):\n with gzip.GzipFile(cache_name, 'rb') as cachehandle:\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with gzip.GzipFile(cache_name, 'wb') as cachehandle:\n pickle.dump(res, cachehandle, pickle.HIGHEST_PROTOCOL)\n return res\n return wrapped\n return decorator # return this \"customized\" decorator that uses \"cachefile\"",
"def cache_result(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n assert len(args) == 0 and len(kwargs) == 0, \"Wrapped call must be empty\"\n if not hasattr(f, \"cached_result\"):\n f.cached_result = f()\n return f.cached_result\n return wrapper",
"def cache(func):\n storage = {}\n\n def wrapper(*args, **kwargs):\n key = str(*args, **kwargs)\n if storage.get(key):\n return storage[key]\n else:\n result = func(*args, **kwargs)\n storage[key] = result\n return result\n\n return wrapper",
"def memoize(func):\r\n func.cache = {}\r\n return decorator(_memoize, func)",
"def cache_result(func):\n def cache_set(key, value):\n cache.set(key, value, CACHE_TIMEOUT)\n return value\n\n def cached_func():\n prefix = func.__name__\n cached_funcs.add(prefix)\n key = get_cache_key(prefix=prefix)\n return cache.get(key) or cache_set(key, func())\n return cached_func",
"def memoize(function):\r\n cache = {}\r\n def decorated_function(*args):\r\n if args in cache:\r\n return cache[args]\r\n else:\r\n val = function(*args)\r\n cache[args] = val\r\n return val\r\n return decorated_function",
"def cached(func):\n return _lru_cache(None)(func)",
"def memoize(func):\n cache = {}\n @wraps(func)\n def wrap(*args):\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n return wrap",
"def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"",
"def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"",
"def memo(func):\n cache = {}\n\n def wrapper(*args, **kwargs):\n update_wrapper(wrapper, func)\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return wrapper",
"def memorize(func):\n cache = {}\n\n @wraps(func)\n def cached_function(*args, **kwargs):\n if args not in cache:\n cache[args] = func(*args, **kwargs)\n return cache[args]\n\n return cached_function",
"def memoize(func):\n cache = {}\n # Store results in a dict that maps arguments to results\n def wrapper(*args, **kwargs):\n if(args, kwargs) not in cache:\n # call func() and store the result.\n cache[(args,kwargs)] = func(*args,**kwargs)\n return cache[(args,kwargs)]\n return wrapper",
"def memoization(func):\n cache = {}\n\n @wraps(func)\n def _wrap(*args, **kwargs):\n key = (args, tuple(sorted(kwargs.items())))\n result = cache.get(key, None)\n if result:\n print(\"It's cached\")\n return result\n\n result = func(*args, **kwargs)\n cache[key] = result\n return result\n\n return _wrap",
"def memorized(f):\n cache = {}\n @wraps(f)\n def wrapped(*args):\n try:\n result = cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n return wrapped",
"def cache(fn):\n\tcache.c = dict()\n\tdef _fn(*args, **kwargs):\n\t\tkey = fn.__name__ + str(args) + str(kwargs)\n\t\ttry:\n\t\t\tret = cache.c[key]\n\t\texcept KeyError, e:\n\t\t\tret = fn(*args, **kwargs)\n\t\t\tcache.c[key] = ret\n\t\treturn ret\n\treturn _fn",
"def decorate(func, *args, **kws):\n # setting cache expires for given decorated function,\n # if argument 'expire' is given.\n if expire:\n self.cache_expires[func] = expire\n else:\n self.cache_expires[func] = self.get_config().page_cache_expire\n if namespace_func:\n self.cache_nsfuncs[func] = namespace_func\n\n def do_cache(*args, **kws):\n \"\"\"\n A function works every time decorated functions are called.\n \"\"\"\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')\n return do_cache",
"def decorator(func):\n\n def wrapper():\n \"\"\"\n decorates the given function and makes it a lazy one.\n\n :returns: function result.\n \"\"\"\n\n result = caching_services.try_get('permanent', func, None)\n if result is not None:\n return result\n\n result = func()\n caching_services.try_set('permanent', result, func, None)\n return result\n\n return update_wrapper(wrapper, func)",
"def memo(func):\n cache = {}\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = str(args) + str(kwargs)\n try:\n return cache[key]\n except KeyError:\n rc = func(*args, **kwargs)\n cache[key] = rc\n return rc\n return wrapper",
"def cached_func(*args):\n try: # fails if cache is not instantiated\n return self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value",
"def memoize(f):\n cache = {}\n @functools.wraps(f)\n def g(*args):\n ret = cache.get(args, cache)\n if ret is cache:\n ret = cache[args] = f(*args)\n return ret\n return g",
"def cached(func):\n cache_dct = {}\n\n @wraps(func)\n def _lru_cache_decorator(*args):\n key = args\n if key in cache_dct:\n return cache_dct[key]\n else:\n cache_dct[key] = func(*args)\n return cache_dct[key]\n return _lru_cache_decorator",
"def cache_method_result(func: _t.Callable) -> _t.Callable:\n name: _t.Text = f'__cache_{func.__name__}'\n\n def wrapper(self, *args, **kwargs):\n result = getattr(self, name, None)\n\n if result is None or result[0] != args or result[1] != kwargs:\n result = (args, kwargs, func(self, *args, **kwargs))\n setattr(self, name, result)\n\n return result[2]\n\n return wrapper",
"def memoize(f):\n cache = OrderedDict({})\n\n def wrapper(*keys, **kwargs):\n \"\"\" Search for invoker function's return value in cache for given arguments,\n if found then return else store function parameters as key\n and function return value as value in cache\n If cache size exceeds 2, delete the oldest used key value record\n \"\"\"\n key = str(keys) + str(kwargs)\n if key in cache:\n value = cache.pop(key)\n cache[key] = value\n return cache[key]\n while len(cache)>1:\n cache.popitem(False)\n cache[key] = f(*keys, **kwargs)\n return cache[key]\n return wrapper",
"def _wrapper(self, *args, **kwargs):\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val",
"def cached(key):\n\n def decorator(fn):\n def decorated(cls):\n value = cls.get_cache(key)\n if value is not None:\n return value\n else:\n value = fn(cls)\n cls.set_cache(key, value)\n return value\n\n return decorated\n\n return decorator"
] | [
"0.80434555",
"0.802811",
"0.80147463",
"0.7940685",
"0.78886366",
"0.7886459",
"0.77482986",
"0.7695859",
"0.7625713",
"0.7599214",
"0.75760525",
"0.74956316",
"0.7471002",
"0.7471002",
"0.7467602",
"0.7437199",
"0.741782",
"0.74044234",
"0.73983026",
"0.73818004",
"0.7380074",
"0.7345714",
"0.7334566",
"0.73119164",
"0.73074865",
"0.7307084",
"0.7279092",
"0.72575",
"0.7249421",
"0.7246533"
] | 0.8269284 | 0 |
Groups chunks by entities retrieved from NL API Entity Analysis. | def _group_chunks_by_entities(self, chunks, entities):
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _group_chunks_by_entities(self, chunks, entities):\n for entity in entities:\n chunks_to_concat = chunks.get_overlaps(\n entity['beginOffset'], len(entity['content']))\n if not chunks_to_concat: continue\n new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])\n new_chunk = Chunk(new_chunk_word)\n chunks.swap(chunks_to_concat, new_chunk)\n return chunks",
"def entity_groups(self):\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups",
"def entity_groups(self):\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups",
"def add_entities(doc):\n\n # Calls function to tokenize the document, stores as list of strings\n tokens = tokenize(doc)\n\n # Calls function to find named entities in the tokens, stores as list of strings\n chunks = chunk(tokens)\n\n return chunks",
"def _get_chunks_with_api(self, input_text, language=None, use_entity=False):\n chunks = self._get_source_chunks(input_text, language)\n if use_entity:\n entities = api.get_entities(self.service, input_text, language)\n chunks = self._group_chunks_by_entities(chunks, entities)\n chunks = self._resolve_dependency(chunks)\n return chunks",
"def extract_entities(self) :\n entities = []\n googleEntityList = self.googleLanguageModel.analyze_entities() \n watsonEntityList = self.watsonLanguageModel['entities']\n\n for entity in googleEntityList.entities[:self.entitySizeLimit]:\n if len(entity.metadata) > 0:\n entities.append({ 'name' : entity.name, 'metadata' : entity.metadata})\n \n for entity in watsonEntityList[:self.entitySizeLimit]: \n entities.append({ 'name': entity['text'], 'metadata': entity.get('disambiguation', {})}) \n\n return entities",
"def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)",
"def getChunks():",
"def post(self):\n data = request.json\n client_chunks = g.client.chunks.all()\n chunks = Chunk.query.filter(data['lat'] - data['size'] <= Chunk.lat, Chunk.lat <= data['lat'] + data['size']).filter(data['long'] - data['size'] <= Chunk.long, Chunk.long <= data['long'] + data['size']).all()\n\n for chunk in chunks:\n chunk.owned = chunk in client_chunks\n\n return {'items': chunks}",
"def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))",
"def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:",
"def get_line_groups(self, fb_brw):\n group_max = self.get_group_size(fb_brw)\n lines_height = self.get_lines_height(fb_brw)\n res = []\n line_subset = OrderedDict()\n group_height = 0\n first_page = group_max - 1\n other_page = group_max - 2\n page = 1\n\n page_max = first_page\n for (line, line_height) in lines_height.iteritems():\n if line_height + group_height <= page_max:\n line_subset.update([(line, line_height)])\n group_height += line_height\n else:\n # save group\n res.append(self.get_group(line_subset, group_height,\n page, page_max))\n # init new group\n line_subset = OrderedDict([(line, line_height)])\n group_height = line_height\n page_max = other_page\n page += 1\n res.append(self.get_group(line_subset, group_height, page, page_max))\n return res",
"def corpus2chunks(corpus_fname: Union[str, Path], n: int):\n with open(corpus_fname) as f:\n out = f.read()\n print(\"Starting spacy processing of document\")\n doc = nlp(out)\n print(\"Finished spacy processing.\")\n sentences = [s.text for s in doc.sents]\n\n chunked_sents = [\n remove_newlines_and_spaces(\" \".join(b)) for b in batch_list(sentences, n)\n ]\n return chunked_sents",
"def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]",
"def generate_entities(self, data):\r\n\t\t# create an empty dictionary to hold entities\r\n\t\tent_dic = {}\r\n\r\n\t\tfor row in data.itertuples():\r\n\t\t\t# feed nlp the first line's set of keywords\r\n\t\t\tdoc = self.nlp(row.keywords)\t\r\n\t\t\t# begin iterating through the nlp's entities\r\n\t\t\tfor ent in doc.ents:\r\n\r\n\t\t\t\t# For each entity, check if the label exists in 'ent_dic'.\r\n\t\t\t\t# If it does, append the entity into the key, value pair.\r\n\t\t\t\t# If it doesn't, create a new key, value pair\r\n\t\t\t\tkey = str(ent.label_) + ''\r\n\t\t\t\tif ent.label_ in ent_dic:\r\n\t\t\t\t\tent_dic[key].append(str(ent)) if not str(ent) in ent_dic[key] else print(f'The entity: {ent} is already in the array')\r\n\t\t\t\telse: \r\n\t\t\t\t\tent_dic[key] = [str(ent)]\r\n\r\n\t\t# return the dictionary of entities\r\n\t\treturn ent_dic",
"def chunk(tokens):\n\n # Uses NLTK function to pair each token with its Part Of Speech\n entity_list = []\n pos = nltk.pos_tag(tokens)\n named_entities_chunk = nltk.ne_chunk(pos, binary=True)\n\n # Finds named entities in tokens, stores in list of strings\n for i in range(0, len(named_entities_chunk)):\n ents = named_entities_chunk.pop()\n if getattr(ents, 'label', None) is not None and ents.label() == \"NE\":\n entity_list.append([ne for ne in ents])\n\n # Combines named entity components, pulls off the POF labels\n return [' '.join(next(zip(*l))) for l in entity_list]",
"def batch_chunks(exp_chunks):\n import numpy as np\n batch_idx = np.array([chunk[0]['batch_id'] for chunk in exp_chunks])\n unique_batch_idx = np.unique(batch_idx)\n ids_per_array = [np.where(batch_idx == array_bidx)[0] for array_bidx in unique_batch_idx]\n exp_arrays = [[exp_chunks[idx] for idx in chunk_ids] for chunk_ids in ids_per_array]\n return exp_arrays",
"def create_entities(self, entity_type):\n data = self.read_file(entity_type)\n base_url = data['url']\n for entity in data['entities']:\n url = base_url + entity['url']\n for data in entity['entities']:\n r = requests.post(url, json.dumps(data))\n print(r.text)",
"def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))",
"def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request side\")",
"def return_entity_collection(self, entities, request, environ,\n start_response, response_headers):\n response_type = self.content_negotiation(\n request, environ, self.FeedTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n entities.set_topmax(self.topmax)\n if response_type == \"application/json\":\n data = str('{\"d\":%s}' % ''.join(\n entities.generate_entity_set_in_json(request.version)))\n else:\n # Here's a challenge, we want to pull data through the feed\n # by yielding strings just load in to memory at the moment\n f = core.Feed(None, entities)\n doc = core.Document(root=f)\n f.collection = entities\n f.set_base(str(self.service_root))\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return [data]",
"def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent",
"def get_entities(snippets, model, tag_type, batch_size):\n sentences, id2sentences = get_tokenized_sentences(snippets)\n model.predict(sentences, mini_batch_size=batch_size)\n\n # We group the prediction such that they correspond to the input snippets\n # and also we adjust the start and end position of the entities to correspond to their position in the snippet\n # and not in the splitted sentences\n output = {\"snippets\": []}\n index_snippet = 0\n snippet = {\"text\": snippets[index_snippet][1], \"entities\": []}\n index_sentence = 0 # length of sentences of the snippet we have already processed\n for i in range(len(sentences)):\n if snippets[index_snippet][0] != id2sentences[i]: # we moved to another snippet\n output[\"snippets\"].append((snippets[index_snippet][0], snippet))\n index_snippet += 1\n index_sentence = 0\n snippet = {\"text\": snippets[index_snippet][1], \"entities\": []}\n\n tagged_sentence = sentences[i].to_dict(tag_type=tag_type)\n # a tagged entity is represented as a dictionary of this form\n # {\"confidence\":0.9999816417694092,\"end_pos\":5,\"start_pos\":0,\"text\":\"Paris\",\"type\":\"LOC\"}\n entities = tagged_sentence[\"entities\"]\n for entity in entities:\n start_pos = index_sentence + entity[\"start_pos\"]\n end_pos = start_pos + len(entity[\"text\"])\n new_entity = entity\n new_entity[\"start_pos\"] = start_pos\n new_entity[\"end_pos\"] = end_pos\n snippet[\"entities\"].append(new_entity)\n\n index_sentence += len(tagged_sentence[\"text\"])\n\n output[\"snippets\"].append((snippets[index_snippet][0], snippet))\n\n return output",
"def build_chunks(results, metadata):\n\n for result in results:\n chunk = connector_pb2.DataChunk()\n for field in metadata.fieldInfo:\n set_value(result, field.name, chunk)\n yield chunk",
"def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]",
"def readEntities(self):\r\n entities = {}\r\n \r\n # Regexes must be greedy to prevent matching outer entity and end_entity strings\r\n # Regexes have re.DOTALL to match newlines\r\n for m in re.finditer(\"ENTITY (.*?)END_ENTITY;\", self.data, re.DOTALL):\r\n entity = {}\r\n raw_entity_str = m.groups()[0]\r\n\r\n entity[\"name\"] = re.search(\"(.*?)[;|\\s]\", raw_entity_str).groups()[0].upper()\r\n\r\n subtypeofmatch = re.search(\".*SUBTYPE OF \\((.*?)\\);\", raw_entity_str)\r\n entity[\"supertype\"] = subtypeofmatch.groups()[0].upper() if subtypeofmatch else None\r\n\r\n # find the shortest string matched from the end of the entity type header to the\r\n # first occurence of a NO_ATTR string (when it occurs on a new line)\r\n inner_str = re.search(\";(.*?)$\", raw_entity_str, re.DOTALL).groups()[0] \r\n\r\n attrs_str = min([inner_str.partition(\"\\r\\n \"+a)[0] for a in self.NO_ATTR])\r\n attrs = []\r\n for am in re.finditer(\"(.*?) : (.*?);\", attrs_str, re.DOTALL):\r\n name, attr_type = [s.replace(\"\\r\\n\\t\",\"\") for s in am.groups()]\r\n attrs.append((name, attr_type))\r\n \r\n entity[\"attributes\"] = attrs\r\n entities[entity[\"name\"]] = entity\r\n \r\n\r\n return entities",
"def get_entities(doc, clusters):\n ent_clusts = []\n for clust in clusters:\n ent_clust = []\n for (s, e) in clust:\n ent_clust.append(doc[s : e + 1])\n ent_clusts.append(ent_clust)\n return ent_clusts",
"async def split_large_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Splitting large groups\")\n progress.start()\n splitting = True\n stmt = select(Group).options(selectinload(Group.items), selectinload(Group.children))\n while splitting:\n splitting = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.children) == 0:\n if len(group.items) > 120 and len(group.items) < 300: # noqa: PLR2004\n if split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n elif len(group.items) >= 300: # noqa: PLR2004\n if split_by_attribute(dbsession, group, \"concepts\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"subjects\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"materials\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"techniques\"):\n splitting = True\n elif split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n await dbsession.commit()\n progress.stop()",
"def reduce_entites(entity_name, collection_name):\n num = 0\n list1 = set()\n for x in mongodb.import_object(collection_name, entity_name, parameters={}):\n for y in x:\n for z in y:\n\n if re.search(r'([A-Z][a-z]+\\s?){2,}', z['entity']):\n list1.add(z['entity'])\n num += 1\n # if re.search(r'[A-Z]{5,}', z['entity']):\n # print z['entity']\n\n print 'Number of ' + entity_name + 'entities: ',\n print num\n new_entities = list(reducer(list(list1)))\n print 'Reduced list :',\n print new_entities\n print 'Number of reduced entities: ',\n print len(new_entities)",
"def pages_split(text: str, document: dict, uri, gcs_output_uri : str, gcs_output_uri_prefix :str ):\n for i, entity in enumerate(document.entities):\n confidence = entity.confidence\n text_entity = ''\n for segment in entity.text_anchor.text_segments:\n start = segment.start_index\n end = segment.end_index\n text_entity += text[start:end]\n \n pages = [p.page for p in entity.page_anchor.page_refs]\n print(f\"*** Entity number: {i}, Split Confidence: {confidence} ***\")\n print(f\"*** Pages numbers: {[p for p in pages]} ***\\nText snippet: {text_entity[:100]}\")\n print(\"type: \" + entity.type_)\n start_page= pages[0]\n end_page = pages[len(pages)-1]\n print(start_page)\n print(end_page)\n \n storage_client = storage.Client()\n bucket = storage_client.get_bucket(uri.hostname)\n blob = bucket.get_blob(uri.path[1:])\n\n inputpdf= PdfFileReader(\n io.BytesIO(blob.download_as_bytes())\n ,strict=False) \n \n split_pdf(inputpdf, start_page, end_page, uri,gcs_output_uri, gcs_output_uri_prefix + \"/\" + entity.type_)"
] | [
"0.78132176",
"0.6900138",
"0.6900138",
"0.623251",
"0.60541534",
"0.6033899",
"0.599321",
"0.5869792",
"0.5774038",
"0.5766394",
"0.5745294",
"0.57265353",
"0.5688995",
"0.5677096",
"0.56333464",
"0.55718493",
"0.55683404",
"0.55677456",
"0.5554525",
"0.555384",
"0.5523666",
"0.55226916",
"0.55124635",
"0.5502346",
"0.54977214",
"0.54864717",
"0.5483721",
"0.5477632",
"0.5471269",
"0.5469718"
] | 0.7806981 | 1 |
Returns the list of annotations retrieved from the given text. | def _get_annotations(self, text, language=''):
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_raw_annotations_for_text(text, ontologies='MESH', semantic_types=None):\n\n if semantic_types is None:\n semantic_types = ()\n\n params = {}\n params['text'] = text\n params['ontologies'] = ontologies\n params['semantic_types'] = ','.join(semantic_types)\n response = _make_api_call('http://data.bioontology.org/annotator', params)\n raw_annotations = response.json()\n return raw_annotations",
"def get_annotations_for_text(text, ontologies='MESH', semantic_types=(), debug=False):\n results = {'status': 'ERROR', 'data': []}\n\n if debug:\n print \"bioportal.get_annotations_for_text\"\n\n annotations = _get_raw_annotations_for_text(\n text,\n ontologies=ontologies,\n semantic_types=semantic_types\n )\n\n if not isinstance(annotations, list):\n results['message'] = 'BioPortal get annotations: Invalid format annotations'\n return results\n\n for annotation in annotations:\n ontology_data = re.findall(\n r'.*/([A-Z0-9]+)/([A-Z0-9]+)$', annotation['annotatedClass']['@id']\n ) or []\n\n info = {\n 'id': annotation['annotatedClass']['@id'],\n 'class': annotation['annotatedClass']['links']['self'],\n 'frequency': len(annotation['annotations']),\n 'matched_terms': list(\n set([an.get('text').lower() for an in annotation.get('annotations')])\n )\n }\n\n if len(ontology_data) == 1:\n info['ontology_type'] = ontology_data[0][0]\n info['ontology_quote_id'] = ontology_data[0][1]\n\n results['data'].append(info)\n\n results['status'] = 'OK'\n return results",
"def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )",
"def parse(self, text):\n assert isinstance(text, str)\n if text.strip() == '':\n return [], []\n\n output = self._annotate(text, properties={\n \"annotators\": \"tokenize,ssplit,pos\",\n \"coref.md.type\": \"dep\",\n \"coref.mode\": \"statistical\"\n })\n\n words = []\n postags = []\n\n for sentence in output['sentences']:\n for token in sentence['tokens']:\n word = token['word']\n pos = token['pos']\n word = re.sub(r'\\s', '', word)\n words.append(word)\n postags.append(pos)\n return words, postags",
"def extract(self, text: str) -> list:\n nes={}\n if self.ner_model == 'spacy':\n nes=self.extract_spacy(text)\n return nes",
"def get_annotation_list(\n self,\n project_id: int,\n doc_id: int\n ) -> requests.models.Response:\n return self.get(\n 'v1/projects/{project_id}/docs/{doc_id}/annotations'.format(\n project_id=project_id,\n doc_id=doc_id\n )\n )",
"def get_annotations(xmlsent):\n annotations = []\n annotation_elements = xmlsent.findall(\".//{%s}a\" % NS)\n for element in annotation_elements:\n annotation = {}\n annotation['type'] = element.attrib.get('type')\n annotation['flavor'] = element.attrib.get('flavor')\n annotation['who'] = element.attrib.get('who')\n annotation['text'] = element.text\n annot = {'type': element.attrib.get('type'), 'flavor': element.attrib.get('flavor'), \n 'who': element.attrib.get('who'), 'text': element.text}\n annotations.append(annot)\n return annotations",
"def generate_annotations(self, caching=CachingType.NONE):\n # Make the nltk Text list of words\n text = self.nltk_text(self.text)\n\n # Get the uncommon_words\n uncommon_words = self.eliminate_common(text)\n # Get the places / VIPs / hystorical events / etc.\n extras = self.get_extras(text)\n # Generate the annotations\n annotations = []\n for word in uncommon_words:\n ann = annot.TextAnnotation(word, AnnotationType.UNCOMMON_WORD,\n caching)\n ann.save_to_db()\n if ann.data is None or not ann.data:\n continue\n annotations.append(ann)\n for word in extras:\n ann = annot.TextAnnotation(word, AnnotationType.EXTRA, caching)\n ann.save_to_db(case_sensitive=True)\n if ann.data is None or not ann.data:\n continue\n annotations.append(ann)\n # Return the list of annotations\n return annotations",
"def _get_annotations(self) -> List[Dict[int, Dict[str, Any]]]:\n annotations = []\n for item in self.collector:\n data_file_type = os.path.basename(item).split(\".\")[-1]\n annotations.append(\n load_annotation_file(\n os.path.join(\n self.annotation_folder,\n os.path.basename(item).replace(data_file_type, \"json\"),\n )\n )\n )\n\n return annotations",
"def annotate(api_key, text, ontologies=[], longest_only=False, expand_mappings=False, include=[]):\n annotations = []\n url = BIOPORTAL_API_BASE + '/annotator'\n\n headers = {\n 'content-type': \"application/json\",\n 'authorization': \"apikey token=\" + api_key\n }\n\n if len(text) > 0:\n payload = {'text': text,\n 'longest_only': longest_only,\n 'expand_mappings': expand_mappings}\n\n if len(ontologies) > 0:\n payload['ontologies'] = ','.join(ontologies)\n\n if len(include) > 0:\n payload['include'] = ','.join(include)\n\n response = requests.post(url, json=payload, headers=headers, verify=False)\n\n if response.status_code != 200:\n raise Exception('Problem when calling the Annotator: ' + response.text)\n\n\n\n # print(payload)\n # print(response.url)\n # print(response.status_code)\n # print(response.text)\n annotations = json.loads(response.text)\n\n return annotations",
"def extract_text_recognition_dataset(self, path):\n\n os.makedirs(os.path.join(path, 'images'))\n\n annotation = []\n\n for frame in tqdm(self.annotation['images']):\n image = cv2.imread(frame['file_name'], cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)\n for ann_id in self.img_id_2_ann_id[frame['id']]:\n obj = self.annotation['annotations'][ann_id]\n if obj['attributes']['legible']:\n bbox = obj['bbox']\n try:\n transcription = obj['attributes']['transcription']\n if transcription.isalnum():\n coord_x1, coord_y1, coord_x2, coord_y2 = bbox[0], bbox[1], bbox[0] + \\\n bbox[2], bbox[1] + bbox[3]\n coord_x1 = max(0, coord_x1)\n coord_x2 = min(image.shape[1] - 1, coord_x2)\n coord_y1 = max(0, coord_y1)\n coord_y2 = min(image.shape[0] - 1, coord_y2)\n crop_path = os.path.join(path, 'images', f'image{len(annotation)}.jpg')\n annotation.append(f'{crop_path} {transcription}')\n cv2.imwrite(crop_path, image[coord_y1:coord_y2, coord_x1:coord_x2])\n except KeyError:\n print('Missing transcription in ', frame['file_name'])\n break\n except IndexError:\n print('Error in image processing ', frame['file_name'])\n break\n\n with open(os.path.join(path, 'annotation.txt'), 'w') as file:\n file.write('\\n'.join(annotation))",
"def get_filtered_dataset_annotations(config):\n\n images_filenames = net.data.get_dataset_filenames(\n config[\"voc\"][\"data_directory\"], config[\"voc\"][\"validation_set_path\"])\n\n annotations_paths = [os.path.join(config[\"voc\"][\"data_directory\"], \"Annotations\", image_filename + \".xml\")\n for image_filename in images_filenames]\n\n labels_to_categories_index_map = {label: index for (index, label) in enumerate(config[\"categories\"])}\n\n all_annotations = []\n\n for annotations_path in tqdm.tqdm(annotations_paths):\n\n with open(annotations_path) as file:\n\n image_annotations_xml = xmltodict.parse(file.read())\n\n image_size = \\\n int(image_annotations_xml[\"annotation\"][\"size\"][\"height\"]), \\\n int(image_annotations_xml[\"annotation\"][\"size\"][\"width\"])\n\n # Read annotations\n annotations = net.data.get_objects_annotations(\n image_annotations=image_annotations_xml,\n labels_to_categories_index_map=labels_to_categories_index_map)\n\n # Resize annotations in line with how we would resize the image\n annotations = [annotation.resize(image_size, config[\"size_factor\"]) for annotation in annotations]\n\n # Discard odd sized annotations\n annotations = \\\n [annotation for annotation in annotations\n if not net.utilities.is_annotation_size_unusual(annotation, **config[\"objects_filtering\"])]\n\n all_annotations.extend(annotations)\n\n return all_annotations",
"def get_annotations(self, img_id):\n return self._img_id2annotations.get(img_id, [])",
"def __call__(self, text:str) -> List[Dict[str,any]]:\n\n # Extract matches\n if len(self.separators) == 0:\n return list(self.decorate_spans(self.keep_maximal_matches(self.iterate_over_matches(text))))\n else:\n return list(self.decorate_spans(self.keep_tokens(\n self.keep_maximal_matches(self.iterate_over_matches(text)), text, self.separators)))",
"def annotations(self):\n return self._annotations",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos",
"def locate_all_text(pipeline: Pipeline, text: str, img: ImageBGR) -> List[ndarray]:\n\n predictions = recognize(pipeline, img)\n\n return [box for (txt, box) in predictions if txt == text]",
"def annotations(self):\n\n return self._annotations",
"def select_annotations(\n self, center_x: int, center_y: int, width: int, height: int\n ) -> List[Annotation]:\n\n box = geometry.box(\n center_x - width // 2,\n center_y - height // 2,\n center_x + width // 2,\n center_y + height // 2,\n )\n\n annotations = [\n self._annotations[pos] for pos in self._tree.intersection(box.bounds)\n ]\n\n for sorter in self._sorters:\n annotations = sorter(annotations)\n return annotations",
"def getURIs(text):\n\n \n URIList=[]\n annotatedWords=[]\n annotations = spotlight.annotate('http://spotlight.sztaki.hu:2222/rest/annotate',text,confidence=0.4, support=20, spotter='Default')\n for i in annotations:\n a=i[\"URI\"]\n URIList.append(a.encode(\"UTF-8\"))\n try:\n annotatedWords.append(i[\"surfaceForm\"].encode(\"utf-8\"))\n except AttributeError as e:\n annotatedWords.append(\"\")\n print 'Error adding this word: \"%s\" (%s)'% (i[\"surfaceForm\"], e.message)\n return URIList, annotatedWords",
"def get_annotations(graph):\n return set(_annotation_iter_helper(graph))",
"def annotate(self, text, lang = None):\n return self._er.jsonRequestAnalytics(\"/api/v1/annotate\", { \"lang\": lang, \"text\": text })",
"def define_annotation_list(y_pos, bgcolor, text, colorfont, name, hovertext):\n return dict(\n x = -0,\n y = y_pos,\n xanchor = 'left',\n text = text,\n hovertext = hovertext,\n showarrow = False,\n captureevents = True,\n bgcolor = bgcolor,\n font = { 'size' : 12, 'color' : colorfont },\n height = 14\n )",
"def annotated_text_(self):\n cur_annotation_idx = 0\n text_idx = 0\n annotated_line = \"\"\n while cur_annotation_idx < len(self.annotations):\n # Iteratively append chunks of text plus the annotation.\n cur_annotation = self.annotations[cur_annotation_idx]\n annotated_line += (\n self.plain_text_[text_idx:cur_annotation.offset[0]] +\n cur_annotation.to_inline_string())\n text_idx = cur_annotation.offset[1] + 1\n cur_annotation_idx += 1\n else:\n # If no annotations are left, append the rest of the text.\n annotated_line += self.plain_text_[text_idx:]\n return annotated_line",
"def get_alignable_annotations(self, root):\n\n aas = root.findall(\".//ALIGNABLE_ANNOTATION\")\n return {aa.attrib[\"ANNOTATION_ID\"]: aa for aa in aas}",
"def load_annotations(self, index):\n anns_file = open(os.path.join(self.folder_path, self.image_ids[index] + '.json'))\n labels = json.load(anns_file)\n labels = labels[\"shapes\"]\n anns_file.close()\n return labels.copy()",
"def get_annotation_object(annots_path):\n\n lines = annots_path.read_text().split('\\n')\n \n annots = []\n for line in lines:\n if not line:\n continue\n \n annot = {}\n splot = line.split(' ')\n annot['class_id'] = int(splot[0])\n annot['center_x'] = float(splot[1])\n annot['center_y'] = float(splot[2])\n annot['width'] = float(splot[3])\n annot['height'] = float(splot[4])\n annot['class_name'] = splot[-1]\n \n if splot[5].startswith('px:'):\n px = splot[5].strip('px:')\n py = splot[6].strip('py:')\n \n if not (px == 'auto'):\n px = px.split(',')\n py = py.split(',')\n annot['px'] = [float(x) for x in px]\n annot['py'] = [float(x) for x in py]\n else:\n annot['px'] = 'auto'\n annot['py'] = 'auto'\n \n elif splot[5].startswith('conf:'):\n annot['conf'] = float(splot[5].split(':')[1])\n\n annots.append(annot)\n \n return annots",
"def extract_nps(text, annotation):\n np_starts = [i for i in range(len(annotation)) if annotation[i] == 'B-NP']\n np_indexes = []\n for s in np_starts:\n i = 1\n while s+i < len(annotation) and annotation[s + i] == 'I-NP':\n i += 1\n np_indexes.append((s, s + i))\n return [' '.join(text[s:e]) for s, e in np_indexes]",
"def extract(self, text: str) -> List[Extraction]:\n\n doc = self._parser(text)\n\n extractions = list()\n for sent in doc.sents:\n this_extraction = Extraction(value=sent.text,\n extractor_name=self.name,\n start_token=sent[0],\n end_token=sent[-1],\n start_char=sent.text[0],\n end_char=sent.text[-1])\n extractions.append(this_extraction)\n\n return extractions",
"def get_all_anns(filepaths):\n annotations = []\n for filepath in filepaths:\n with open(filepath, 'r') as f:\n for line in f:\n ent = line.split('\\t')[0].strip()\n tag = line.split('\\t')[-1].strip()\n # get rid of newlines\n if ent != '' and tag != '':\n annotations.append((ent, tag))\n\n return annotations"
] | [
"0.72478795",
"0.7009146",
"0.6540529",
"0.651889",
"0.6381263",
"0.63721395",
"0.6350639",
"0.62808055",
"0.61848694",
"0.6099416",
"0.6034471",
"0.60143244",
"0.60053957",
"0.594642",
"0.5920167",
"0.5902729",
"0.5899829",
"0.5843016",
"0.58206964",
"0.5813129",
"0.5790471",
"0.5784984",
"0.5748762",
"0.57482934",
"0.5748128",
"0.57193184",
"0.5709248",
"0.56906974",
"0.5666528",
"0.5663743"
] | 0.7336573 | 0 |
Returns the list of entities retrieved from the given text. | def _get_entities(self, text, language=''):
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entities_text(text):\n if len(text) == 0:\n return None\n\n client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n # Instantiates a plain text document.\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detects entities in the document. You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n entities = client.analyze_entities(document).entities\n\n # entity types from enums.Entity.Type\n entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION',\n 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER')\n\n for entity in entities:\n print(u'{:<16}\\t{:8}\\t{}'.format(entity.salience, entity.name, entity_type[entity.type]))\n \"\"\"print('=' * 20)\n print(u'{:<16}: {}'.format('name', entity.name))\n print(u'{:<16}: {}'.format('type', entity_type[entity.type]))\n print(u'{:<16}: {}'.format('metadata', entity.metadata))\n print(u'{:<16}: {}'.format('salience', entity.salience))\n print(u'{:<16}: {}'.format('wikipedia_url',\n entity.metadata.get('wikipedia_url', '-')))\"\"\"\n\n return entities",
"def get_entities(self, text):\n\n extractor = twitter_text.Extractor(text)\n\n entities = {}\n entities['user_mentions'] = []\n for um in extractor.extract_mentioned_screen_names_with_indices():\n entities['user_mentions'].append(um)\n\n entities['hashtags'] = []\n for ht in extractor.extract_hashtags_with_indices():\n\n # massage field name to match production twitter api\n ht['text'] = ht['hashtag']\n del ht['hashtag']\n entities['hashtags'].append(ht)\n\n entities['urls'] = []\n for url in extractor.extract_urls_with_indices():\n entities['urls'].append(url)\n\n return entities",
"def get_sentences(text):\n \n return text.split('.')",
"def extract_entities(event):\n # TODO The text should probably already be tagged and tokenized before this step\n tree = ne_chunk(event.pos_tagged)\n entities = set([])\n\n people = tree.subtrees(lambda x: x.node == \"PERSON\")\n for person in people:\n entities.add(\" \".join([leaf[0] for leaf in person.leaves()]))\n\n places = tree.subtrees(lambda x: x.node == \"GPE\")\n for place in places:\n entities.add(\" \".join([leaf[0] for leaf in place.leaves()]))\n\n organizations = tree.subtrees(lambda x: x.node == \"ORGANIZATION\")\n for org in organizations:\n entities.add(\" \".join([leaf[0] for leaf in org.leaves()]))\n \n return entities",
"def get_entities(self, clean=False):\n return list(self.iter_entities(clean=clean))",
"def find(\n self, text: str, labels: istr = None, pipeline: str = \"default\"\n ) -> List[Entity]:",
"def findAll(self, text):\n\n\t\tfindAllResults = list()\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tfindInstance = self.findInstance\n\t\tappend \t\t = findAllResults.append\t\t \n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor i in xrange(len(self.toWORD)):\n\n\t\t\tword = self.toWORD[i]\n\n\t\t\tif i == 0:\n\t\t\t\t#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*#\n\t\t\t\t# Skip the zeroeth index to avoid including punctuation in the findAllResults list\t\t #\n\t\t\t\t#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*#\n\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\tfor w in word:\n\n\t\t\t\t\tif len(w) > 0:\n\t\t\t\t\t\tresults = findInstance(text = text, term = w)\n\n\t\t\t\t\t\tif len(results) > 0:\n\t\t\t\t\t\t\tappend((i, results))\n\n\t\treturn findAllResults",
"def extract_sentences_from_text(self, text_data):\n pass",
"def extract(self, text: str) -> List[Extraction]:\n\n doc = self._parser(text)\n\n extractions = list()\n for sent in doc.sents:\n this_extraction = Extraction(value=sent.text,\n extractor_name=self.name,\n start_token=sent[0],\n end_token=sent[-1],\n start_char=sent.text[0],\n end_char=sent.text[-1])\n extractions.append(this_extraction)\n\n return extractions",
"def add_entities(doc):\n\n # Calls function to tokenize the document, stores as list of strings\n tokens = tokenize(doc)\n\n # Calls function to find named entities in the tokens, stores as list of strings\n chunks = chunk(tokens)\n\n return chunks",
"def split(text):\n doc = nlp(text)\n sentences = [x.text_with_ws for x in doc.sents]\n return sentences",
"def entity_recognition(text: str) -> spacy:\n nlp = spacy.load('en_core_web_sm')\n document = nlp(text)\n return document",
"def list_texts(self, start: int = None, end: int = None) -> List:\n return [str(i.text) for i in self.data[start:end]]",
"def extract_entities(self) :\n entities = []\n googleEntityList = self.googleLanguageModel.analyze_entities() \n watsonEntityList = self.watsonLanguageModel['entities']\n\n for entity in googleEntityList.entities[:self.entitySizeLimit]:\n if len(entity.metadata) > 0:\n entities.append({ 'name' : entity.name, 'metadata' : entity.metadata})\n \n for entity in watsonEntityList[:self.entitySizeLimit]: \n entities.append({ 'name': entity['text'], 'metadata': entity.get('disambiguation', {})}) \n\n return entities",
"def entities_text(text):\n client = language.LanguageServiceClient()\n\n # Instantiates a plain text document.\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detects entities in the document. You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n entities = client.analyze_entities(document).entities\n\n # entity types from enums.Entity.Type\n entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION',\n 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER')\n \n ingredients = []\n #out = \"\"\n products = [] \n wegmans = WegmansClass()\n\n for entity in entities:\n ingredients.append(entity.name)\n #out += entity.name + '\\n'\n weg_sku = wegmans.GetSKUs(entity.name)\n\n prod = []\n\n for x in weg_sku:\n r = wegmans.GetProduct(x)\n prod.append(r)\n #out += r.name + \", \" + str(r.price) + \", \" + str(r.image) + str(r.velocity) + '#'\n\n products.append(prod)\n\n #print('=' * 20)\n #print(u'{:<16}: {}'.format('name', entity.name))\n #print(u'{:<16}: {}'.format('type', entity_type[entity.type]))\n #print(u'{:<16}: {}'.format('metadata', entity.metadata))\n #print(u'{:<16}: {}'.format('salience', entity.salience))\n #print(u'{:<16}: {}'.format('wikipedia_url',\n # entity.metadata.get('wikipedia_url', '-')))\n\n #for x in ingredients:\n # print(x + '\\n')\n\n #for x in products:\n # for y in x:\n # print(y.name)\n # print(y.price)\n # print(y.velocity)\n # print(y.image)\n # print('\\t')\n\n js = json.dumps(products, default=lambda o: o.__dict__)\n \n return jsonify({\"results\": json.loads(js)})",
"def extract(self, text: str) -> list:\n nes={}\n if self.ner_model == 'spacy':\n nes=self.extract_spacy(text)\n return nes",
"def get_sentences(text, nlp):\n\n # get sentences from text\n sentences = [sentence for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n processed_sentences = [convert_to_string(remove_junk(tokenize_text(sentence, nlp))) for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n # convert the sentences into a list of document vectors\n sentence_vector_list = [nlp(sentence).vector for sentence in processed_sentences]\n\n return sentences, sentence_vector_list",
"def extract_content(tweets):\n result = []\n for t in tweets:\n text = t.text\n result.append(tokenize(text))\n return result",
"def get_texts(self) -> List[str]:\n return self.texts",
"def get_text(self) -> List[str]:\n return self.__texts",
"def extract_entities_with_indices(self, options = {}, transform = lambda x: x):\r\n if not self.text:\r\n return []\r\n\r\n # extract all entities\r\n entities = self.extract_urls_with_indices(options) + \\\r\n self.extract_hashtags_with_indices({'check_url_overlap': False}) + \\\r\n self.extract_mentions_or_lists_with_indices() + \\\r\n self.extract_cashtags_with_indices()\r\n\r\n entities = self._remove_overlapping_entities(entities)\r\n\r\n for entity in entities:\r\n entity = transform(entity)\r\n\r\n return entities",
"def get_entity_list(text: str,\n config: pd.DataFrame,\n parse_columns: Union[List[str], Tuple[str]],\n result_columns: Union[dict, None] = None,\n preformed_entity: Union[dict, None] = None,\n priority_sort_column: Union[str, None] = None,\n priority_sort_ascending: bool = True,\n cell_values_separator: Union[str, None] = ';',\n unique_column_values: bool = True) -> List:\n return DataframeEntityParser(dataframe=config,\n parse_columns=parse_columns,\n result_columns=result_columns,\n preformed_entity=preformed_entity,\n priority_sort_column=priority_sort_column,\n priority_sort_ascending=priority_sort_ascending,\n cell_values_separator=cell_values_separator,\n unique_column_values=unique_column_values).get_entity_list(text)",
"def get_entities(tags):\n pass",
"def sentences(self, text):\n if not self.__isValidInput(text):\n return [Sentence(text, Sentence.NONE)]\n\n uniText = unicode_str(text)\n result = []\n textLen = len(uniText)\n sentenceLen = c_size_t()\n position = 0\n while textLen > 0:\n sentenceType = self.__lib.voikkoNextSentenceStartUcs4(\n self.__handle,\n uniText[position:],\n textLen,\n byref(sentenceLen),\n )\n sentenceText = uniText[position:position + sentenceLen.value]\n result.append(Sentence(sentenceText, sentenceType))\n if sentenceType == Sentence.NONE:\n break\n position = position + sentenceLen.value\n textLen = textLen - sentenceLen.value\n return result",
"def get_sentences(self):\n return [s for s in self.text.split('\\n')]",
"def sentences(self, text):\n return re.findall(r'([A-Z][^\\.!?]*[\\.!?])', text)",
"def process_text(text):\n return [token.text for token in nlp(text) if not token.is_stop]",
"def split_sentences(text: str) -> List[str]:\n return sent_tokenize(text)",
"def split_sentences(self, text):\n assert isinstance(text, str)\n text = text.replace('\\n', '')\n\n if text.strip() == '':\n return []\n\n output = self._annotate(text, properties={\n \"annotators\": \"tokenize,ssplit\",\n \"coref.md.type\": \"dep\",\n \"coref.mode\": \"statistical\"\n })\n\n sentences = []\n for sentence in output['sentences']:\n num_token = len(sentence['tokens'])\n start_index = sentence['tokens'][0]['characterOffsetBegin']\n end_index = sentence['tokens'][num_token - 1]['characterOffsetEnd']\n sentences.append(text[start_index:end_index])\n return sentences",
"def tokenize(text):\n sentence = Sentence(text)\n return sentence.tokens()"
] | [
"0.7677393",
"0.7066265",
"0.67262036",
"0.6620404",
"0.66058445",
"0.6437475",
"0.6425754",
"0.6414616",
"0.6380142",
"0.63607067",
"0.6341214",
"0.633624",
"0.62974226",
"0.6291807",
"0.6288747",
"0.6282542",
"0.62515336",
"0.61551136",
"0.6136373",
"0.61311215",
"0.6127788",
"0.61201876",
"0.6110274",
"0.6084302",
"0.6081276",
"0.6077617",
"0.60611266",
"0.60470164",
"0.60466963",
"0.6027018"
] | 0.80995923 | 0 |
Obtains the account type key for the given label. | def get_account_type_value(self, label=None):
if label is not None:
index = list(map(lambda x: x[1], self.ACCOUNT_TYPES)).index(label)
return self.ACCOUNT_TYPES[index][0]
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def key_type(self) -> global___Type:",
"def get_keypair_name ( aws_account_type, region_name, keytype ) :\n return aws_account_type.upper( ) + '-' + keytype.upper( ) + '-' + region_name.lower( ) + '-keypair'",
"def _create_key(_type, name):\n return \"{}{}{}\".format(_type, DiagnosticManager._type_separator, name)",
"def getKey(instance):\n return instance['name']",
"def raw_label_key(self) -> str:\n\n # TODO(nikhilmehta): Change the task object to allow label_key to be a list.\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('multi_class_classification'):\n return task_type.multi_class_classification.label\n if task_type.HasField('binary_classification'):\n return task_type.binary_classification.label\n if task_type.HasField('one_dimensional_regression'):\n return task_type.one_dimensional_regression.label\n raise ValueError('Invalid task type: {}'.format(task_type))",
"def get_type_label(type_url):\n return type_dict[type_url]",
"def _get_key(var_type, attr):\n if attr is None:\n return var_type\n return f'{var_type}{SEP}{attr}'",
"def _get_bond_type_key(\n bond, sigma_conversion_factor, epsilon_conversion_factor\n):\n bond_k_constant = round(\n bond.type.k\n * (sigma_conversion_factor**2 / epsilon_conversion_factor),\n 8,\n )\n bond_bo_length = round(bond.type.req / sigma_conversion_factor, 8)\n bond_atom_1_and_2_types_tuple = tuple(\n sorted((bond.atom1.type, bond.atom2.type))\n )\n bond_atom_1_residue_name = bond.atom1.residue.name\n bond_atom_2_residue_name = bond.atom2.residue.name\n\n return (\n bond_k_constant,\n bond_bo_length,\n bond_atom_1_and_2_types_tuple,\n bond_atom_1_residue_name,\n bond_atom_2_residue_name,\n )",
"def key(self):\n return self.account_name()",
"def key(self):\n return self.account_name()",
"def account_type(self) -> str:\n return pulumi.get(self, \"account_type\")",
"def aus_label_key(config_atom: str) -> str:\n return sre_capability_label_key(\"aus\", config_atom)",
"def _get_key(self, object_type, user_key = None):\n\t\tif not user_key and not self.object_type_keys.has_key(object_type):\n\t\t\traise ParserError(\"Unknown key for object type: %s\\n\" % object_type)\n\n\t\t## Use a default key\n\t\tif not user_key:\n\t\t\tuser_key = self.object_type_keys[object_type]\n\n\t\treturn user_key",
"def label_type(self) -> str:\n return pulumi.get(self, \"label_type\")",
"def make_key_from_name(datablock):\r\n key = datablock.name\r\n if hasattr(datablock, \"type\"):\r\n key += datablock.type\r\n if hasattr(datablock, \"data\") and hasattr(datablock.data, \"type\"):\r\n key += datablock.data.type\r\n if datablock.library:\r\n key += datablock.library.name\r\n return key",
"def account_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"account_key\")",
"def key_to_obj_type(self, key):\n\t\tif key.endswith('ids'):\n\t\t\tkey = key[0:-1]\n\t\tif key == 'order_id' or key == 'user_id':\n\t\t\treturn key[0:-2]\n\t\telif key == 'partner_id' or key == 'demand_partner_id':\n\t\t\treturn 'account'\n\t\telif key == 'openx_buyer_id':\n\t\t\treturn 'buyer'\n\t\telse:\n\t\t\treturn key[0:-3]",
"def _get_resource(self, label: str, source: dict, resource_type: str):\r\n try:\r\n return source[label]\r\n except KeyError:\r\n raise ValueError(\"Cannot find {0} with label '{1}'.\\nExisting {0} labels: {2}\".format(\r\n resource_type, label, list(source.keys())))",
"def by_label(self, value: str) -> RegistryType:\n return {k: v for k, v in self.items() if k == value}",
"def get_label(cls):\n return cls._type_name(cls.label)",
"def get_label(cls):\r\n return cls._type_name(cls.label)",
"def get_name_type_label(self):\n id, name_type = self.NAME_TYPE_CHOICES[self.name_type]\n return name_type",
"def _get_autostorage_credentials_label():\n return 'autostorage_account'",
"def getKey(self, addr):\n return self.openAccount.getPrivKeyForAddress(addr)",
"def test_credential_type_label(self):\n request = Request.objects.get(id=1)\n field_label = request._meta.get_field('credential_type').verbose_name\n self.assertEquals(field_label, 'ip/domain')",
"def get_grouping_key(self, invoice_tax_val):\n self.ensure_one()\n return str(invoice_tax_val['tax_id']) + '-' + \\\n str(invoice_tax_val['account_id']) + '-' + \\\n str(invoice_tax_val['account_analytic_id'])",
"def _load_key(client, entity_type, entity_id=None, parent_key=None):\n\n key = None\n if entity_id:\n key = client.key(entity_type, entity_id, parent=parent_key)\n else:\n # this will generate an ID\n key = client.key(entity_type)\n return key",
"def get_label(self):\n\n auth = self.authorizations[0]\n return auth.label",
"def lookup_class_idx(self,label):\r\n \r\n return self.class_labels[label]",
"def get_enum_key(key, choices):\n if key in choices:\n return key\n keys = [k for k in choices if k.startswith(key)]\n if len(keys) == 1:\n return keys[0]"
] | [
"0.61789745",
"0.61113775",
"0.5973853",
"0.5878963",
"0.5767249",
"0.5738894",
"0.57032627",
"0.566649",
"0.56554073",
"0.56554073",
"0.56357807",
"0.5621801",
"0.5605613",
"0.5601492",
"0.55755854",
"0.55486083",
"0.55069405",
"0.5498101",
"0.547072",
"0.5462579",
"0.5443803",
"0.54221517",
"0.5409676",
"0.53891045",
"0.53488773",
"0.5337708",
"0.53198326",
"0.53053546",
"0.53025913",
"0.5289335"
] | 0.67215216 | 0 |
Test bills page (/bills) | def test_bills_page(self):
self.make_request("/bills", follow_redirects=True)
headings = [
"Current Bills",
"All Tabled Bills",
"Private Member & Committee Bills",
"All Tabled & Draft Bills",
"Draft Bills",
"Bills Explained",
]
for heading in headings:
self.assertIn(heading, self.html) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bill_page(self):\n bill = self.fx.BillData.food\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n self.assertIn(bill.type.name, self.html)\n # Check if \"na\" stage is in page\n self.assertIn(\"stage2\", self.html)\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)",
"def test_current_bills_page(self):\n self.make_request(\"/bills/current\", follow_redirects=True)\n self.assertIn(\"Current Bills\", self.html)\n self.assertIn(\"Weekly update for all current bills\", self.html)\n for bill_key in self.fx.BillData:\n bill = getattr(self.fx.BillData, bill_key[0])\n if bill.status and bill.status.name in self.current_statuses:\n self.contains_bill(bill)\n else:\n self.doesnt_contain_bill(bill)",
"def test_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/all/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n self.assertIn(self.fx.BillData.bill_with_none_number.title, self.html)\n self.assertIn(self.fx.BillData.sport.title, self.html)\n self.assertIn(self.fx.BillData.identical_date_events.title, self.html)",
"def test_show_bag(self):\n response = self.client.get('/shopping_bag/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shopping_bag/bag.html')",
"def test_draft_bills_page(self):\n response = self.make_request(\"/bills/draft/\", follow_redirects=False)\n self.assertEqual(302, response.status_code)\n current_year = datetime.datetime.today().year\n self.assertEqual(\n urlparse(response.location).path, \"/bills/draft/year/%d/\" % current_year\n )",
"def get_bills(request):\n response = ApiJsonResponse()\n try:\n user = MyUser.objects.get(pk=request.user.pk)\n except ObjectDoesNotExist:\n return Response({\n \"msg\": _('MSG_USER_NOT_EXIST'),\n \"status\": 404\n }, status=404)\n try:\n company = Company.objects.get(owner=user)\n except ObjectDoesNotExist:\n return Response({\n \"msg\": _('MSG_COMPANY_NOT_EXIST'),\n \"status\": 404\n }, status=404)\n try:\n bills = Bills.objects.filter(company=company)\n except ObjectDoesNotExist:\n response.set_error(1)\n response.set_result_code(404)\n response.set_result_msg(\"MSG_NO_BILLS_FOUNDED\")\n return JsonResponse(response.get_dict())\n try:\n for bill in bills:\n response.set_multiples_data(serialize_bill_object(bill))\n except Exception:\n response.set_multiples_data(serialize_bill_object(bills))\n response.set_result_code(200)\n response.set_result_msg(\"MSG_PROMOTION_FOUNDED\")\n return JsonResponse(response.get_dict())",
"def test_draft_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/draft/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n bill = self.fx.BillData.draft\n self.assertIn(bill.title, self.html)",
"def test_pay_bill(self):\n url = \"/pay_bill\"\n data = {\n \"booking\": 4,\n \"amount\": 10\n }\n response = app.test_client().post(url,\n json=data,\n content_type='application/json')\n assert response.status_code == 200, logging.error(\n \"Paying Bill Failed!\")\n logging.info(\"Pay Bill Tested!\")",
"def test_list_view(self):\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)",
"def test_shelf_page(self, *_):\n view = views.Shelf.as_view()\n shelf = self.local_user.shelf_set.first()\n request = self.factory.get(\"\")\n request.user = self.local_user\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = False\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, TemplateResponse)\n validate_html(result.render())\n self.assertEqual(result.status_code, 200)\n\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = True\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, ActivitypubResponse)\n self.assertEqual(result.status_code, 200)\n\n request = self.factory.get(\"/?page=1\")\n request.user = self.local_user\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = True\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, ActivitypubResponse)\n self.assertEqual(result.status_code, 200)",
"def test_get_boat(self):\n pass",
"def test_get_dealer_landing_page(self):\n pass",
"def test_only_borrowed_book_in_list(self):\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK'\n )\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n\n # Check that we got a response \"success\"\n self.assertEqual(response.status_code, 200)\n\n # Check that we don't have any book loaned\n self.assertTrue('bookinstancelist' in response.context)\n self.assertEqual(len(response.context['bookinstancelist']), 0)\n\n # Change some books's status to loan('o')\n books = BookInstance.objects.all()[:10]\n for book in books:\n book.status = 'o'\n book.save()\n\n # Repeat the login proccess\n response = self.client.get(reverse('my-borrowed'))\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Check borrowed book list in page\n self.assertTrue('bookinstancelist' in response.context)\n\n # Confirm all book belong to testuser1\n for book in response.context['bookinstancelist']:\n self.assertEqual(response.context['user'], book.borrower)\n self.assertEqual('o', book.status)",
"def test_view_browsable_api(client):\n # Arbitrary endpoint. Important thing is we access an endpoint of\n # the browsable API and force an HTML response so that template\n # rendering is used.\n client.get(\"/accounts/users/\", HTTP_ACCEPT=\"text/html\")",
"def test_shoppinglist_page(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # send a GET request\n res = self.app.get('/shoppinglist')\n self.assertEqual(res.status_code, 200)\n # check if page was loaded by looking for text in the page\n self.assertIn(\"Shopping List\", str(res.data))",
"def open_accounts_page(self):\n log.info(\"In landing page: click bill view button\")\n bills_page_for_meters_link = self.driver.find_element(\n *self.link_to_accs_locator\n )\n bills_page_for_meters_link.click()\n self.driver.sleep(5)\n self.driver.switch_to.window(self.driver.window_handles[-1])",
"def test_book_pages(self):\n url = reverse(\"book:book-detail\", kwargs={\"slug\": self.book.slug})\n response = self.client.get(url)\n assert response.status_code == 200\n assert not \"book_pages\" in json.loads(response.content)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_postflight_page_status(self):\n response = self.client.get('/postflight/')\n self.assertEqual(response.status_code, 200)",
"def test_get(self):\n response = self.client.get(self.url)\n\n # Standard response\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(\"td_biblio/entry_list.html\")",
"def list(self, request, *args, **kwargs):\n return super(BalanceBillsViewSet, self).list(\n request,\n *args,\n **kwargs\n )",
"def test_Sms_bundle_page(self):\n self.client.login(username='arch', password='admin')\n response = self.client.get(reverse('echo:sms_bundle'))\n self.assertEqual(response.status_code, 200)",
"def test_get_ban(self):\n pass",
"def test_abbeys_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/abbeys',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_shoppingitems_page(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # send a GET request\n res = self.app.get('/shoppingitems/Easter')\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"You can now add your items\", str(res.data))",
"def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)",
"def test_main_page_load(self):\n response = self.client.get(reverse(\"index\"))\n self.assertEqual(response.status_code, 200)",
"def test_basic_render_index_page(self):\n url = reverse('shipping.views.index')\n response = self.client.get(url)\n eq_(response.status_code, 200)\n self.assert_all_embeds(response.content)",
"def test_home_pg(self):\n\n with self.client as client:\n resp = client.get(\"/\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n # p1 should be top 3 in trending while p4 is not\n self.assertIn(f'<a href=\"/post/{self.p1_id}\">', html)\n self.assertNotIn(f'<a href=\"/post/{self.p4_id}\">', html)\n # p5 is oldest post and should not appear\n self.assertNotIn(f'<a href=\"/post/{self.p5_id}\">', html)",
"def test_dashboard_page(self):\r\n\r\n result = self.client.get(\"/dashboard\", follow_redirects = True)\r\n self.assertNotIn(b\"Family Ties - Dashboard\", result.data)"
] | [
"0.7680774",
"0.70165765",
"0.67381495",
"0.671697",
"0.6493225",
"0.6169521",
"0.6162236",
"0.61248475",
"0.60678047",
"0.5941131",
"0.5916215",
"0.58462006",
"0.5843386",
"0.5824207",
"0.5804087",
"0.5803388",
"0.57419884",
"0.57244706",
"0.5711201",
"0.5695939",
"0.5693122",
"0.5690703",
"0.5679476",
"0.5643503",
"0.56048656",
"0.5601106",
"0.5600448",
"0.5593326",
"0.5584687",
"0.55811435"
] | 0.8518053 | 0 |
Test bill page (/bill/) | def test_bill_page(self):
bill = self.fx.BillData.food
self.make_request("/bill/%d/" % bill.id, follow_redirects=True)
self.assertIn(bill.type.name, self.html)
# Check if "na" stage is in page
self.assertIn("stage2", self.html)
# Check if plenary event is shown in Bill History
self.assertIn("Bill history", self.html)
self.assertIn("National Assembly", self.html) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bills_page(self):\n self.make_request(\"/bills\", follow_redirects=True)\n headings = [\n \"Current Bills\",\n \"All Tabled Bills\",\n \"Private Member & Committee Bills\",\n \"All Tabled & Draft Bills\",\n \"Draft Bills\",\n \"Bills Explained\",\n ]\n for heading in headings:\n self.assertIn(heading, self.html)",
"def test_pay_bill(self):\n url = \"/pay_bill\"\n data = {\n \"booking\": 4,\n \"amount\": 10\n }\n response = app.test_client().post(url,\n json=data,\n content_type='application/json')\n assert response.status_code == 200, logging.error(\n \"Paying Bill Failed!\")\n logging.info(\"Pay Bill Tested!\")",
"def test_show_bag(self):\n response = self.client.get('/shopping_bag/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shopping_bag/bag.html')",
"def test_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/all/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n self.assertIn(self.fx.BillData.bill_with_none_number.title, self.html)\n self.assertIn(self.fx.BillData.sport.title, self.html)\n self.assertIn(self.fx.BillData.identical_date_events.title, self.html)",
"def test_shelf_page(self, *_):\n view = views.Shelf.as_view()\n shelf = self.local_user.shelf_set.first()\n request = self.factory.get(\"\")\n request.user = self.local_user\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = False\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, TemplateResponse)\n validate_html(result.render())\n self.assertEqual(result.status_code, 200)\n\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = True\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, ActivitypubResponse)\n self.assertEqual(result.status_code, 200)\n\n request = self.factory.get(\"/?page=1\")\n request.user = self.local_user\n with patch(\"bookwyrm.views.shelf.is_api_request\") as is_api:\n is_api.return_value = True\n result = view(request, self.local_user.username, shelf.identifier)\n self.assertIsInstance(result, ActivitypubResponse)\n self.assertEqual(result.status_code, 200)",
"def test_current_bills_page(self):\n self.make_request(\"/bills/current\", follow_redirects=True)\n self.assertIn(\"Current Bills\", self.html)\n self.assertIn(\"Weekly update for all current bills\", self.html)\n for bill_key in self.fx.BillData:\n bill = getattr(self.fx.BillData, bill_key[0])\n if bill.status and bill.status.name in self.current_statuses:\n self.contains_bill(bill)\n else:\n self.doesnt_contain_bill(bill)",
"def test_draft_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/draft/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n bill = self.fx.BillData.draft\n self.assertIn(bill.title, self.html)",
"def test_draft_bills_page(self):\n response = self.make_request(\"/bills/draft/\", follow_redirects=False)\n self.assertEqual(302, response.status_code)\n current_year = datetime.datetime.today().year\n self.assertEqual(\n urlparse(response.location).path, \"/bills/draft/year/%d/\" % current_year\n )",
"def test_sample(self):\n response = self.tester.get('/sample-household/',\n content_type='html/text')\n self.assertEqual(response.status_code, 200)",
"def test_basic_render_index_page(self):\n url = reverse('shipping.views.index')\n response = self.client.get(url)\n eq_(response.status_code, 200)\n self.assert_all_embeds(response.content)",
"def test_quotes(self):\n client = Client()\n response = client.get(reverse('quotes:index'))\n assert response.status_code == 200",
"def test_postflight_page_status(self):\n response = self.client.get('/postflight/')\n self.assertEqual(response.status_code, 200)",
"def test_get_dealer_landing_page(self):\n pass",
"def test_home_pg(self):\n\n with self.client as client:\n resp = client.get(\"/\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n # p1 should be top 3 in trending while p4 is not\n self.assertIn(f'<a href=\"/post/{self.p1_id}\">', html)\n self.assertNotIn(f'<a href=\"/post/{self.p4_id}\">', html)\n # p5 is oldest post and should not appear\n self.assertNotIn(f'<a href=\"/post/{self.p5_id}\">', html)",
"def test_view_browsable_api(client):\n # Arbitrary endpoint. Important thing is we access an endpoint of\n # the browsable API and force an HTML response so that template\n # rendering is used.\n client.get(\"/accounts/users/\", HTTP_ACCEPT=\"text/html\")",
"def test_book_by_pk_view(self):\n response = self.client.get(self.book_by_pk_url)\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'products/book.html')",
"def test_invoice_list(self):\n self.url = reverse(\"invoice-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_shoppinglist_page(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # send a GET request\n res = self.app.get('/shoppinglist')\n self.assertEqual(res.status_code, 200)\n # check if page was loaded by looking for text in the page\n self.assertIn(\"Shopping List\", str(res.data))",
"def test_book_pages(self):\n url = reverse(\"book:book-detail\", kwargs={\"slug\": self.book.slug})\n response = self.client.get(url)\n assert response.status_code == 200\n assert not \"book_pages\" in json.loads(response.content)",
"def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)",
"def test_get(self):\n response = self.client.get(self.url)\n\n # Standard response\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(\"td_biblio/entry_list.html\")",
"def test_calculate_number_page(self):\n self.requestapi = RequestApi('snack')\n result = self.requestapi.calculate_number_page()\n self.assertEqual(result, 1)",
"def test_household_get(self):\n url = '/household/'+ self.test_id + '/'\n response = self.tester.get(url,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)",
"def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data",
"def test_index(self):\n tester = app.test_client(self)\n response = tester.get(\"/\")\n self.assertEqual(response.status_code,200)\n assert b\"Moscow Ring Road Distance Finder\" in response.data\n assert b\"search address\" in response.data",
"def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_Sms_bundle_page(self):\n self.client.login(username='arch', password='admin')\n response = self.client.get(reverse('echo:sms_bundle'))\n self.assertEqual(response.status_code, 200)",
"def test_list_view(self):\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)"
] | [
"0.7101309",
"0.6749772",
"0.6648777",
"0.64527935",
"0.62037534",
"0.6166715",
"0.61598116",
"0.61127925",
"0.60858375",
"0.6062945",
"0.60293883",
"0.5998675",
"0.5993028",
"0.59900546",
"0.5974807",
"0.5946354",
"0.58807683",
"0.5874141",
"0.5833708",
"0.5829351",
"0.5825378",
"0.58235383",
"0.58223325",
"0.5805331",
"0.57892996",
"0.57789296",
"0.5774655",
"0.5774655",
"0.57627785",
"0.57600284"
] | 0.8359883 | 0 |
Test current bills page (/bills/current) | def test_current_bills_page(self):
self.make_request("/bills/current", follow_redirects=True)
self.assertIn("Current Bills", self.html)
self.assertIn("Weekly update for all current bills", self.html)
for bill_key in self.fx.BillData:
bill = getattr(self.fx.BillData, bill_key[0])
if bill.status and bill.status.name in self.current_statuses:
self.contains_bill(bill)
else:
self.doesnt_contain_bill(bill) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bills_page(self):\n self.make_request(\"/bills\", follow_redirects=True)\n headings = [\n \"Current Bills\",\n \"All Tabled Bills\",\n \"Private Member & Committee Bills\",\n \"All Tabled & Draft Bills\",\n \"Draft Bills\",\n \"Bills Explained\",\n ]\n for heading in headings:\n self.assertIn(heading, self.html)",
"def test_bill_page(self):\n bill = self.fx.BillData.food\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n self.assertIn(bill.type.name, self.html)\n # Check if \"na\" stage is in page\n self.assertIn(\"stage2\", self.html)\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)",
"def _is_current_page(self):\n self.selenium.wait_until_location_contains(\"/list\",timeout=60, message=\"Records list view did not load in 1 min\")\n self.selenium.location_should_contain(\"General_Accounting_Unit__c\",message=\"Current page is not a DataImport List view\")",
"def test_tags_browse_pagination_current_page(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n current_page_number = po.get_current_page_number()\n assert current_page_number == '1', \\\n \"after loading the page %s and examining\" % (po.current_url()) \\\n + \" the page links, the current page number\" \\\n + \" is '%s', expected '1'\" % (current_page_number)",
"def test_draft_bills_page(self):\n response = self.make_request(\"/bills/draft/\", follow_redirects=False)\n self.assertEqual(302, response.status_code)\n current_year = datetime.datetime.today().year\n self.assertEqual(\n urlparse(response.location).path, \"/bills/draft/year/%d/\" % current_year\n )",
"def _is_current_page(self, **kwargs):\n if kwargs:\n # do a lookup to get the object i\n object_id = self._get_object(**kwargs)[\"Id\"]\n pattern = r\"/lightning/r/{}/{}/view$\".format(self.object_name, object_id)\n else:\n # no kwargs means we should just verify we are on a detail\n # page without regard to which object\n pattern = r\"/lightning/r/{}/.*/view$\".format(self.object_name)\n\n location = self.selenium.get_location()\n if not re.search(pattern, location):\n raise Exception(\n \"Location '{}' didn't match pattern {}\".format(location, pattern)\n )",
"def current():\n\n return {\n 'page': 'current',\n }",
"def _is_current_page(self):\n location = \"/lightning/n/{}{}\".format(self.eda.get_eda_namespace_prefix(), self._object_name)\n self.selenium.location_should_contain(location)\n\n locator_tab = eda_lex_locators[\"eda_settings\"][\"tab\"].format(\"Relationships\")\n self.selenium.wait_until_page_contains_element(\n locator_tab,\n error=f\"Relationships tab with locator '{locator_tab}' is not available on the page\"\n )",
"def test_some_page_in_context(self):\n path = '/about/history'\n with self.app.test_client() as client:\n client.get(path)\n self.assertContext('current_page', self.app.get_page(path))",
"def biz_pending_currents_assertion(\n self,\n biz_admin, # User, biz admin\n expected_pending_current, # integer\n ):\n self.client.login(\n username=biz_admin.username, password='password')\n response = self.client.get('/biz-admin/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.context['currents_pending'], expected_pending_current)\n\n return response",
"def test_index_in_context(self):\n path = '/'\n with self.app.test_client() as client:\n client.get(path)\n self.assertContext('current_page', self.app.get_page(path))",
"def test_tags_view_pagination_current_page(self,tag_with_items):\n\n self.tag_name = tag_with_items\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n po.search_for_content([self.tag_name])\n\n po = self.catalog.load_pageobject('TagsViewPage')\n\n current_page_number = po.get_current_page_number()\n assert current_page_number == '1', \\\n \"after loading the page %s and examining the page links,\" \\\n % (po.current_url()) \\\n + \" the current page number is '%s', expected '1'\" \\\n % (current_page_number)",
"def open_accounts_page(self):\n log.info(\"In landing page: click bill view button\")\n bills_page_for_meters_link = self.driver.find_element(\n *self.link_to_accs_locator\n )\n bills_page_for_meters_link.click()\n self.driver.sleep(5)\n self.driver.switch_to.window(self.driver.window_handles[-1])",
"def test_list_view(self):\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)",
"def test_get_current(self):\n self.assertEqual(api.user.get_current().getUserName(), TEST_USER_NAME)",
"def index(request): \n \n # finds a current bid for each listing via a SQL query\n current_bid = Auction_listing.objects.annotate(max_bid=Max('bid__bid'))\n \n return render(request, \"auctions/index.html\", { \n \"auctions\": Auction_listing.objects.filter(active=True),\n \"current_bid\": current_bid\n })",
"def current_listing(request, auction_id):\n \n # if user is not logged in, display an error message\n if not request.user.is_authenticated:\n return render(request, 'auctions/apology.html', {\n 'message': \"You must be logged in to see this listing.\"\n })\n \n else:\n # query for watchlist status of the selected listing\n watchlist_item = Watchlist.objects.filter(user = request.user, auction_listing_id = auction_id)\n # query for the selected listing's data in the database\n listing = Auction_listing.objects.get(pk = auction_id)\n # if data is submitted\n if request.method == 'POST':\n # if user submits form via the watchlist button\n if request.POST.get('Watchlist_delete') or request.POST.get('Watchlist_add'):\n # check whether listing is on watchlist, if not add it, if yes remove it from watchlist\n if watchlist_item:\n watchlist_item.delete()\n else:\n watchlist = Watchlist(user = request.user, auction_listing_id = auction_id)\n watchlist.save()\n # if user submits form via the place bid button\n elif request.POST.get('min_bid') or request.POST.get('min_price'):\n # if previous bids were already made\n if request.POST.get('min_bid'):\n # if user provided amount is greater than the current highest bid\n if Decimal(request.POST.get('min_bid')) > Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max'):\n bid = Bid(user = request.user, auction_listing_id = auction_id, bid = request.POST.get('min_bid'))\n bid.save()\n # return an error message if user tries to bypass HTML verification\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Looks you tried to bypass the HTML verification. Unfortunately, your hacker level is too low to break this site.\"\n })\n # if no bids were made yet \n elif request.POST.get('min_price'):\n # if user provided amount is greater than or equal to the starting price\n if Decimal(request.POST.get('min_price')) >= listing.price:\n bid = Bid(user = request.user, auction_listing_id = auction_id, bid = request.POST.get('min_price'))\n bid.save()\n # return an error message if user tries to bypass HTML verification\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Looks you tried to bypass the HTML verification. 
Unfortunately, your hacker level is too low to break this site.\"\n })\n # if user submits form via the post comment button \n elif request.POST.get('post'):\n form = CommentForm(request.POST)\n # verify form is valid\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.auction_listing_id = auction_id\n instance.save()\n # else return an error message\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Form is invalid.\"\n })\n # if user submits form via the close auction button\n elif request.POST.get('close'):\n listing.active = False\n listing.save()\n \n return HttpResponseRedirect(reverse(\"current_listing\", kwargs={'auction_id': auction_id }))\n \n # if reached via URL\n else:\n form = CommentForm()\n # check if bid exists for current auction listing\n if Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max'):\n # query for the current bid in current listing\n current_bid = round((Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max')), 2)\n # find the user who made the current bid\n max_price = Bid.objects.get(auction_listing_id = auction_id, bid = Bid.objects.filter(auction_listing_id = auction_id).aggregate(Max('bid')).get('bid__max'))\n winner = max_price.user\n # if not bids were made, initiliaze both variables to 0 \n else:\n current_bid = 0\n winner = 0\n return render(request, 'auctions/current_listing.html', {\n 'listing': listing,\n 'price': listing.price,\n 'watchlist': watchlist_item,\n \"bid_count\": Bid.objects.filter(auction_listing_id = auction_id).count(),\n \"min_bid\": current_bid + Decimal(0.01),\n \"current_bid\": current_bid,\n \"winner\": winner,\n \"form\": form,\n \"comments\": Comment.objects.filter(auction_listing_id = auction_id),\n \"user\": request.user\n })",
"def testHrtCurrent(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"current\")\n\n self.util.boolPropertyTest(self, attr, \"current\")",
"def test_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/all/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n self.assertIn(self.fx.BillData.bill_with_none_number.title, self.html)\n self.assertIn(self.fx.BillData.sport.title, self.html)\n self.assertIn(self.fx.BillData.identical_date_events.title, self.html)",
"def i_see_the_active_cases_page(browser):\n assert browser.find_by_text('Active Cases')",
"def test_show_bag(self):\n response = self.client.get('/shopping_bag/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shopping_bag/bag.html')",
"def isCurrent(element):\n rr = element.find(\"RecordStatus\")\n if rr == None:\n return False\n else:\n return(rr.text == \"current\")",
"def test_get_tab(self):\n actions.login(ADMIN_EMAIL, is_admin=True)\n response = self.get(self.TAB_URL)\n self.assertEqual(response.status_code, 200)",
"def current_hunt(request):\n return hunt(request, Hunt.objects.get(is_current_hunt=True).hunt_number)",
"def _testCurrentPageWebAttribute(self, attr):\n settings = self._currentPageSettings()\n return settings is not None and settings.testAttribute(attr)",
"def test_only_borrowed_book_in_list(self):\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK'\n )\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n\n # Check that we got a response \"success\"\n self.assertEqual(response.status_code, 200)\n\n # Check that we don't have any book loaned\n self.assertTrue('bookinstancelist' in response.context)\n self.assertEqual(len(response.context['bookinstancelist']), 0)\n\n # Change some books's status to loan('o')\n books = BookInstance.objects.all()[:10]\n for book in books:\n book.status = 'o'\n book.save()\n\n # Repeat the login proccess\n response = self.client.get(reverse('my-borrowed'))\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Check borrowed book list in page\n self.assertTrue('bookinstancelist' in response.context)\n\n # Confirm all book belong to testuser1\n for book in response.context['bookinstancelist']:\n self.assertEqual(response.context['user'], book.borrower)\n self.assertEqual('o', book.status)",
"def api_is_active(self):\n # Author is a required field in our model.\n # Create a user for this test and save it to the test database.\n user = User()\n user.save()\n\n # Create and save a new page to the test database.\n response = self.client.get('/api/guru')\n\n # Make sure the slug that was generated in Page.save()\n # matches what we think it should be.\n self.assertEqual(response, 200)",
"def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs",
"def status():\n if not session.get('user_id'):\n return redirect(url_for('home.login'))\n if request.method == 'GET':\n payment = controllers.get_last_payment(session['user_id'])\n if not payment:\n return redirect(url_for('membership.pay'))\n return render_template('status.html', payment=payment)",
"def test_postflight_page_status(self):\n response = self.client.get('/postflight/')\n self.assertEqual(response.status_code, 200)"
] | [
"0.70060414",
"0.6251623",
"0.61162347",
"0.59430474",
"0.5937214",
"0.5659221",
"0.56577533",
"0.56432396",
"0.5577943",
"0.5549721",
"0.55134785",
"0.5268124",
"0.5258236",
"0.5243746",
"0.52388626",
"0.5181961",
"0.5178942",
"0.5174875",
"0.5121769",
"0.5106999",
"0.50839084",
"0.50751436",
"0.5066894",
"0.50626314",
"0.5024388",
"0.50187796",
"0.5012739",
"0.5011752",
"0.5010122",
"0.50056857"
] | 0.84873515 | 0 |
Test draft bills page (/bills/draft/) redirects to current year's draft bills page. | def test_draft_bills_page(self):
response = self.make_request("/bills/draft/", follow_redirects=False)
self.assertEqual(302, response.status_code)
current_year = datetime.datetime.today().year
self.assertEqual(
urlparse(response.location).path, "/bills/draft/year/%d/" % current_year
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_draft_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/draft/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n bill = self.fx.BillData.draft\n self.assertIn(bill.title, self.html)",
"def test_bills_page(self):\n self.make_request(\"/bills\", follow_redirects=True)\n headings = [\n \"Current Bills\",\n \"All Tabled Bills\",\n \"Private Member & Committee Bills\",\n \"All Tabled & Draft Bills\",\n \"Draft Bills\",\n \"Bills Explained\",\n ]\n for heading in headings:\n self.assertIn(heading, self.html)",
"def test_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/all/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n self.assertIn(self.fx.BillData.bill_with_none_number.title, self.html)\n self.assertIn(self.fx.BillData.sport.title, self.html)\n self.assertIn(self.fx.BillData.identical_date_events.title, self.html)",
"def test_bill_page(self):\n bill = self.fx.BillData.food\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n self.assertIn(bill.type.name, self.html)\n # Check if \"na\" stage is in page\n self.assertIn(\"stage2\", self.html)\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)",
"def test_draft_pages():\n app = create_ctfd()\n with app.app_context():\n gen_page(app.db, title=\"Title\", route=\"this-is-a-route\", html=\"This is some HTML\", draft=True)\n\n with app.test_client() as client:\n r = client.get('/this-is-a-route')\n assert r.status_code == 404\n\n register_user(app)\n client = login_as_user(app)\n r = client.get('/this-is-a-route')\n assert r.status_code == 404\n destroy_ctfd(app)",
"def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])",
"def test_redirect_on_all_borrowed_book(self):\n login = self.client.login(\n username='testuser2',\n password='2HJ1vRV0Z&3iD')\n\n # Valid date for book renewal\n valid_date = datetime.date.today() + datetime.timedelta(weeks=2)\n response = self.client.post(\n reverse('librarian-renew-book',\n kwargs={'pk': self.test_bookinstance1.pk}),\n {'due_back': valid_date})\n self.assertRedirects(response, reverse('borrowed-list'))",
"def test_show_post_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n draft_post = create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n url = reverse('blog.post', args=(draft_post.id,))\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def redirect_old_draft(page):\r\n return redirect(url_for('.draft', page=page), 301)",
"def test_current_bills_page(self):\n self.make_request(\"/bills/current\", follow_redirects=True)\n self.assertIn(\"Current Bills\", self.html)\n self.assertIn(\"Weekly update for all current bills\", self.html)\n for bill_key in self.fx.BillData:\n bill = getattr(self.fx.BillData, bill_key[0])\n if bill.status and bill.status.name in self.current_statuses:\n self.contains_bill(bill)\n else:\n self.doesnt_contain_bill(bill)",
"def test_draft_list_is_for_authenticated_users_only(client, contributor):\n\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n assert res.status_code == 302\n\n client.force_login(contributor)\n res = client.get(drafts_url)\n assert res.status_code == 200",
"def test_01_check_to_state_draft_post(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n self.create_rule(cr, uid, 'on_create')\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.demo)\r\n self.delete_rules(cr, uid)",
"def test_home_view_with_draft_post_and_published_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Published Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Published')\n create_post(category=category, author=author, name='Draft Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertQuerysetEqual(\n response.context['posts'],\n ['<Post: Published Post>']\n )",
"def test_postflight_page_status(self):\n response = self.client.get('/postflight/')\n self.assertEqual(response.status_code, 200)",
"def test_draft_unit_page_html(self):\r\n draft_unit = modulestore('draft').convert_to_draft(self.vertical.location)\r\n html = self.get_page_html(draft_unit)\r\n self.validate_html_for_add_buttons(html)",
"def test_20_app_index_draft(self, mock):\r\n # Create root\r\n with self.flask_app.app_context():\r\n self.register()\r\n self.new_application()\r\n self.signout()\r\n # Create a user\r\n self.register(fullname=\"jane\", name=\"jane\", email=\"[email protected]\")\r\n self.signout()\r\n\r\n # As Anonymous\r\n res = self.app.get('/app/draft', follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous should not see draft apps\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # As authenticated but not admin\r\n self.signin(email=\"[email protected]\", password=\"p4ssw0rd\")\r\n res = self.app.get('/app/draft', follow_redirects=True)\r\n assert res.status_code == 403, \"Non-admin should not see draft apps\"\r\n self.signout()\r\n\r\n # As Admin\r\n self.signin()\r\n res = self.app.get('/app/draft', follow_redirects=True)\r\n assert \"Applications\" in res.data, res.data\r\n assert \"app-published\" not in res.data, res.data\r\n assert \"draft\" in res.data, res.data\r\n assert \"Sample App\" in res.data, res.data",
"def draft(page):\r\n return app_index(page, cached_apps.get_draft, 'draft',\r\n False, True)",
"def test_redirect_if_not_logged_in(self):\n response = self.client.get(reverse('my-borrowed'))\n self.assertRedirects(\n response,\n '/accounts/login/?next=/catalog/mybooks/'\n )",
"def test_yearArchive(self):\n self.post.status = 'publish'\n self.post.save()\n\n url = self.post.get_year_archive_url()\n response = self.client.get(url)\n self.assertContains(response, self.post.title)",
"def test_post_opening_balance_journals(self):\n pass",
"def action_draft(self, cr, uid, ids, context=None):\n if self.search(cr, uid, [('id', 'in', ids), ('fiscalyear_id.state', '!=', 'draft')], context=context):\n raise osv.except_osv(_('Warning!'), _('You can not re-open a period which belongs to closed fiscal year'))\n return super(account_period, self).action_draft(cr, uid, ids, context)",
"def test_bill_identical_date_events_page(self):\n bill = self.fx.BillData.identical_date_events\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)",
"def test_only_borrowed_book_in_list(self):\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK'\n )\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n\n # Check that we got a response \"success\"\n self.assertEqual(response.status_code, 200)\n\n # Check that we don't have any book loaned\n self.assertTrue('bookinstancelist' in response.context)\n self.assertEqual(len(response.context['bookinstancelist']), 0)\n\n # Change some books's status to loan('o')\n books = BookInstance.objects.all()[:10]\n for book in books:\n book.status = 'o'\n book.save()\n\n # Repeat the login proccess\n response = self.client.get(reverse('my-borrowed'))\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Check borrowed book list in page\n self.assertTrue('bookinstancelist' in response.context)\n\n # Confirm all book belong to testuser1\n for book in response.context['bookinstancelist']:\n self.assertEqual(response.context['user'], book.borrower)\n self.assertEqual('o', book.status)",
"def test_00_check_to_state_draft_pre(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft)\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n self.delete_rules(cr, uid)",
"def test_booklist_ordered_by_due_date(self):\n # Change all book's status to loan('o')\n for book in BookInstance.objects.all():\n book.status = 'o'\n book.save()\n\n # Login into page\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK')\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Confirm that only 10 items are displayed per page\n self.assertEqual(len(response.context['bookinstancelist']), 10)\n\n last_date = 0\n for book in response.context['bookinstancelist']:\n if last_date == 0:\n last_date = book.due_back\n else:\n self.assertTrue(last_date <= book.due_back)\n last_date = book.due_back",
"def test_make_draft(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'create_draft'}\r\n )\r\n # Update the draft version and check that published is different.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'metadata': {'due': '2077-10-10T04:00Z'}}\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))",
"def test_list_view(self):\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)",
"def test_redirect_if_not_logged_in(self):\n response = self.client.get(\n reverse('librarian-renew-book',\n kwargs={'pk': self.test_bookinstance1.pk}))\n self.assertEqual(response.status_code, 302)\n self.assertTrue(response.url.startswith('/accounts/login/'))",
"def test_02_check_from_draft_to_done_without_steps(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n filter_done = self.create_filter_done(cr, uid)\r\n self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to done and check that responsible change to Demo_user\"\"\"\r\n new_lead.write({'state': 'done'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'done')\r\n self.assertEquals(new_lead.user_id.id, self.demo)\r\n self.delete_rules(cr, uid)",
"def test_get_dealer_landing_page(self):\n pass"
] | [
"0.796481",
"0.70448816",
"0.70290816",
"0.665954",
"0.6471485",
"0.6356927",
"0.635401",
"0.632725",
"0.6307408",
"0.61629856",
"0.6149977",
"0.58866006",
"0.57728565",
"0.5759125",
"0.568433",
"0.5669418",
"0.5656988",
"0.560628",
"0.55976933",
"0.550295",
"0.54979414",
"0.5494648",
"0.5478492",
"0.5475165",
"0.54658204",
"0.5436953",
"0.54287994",
"0.54193544",
"0.53230196",
"0.53219956"
] | 0.8807852 | 0 |
Test draft bills page for a year (/bills/draft/). | def test_draft_bills_page_for_year(self):
year = 2019
response = self.make_request(
"/bills/draft/year/%d/" % year, follow_redirects=True
)
self.assertEqual(200, response.status_code)
bill = self.fx.BillData.draft
self.assertIn(bill.title, self.html) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_draft_bills_page(self):\n response = self.make_request(\"/bills/draft/\", follow_redirects=False)\n self.assertEqual(302, response.status_code)\n current_year = datetime.datetime.today().year\n self.assertEqual(\n urlparse(response.location).path, \"/bills/draft/year/%d/\" % current_year\n )",
"def test_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/all/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n self.assertIn(self.fx.BillData.bill_with_none_number.title, self.html)\n self.assertIn(self.fx.BillData.sport.title, self.html)\n self.assertIn(self.fx.BillData.identical_date_events.title, self.html)",
"def test_bills_page(self):\n self.make_request(\"/bills\", follow_redirects=True)\n headings = [\n \"Current Bills\",\n \"All Tabled Bills\",\n \"Private Member & Committee Bills\",\n \"All Tabled & Draft Bills\",\n \"Draft Bills\",\n \"Bills Explained\",\n ]\n for heading in headings:\n self.assertIn(heading, self.html)",
"def test_bill_page(self):\n bill = self.fx.BillData.food\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n self.assertIn(bill.type.name, self.html)\n # Check if \"na\" stage is in page\n self.assertIn(\"stage2\", self.html)\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)",
"def test_yearArchive(self):\n self.post.status = 'publish'\n self.post.save()\n\n url = self.post.get_year_archive_url()\n response = self.client.get(url)\n self.assertContains(response, self.post.title)",
"def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])",
"def test_current_bills_page(self):\n self.make_request(\"/bills/current\", follow_redirects=True)\n self.assertIn(\"Current Bills\", self.html)\n self.assertIn(\"Weekly update for all current bills\", self.html)\n for bill_key in self.fx.BillData:\n bill = getattr(self.fx.BillData, bill_key[0])\n if bill.status and bill.status.name in self.current_statuses:\n self.contains_bill(bill)\n else:\n self.doesnt_contain_bill(bill)",
"def test_draft_pages():\n app = create_ctfd()\n with app.app_context():\n gen_page(app.db, title=\"Title\", route=\"this-is-a-route\", html=\"This is some HTML\", draft=True)\n\n with app.test_client() as client:\n r = client.get('/this-is-a-route')\n assert r.status_code == 404\n\n register_user(app)\n client = login_as_user(app)\n r = client.get('/this-is-a-route')\n assert r.status_code == 404\n destroy_ctfd(app)",
"def test_load_draft(league):\n draft = league.draft_results()\n assert(len(draft) == 144)\n #mcdavid 1st\n assert(draft[0]['player_key'] == '396.p.6743')\n # carter hart 67th\n assert(draft[66]['player_key'] == '396.p.7156')\n # zadorov last\n assert(draft[-1]['player_key'] == '396.p.5995')",
"def test_show_post_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n draft_post = create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n url = reverse('blog.post', args=(draft_post.id,))\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def test_draft_unit_page_html(self):\r\n draft_unit = modulestore('draft').convert_to_draft(self.vertical.location)\r\n html = self.get_page_html(draft_unit)\r\n self.validate_html_for_add_buttons(html)",
"def betting_lines(year):\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # Webapges are by dates\n all_dates = m.find('game_log', {'season': year}, {'_id': 0, 'date': 1}).distinct('date')\n\n browser = webdriver.Chrome('chromedriver')\n\n # Iterate through each date in a season\n for game_date in all_dates:\n\n # Get URL\n url = 'https://classic.sportsbookreview.com/betting-odds/nba-basketball/money-line/?date=' + datetime.strftime(game_date, '%Y%m%d')\n\n scrape_betting_page(url, browser, m, game_date)\n\n browser.close()",
"def test_booklist_ordered_by_due_date(self):\n # Change all book's status to loan('o')\n for book in BookInstance.objects.all():\n book.status = 'o'\n book.save()\n\n # Login into page\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK')\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Confirm that only 10 items are displayed per page\n self.assertEqual(len(response.context['bookinstancelist']), 10)\n\n last_date = 0\n for book in response.context['bookinstancelist']:\n if last_date == 0:\n last_date = book.due_back\n else:\n self.assertTrue(last_date <= book.due_back)\n last_date = book.due_back",
"def test_home_view_with_draft_post_and_published_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Published Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Published')\n create_post(category=category, author=author, name='Draft Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertQuerysetEqual(\n response.context['posts'],\n ['<Post: Published Post>']\n )",
"def test_get_not_avail_page_renders(self, start_year, start_year_is_none):\n fields = get_post_data(start_year, _ID_BenefitSurtax_Switches=False)\n fields['BE_sub'] = ['0.25']\n fields[\"first_year\"] = start_year\n unique_url = get_taxbrain_model(fields,\n first_year=start_year,\n taxcalc_vers=\"0.14.2\",\n webapp_vers=\"1.3.0\",\n Form=DynamicBehavioralInputsModelForm,\n UrlModel=DynamicBehaviorOutputUrl)\n\n model = unique_url.unique_inputs\n model.raw_input_fields = None\n model.input_fields = None\n model.deprecated_fields = None\n model.tax_result = \"unrenderable\"\n if start_year_is_none:\n model.first_year = None\n model.save()\n unique_url.unique_inputs = model\n unique_url.save()\n\n pk = unique_url.pk\n url = '/dynamic/behavior_results/{}/'.format(pk)\n response = CLIENT.get(url)\n assert any([t.name == 'taxbrain/not_avail.html'\n for t in response.templates])\n edit_exp = '/dynamic/behavioral/edit/{}/?start_year={}'.format(\n pk,\n start_year\n )\n assert response.context['edit_href'] == edit_exp",
"def draft(page):\r\n return app_index(page, cached_apps.get_draft, 'draft',\r\n False, True)",
"def test_only_borrowed_book_in_list(self):\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK'\n )\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n\n # Check that we got a response \"success\"\n self.assertEqual(response.status_code, 200)\n\n # Check that we don't have any book loaned\n self.assertTrue('bookinstancelist' in response.context)\n self.assertEqual(len(response.context['bookinstancelist']), 0)\n\n # Change some books's status to loan('o')\n books = BookInstance.objects.all()[:10]\n for book in books:\n book.status = 'o'\n book.save()\n\n # Repeat the login proccess\n response = self.client.get(reverse('my-borrowed'))\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Check borrowed book list in page\n self.assertTrue('bookinstancelist' in response.context)\n\n # Confirm all book belong to testuser1\n for book in response.context['bookinstancelist']:\n self.assertEqual(response.context['user'], book.borrower)\n self.assertEqual('o', book.status)",
"def test_templates_person_detail_cms_draft_content(self):\n user = UserFactory(is_staff=True, is_superuser=True)\n self.client.login(username=user.username, password=\"password\")\n\n published_category = CategoryFactory(should_publish=True)\n not_published_category = CategoryFactory()\n\n published_organization = OrganizationFactory(should_publish=True)\n not_published_organization = OrganizationFactory()\n\n person = PersonFactory(\n page_title=\"My page title\",\n fill_portrait=True,\n fill_bio=True,\n fill_maincontent=True,\n fill_categories=[published_category, not_published_category],\n fill_organizations=[published_organization, not_published_organization],\n )\n\n # Modify the draft version of the published category\n title_obj = published_category.extended_object.title_set.get(language=\"en\")\n title_obj.title = \"modified category\"\n title_obj.save()\n\n # Modify the draft version of the published organization\n title_obj = published_category.extended_object.title_set.get(language=\"en\")\n title_obj.title = \"modified organization\"\n title_obj.save()\n page = person.extended_object\n\n # The page should be visible as draft to the superuser\n url = page.get_absolute_url()\n response = self.client.get(url)\n content = htmlmin.minify(\n response.content.decode(\"UTF-8\"),\n reduce_empty_attributes=False,\n remove_optional_attribute_quotes=False,\n )\n\n self.assertContains(\n response,\n \"<title>My page title - example.com</title>\",\n html=True,\n status_code=200,\n )\n title = person.extended_object.get_title()\n self.assertContains(\n response,\n f'<h1 class=\"subheader__title\">{title:s}</h1>',\n html=True,\n )\n\n # Main content should be present when not empty\n self.assertContains(response, \"person-detail__maincontent\")\n\n # The published category should be on the page in its published version\n self.assertContains(\n response,\n (\n # pylint: disable=consider-using-f-string\n '<a class=\"category-badge\" href=\"{:s}\">'\n '<span class=\"offscreen\">Category</span>'\n '<span class=\"category-badge__title\">{:s}</span></a>'\n ).format(\n published_category.public_extension.extended_object.get_absolute_url(),\n published_category.public_extension.extended_object.get_title(),\n ),\n html=True,\n )\n # The not published category should not be on the page\n self.assertContains(\n response,\n (\n # pylint: disable=consider-using-f-string\n '<a class=\"category-badge category-badge--draft\" href=\"{:s}\">'\n '<span class=\"offscreen\">Category</span>'\n '<span class=\"category-badge__title\">{:s}</span></a>'\n ).format(\n not_published_category.extended_object.get_absolute_url(),\n not_published_category.extended_object.get_title(),\n ),\n html=True,\n )\n\n # The published organization should be on the page in its published version\n self.assertIn(\n # pylint: disable=consider-using-f-string\n '<div class=\"organization-glimpse\" property=\"contributor\" '\n 'typeof=\"CollegeOrUniversity\"><a href=\"{:s}\" title=\"{:s}\">'.format(\n published_organization.extended_object.get_absolute_url(),\n published_organization.extended_object.get_title(),\n ),\n content,\n )\n self.assertContains(\n response,\n # pylint: disable=consider-using-f-string\n '<h2 class=\"organization-glimpse__title\" property=\"name\">{:s}</h2>'.format(\n published_organization.public_extension.extended_object.get_title()\n ),\n html=True,\n )\n # The not published organization should not be on the page\n self.assertIn(\n # pylint: disable=consider-using-f-string\n '<a href=\"{:s}\" 
title=\"{:s}\">'.format(\n not_published_organization.extended_object.get_absolute_url(),\n not_published_organization.extended_object.get_title(),\n ),\n content,\n )\n\n self.assertContains(\n response,\n # pylint: disable=consider-using-f-string\n '<h2 class=\"organization-glimpse__title\" property=\"name\">{:s}</h2>'.format(\n not_published_organization.extended_object.get_title()\n ),\n html=True,\n )\n\n self.assertNotContains(response, \"modified\")",
"def action_draft(self, cr, uid, ids, context=None):\n if self.search(cr, uid, [('id', 'in', ids), ('fiscalyear_id.state', '!=', 'draft')], context=context):\n raise osv.except_osv(_('Warning!'), _('You can not re-open a period which belongs to closed fiscal year'))\n return super(account_period, self).action_draft(cr, uid, ids, context)",
"def test_debts_sorted_by_fee(self):\n card = CreditCard.objects.create(\n name='One',\n interest_rate=20.0,\n balance=1000_00,\n min_payment=10_00,\n min_payment_percent=10.0,\n annual_fee=100_00,\n user=self.user,\n )\n overdraft = Overdraft.objects.create(\n name='Over',\n interest_rate=20.0,\n balance=1000_00,\n monthly_fee=9_00,\n user=self.user,\n )\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(\n response.content,\n [overdraft.to_JSON(), card.to_JSON()],\n )",
"def test_draft_list_is_for_authenticated_users_only(client, contributor):\n\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n assert res.status_code == 302\n\n client.force_login(contributor)\n res = client.get(drafts_url)\n assert res.status_code == 200",
"def test_post_opening_balance_journals(self):\n pass",
"def test_01_check_to_state_draft_post(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n self.create_rule(cr, uid, 'on_create')\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.demo)\r\n self.delete_rules(cr, uid)",
"def test_monthArchive(self):\n self.post.status = 'publish'\n self.post.save()\n\n # url = reverse('xblog:month-archive',\n # kwargs={'year':self.post.pub_date.year,\n # 'month': self.post.pub_date.strftime('%b').lower(),\n # })\n url = self.post.get_month_archive_url()\n response = self.client.get(url)\n LOGGER.debug(\"XXX\" + str(response))\n self.assertContains(response, self.post.title)",
"def test_bill_identical_date_events_page(self):\n bill = self.fx.BillData.identical_date_events\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)",
"def test_draft_list_does_not_show_deleted_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor,\n status='deleted')\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' not in content",
"def test_make_draft(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'create_draft'}\r\n )\r\n # Update the draft version and check that published is different.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'metadata': {'due': '2077-10-10T04:00Z'}}\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))",
"def test_postflight_page_status(self):\n response = self.client.get('/postflight/')\n self.assertEqual(response.status_code, 200)",
"def test_get_drafts(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n\n url = '/0/chefs/%i/drafts' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('drafts', resp.data)\n self.assertEqual(1, len(resp.data['drafts']))\n keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['drafts'][0].keys()))\n self.assertEqual(r1.pk, resp.data['drafts'][0]['id'])",
"def test_sad_booking_past_compet(self):\n\n rv = self.app.get(\n f\"/book/{self.competitions[0]['name']}/{self.clubs[0]['name']}\"\n )\n assert rv.status_code in [400]\n assert b\"The booking page for a past competition is closed\" in rv.data\n assert b\"Welcome\" in rv.data"
] | [
"0.84138995",
"0.7889149",
"0.6775302",
"0.65236485",
"0.62575066",
"0.6029107",
"0.5984826",
"0.59390867",
"0.58819306",
"0.5876933",
"0.5722013",
"0.5572942",
"0.5545011",
"0.5518728",
"0.5517939",
"0.5496567",
"0.54783195",
"0.54646355",
"0.5458014",
"0.54177934",
"0.53996116",
"0.53986824",
"0.5396403",
"0.53919494",
"0.53890085",
"0.53439224",
"0.5339275",
"0.53270525",
"0.53257483",
"0.52950907"
] | 0.88270897 | 0 |
Test bills page for a year (/bills/). | def test_bills_page_for_year(self):
year = 2019
response = self.make_request(
"/bills/all/year/%d/" % year, follow_redirects=True
)
self.assertEqual(200, response.status_code)
self.assertIn(self.fx.BillData.bill_with_none_number.title, self.html)
self.assertIn(self.fx.BillData.sport.title, self.html)
self.assertIn(self.fx.BillData.identical_date_events.title, self.html) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_draft_bills_page_for_year(self):\n year = 2019\n response = self.make_request(\n \"/bills/draft/year/%d/\" % year, follow_redirects=True\n )\n self.assertEqual(200, response.status_code)\n bill = self.fx.BillData.draft\n self.assertIn(bill.title, self.html)",
"def test_draft_bills_page(self):\n response = self.make_request(\"/bills/draft/\", follow_redirects=False)\n self.assertEqual(302, response.status_code)\n current_year = datetime.datetime.today().year\n self.assertEqual(\n urlparse(response.location).path, \"/bills/draft/year/%d/\" % current_year\n )",
"def test_bills_page(self):\n self.make_request(\"/bills\", follow_redirects=True)\n headings = [\n \"Current Bills\",\n \"All Tabled Bills\",\n \"Private Member & Committee Bills\",\n \"All Tabled & Draft Bills\",\n \"Draft Bills\",\n \"Bills Explained\",\n ]\n for heading in headings:\n self.assertIn(heading, self.html)",
"def test_bill_page(self):\n bill = self.fx.BillData.food\n self.make_request(\"/bill/%d/\" % bill.id, follow_redirects=True)\n self.assertIn(bill.type.name, self.html)\n # Check if \"na\" stage is in page\n self.assertIn(\"stage2\", self.html)\n # Check if plenary event is shown in Bill History\n self.assertIn(\"Bill history\", self.html)\n self.assertIn(\"National Assembly\", self.html)",
"def test_current_bills_page(self):\n self.make_request(\"/bills/current\", follow_redirects=True)\n self.assertIn(\"Current Bills\", self.html)\n self.assertIn(\"Weekly update for all current bills\", self.html)\n for bill_key in self.fx.BillData:\n bill = getattr(self.fx.BillData, bill_key[0])\n if bill.status and bill.status.name in self.current_statuses:\n self.contains_bill(bill)\n else:\n self.doesnt_contain_bill(bill)",
"def test_yearArchive(self):\n self.post.status = 'publish'\n self.post.save()\n\n url = self.post.get_year_archive_url()\n response = self.client.get(url)\n self.assertContains(response, self.post.title)",
"def betting_lines(year):\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # Webapges are by dates\n all_dates = m.find('game_log', {'season': year}, {'_id': 0, 'date': 1}).distinct('date')\n\n browser = webdriver.Chrome('chromedriver')\n\n # Iterate through each date in a season\n for game_date in all_dates:\n\n # Get URL\n url = 'https://classic.sportsbookreview.com/betting-odds/nba-basketball/money-line/?date=' + datetime.strftime(game_date, '%Y%m%d')\n\n scrape_betting_page(url, browser, m, game_date)\n\n browser.close()",
"def test_yearly_report(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n consolidated_total = 212.23\n res = self.client().get('/yearly_report?year=2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['consolidated_total'], consolidated_total)",
"def test_spider_gets_specific_year(self):\n spider = Eia923Spider()\n resp = factories.TestResponseFactory(eia923=True)\n\n result = spider.form_for_year(resp, 2007)\n\n assert result is not None\n assert result.url == \"https://www.eia.gov/electricity/data/eia923/\" \\\n \"archive/xls/f906920_2007.zip\"\n assert result.meta[\"year\"] == 2007\n\n for year in range(2001, 2019):\n result = spider.form_for_year(resp, year)\n assert result is not None",
"def test_found_all_years(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertEqual(ar.years, [2008,2009])",
"def test_get_not_avail_page_renders(self, start_year, start_year_is_none):\n fields = get_post_data(start_year, _ID_BenefitSurtax_Switches=False)\n fields['BE_sub'] = ['0.25']\n fields[\"first_year\"] = start_year\n unique_url = get_taxbrain_model(fields,\n first_year=start_year,\n taxcalc_vers=\"0.14.2\",\n webapp_vers=\"1.3.0\",\n Form=DynamicBehavioralInputsModelForm,\n UrlModel=DynamicBehaviorOutputUrl)\n\n model = unique_url.unique_inputs\n model.raw_input_fields = None\n model.input_fields = None\n model.deprecated_fields = None\n model.tax_result = \"unrenderable\"\n if start_year_is_none:\n model.first_year = None\n model.save()\n unique_url.unique_inputs = model\n unique_url.save()\n\n pk = unique_url.pk\n url = '/dynamic/behavior_results/{}/'.format(pk)\n response = CLIENT.get(url)\n assert any([t.name == 'taxbrain/not_avail.html'\n for t in response.templates])\n edit_exp = '/dynamic/behavioral/edit/{}/?start_year={}'.format(\n pk,\n start_year\n )\n assert response.context['edit_href'] == edit_exp",
"def test_year_filtering(self):\n # Get a valid date\n entry = Entry.objects.get(id=1)\n params = {\"year\": entry.publication_date.year}\n\n self._test_filtering(**params)",
"def scrape(startyear, startmonth, endyear, endmonth):\n year = startyear\n month = startmonth\n while (not (year == endyear and month == endmonth)):\n ys = \"{}\".format(year)\n ms = \"{:02d}\".format(month)\n gather_all_profiles(ys,ms) \n if month == 12:\n year += 1\n month = 0\n month += 1",
"def test_get_boat(self):\n pass",
"def test_only_borrowed_book_in_list(self):\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK'\n )\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n\n # Check that we got a response \"success\"\n self.assertEqual(response.status_code, 200)\n\n # Check that we don't have any book loaned\n self.assertTrue('bookinstancelist' in response.context)\n self.assertEqual(len(response.context['bookinstancelist']), 0)\n\n # Change some books's status to loan('o')\n books = BookInstance.objects.all()[:10]\n for book in books:\n book.status = 'o'\n book.save()\n\n # Repeat the login proccess\n response = self.client.get(reverse('my-borrowed'))\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Check borrowed book list in page\n self.assertTrue('bookinstancelist' in response.context)\n\n # Confirm all book belong to testuser1\n for book in response.context['bookinstancelist']:\n self.assertEqual(response.context['user'], book.borrower)\n self.assertEqual('o', book.status)",
"def test_iter_all_years(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertEqual([ary.year for ary in ar], [2008,2009])",
"def test_yearly_report_error(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n year = 'sdfg'\n res = self.client().get(f'/yearly_report?year={year}', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], f'The date {year} does not match the format YYYY')",
"def main():\r\n windows_driver = '/mnt/c/Users/kurtrm/Documents/bin/chromedriver.exe'\r\n browser = Chrome(executable_path=windows_driver)\r\n\r\n url = 'https://www.pcta.org/discover-the-trail/' \\\r\n 'thru-hiking-long-distance-hiking/2600-miler-list/'\r\n\r\n browser.get(url)\r\n year_range = range(1952, 2018) # Range of years of recorded thru-hikes\r\n\r\n for year in year_range:\r\n select = Select(browser.find_element_by_id('year'))\r\n select.select_by_value(str(year))\r\n time.sleep(1.5)\r\n miler_list = browser.find_elements_by_css_selector('td')\r\n if miler_list[0].text != 'No records found for the selected year.':\r\n people = extract_names(miler_list, year)\r\n load_mongo_db('pct', 'completions', people)",
"def get_bills(request):\n response = ApiJsonResponse()\n try:\n user = MyUser.objects.get(pk=request.user.pk)\n except ObjectDoesNotExist:\n return Response({\n \"msg\": _('MSG_USER_NOT_EXIST'),\n \"status\": 404\n }, status=404)\n try:\n company = Company.objects.get(owner=user)\n except ObjectDoesNotExist:\n return Response({\n \"msg\": _('MSG_COMPANY_NOT_EXIST'),\n \"status\": 404\n }, status=404)\n try:\n bills = Bills.objects.filter(company=company)\n except ObjectDoesNotExist:\n response.set_error(1)\n response.set_result_code(404)\n response.set_result_msg(\"MSG_NO_BILLS_FOUNDED\")\n return JsonResponse(response.get_dict())\n try:\n for bill in bills:\n response.set_multiples_data(serialize_bill_object(bill))\n except Exception:\n response.set_multiples_data(serialize_bill_object(bills))\n response.set_result_code(200)\n response.set_result_msg(\"MSG_PROMOTION_FOUNDED\")\n return JsonResponse(response.get_dict())",
"def test_booklist_ordered_by_due_date(self):\n # Change all book's status to loan('o')\n for book in BookInstance.objects.all():\n book.status = 'o'\n book.save()\n\n # Login into page\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK')\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Confirm that only 10 items are displayed per page\n self.assertEqual(len(response.context['bookinstancelist']), 10)\n\n last_date = 0\n for book in response.context['bookinstancelist']:\n if last_date == 0:\n last_date = book.due_back\n else:\n self.assertTrue(last_date <= book.due_back)\n last_date = book.due_back",
"def test_post_opening_balance_journals(self):\n pass",
"def test_process_newly_public_domain_height_of_creation(self):\n self.page_mock.text = \"\"\"{{REDaten\n|BAND=S I\n|KEINE_SCHÖPFUNGSHÖHE=OFF\n|TODESJAHR=1950\n}}\nbla\n{{REAutor|Stein.}}\"\"\"\n re_page = RePage(self.page_mock)\n task = PDKSTask(None, self.logger)\n compare({\"success\": True, \"changed\": False}, task.run(re_page))\n compare(\"1950\", re_page[0][\"TODESJAHR\"].value)",
"def test_get_queryset(self):\n year = 2012\n response = self.client.get(self.url, {\"year\": year})\n self.assertEqual(response.status_code, 200)\n\n # Context\n date = datetime.date(year, 1, 1)\n self.assertEqual(response.context[\"current_publication_year\"], date)\n\n self.assertEqual(\n response.context[\"n_publications_filter\"], self.n_publications_per_year\n )",
"def test_retail_year_2010(self):\n mock_todays = [date(2009, 10, 1), # random earlier half\n date(2010, 2, 1), # random later half\n date(2009, 12, 31), # start of calendar year\n date(2010, 1, 1), # end of calendar year\n date(2009, 7, 26), # start of retail year\n date(2010, 7, 31), # end of retail year\n ]\n\n for today_2010 in mock_todays:\n self._curr_retail_2010_tests(today_2010)\n self._prev_retail_2010_tests(today_2010)\n pass",
"def test_award_list(self):\n resp = self.client.get('/api/v1/awards/')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue(len(resp.data) >= 2)\n\n self.assertEqual(self.client.get('/api/v1/awards/fain/ABCD').status_code, 200)\n self.assertEqual(self.client.get('/api/v1/awards/uri/ABCD').status_code, 200)\n self.assertEqual(self.client.get('/api/v1/awards/piid/ABCD').status_code, 200)",
"def test_date_by_gt_yr(self):\n spi_search = \"find date > 1980\"\n inv_search = 'year:1980->9999'\n self._compare_searches(inv_search, spi_search)",
"def test_get_opening_balance_journals(self):\n pass",
"def test_sad_booking_past_compet(self):\n\n rv = self.app.get(\n f\"/book/{self.competitions[0]['name']}/{self.clubs[0]['name']}\"\n )\n assert rv.status_code in [400]\n assert b\"The booking page for a past competition is closed\" in rv.data\n assert b\"Welcome\" in rv.data",
"def test_show_bag(self):\n response = self.client.get('/shopping_bag/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shopping_bag/bag.html')",
"def test_retail_year_2006(self):\n mock_todays = [date(2005, 10, 1), # random earlier half\n date(2006, 2, 1), # random later half\n date(2005, 12, 31), # start of calendar year\n date(2006, 1, 1), # end of calendar year\n date(2005, 7, 31), # start of fiscal year\n date(2006, 7, 29), # end of fiscal year\n ]\n\n for today_2006 in mock_todays:\n self._curr_retail_2006_tests(today_2006)\n self._prev_retail_2006_tests(today_2006)\n pass"
] | [
"0.7934322",
"0.73883474",
"0.7205412",
"0.6592645",
"0.6201756",
"0.5930797",
"0.5895609",
"0.5849038",
"0.56935006",
"0.56164986",
"0.55925894",
"0.5533068",
"0.5509524",
"0.53481203",
"0.5337408",
"0.53338116",
"0.5332658",
"0.5323729",
"0.5322545",
"0.52949613",
"0.5285188",
"0.5278833",
"0.5264573",
"0.52630407",
"0.5262097",
"0.52604294",
"0.52411693",
"0.52243304",
"0.52198213",
"0.52132046"
] | 0.844176 | 0 |
Print a Fibonacci series up to n. | def fib(n): # write Fibonacci series up to n
a, b = 0, 1
while a < n:
print(a, end=' ')
a, b = b, a+b
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fib(n): # write Fibonacci series up to n\n a, b = 1, 1\n while a < n:\n print(a, end=' ')\n a, b = b, a + b\n print()",
"def fibon(n):\r\n a, b = 0, 1\r\n while b < n:\r\n print(b, end=' ')\r\n a, b = b, a + b\r\n\r\n print()",
"def fib(n):\n a, b = 0, 1\n while a < n:\n print(a, end=' ')\n a, b = b, a+b\n print()",
"def fib(n):\n a, b = 0, 1\n while b < n:\n print(b, end=' ')\n a, b = b, a+b\n print()",
"def fib(n):\n a, b = 0, 1\n while b < n:\n print(b, end=' ')\n a, b = b, a + b",
"def fib(n):\n\ta, b = 0, 1\n\twhile a < n:\n\t\tprint a,\n\t\ta, b = b, a+b",
"def fib(n):#return None #optional\n a, b = 0, 1\n while a < n:\n print(a , end = ' ')\n a, b = b, a+b\n n = 1000\n print(n)",
"def fib(n):\n\ta,b=0,1\n\twhile b<n:\n\t\tprint b,\n\t\ta,b=b,a+b",
"def fibonacci(n):",
"def fib(n):\n a, b = 0, 1\n while b < n:\n print b,\n a, b = b, a+b",
"def fib (n):\n a,b=0,1\n while b<n:\n\tprint b,\n\ta,b = b,a+b",
"def fibonacci(n):\n\tfib_seq = []\n\tnth_term = 0\n\t\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tfib_seq.append(0)\n\t\tif i == 1:\n\t\t\tfib_seq.append(1)\n\t\tif i > 1:\n\t\t\tnth_term = fib_seq[-1] + fib_seq[-2]\n\t\t\tfib_seq.append(nth_term)\n\t\n\tprint(fib_seq)\n\tprint(fib_seq[n])\n\treturn(fib_seq[n])",
"def fibonacci(n):\n fibval = sum_series(n, 0, 1)\n print(fibval)\n return fibval",
"def fib(n):\n print(\"fib({})\".format(n))\n if(n <= 2):\n return 1\n else:\n return fib(n-1) + fib(n-2)",
"def fibonacci(n):\n\n ## Auxiliary functions for working in our polynomial ring.\n def poly_sqr((a, b)):\n a2 = a*a\n return 2*a*b + a2, a2 + b*b\n def poly_mul((a, b), (c, d)):\n ac = a*c\n return a*d + b*c + ac, ac + b*d\n\n ## Do the job. For negative indices, we take powers of t^{-1}.\n if n < 0: return power((1, -1), -n, (0, 1), poly_sqr, poly_mul)\n else: return power((1, 0), n, (0, 1), poly_sqr, poly_mul)",
"def fibonacci(self, n):\n\n if n == 1:\n return 1\n elif n <= 0:\n return 0\n else:\n return self.fibonacci(n - 1) + self.fibonacci(n - 2)",
"def fibonacci(n):\n sequence = [0, 1]\n for i in range(n + 1):\n value = add(sequence[-2], sequence[-1])\n sequence.append(value)\n return sequence[n]",
"def fib_iterative(n: int) -> int:\n print(n)\n return 0",
"def fibonacci_number(n):\r\n l = [0, 1] \r\n for i in range(n - 1):\r\n l = [*l, l[-1] + l[-2]]\r\n return l[n - 1]",
"def fibonacci_numbers():\n print(\"Problem: Fibonacci numbers\")\n\n n = int(input())\n\n result = fib(n)\n print(result)",
"def fib(n:int) -> int:\n if n<= 2:\n return 1\n else:\n return fibonacci.fib(n-1) + fibonacci.fib(n-2)",
"def fibonacci (n):\n\tif n == 0:\n\t\treturn 0\n\telif n == 1:\n\t\treturn 1\n\telse:\n\t\treturn fibonacci(n-2) + fibonacci(n-1)",
"def fib(n):\r\n to_return = []\r\n for i in range(0, n):\r\n to_return.append(FibSeq.fib_r(i))\r\n\r\n return to_return",
"def fib (n):\r\n if n == 0 or n == 1:\r\n return 1\r\n else:\r\n return fib(n-1) + fib(n-2)",
"def fibonacci(n):\n\tif n == 0:\n\t\treturn 0\n\telif n == 1:\n\t\treturn 1\n\telse:\n\t\treturn fibonacci(n-1) + fibonacci(n-2)",
"def fibonacci_iter(n):\n f = []\n for x in range(n + 1):\n if x == 0:\n f.append(x)\n elif x == 1:\n f.append(x)\n else:\n f.append(f[-1] + f[-2])\n return f[-1]",
"def fibonacci(n):\n print(n)\n if n == 0 or n == 1:\n return 1\n\n return fibonacci(n - 1) + fibonacci(n - 2)",
"def fibo(n):\n first = 0\n second = 1\n for i in range (1,n+1):\n if (i<=1): \n #begins sequence (terms 0 and 1 do not have two prior terms)\n newVal = i\n else:\n #continues sequence by adding the previous two numbers in the\n #sequence, and updating the variables\n newVal = first + second\n first = second\n second = newVal\n print(i,newVal)",
"def fib(n):\n a, b = 1, 1\n while n:\n a, b = b, a + b\n n -= 1\n return a",
"def fib(n):\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)"
] | [
"0.86665875",
"0.865262",
"0.85437036",
"0.8514844",
"0.83812505",
"0.8376221",
"0.8372399",
"0.82822764",
"0.8197588",
"0.818762",
"0.8139177",
"0.7916685",
"0.79095733",
"0.7795058",
"0.7737876",
"0.7699194",
"0.76678586",
"0.7626423",
"0.7606383",
"0.7585965",
"0.7549635",
"0.7506542",
"0.75053155",
"0.74964845",
"0.74940324",
"0.74756205",
"0.7473453",
"0.74662596",
"0.74367774",
"0.7412018"
] | 0.8673606 | 0 |
Attempts to decode a message hidden in the image The secret_key and random_seed MUST be the same as the ones used to embed the message initially message_length will ensure that the decoded string doesn't contain any extra characters. If not specified, the decoder will keep trying to decode until an error is thrown | def decode (self, secret_key, random_seed, message_length=math.inf):
# seed the random number generator with the seed used to embed
random.seed(random_seed)
bytes_visited = {} # a dictionary of the unique bytes already visited
color_offset = StegImage.color_offset # the color plane where the message exists
recent_bits = [] # an array. each element is a single bit
message = ""
message_over = False
character_offset = 0
while ((len(bytes_visited) < message_length * self.binary_size) and not message_over) and len(bytes_visited) < (len(self.bytes) - 54)/3: # will try to decode one letter at a time until an error is thrown or it reaches the end of the image. (the algo has no idea when the message stops)
index_of_byte = None
while (index_of_byte is None or index_of_byte in bytes_visited): # if the byte is visited twice, in the embed algo, it just skips it the second time and moves on, so do the same when decoding
index_of_byte = random.randint(self.offset, self.number_of_pixels * 3)
index_of_byte += color_offset
bytes_visited[index_of_byte] = True
byte = self.binary_array[index_of_byte]
bit = data_manipulation.get_bit_from_byte(byte, self.binary_size - 1) # get the last bit of the byte
recent_bits.append(bit)
if len(recent_bits) == StegImage.binary_size: # if an entire byte is stored:
# attempt to decrypt
try:
letter = EncryptString.decrypt(recent_bits, secret_key, character_offset = character_offset) # if this throws an error, assume the end of the message has been reached
# a letter has been successfully decrypted if it reaches this point
message += letter
character_offset += 1 # another character in the message has been found
recent_bits = []
except:
# print("The end of the message has been reached or the message was not encoded successfully/the wrong decode parameters were given")
message_over = True # assume the emssage is over if an error ahs been reached
#traceback.print_exc() # since an error is expected (a utf-8 decode error), don't print it
return message | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decrypt_message(encrypted_message):",
"def decode(decryption=None):\n\n key_to_encrypt = {'a': 'q', 'b': 'v', 'c': 'x', 'd': 'z', 'e': 'y', 'f': 'w', 'g': 'u', 'h': 't', 'i': 's',\n 'j': 'r',\n 'k': 'p', 'l': 'o', 'm': 'n', 'n': 'm', 'o': 'l', 'p': 'k', 'r': 'j', 's': 'i', 't': 'h',\n 'u': 'g', 'w': 'f',\n 'y': 'e', 'z': 'd', 'x': 'c', 'v': 'b', 'q': 'a',\n 'A': 'Q', 'B': 'V', 'C': 'X', 'D': 'Z', 'E': 'Y', 'F': 'W', 'G': 'U', 'H': 'T', 'I': 'S',\n 'J': 'R', 'K': 'P',\n 'L': 'O', 'M': 'N', 'N': 'M', 'O': 'L', 'P': 'K', 'R': 'J', 'S': 'I', 'T': 'H', 'U': 'G',\n 'W': 'F', 'Y': 'E',\n 'Z': 'D', 'X': 'C', 'V': 'B', 'Q': 'S',\n '1': '5', '2': '9', '3': '8', '4': '7', '5': '6', '6': '4', '7': '3', '8': '2', '9': '1',\n '.': ',', ',': '.', ':': ';', ';': ':', '?': '!', '!': '?', '-': '_', '_': '-', '(': ')',\n ')': '(',\n '%': '$', '$': '%', ' ': '&', '&': ' ', '+': '*', '*': '+'}\n\n k1 = key.Key(key_to_encrypt)\n reversed_key = k1.createReverseKey()\n\n entered_image = input(\"Image name with extension: \")\n img = Image.open(entered_image, 'r')\n\n decoded_message = ''\n data_from_image = iter(img.getdata())\n\n while (True):\n pixels = [value for value in data_from_image.__next__()[:3] +\n data_from_image.__next__()[:3] +\n data_from_image.__next__()[:3]]\n\n binary = ''\n\n for i in pixels[:8]:\n if (i % 2 == 0):\n binary += '0'\n else:\n binary += '1'\n\n decoded_message += chr(int(binary, 2))\n d1 = monoalphabetic_decryption.Decryption(reversed_key, decoded_message)\n message = d1.decrypt()\n if (pixels[-1] % 2 != 0):\n return message",
"def decrypt_message(self):\r\n\r\n\t\t#Will not let user input useless messages that cannot be decrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to decrypt. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Your decrypted message is\")\r\n\t\tprint(self.my_code + \"|\")",
"def decrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.decrypt(message)",
"def decrypt_message(self, cipher):\n\t\tmessage = cipher ** self.private_key % self.hidden_primes_product\n\t\treturn message",
"def decrypt_faces(msg, nkey=key):\n newmsg = msg[:-20]\n obj = DES.new(nkey, DES.MODE_ECB)\n return obj.decrypt(newmsg)",
"def hide(self, img, message):\r\n encoded = img.copy()\r\n width, height = img.size\r\n index = 0\r\n\r\n message = message + '~~~'\r\n message_bits = \"\".join(tools.a2bits_list(message))\r\n\r\n npixels = width * height\r\n if len(message_bits) > npixels * 3:\r\n return \"\"\"Too long message (%s > %s).\"\"\" \"\"\"%\"\"\"\r\n (len(message_bits), npixels * 3)\r\n\r\n for row in range(height):\r\n for col in range(width):\r\n if index + 3 <= len(message_bits) :\r\n\r\n # Get the colour component.\r\n (r, g, b) = img.getpixel((col, row))\r\n\r\n # Change the Least Significant Bit of each colour component.\r\n r = tools.setlsb(r, message_bits[index])\r\n g = tools.setlsb(g, message_bits[index+1])\r\n b = tools.setlsb(b, message_bits[index+2])\r\n\r\n # Save the new pixel\r\n encoded.putpixel((col, row), (r, g , b))\r\n\r\n index += 3\r\n\r\n return encoded\r\n self.resultLbl.SetLabel(\"Message successfully encoded.\")",
"def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain",
"def decrypt(self, message):\n message = base64.b64decode(message)\n initialization_vector = message[:self._block_size]\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n raw_message = cipher.decrypt(message[self._block_size:])\n return self._remove_padding(raw_message).decode('utf-8')",
"def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed",
"def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])",
"def decode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to decode: \", end=\"\")\n message = input()\n extract_encoded_message = message.split(LEFT_TO_RIGHT_MARK)[1]\n message = extract_encoded_message\n extract_encoded_message = message.split(RIGHT_TO_LEFT_MARK)[0]\n encoded = ''\n decoded = ''\n\n for message_char in message:\n if message_char in zero_space_symbols:\n encoded = encoded + str(zero_space_symbols.index(message_char))\n\n cur_encoded_char = ''\n\n for index, encoded_char in enumerate(encoded):\n cur_encoded_char = cur_encoded_char + encoded_char\n if index > 0 and (index + 1) % padding == 0:\n decoded = decoded + chr(int(cur_encoded_char, len(zero_space_symbols)))\n cur_encoded_char = ''\n\n return decoded",
"def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")",
"def decrypt(self, message):\n # message = message.upper().split()\n # message = \"\".join(message)\n # desalting the message to remove 5 characters blocks\n padding = input(\"Have you used 5 characters blocks? y/n \")\n if padding == \"y\":\n message = message.replace(\" \", \"\")\n message = self.desalt_random(message)\n message = \"\".join(message)\n\n message = message.upper()\n message_list = []\n for ch in message:\n message_list.append(self.main_dict[ch][0])\n\n # OTP Encryption / process the message with OTP\n otp = input(\"What is the OTP that was generated for you during \"\n \"encryption process?: \")\n otp = otp.upper()\n random_otp = []\n for ch in otp:\n random_otp.append(self.main_dict[ch][0])\n\n # If OTP is correct, decrypt the message with mod27\n if len(message_list) != len(random_otp):\n print(\"You typed a wrong OTP.\")\n return None\n else:\n math_list = []\n for i, item in enumerate(message_list):\n if message_list[i] >= random_otp[i]:\n x = message_list[i] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n else:\n for key, value in self.main_dict.items():\n if item == value[0]:\n x = value[1] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n return \"\".join(math_list)",
"def decrypt_message(self, encrypted_message):\n f = Fernet(bytes(self.key))\n decrypted_message = f.decrypt(encrypted_message)\n return decrypted_message",
"def decrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def embed_message(self, message, secret_key, random_seed):\n encrypt = EncryptString(string = message)\n encrypt.encrypt(secret_key)\n encrypted_bits = encrypt.bits\n self.embed_bits(encrypted_bits, self.color_offset, random_seed) # 0 (self.color_offset) because only modify the red plane",
"def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")",
"async def unpack_message(\n auth_manager: AuthKeyManager,\n schema: Schema,\n encrypted_message: bytes\n) -> EncryptedMessage:\n auth_key = await get_auth_key(auth_manager, encrypted_message)\n\n msg_key = load_int128(encrypted_message[8:]).value\n\n key_pair = generate_key_iv(\n auth_key,\n msg_key,\n key_type='client'\n )\n\n message_bytes = ige256_decrypt(\n encrypted_message[24:],\n key_pair.key,\n key_pair.iv\n )\n\n return await load_message(schema, message_bytes)",
"def decrypt():\n request_data = request.get_json()\n\n if ('ciphertext' in request_data and\n 'tag' in request_data and\n 'enc_session_key' in request_data and\n 'nonce' in request_data):\n\n try:\n for key in request_data.keys():\n request_data[key] = b64decode(request_data[key])\n except binascii.Error:\n return Response(\n json.dumps(\n {\n 'error': 'Malformed payload'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n encryption = Decryption(request_data['enc_session_key'], request_data['nonce'])\n try:\n message = encryption.decrypt(\n (request_data['ciphertext'], request_data['tag'])\n ).decode()\n except ValueError as error:\n return Response(\n json.dumps(\n {\n 'error': f'Failed to decrypt the message due to the error: [{error}]'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n return jsonify({'message': message}), 200\n\n return Response(\n json.dumps(\n {\n 'error': (\n 'Tag / Ciphertext / Nonce / Encrypted Session Key'\n ' missing in the request body'\n )\n }\n ),\n 400,\n mimetype='application/json'\n )",
"def decrypt_message(encrypted_message):\r\n\r\n # conversion to bytes\r\n encrypted_message = bytes(encrypted_message, \"ascii\")\r\n\r\n # loading key\r\n key = load_key()\r\n\r\n # creating a fernet object\r\n f = Fernet(key)\r\n\r\n # decrypting the messsage\r\n decrypted_message = f.decrypt(encrypted_message)\r\n\r\n return decrypted_message.decode()",
"def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))",
"def decrypt(self, message):\n #check validity of _private_key\n if self._private_key is None:\n raise Exception(\"invalid private key\")\n\n output = \"\"\n\n d = self._private_key[0]\n n = self._private_key[1]\n\n for i in xrange(len(ciphertext)):\n m = pow(ciphertext[i], d, n)\n output += int_to_string(m)\n return output",
"def decrypt(self, message):\n return self._keypair.decrypt(message)",
"def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))",
"def decrypt(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.decrypt(msg).data",
"def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)",
"def decrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"decrypt.php\",msg)) as f:\n decryptedmessage = f.read().decode('utf-8',\"strict\")\n return decryptedmessage",
"def decryptSecret(self, encoded_secret):\n \n plain_secret = self._encryptor.decrypt(encoded_secret)\n\n if len(plain_secret) > MAX_SECRET_LENGTH:\n raise ValueError(\"decryption resulted in a plain secret longer than the maximum length of %d bytes\" % MAX_SECRET_LENGTH)\n \n return(plain_secret)"
] | [
"0.64776963",
"0.63632095",
"0.6322363",
"0.61773396",
"0.6124936",
"0.6089968",
"0.6028787",
"0.60207546",
"0.5953228",
"0.594839",
"0.5930802",
"0.59212095",
"0.5885464",
"0.57913494",
"0.57555205",
"0.57384247",
"0.5734379",
"0.5689325",
"0.5677325",
"0.5625216",
"0.56136096",
"0.56135637",
"0.55850416",
"0.55839133",
"0.5573761",
"0.5540815",
"0.55237275",
"0.5501442",
"0.54972976",
"0.54944867"
] | 0.8330446 | 0 |
index_of_byte index of byte in array index_of_bit index of the bit in the byte new_value the new value at the index of the bit (0 or 1) | def set_bit(self, index_of_byte, index_of_bit, new_value):
if index_of_bit >= self.binary_size:
print("You tried to modify a byte at %d index. This cannot be done. The maximum index is %d."%(index_of_bit, self.binary_size - 1))
else:
new_value = str(new_value)
byte = self.binary_array[index_of_byte]
new_byte = byte[0:index_of_bit] + new_value
if index_of_bit < self.binary_size - 1: # you aren't changing the final bit in the byte
new_byte += byte[index_of_bit + 1:]
#apply changes
self.binary_array[index_of_byte] = new_byte | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updatebyte(byte, bit, value):\n if value == 0:\n return byte & ~(1 << bit)\n elif value == 1:\n return byte | (1 << bit)",
"def modify_byte(byte: int, message_bit: bool) -> int:\n byte = bin(byte)\n byte_new_last_bit = int(message_bit)\n new_byte = int(byte[:-1] + str(byte_new_last_bit), base=2)\n return new_byte",
"def bit(self, idx: int) -> int:\n pos = self.start() + idx\n chunk = self.raw_key()[(pos // 8)]\n bit = pos % 8\n return ((1 << bit) & chunk) >> bit",
"def update_bit(num, i, v):\n return num & ~(1 << i) | (v << i)",
"def _get_bit(byte, ii):\n return (byte >> (7 - ii)) & 1",
"def __getitem__(self, index):\n nth_int, nth_bit = divmod(index, BitArray._UNSIGNED_INT)\n return self.bits[nth_int] & (1 << nth_bit)",
"def swap_byte(byte_array, index):\n\n if byte_array[index] == 0:\n changed_byte_array = byte_array[0:index] + b\"\\xff\" + byte_array[index + 1 :]\n changed_byte_array = byte_array[0:index] + b\"\\x00\" + byte_array[index + 1 :]\n return changed_byte_array",
"def __setitem__(self, n, bit):\n self.num ^= (np.uint64(-bit) ^ self.num) & (UINT64_ONE << np.uint64(n))",
"def setbit(self, key, offset, value):\n key = self._encode(key)\n index, bits, mask = self._get_bits_and_offset(key, offset)\n\n if index >= len(bits):\n bits.extend(b\"\\x00\" * (index + 1 - len(bits)))\n\n prev_val = 1 if (bits[index] & mask) else 0\n\n if value:\n bits[index] |= mask\n else:\n bits[index] &= ~mask\n\n self.redis[key] = bytes(bits)\n\n return prev_val",
"def update(self, idx, x):\n while idx < len(self.bit):\n self.bit[idx] += x\n idx |= idx + 1",
"def bit_get(val, idx):\n return (val >> idx) & 1",
"def access_bit(data, num):\n \n base = int(num // 8)\n shift = int(num % 8)\n return (data[base] & (1<<shift)) >> shift",
"def get_bit(a, bit_pos):\n return np.clip(np.bitwise_and(a, 2 ** (bit_pos-1)), 0, 1)",
"def update(self, idx, add):\n idx += 1\n while idx < len(self.array):\n self.array[idx] += add\n idx += idx & -idx #Adding the last bit",
"def get_bit(byte, bit_num):\n return (byte & (1 << bit_num)) >> bit_num",
"def test_bit_set_bit_random_byte_random_offset(self):\n value = bytearray()\n rand_byte = random.randint(0, 255)\n value.append(rand_byte)\n rand_offset = random.randint(0, 4) * 8\n ops = [bitwise_operations.bit_set(self.test_bin_zeroes, rand_offset, 8, 1, value, None)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 5)\n expected_result[rand_offset // 8] = rand_byte\n assert bins[self.test_bin_zeroes] == expected_result\n # should set the byte at rand_offset/8 to rand_byte",
"def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)",
"def set_bit(self, register, bit_index, state):\n oldvalue = self.device.readregister(register)\n if state:\n newvalue = oldvalue | 2 ** bit_index\n else:\n newvalue = oldvalue & ~(2 ** bit_index)\n \n self.device.writeregister(register, newvalue)",
"def bitfrombyte(self, val: int, targetIndex: int):\n mask = 0b1 << targetIndex\n bit = val & mask\n bit = bit >> targetIndex\n return bit",
"def swap_bits_bit_array(x, bit_array):\n\n if (is_power_two(bit_array)):\n bit_array |= bit_array << 1\n\n if (is_power_two(x & bit_array)):\n x ^= bit_array\n return x",
"def lsb_update(self, index, new_lsb):\n self.set_bit(index, self.binary_size - 1, new_lsb)",
"def __setitem__(self, index, fill):\n nth_int, nth_bit = divmod(index, BitArray._UNSIGNED_INT)\n if fill:\n self.bits[nth_int] |= (1 << nth_bit)\n else:\n self.bits[nth_int] &= ~(1 << nth_bit)",
"def __checkbit(byte, bit):\n value = 0\n if byte & (1 << bit):\n value = 1\n return value",
"def get_bit(num, position):\n\treturn (num >> position) & 0b1",
"def swap_bits_index(x, i, j=python.Parameter.OTHER_ARGUMENT):\n\n if (j is python.Parameter.OTHER_ARGUMENT):\n j = i + 1\n\n if (get_bit(x, i) != get_bit(x, j)):\n x = toggle_bit(toggle_bit(x, i), j)\n return x",
"def test_bit_set_bit_index_out_of_range(self):\n value = bytearray()\n value.append(255)\n ops = [bitwise_operations.bit_set(self.test_bin_zeroes, 41, 8, 1, value, None)]\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)",
"def setbit(integer, nth_bit):\n if nth_bit < 0:\n raise ValueError('Negative bit number.')\n mask = 1 << nth_bit\n integer |= mask\n return integer",
"def toggle_bit(bit) -> int:\n\treturn 1 if bit == 0 else 0",
"def GetBits(self, num_bits):\n old_idx_boff = self.idx_boff\n\n bits_available = self.NumBits() - (8*self.idx_byte + self.idx_boff)\n if num_bits > bits_available:\n print \"num_bits: %d but bits_available: %d\" % (num_bits, bits_available)\n raise StandardError()\n retval = []\n bits_left = num_bits\n if self.idx_boff == 0:\n while bits_left >= 8:\n retval.append(self.output[self.idx_byte])\n self.idx_byte += 1\n bits_left -= 8\n if bits_left:\n retval.append( ~(255 >> bits_left) & self.output[self.idx_byte])\n self.idx_boff += bits_left\n self.idx_boff %= 8\n bits_left = 0\n else:\n # We know there is a non-zero bit offset if we're below here.\n cur_byte = 0\n cur_boff = 0\n lob = len(self.output)\n while bits_left > 0:\n if bits_left >= 8 and lob > self.idx_byte:\n cur_byte = 255 & (self.output[self.idx_byte] << self.idx_boff)\n self.idx_byte += 1\n cur_byte |= (self.output[self.idx_byte] >> (8 - self.idx_boff))\n retval.append(cur_byte)\n cur_byte = 0\n bits_left -= 8\n else:\n bits_to_consume = min(min(8 - cur_boff, 8 - self.idx_boff),\n bits_left)\n\n c = self.output[self.idx_byte]\n c <<= self.idx_boff\n c &= 255\n cur_byte |= (c & ~(255 >> (bits_to_consume))) >> cur_boff\n bits_left -= bits_to_consume\n cur_boff += bits_to_consume\n self.idx_boff += bits_to_consume\n if cur_boff >= 8:\n retval.append(cur_byte)\n cur_byte = 0\n cur_boff -= 8\n if self.idx_boff >= 8:\n self.idx_byte += 1\n self.idx_boff -= 8\n if self.idx_boff >= 8:\n raise StandardError()\n if cur_boff:\n retval.append(cur_byte)\n if (old_idx_boff + num_bits) % 8 != self.idx_boff:\n print \"old_idx_boff(%d) + num_bits(%d) != self.idx_boff(%d) \" % (\n old_idx_boff, num_bits, self.idx_boff)\n print \"retval: \", (retval, num_bits)\n raise StandardError()\n return (retval, num_bits)",
"def test_bit_set_bit_inbetween_bytes(self):\n value = bytearray()\n value.append(255)\n ops = [bitwise_operations.bit_set(self.test_bin_zeroes, 4, 8, 1, value, None)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([15] * 1 + [240] * 1 + [0] * 3)\n assert bins[self.test_bin_zeroes] == expected_result"
] | [
"0.74368215",
"0.6721762",
"0.6662158",
"0.65792656",
"0.6568271",
"0.65516293",
"0.64147186",
"0.62618554",
"0.6233598",
"0.6209762",
"0.62077487",
"0.61962634",
"0.6191687",
"0.6158861",
"0.6106333",
"0.60963595",
"0.6089261",
"0.60875833",
"0.6047753",
"0.60092896",
"0.5893063",
"0.5837587",
"0.575225",
"0.57443863",
"0.5732398",
"0.5728358",
"0.5721536",
"0.5700773",
"0.5697769",
"0.5693207"
] | 0.8123513 | 0 |
Updates the binary_array array based on the bytes private variable | def update_binary_bytes(self):
self.binary_array = data_manipulation.convert_byte_array(self.bytes, self.binary_size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_bytes_array(self):\n self.bytes = data_manipulation.convert_from_binary_array(self.binary_array)",
"def update_binary(self, offset, data):\n log.debug(\"write binary {0} to {1}\".format(offset, offset+len(data)))\n cmd = bytearray([0x00, 0xD6, offset/256, offset%256, len(data)])\n cmd = cmd + bytearray(data)\n rsp = self.transceive(cmd)\n if rsp[-2:] != \"\\x90\\x00\":\n raise Type4TagError(rsp[-2:])",
"def setByteArray(self, name: unicode, value: List[int]) -> None:\n ...",
"def test_byte_array_conversion():\n ob = ConversionTest()\n\n assert ob.ByteArrayField is None\n\n ob.ByteArrayField = [0, 1, 2, 3, 4]\n array = ob.ByteArrayField\n assert len(array) == 5\n assert array[0] == 0\n assert array[4] == 4\n\n value = b\"testing\"\n ob.ByteArrayField = value\n array = ob.ByteArrayField\n for i, _ in enumerate(value):\n assert array[i] == operator.getitem(value, i)",
"def _update_binary(self, field, tag_id, value):\n # Are we processing a binary tag?\n # pylint: disable=consider-using-f-string\n if self._binary_tag == 0:\n if tag_id in self._binary_fields:\n self._binary_length = len(str(tag_id + 1)) + int(value)\n if self._binary_length > self._max_length:\n raise FIXLengthTooLongError(\n 'binary field too long: {0} ref:{1}'.format(\n self._binary_length, tag_id))\n self._binary_tag = tag_id\n else:\n self._binary_length = -1\n else:\n # Is this the wrong tag?\n if tag_id != (self._binary_tag + 1):\n raise FIXParserError(\n f'expected binary tag {self._binary_tag+1} found {tag_id}')\n if len(field) != self._binary_length + 1:\n raise FIXParserError(\n 'binary length: expected {0} found {1}'.format(\n self._binary_length + 1, len(field)))\n self._binary_tag = 0\n self._binary_length = -1",
"def write_binary(self,value):\n self.write_uint32(len(value))\n self.data.extend(value)",
"def binary(self):\n return self.data.binary.values",
"def bytify(binary):\n\tbytes = [0,0,0,0]\n\ti = 3\n\twhile binary:\n\n\t\tbytes[i] = binary&255\n\t\tbinary >>= 8\n\t\ti -= 1 \n\treturn bytes",
"def test_sbyte_array_conversion():\n ob = ConversionTest()\n\n assert ob.SByteArrayField is None\n\n ob.SByteArrayField = [0, 1, 2, 3, 4]\n array = ob.SByteArrayField\n assert len(array) == 5\n assert array[0] == 0\n assert array[4] == 4\n\n value = b\"testing\"\n ob.SByteArrayField = value\n array = ob.SByteArrayField\n for i, _ in enumerate(value):\n assert array[i] == operator.getitem(value, i)",
"def read_binary(self):\n length = self.read_uint32()\n bytes = self.data[:length]\n self.data = self.data[length:]\n return bytes",
"def __init__(self, data):\n self.bytes = bytearray(data)",
"def _handleBinary(self, msg):\r\n uri = msg[:32]\r\n binaryData = StringIO()\r\n binaryData.write(msg[32:])\r\n\r\n for msg in self._incompleteMsgs:\r\n if msg.addBinary(uri, binaryData):\r\n break\r\n else:\r\n self._binaries[uri] = (binaryData, datetime.now())",
"def as_bytearray(self):\n\n if self.index < 7:\n return self.buf + bytearray([self.byte])\n else:\n return self.buf",
"def binary(message: str) -> bitarray:\n binary_message = bitarray()\n byte_message = bytes(message, encoding=\"ascii\")\n binary_message.frombytes(byte_message)\n return binary_message",
"def bytes(self, bytes: int):\n\n self._bytes = bytes",
"def __init__( self, bytes_reverse=False, bits_reverse=False, insert_at_msb=False ):\n self.output = bytearray()\n self.bits_reverse = bits_reverse\n self.bytes_reverse = bytes_reverse\n self.insert_at_msb = insert_at_msb\n self.bits_remaining = 8\n self.current_bits = 0",
"def data(self, arr):\n self.bitmap(arr, 1)",
"def convertToByteArray(booleanArray: typing.List[bool]) -> typing.List[int]:\n ...",
"def swap_byte(byte_array, index):\n\n if byte_array[index] == 0:\n changed_byte_array = byte_array[0:index] + b\"\\xff\" + byte_array[index + 1 :]\n changed_byte_array = byte_array[0:index] + b\"\\x00\" + byte_array[index + 1 :]\n return changed_byte_array",
"def set_bit(self, index_of_byte, index_of_bit, new_value):\n if index_of_bit >= self.binary_size:\n print(\"You tried to modify a byte at %d index. This cannot be done. The maximum index is %d.\"%(index_of_bit, self.binary_size - 1))\n else:\n new_value = str(new_value)\n byte = self.binary_array[index_of_byte]\n new_byte = byte[0:index_of_bit] + new_value\n if index_of_bit < self.binary_size - 1: # you aren't changing the final bit in the byte\n new_byte += byte[index_of_bit + 1:]\n #apply changes\n self.binary_array[index_of_byte] = new_byte",
"def append_bytes(self, data):\n\n if self.index != 7:\n self.buf.append(self.byte)\n self.byte = 0\n self.index = 7\n\n self.buf.extend(data)",
"def use_binary(self):\n self.scpi.set_format_binary(ORDER='SWAP')\n self.scpi.set_format_data(DATA='REAL,64')\n self.resource.values_format.use_binary(datatype='d',\n is_big_endian=False,\n container=np.array)",
"def BitArray(self):\n from bitstring import BitArray,BitStream\n b = BitArray(bin=str(self))\n return b",
"def __bytes__(self):\n byteout = bytearray()\n for index in range(1, 15):\n key = \"d\" + str(index)\n if self._user_data.get(key) is not None:\n byteout.append(self._user_data[key])\n else:\n byteout.append(0x00)\n return bytes(byteout)",
"def tobinary_multiples(arr):\n return [np.array(arr_i).tobytes() for arr_i in arr]",
"def set_bytes(self, b):\n if not self._readonly:\n self._bytes = b\n else:\n raise ReadOnlyError(\"This memory element cannot be written to.\")",
"def inc_bytes(a):\n out = list(a)\n for i in reversed(range(len(out))):\n if out[i] == 0xFF:\n out[i] = 0\n else:\n out[i] += 1\n break\n return bytes(out)",
"def update(self):\n if self.var_info.bits_per_pixel == 1:\n b = self._img.tobytes(\"raw\", \"1;R\")\n self.mmap[:len(b)] = b\n\n elif self.var_info.bits_per_pixel == 16:\n self.mmap[:] = self._img_to_rgb565_bytes()\n\n elif self.var_info.bits_per_pixel == 32:\n self.mmap[:] = self._img.convert(\"RGB\").tobytes(\"raw\", \"XRGB\")\n\n else:\n raise Exception(\"Not supported - platform %s with bits_per_pixel %s\" %\n (self.platform, self.var_info.bits_per_pixel))",
"def setBytes(self, addr: ghidra.program.model.address.Address, source: List[int]) -> None:\n ...",
"def bin_book_update(binfile, book):\n trade_update_fmt = \"II\"\n trade_update_data = [0, 0]\n order_book_level_fmt = \"IIIIII\"\n levels = [\n (book.bid[-(i+1)].price * DECIMAL_CONVERT,\n book.bid[-(i+1)].qty,\n book.bid[-(i+1)].order_count,\n book.offer[i].price * DECIMAL_CONVERT,\n book.offer[i].qty,\n book.offer[i].order_count) for i in range(5)]\n order_book_level_data = []\n for data in levels:\n order_book_level_data += list(data)\n order_book_level_data = [int(v) for v in order_book_level_data]\n valids_fmt = \"I\"\n valids_data = [2]\n the_data = [now_nanos(), book.security] + \\\n trade_update_data + order_book_level_data + valids_data\n data = struct.pack(\"<QI\" + trade_update_fmt + order_book_level_fmt * 5 + valids_fmt,\n *the_data)\n binfile.write(data)"
] | [
"0.85522306",
"0.6417961",
"0.6182146",
"0.6066188",
"0.6042835",
"0.5951569",
"0.5898244",
"0.58424544",
"0.5830605",
"0.5825022",
"0.5725478",
"0.5692757",
"0.56252694",
"0.5581197",
"0.55731577",
"0.55338085",
"0.55332655",
"0.55148214",
"0.55130553",
"0.5489311",
"0.54890907",
"0.54743695",
"0.5452508",
"0.5434074",
"0.5421013",
"0.54169565",
"0.541289",
"0.5408982",
"0.5403379",
"0.5398687"
] | 0.88327396 | 0 |
updates the bytes array based on binary bytes. reconstructs bytes from binary_array | def update_bytes_array(self):
self.bytes = data_manipulation.convert_from_binary_array(self.binary_array) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_binary_bytes(self):\n self.binary_array = data_manipulation.convert_byte_array(self.bytes, self.binary_size)",
"def update_binary(self, offset, data):\n log.debug(\"write binary {0} to {1}\".format(offset, offset+len(data)))\n cmd = bytearray([0x00, 0xD6, offset/256, offset%256, len(data)])\n cmd = cmd + bytearray(data)\n rsp = self.transceive(cmd)\n if rsp[-2:] != \"\\x90\\x00\":\n raise Type4TagError(rsp[-2:])",
"def swap_byte(byte_array, index):\n\n if byte_array[index] == 0:\n changed_byte_array = byte_array[0:index] + b\"\\xff\" + byte_array[index + 1 :]\n changed_byte_array = byte_array[0:index] + b\"\\x00\" + byte_array[index + 1 :]\n return changed_byte_array",
"def bytify(binary):\n\tbytes = [0,0,0,0]\n\ti = 3\n\twhile binary:\n\n\t\tbytes[i] = binary&255\n\t\tbinary >>= 8\n\t\ti -= 1 \n\treturn bytes",
"def test_byte_array_conversion():\n ob = ConversionTest()\n\n assert ob.ByteArrayField is None\n\n ob.ByteArrayField = [0, 1, 2, 3, 4]\n array = ob.ByteArrayField\n assert len(array) == 5\n assert array[0] == 0\n assert array[4] == 4\n\n value = b\"testing\"\n ob.ByteArrayField = value\n array = ob.ByteArrayField\n for i, _ in enumerate(value):\n assert array[i] == operator.getitem(value, i)",
"def _update_binary(self, field, tag_id, value):\n # Are we processing a binary tag?\n # pylint: disable=consider-using-f-string\n if self._binary_tag == 0:\n if tag_id in self._binary_fields:\n self._binary_length = len(str(tag_id + 1)) + int(value)\n if self._binary_length > self._max_length:\n raise FIXLengthTooLongError(\n 'binary field too long: {0} ref:{1}'.format(\n self._binary_length, tag_id))\n self._binary_tag = tag_id\n else:\n self._binary_length = -1\n else:\n # Is this the wrong tag?\n if tag_id != (self._binary_tag + 1):\n raise FIXParserError(\n f'expected binary tag {self._binary_tag+1} found {tag_id}')\n if len(field) != self._binary_length + 1:\n raise FIXParserError(\n 'binary length: expected {0} found {1}'.format(\n self._binary_length + 1, len(field)))\n self._binary_tag = 0\n self._binary_length = -1",
"def append_bytes(self, data):\n\n if self.index != 7:\n self.buf.append(self.byte)\n self.byte = 0\n self.index = 7\n\n self.buf.extend(data)",
"def _handleBinary(self, msg):\r\n uri = msg[:32]\r\n binaryData = StringIO()\r\n binaryData.write(msg[32:])\r\n\r\n for msg in self._incompleteMsgs:\r\n if msg.addBinary(uri, binaryData):\r\n break\r\n else:\r\n self._binaries[uri] = (binaryData, datetime.now())",
"def test_sbyte_array_conversion():\n ob = ConversionTest()\n\n assert ob.SByteArrayField is None\n\n ob.SByteArrayField = [0, 1, 2, 3, 4]\n array = ob.SByteArrayField\n assert len(array) == 5\n assert array[0] == 0\n assert array[4] == 4\n\n value = b\"testing\"\n ob.SByteArrayField = value\n array = ob.SByteArrayField\n for i, _ in enumerate(value):\n assert array[i] == operator.getitem(value, i)",
"def test_bytearray_doesnt_overfill(self):\n self._doesnt_overfill_test(bytearray)",
"def setByteArray(self, name: unicode, value: List[int]) -> None:\n ...",
"def __init__( self, bytes_reverse=False, bits_reverse=False, insert_at_msb=False ):\n self.output = bytearray()\n self.bits_reverse = bits_reverse\n self.bytes_reverse = bytes_reverse\n self.insert_at_msb = insert_at_msb\n self.bits_remaining = 8\n self.current_bits = 0",
"def forge_array(data, len_bytes=4) -> bytes:\n return len(data).to_bytes(len_bytes, 'big') + data",
"def binary(message: str) -> bitarray:\n binary_message = bitarray()\n byte_message = bytes(message, encoding=\"ascii\")\n binary_message.frombytes(byte_message)\n return binary_message",
"def read_binary(self):\n length = self.read_uint32()\n bytes = self.data[:length]\n self.data = self.data[length:]\n return bytes",
"def inc_bytes(a):\n out = list(a)\n for i in reversed(range(len(out))):\n if out[i] == 0xFF:\n out[i] = 0\n else:\n out[i] += 1\n break\n return bytes(out)",
"def test_bytearray_really_doesnt_overfill(self):\n self._doesnt_overfill_test(bytearray)",
"def utf8_to_binary() :\n dico_binary, comp_seq, file_comp = read_compressed_file()\n \n #for each items of the sequence convert it in binary string on 8 bits\n bin_str = \"\"\n for value in comp_seq:\n code = ord(value)\n bin_str += '{:08b}'.format(code)\n \n #remove the number of zeroes added \n \n added = int(dico_binary[\"add\"])\n #if the padding is equal to 0, don't cut anathing from the sequence\n if added == 0: \n bin_seq = bin_str\n else: \n bin_seq = bin_str[:-added]\n \n return bin_seq, dico_binary, comp_seq, file_comp",
"def resetBin(this):\n\t\tthis._BINARY = EmptyFrom(this._FRAME, 1)",
"def rebuild_binary(string_binary, password_binary):\r\n # This will be the new string we build out with binary values\r\n # Because the password_binary already contains valid numbers,\r\n # set new_string_binary to those valid numbers\r\n new_string_binary = password_binary\r\n # Set variable to the last number to start the rebuild with\r\n last_char = password_binary[-1]\r\n\r\n # Use a range starting at the end of the valid password_binary\r\n # and ends at the calculated total length\r\n ## (which is STRING_LENGTH * bit size added to the current password_binary length)\r\n for x in range(\r\n (len(password_binary) - 1), (len(password_binary) + (STRING_LENGTH * 8) - 1)\r\n ):\r\n # Calculates whether it should return '0' or '1'\r\n # Store that value for next round iteration\r\n last_char = char_compare(last_char, string_binary[x])\r\n # Add that new value to the rebuild string\r\n new_string_binary += last_char\r\n\r\n return new_string_binary",
"def as_bytearray(self):\n\n if self.index < 7:\n return self.buf + bytearray([self.byte])\n else:\n return self.buf",
"def app_message_send_byte_array(self, app_uuid, key, tuple_byte_array):\n\n\t\t# Already packed, fix endianness\n\t\ttuple_byte_array = tuple_byte_array[::-1]\n\n\t\tself.app_message_send_tuple(app_uuid, key, \"BYTE_ARRAY\", tuple_byte_array)",
"def write_binary(self,value):\n self.write_uint32(len(value))\n self.data.extend(value)",
"def tobinary_multiples(arr):\n return [np.array(arr_i).tobytes() for arr_i in arr]",
"def bytes2binary(inputBytes):\r\n result = 0\r\n for i in inputBytes:\r\n result = result * 256 + int(i)\r\n return bin(result)[2:].rjust(len(inputBytes*8), \"0\")",
"def __init__(self, data):\n self.bytes = bytearray(data)",
"def set_bit(self, index_of_byte, index_of_bit, new_value):\n if index_of_bit >= self.binary_size:\n print(\"You tried to modify a byte at %d index. This cannot be done. The maximum index is %d.\"%(index_of_bit, self.binary_size - 1))\n else:\n new_value = str(new_value)\n byte = self.binary_array[index_of_byte]\n new_byte = byte[0:index_of_bit] + new_value\n if index_of_bit < self.binary_size - 1: # you aren't changing the final bit in the byte\n new_byte += byte[index_of_bit + 1:]\n #apply changes\n self.binary_array[index_of_byte] = new_byte",
"def bytes_increment(b):\n assert isinstance(b, six.binary_type)\n b = bytearray(b) # Used subset of its API is the same on Python 2 and 3.\n for i in range(len(b) - 1, -1, -1):\n if b[i] != 0xff:\n b[i] += 1\n return bytes(b[:i+1])\n return None",
"def to_binary(self):\n return BinaryEncodedBytes(\n Integer(ord(b)).to_binary()\n for b in self._bytes\n )",
"def sub_bytes(state, s_box=s_box):\n for i in range(4):\n for j in range(4):\n state[i][j] = s_box[state[i][j]]"
] | [
"0.8974595",
"0.6320197",
"0.6153868",
"0.6111419",
"0.5868646",
"0.5779125",
"0.56829494",
"0.56726515",
"0.55885434",
"0.5576249",
"0.55422443",
"0.5534472",
"0.5524987",
"0.5517247",
"0.55114347",
"0.54999715",
"0.54987884",
"0.549476",
"0.5493693",
"0.54716897",
"0.546844",
"0.54678816",
"0.5457117",
"0.54520845",
"0.543441",
"0.5433196",
"0.54252726",
"0.54229915",
"0.539516",
"0.5385122"
] | 0.8828117 | 1 |
writes self.bytes to an image. probably should run self.update_bytes_array() before calling this method | def write_bytes_to_image(self, file_path):
data_manipulation.bytes_to_image(self.bytes, file_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, image):\n raise NotImplementedError()",
"def writeimage(self, fp):\n execfile = open(self.binpath, \"w\")\n databuf = fp.read(4096)\n while databuf:\n execfile.write(databuf)\n databuf = fp.read(4096)\n execfile.flush()\n execfile.close()\n os.chmod(self.binpath, stat.S_IRWXU)",
"def _write_image(self):\r\n # Create an output raster with the correct number of rows and columns.\r\n gtiff_driver = gdal.GetDriverByName('GTiff')\r\n out_ds = gtiff_driver.Create(os.path.join(self.out_folder, self.out_file_name), self.column, self.row, 1)\r\n out_ds.SetProjection(self.in_ds.GetProjection())\r\n\r\n # Convert the offsets to real-world coordinates for the georeferencing info.\r\n # We can't use the coordinates above because they don't correspond to the pixel edges.\r\n subset_ulx, subset_uly = gdal.ApplyGeoTransform(self.in_gt, self.off_ulx, self.off_uly)\r\n out_gt = list(self.in_gt)\r\n out_gt[0] = subset_ulx\r\n out_gt[3] = subset_uly\r\n out_ds.SetGeoTransform(out_gt)\r\n\r\n data = self.read_image()\r\n out_band = out_ds.GetRasterBand(1)\r\n out_band.WriteArray(data)\r\n\r\n del out_ds",
"def write(self, filename):\n f = open(filename, 'bw')\n\n # file header (14)\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14 + 40 + self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(14 + 40))\n\n # image header (40)\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n # pixel data\n for x in range(self.height):\n for y in range(self.width):\n f.write(self.pixels[x][y])\n f.close()",
"def encode(self, image) -> bytes:\n raise NotImplementedError()",
"def _write(self, stream):\n\n self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)",
"def write_image(self, image_name, image):\n raise NotImplementedError",
"def _save_buffer(self):\n img_data = renderer.fbuffer.read(mode='color', alpha=False)\n img = Image.fromarray(img_data)\n img.save(self._save_fname)\n self._save_flag = False",
"def _save(self, data: PIL.Image) -> None:\n with self._fs.open(self._filepath, mode=\"wb\") as f:\n data.save(f)",
"def _writeBytes(self, b):\n self.stream.write(b)\n self.stream.flush()",
"def update(self):\n if self.var_info.bits_per_pixel == 1:\n b = self._img.tobytes(\"raw\", \"1;R\")\n self.mmap[:len(b)] = b\n\n elif self.var_info.bits_per_pixel == 16:\n self.mmap[:] = self._img_to_rgb565_bytes()\n\n elif self.var_info.bits_per_pixel == 32:\n self.mmap[:] = self._img.convert(\"RGB\").tobytes(\"raw\", \"XRGB\")\n\n else:\n raise Exception(\"Not supported - platform %s with bits_per_pixel %s\" %\n (self.platform, self.var_info.bits_per_pixel))",
"def update_image(self):\n self.image = Image.fromarray(self.img)",
"def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)",
"def b64_image(self) -> bytes:\n buffer = BytesIO()\n self.image.save(buffer, \"PNG\") \n im_b64 = base64.b64encode(buffer.getvalue())\n im_b64 = b\"data:image/png;base64,\" + im_b64\n return im_b64",
"def write_data(writer: UFOWriter, filename: str, data: bytes) -> None:\n writer.writeImage(filename, data)",
"def save_to_buffer(self) -> io.BytesIO:\n image = get_screenshot_as_png(self._layout)\n buffer = io.BytesIO()\n image.save(buffer, \"png\")\n return buffer",
"def write(self, filename):\n\n self.__image.save(filename)",
"def __writeImageBytes(self, image):\n\n if not image:\n raise Exception(\"image not found\")\n result = []\n for i, b in enumerate(image):\n if i % 39 == 0:\n result.append(\"\\n\")\n result.append(f\"{b:02X}\")\n return \"\".join(result)",
"def update_bytes_array(self):\n self.bytes = data_manipulation.convert_from_binary_array(self.binary_array)",
"def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)",
"def save_image(self):\n self.save()",
"def write_to_png(self, filename):\n png_file = open(filename, 'wb')\n writer = png.Writer(self.width, self.height)\n writer.write_array(png_file, self.data)\n png_file.close()",
"def saveImage(self, event):\r\n fileWritten = self.image.writeFile()\r\n self.statusBar.SetStatusText(\"Saved {}\".format(fileWritten))",
"def save_blob(self, img_blob, filename):\n out_file = open(filename, \"wb\")\n out_file.write(img_blob)\n out_file.close()",
"def write(self, Width, Height, ImageData, Speed):\n # write_begin = datetime.datetime.now()\n\n self.Data.Game.Speed = Speed\n\n # TODO Not sure if needed\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n ImageData = cv2.resize(ImageData, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth / 2 - self._TargetResolution[0] / 2)\n XStop = int(TargetWidth / 2 + self._TargetResolution[0] / 2)\n ImageData = ImageData[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0] / AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n ImageData = cv2.resize(ImageData, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight / 2 - self._TargetResolution[1] / 2)\n YStop = int(TargetHeight / 2 + self._TargetResolution[1] / 2)\n ImageData = ImageData[YStart:YStop, :]\n ImageData = cv2.flip(ImageData, 0)\n # Update Parameters\n\n Height, Width = ImageData.shape[:2]\n # print(\"Type is \", np.array(ImageData).dtype)\n\n # Set the SHM\n self.Data.Image.ImageWidth = Width\n self.Data.Image.ImageHeight = Height\n\n # Reshape ImageData to 1 D array\n ImageData = ImageData.flatten()\n\n\n # print(\"Target Image data\", Width, Height)\n\n start_time = datetime.datetime.now()\n self.Data.Image.Data = (ctypes.c_uint8 * (RECORD_MAX_IMAGE_HEIGHT * RECORD_MAX_IMAGE_WIDTH * RECORD_IMAGE_CHANNELS))(*np.array(ImageData))\n\n # elapsed = datetime.datetime.now() - start_time\n # print(\"Setting Image data \", int(elapsed.total_seconds() * 1000) )\n #\n # Notify we wrote a new data - Maybe we can also share the frame number\n #self.Data.Sync.IsWritten = 1\n # elapsed = datetime.datetime.now() - write_begin\n # print(\"Write to memory took \", int(elapsed.total_seconds() * 1000))\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0",
"def save_image(self):\n self.table_to_image.img.save(self.file_name)\n aws.AWSHandler().upload_image(self.file_name)",
"def draw(self):\n self.write_image()\n self.update()",
"def write_image(self, filename):\n cv2.imwrite(filename, self.image)",
"def update_binary_bytes(self):\n self.binary_array = data_manipulation.convert_byte_array(self.bytes, self.binary_size)",
"def write_image(self, img, extname=None, extver=None,\n compress=None, tile_dims=None, header=None):\n\n self.create_image_hdu(img,\n header=header,\n extname=extname, extver=extver,\n compress=compress, tile_dims=tile_dims)\n\n if header is not None:\n self[-1].write_keys(header)\n self[-1]._update_info()\n\n # if img is not None:\n # self[-1].write(img)"
] | [
"0.7372077",
"0.65278393",
"0.64787364",
"0.647208",
"0.6361551",
"0.63133377",
"0.62308866",
"0.62225616",
"0.6169296",
"0.6163713",
"0.6159095",
"0.61500794",
"0.6142363",
"0.6139682",
"0.6130171",
"0.61293423",
"0.6119667",
"0.6098111",
"0.60803",
"0.6067082",
"0.6058352",
"0.60105664",
"0.59793067",
"0.59569913",
"0.5945648",
"0.59369797",
"0.593163",
"0.5906059",
"0.5896752",
"0.58843774"
] | 0.8263245 | 0 |
Allows the user to select number of human players. Validates input and returns a matching tuple of players. | def get_participating_players(raw_input=raw_input):
no_players = 0
while no_players != 1 and no_players != 2:
inp = raw_input("Single player or multiplayer? (1/2): ")
try:
no_players = int(inp)
except ValueError:
print "Invalid input - please try again"
pass
if no_players is 1:
return (HumanPlayer('X'), ComputerPlayer('O'))
else:
return (HumanPlayer('X'), HumanPlayer('O')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_num_players(num_players):\n \n try:\n\tnum_players = int(num_players)\n except ValueError:\n\treturn None\n if num_players < 2 or num_players > 4:\n return None\n\n return num_players",
"def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass",
"def SelectPlayer(self):\n\n player = input(data['player'])\n if player == \"1\":\n return 0\n elif player == \"2\":\n return 1\n else:\n return 'invalid'",
"def AskHowManyPlayers():\n\n\t# Loop forever until the user enters an integer between 1 and 10, inclusive.\n\twhile True:\n\t\tprint \"How many players? Enter a number between 1 and 10, or press enter for default 2:\"\n\t\tnum_players = SolicitInteger( lobound=1, hibound=10, default_return=2 )\n\t\tif num_players != None:\n\t\t\tprint \"Ok, {} players.\".format( num_players )\n\t\t\treturn num_players",
"def establish_players(n_players):\n usernames_out = [input('Please input a username for player ' +str(i)) for i in range(n_players)]\n return {'username':usernames_out}",
"def create_number_of_players(self):\n self.number_of_players = pyip.inputInt(\n prompt='\\nEnter number of players (1 to 4):\\n', min=1, max=4)",
"def set_players():\n \n while True:\n players = eval(input(\"Geben Sie die Anzahl Spieler an oder tippe '0' zum Abbruch: \"))\n if int(players) > 0:\n break\n elif int(players) == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine positive Ganzzahl eingeben!\")\n print()\n print()\n return players",
"def get_number_of_players():\n number_of_players = None\n while not(type(number_of_players)) == int:\n try:\n number_of_players = int(input(\"How many players are there? \"))\n if number_of_players == 0:\n raise zeroPlayersError\n elif number_of_players > 6:\n raise tooManyPlayersError\n except zeroPlayersError:\n print(\"The game needs at least 1 player\")\n number_of_players = None\n except tooManyPlayersError:\n print(\"Sorry you can't have more than 6 players\")\n number_of_players = None\n except:\n number_of_players = None\n return number_of_players",
"def get_players(n_players):\n\n if n_players < 2 or 8 < n_players:\n raise ValueError('A game must have between 2 to 8 players. You input {} players.'.format(n_players))\n\n return {classes.Player(p) for p in range(n_players)}",
"def get_players(n, playerspace):\n ps = []\n for i in range(n):\n name = \"\"\n while name == \"\":\n name = input(\"What's the name of player @ index {} (can't be empty): \".format(i))\n p = Player(name, i)\n p.playerspace = playerspace()\n ps.append(p)\n return ps",
"def createPlayers():\r\n while True:\r\n try:\r\n num_players = abs(int(raw_input(\"How many players?: \")))\r\n if num_players == 0:\r\n raise ValueError\r\n break\r\n except KeyboardInterrupt:\r\n raise KeyboardInterrupt\r\n except ValueError:\r\n print \"Invalid input\"\r\n\r\n players = {}\r\n for player_key in xrange(1,num_players+1):\r\n players[player_key] = players.get(player_key,0)\r\n return players",
"def get_game_ready():\n\tnum_players = int(input(\"\"\"How many players will be playing today? (between 2 and 5): \"\"\"))\n\twhile num_players > 5 or num_players < 2:\n\t\tnum_players = int(input(\"\"\"Between 2 and 5 players please: \"\"\"))\n\tnum_number_of_people = int(input(\"\"\"How many of these players will be humans?: \"\"\"))\n\twhile num_number_of_people > num_players or num_number_of_people < 0:\n\t\tnum_number_of_people = int(input(f\"\"\"Please enter a number equal to or less than the number of players ({num_players}): \"\"\"))\n\tnum_people = num_number_of_people\n\twhile num_people > 0:\n\t\tNAMES[abs(num_people - num_number_of_people)] = input(f\"\"\"Name of player {abs(num_people - num_number_of_people)+1}: \"\"\")\n\t\tnum_people -= 1\n\twhile len(NAMES) > num_players:\n\t\tNAMES.pop()\n\treturn NAMES",
"def play_game():\n try:\n # get selections for player and computer\n user_pick = utils.get_user_selection(user_selection.get())\n computer_pick = utils.get_computer_selection()\n\n # determine winner\n winner = utils.determine_winner(user_pick, computer_pick, \"pvc\")\n\n # display result\n player_selection.set(f\"player selection: {user_pick.name}\")\n computer_selection.set(f\"computer selection: {computer_pick.name}\")\n output.set(winner)\n except Exception:\n output.set(\"Invalid: choose any one -- 1, 2, 3\")\n player_selection.set(\"player selection: \")\n computer_selection.set(\"computer selection: \")",
"def choose_winner(): \r\n max_health = Titan.max_health()\r\n winners = tuple((titan.name for titan in Titan.titans if titan.health == max_health))\r\n return winners",
"def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? \\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)",
"def set_players(self, player_min: int, player_max: int):\n name: str = ' '\n print('Please give between %i and %i names for your players'\n % (player_min, player_max))\n while (name != '') and (len(self.players) < player_max):\n name = input('Players {}: '.format(len(self.players)+1))\n if name != '':\n self.players.append(Player(name))\n elif len(self.players) < player_min:\n name = ' '\n\n print()\n print('{} players registered.'.format(len(self.players)))\n print()",
"def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id",
"def player_choices(self, player):\n player_choices = []\n for i in range(self.quadrants_count):\n quadrant_board = self.play_area[i].get_board()\n for j in range(self.quadrant_positions_count):\n if quadrant_board[j] == player:\n position = j + 1 + i * 9\n player_choices.append(position)\n return player_choices",
"def collect_players_and_suspects_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded, suspects_decoded",
"def ask_info_player(self) -> str:\n\n print(\"Enter first name : \")\n while True:\n first_name = input()\n if check.check_input_string_special(first_name) is True:\n if check.check_input_string_len(first_name) is True:\n if check.check_input_string_integer(first_name) is True:\n break\n\n print(\"Enter last name : \")\n while True:\n last_name = input()\n if check.check_input_string_special(last_name) is True:\n if check.check_input_string_len(last_name) is True:\n if check.check_input_string_integer(last_name) is True:\n break\n\n print(\"Enter date of birth with this format YEAR-MONTH-DAY : \")\n birthday = check.check_date_input()\n\n print(\n \"Enter a number for choose the gender : \\n\"\n \"1 - Man \\n\"\n \"2 - Women\"\n )\n genre = check.request_selection_with_number(\"Man\", \"Women\", \"none\")\n\n print(\"\\n The player {} {}, {}, birth on {} has been added to the database !\".format(\n first_name,\n last_name,\n genre,\n birthday))\n\n return first_name, last_name, birthday, genre",
"def test_winners_per_type_num_players_less(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.Player]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.winners_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer']",
"def create_players(self):\n for i in range(self.number_of_players):\n self.players_names.append(pyip.inputStr(\n prompt=f'\\nEnter name of player {i + 1}:\\n'))",
"def test_winners_per_type_num_players(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.ResilientPlayer]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.winners_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer', 'ResilientPlayer']",
"def get_accepted_players(self):\n return self.accepted_players",
"def min_players(self):\n return 2",
"def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? \\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)",
"def human_players(self):\n return self._get(\"human_players\")",
"def test_players_per_type_num_players(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.ResilientPlayer]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.players_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer', 'ResilientPlayer']",
"def create_players():\n\n char_pairings = {\"X\":\"O\",\"O\":\"X\"}\n\n # Create player1\n name_1 = input(\"Player 1, what is your name? > \")\n char_1 = \"\"\n \n # Force player to choose valid input\n while char_1 not in char_pairings:\n char_1 = input(\"Would you like to be X or O? > \").upper()\n player_1 = Player(name_1, char_1)\n\n # Create player2\n name_2 = input(\"Player 2, what is your name? > \")\n\n print(\"{}, you are {}.\".format(name_2, char_pairings[char_1]))\n char_2 = char_pairings[char_1]\n\n player_2 = Player(name_2, char_2)\n\n return (player_1, player_2)",
"def test_players_per_type_num_players_less(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.Player]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.players_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer']"
] | [
"0.6700877",
"0.66584164",
"0.6638199",
"0.6623469",
"0.6545826",
"0.64794976",
"0.64397866",
"0.6357553",
"0.6274813",
"0.6097092",
"0.6070521",
"0.60677963",
"0.5964063",
"0.5918203",
"0.5834943",
"0.5820832",
"0.5770313",
"0.5723989",
"0.57173026",
"0.57104576",
"0.57043576",
"0.56990546",
"0.5698139",
"0.568951",
"0.56887263",
"0.5679548",
"0.5673202",
"0.5664805",
"0.5659513",
"0.56502914"
] | 0.72646415 | 0 |