| query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, 4 to 10 chars) | document_rank (2 classes) |
|---|---|---|---|---|---|---|
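Each row pairs a natural-language query with a positive code document, 30 mined negative code snippets, and one similarity score per negative; the `metadata.objective.triplet` field marks the rows as (query, document, negatives) triplets. Below is a minimal sketch of iterating the rows with the Hugging Face `datasets` library, assuming the data is published under a hypothetical repository id (substitute the real one):

```python
from datasets import load_dataset

# Hypothetical repository id used for illustration only.
rows = load_dataset("org/code-retrieval-triplets", split="train")

for row in rows:
    query = row["query"]             # natural-language request, e.g. "Obtain the statistics of a NIC."
    positive = row["document"]       # the code snippet that answers the query
    negatives = row["negatives"]     # 30 similar-but-non-matching code snippets
    scores = row["negative_scores"]  # one similarity score per negative, aligned by index
    # row["metadata"]["objective"]["triplet"] describes the (query, document, negatives) objective.
```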
Obtain the statistics of a NIC. | def get_nic_statistics(self, nic_id):
return self._nic_mgmt.get_nic_statistics(nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def net_if_stats():\n ret = {}\n rawdict = cext.net_if_stats()\n for name, items in rawdict.items():\n if not PY3:\n assert isinstance(name, unicode), type(name)\n name = py2_strencode(name)\n isup, duplex, speed, mtu = items\n if hasattr(_common, 'NicDuplex'):\n duplex = _common.NicDuplex(duplex)\n ret[name] = _common.snicstats(isup, duplex, speed, mtu, '')\n return ret",
"def get_network_stats(net):\n return net.get_num_connections(), net.num_neurons, len(net.neurons_in_layer)",
"def metrics_nic(cmd_ctx, cpc, partition, nic, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_nic(cmd_ctx, cpc, partition, nic, options))",
"def packetSniff():\n\n packets = psutil.net_io_counters(pernic=True)\n interfaces = {}\n x = 0\n for p in packets.items():\n values = {}\n values['name'] = p[0]\n values['bytes_sent'] = p[1][0]\n values['bytes_recv'] = p[1][1]\n values['pckt_sent'] = p[1][2]\n values['pckt_recv'] = p[1][3]\n values['errin'] = p[1][4]\n values['errout'] = p[1][5]\n values['dropin'] = p[1][6]\n values['dropout'] = p[1][7]\n\n if ((values['bytes_sent'] or values['bytes_recv'] or\n values['pckt_sent'] or values['pckt_recv']) != 0):\n\n interfaces[x] = values\n x += 1\n else:\n pass\n\n return interfaces",
"async def skribbl_get_stats(self) -> int:\r\n return await self.read(self._skribbl_get_stats)",
"def net_stat_recv(x, interface=None):\n if not interface:\n interface = get_netiface()\n if interface:\n return psutil.net_io_counters(pernic=True)[interface].bytes_recv\n else:\n return 0",
"def stats_rxtx_read(iface):\n\n iface_prefix = '%s:' % iface\n rx_columns = ['bytes', 'packets', 'errs', 'drop', 'fifo', 'frame', 'compressed', 'multicast']\n tx_columns = ['bytes', 'packets', 'errs', 'drop', 'fifo', 'colls', 'carrier', 'compressed']\n\n line = None\n with open('/proc/net/dev', 'r') as ftmp:\n for tmpline in [x.strip() for x in ftmp.readlines()]:\n if tmpline.startswith(iface_prefix):\n line = tmpline\n break\n\n if line:\n # face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n # eth0: 76594958 122515 7 0 0 0 0 0 72115331 110248 0 0 0 0 0 0\n logger.debug(line)\n ret = {\n \"rx\": dict(zip(rx_columns, map(int, line.split()[1:8]))),\n \"tx\": dict(zip(tx_columns, map(int, line.split()[9:16])))\n }\n else:\n raise RuntimeError('interface statistics not found')\n\n logger.debug(ret)\n return ret",
"def test_instant_io_statistics(self):\n from supvisors.statistics import instant_io_statistics\n stats = instant_io_statistics()\n # test interface names\n with open('/proc/net/dev') as netfile:\n # two first lines are title\n contents = netfile.readlines()[2:]\n interfaces = [intf.strip().split(':')[0] for intf in contents]\n self.assertItemsEqual(interfaces, stats.keys())\n self.assertIn('lo', stats.keys())\n # test that values are pairs\n for intf, bytes in stats.items():\n self.assertEqual(2, len(bytes))\n for value in bytes:\n self.assertIs(int, type(value))\n # for loopback address, recv bytes equals sent bytes\n self.assertEqual(stats['lo'][0], stats['lo'][1])",
"def get_network_stats(self, tags):\n\n # FIXME: (aaditya) Check all networks defaults to true\n # until we can reliably assign agents to networks to monitor\n if is_affirmative(self.init_config.get('check_all_networks', True)):\n all_network_ids = set(self.get_all_network_ids())\n\n # Filter out excluded networks\n network_ids = [\n network_id\n for network_id in all_network_ids\n if not any([re.match(exclude_id, network_id) for exclude_id in self.exclude_network_id_rules])\n ]\n else:\n network_ids = self.init_config.get('network_ids', [])\n\n if not network_ids:\n self.warning(\n \"Your check is not configured to monitor any networks.\\n\"\n \"Please list `network_ids` under your init_config\"\n )\n\n for nid in network_ids:\n self.get_stats_for_single_network(nid, tags)",
"def network_io_counters():\r\n f = open(\"/proc/net/dev\", \"r\")\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n\r\n retdict = dict()\r\n for line in lines[2:]:\r\n colon = line.find(':')\r\n assert colon > 0, line\r\n name = line[:colon].strip()\r\n fields = line[colon + 1:].strip().split()\r\n bytes_recv = int(fields[0])\r\n packets_recv = int(fields[1])\r\n errin = int(fields[2])\r\n dropin = int(fields[2])\r\n bytes_sent = int(fields[8])\r\n packets_sent = int(fields[9])\r\n errout = int(fields[10])\r\n dropout = int(fields[11])\r\n retdict[name] = nt_net_iostat(bytes_sent, bytes_recv, packets_sent, packets_recv,\r\n errin, errout, dropin, dropout)\r\n return retdict",
"def stats(self, **kwargs):\n return stats.stats(self._host, self._session, **kwargs)",
"def get_network_status(self, who=\"all\", get_iterator=False):\r\n\r\n nsData = self.sendAndRecv(\"GETINFO ns/\"+who+\"\\r\\n\")[0][2]\r\n if get_iterator: return ns_body_iter(nsData)\r\n else: return parse_ns_body(nsData)",
"def get_network(isamAppliance, application_interface, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving the Application Interface Statistics\",\n \"/analysis/interface_statistics.json{0}\".format(\n tools.create_query_string(prefix=application_interface,\n timespan=statistics_duration)),requires_model=requires_model)",
"def interface_stats(self, instance_name, iface_id):\n return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]",
"def get_stats(self, epg_dn):\n # Apic saves up to 95 different objects with statistic information\n traffic_list = []\n for i in range(10, -1, -1):\n traffic = self.moDir.lookupByDn(epg_dn + '/HDl2IngrBytesAg15min-%s' % str(i))\n if traffic is not None:\n traffic_list.append(traffic)\n return traffic_list",
"def statistics(self):\n return self.get_statistics()",
"def list_nics(self, sort=False):\n return self._nic_mgmt.list_nics(sort)",
"def getStats(self):\n\n raise NotImplementedError",
"def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))",
"def compute_stats(self):\n from vmc.common.oal import osobj\n d = osobj.get_iface_stats()\n d.addCallback(self.update_stats)",
"def stats(self):\n url = client.build_url('stats')\n _, res_json = client.get(url, headers=self.headers)\n\n return res_json",
"def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)",
"def get_nic_settings(bmc):\n nic_settings = bmc.list_nics()\n return nic_settings",
"def showStatistics(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n stats = a.sys.net.lnx.device.DeviceUtils.getStatistics(self.name, self._log, deviceName) \n if stats:\n for key in stats:\n print \"%s: %s\" % (key, stats[key])",
"def bdev_nvme_get_transport_statistics(client):\n return client.call('bdev_nvme_get_transport_statistics')",
"def list(cls, context, limit=None, sort_key=None, sort_dir=None):\n db_nics = cls.dbapi.get_nic_list(limit=limit,\n sort_key=sort_key,\n sort_dir=sort_dir)\n return cls._from_db_object_list(context, db_nics)",
"def net_stat_sent(x, interface=None):\n if not interface:\n interface = get_netiface()\n\n if interface:\n return psutil.net_io_counters(pernic=True)[interface].bytes_sent\n else:\n return 0",
"def get_statistics(self):\n url = \"https://api.imgur.com/3/account/{0}/stats\".format(self.name)\n return self._imgur._send_request(url, needs_auth=True)",
"def get_stats(self, loadbalancer=None):\n uri = \"/loadbalancers/%s/stats\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n return body",
"def item_stats(host, port):\n\n stats = None\n try:\n mc = memcache.Client(['%s:%s' % (host, port)])\n stats = mc.get_stats()[0][1]\n except IndexError:\n raise\n finally:\n return stats"
] | [
"0.7753599",
"0.6423824",
"0.6105017",
"0.61031115",
"0.6079203",
"0.6048821",
"0.6016919",
"0.6014767",
"0.60094005",
"0.60058945",
"0.59886146",
"0.59816986",
"0.59121054",
"0.5828287",
"0.5790003",
"0.5785249",
"0.5766392",
"0.5762208",
"0.57266897",
"0.5714033",
"0.5706102",
"0.569781",
"0.5689192",
"0.5680061",
"0.5678136",
"0.567046",
"0.5640307",
"0.5630449",
"0.56279683",
"0.559966"
] | 0.77570844 | 0 |
Return the system id. | def get_system_id(self):
return system.SystemManagement(self.client).get_system_id() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_system_id(self):\n return self.machine_config_file_value(\"DEFAULT.SID\").strip('\"')",
"def file_system_id(self) -> str:\n return pulumi.get(self, \"file_system_id\")",
"def get_clone_system_id(self):\n\n sys_id = self.user_systems_mgr.get_system_id()\n return sys_id",
"def getSystemUidBySystemName(self,systemName):\n\n systemObj = self.getSystemByName(systemName)\n return systemObj[\"uuid\"]",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def unique_id(self) -> str:\n return str(self.coordinator.gios.station_id)",
"def get_hardware_id():\r\n try:\r\n return utils.run('crossystem hwid').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1",
"def get_systemname(self) -> str:\n\n return self.send(self.cmd.GET_SYSTEMNAME)",
"def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'",
"def unique_id(self):\n return self._device.serial",
"def unique_id(self):\n return self.device_id",
"def system(self):\n return self['system']",
"def unique_id(self):\n return self._device_id",
"def hardware_id(self):\n return uuid.uuid4()",
"def identify_system() -> str:\n system = platform.system()\n if system not in [\"Linux\", \"Darwin\"]:\n raise ValueError(f\"Unsupported system {system}\")\n return system",
"def get_id(self) -> str:\n return self._register_id",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")",
"def get_id(self):\n return self.get_sitename()",
"def get_id(self, name=None):\n\n # Support using integer IDs directly\n if isinstance(name, int):\n return name\n\n self.ensure_loaded()\n if name is not None:\n ems_systems = self.search('name', name.upper(), searchtype=\"match\")\n if ems_systems.empty:\n sys_names = self.list_all()['name'].to_list()\n raise ValueError(\n 'No matching systems found. You have access to: {0}'.format(sys_names))\n id = ems_systems.iloc[0]['id']\n else:\n ems_systems = self.list_all()\n if ems_systems.shape[0] == 1:\n id = ems_systems.iloc[0]['id']\n else:\n raise LookupError(\n 'Multiple ems systems found. Please select one from the available:\\n{0}'\n .format(ems_systems.loc[:, ['id', 'name']])\n )\n return id",
"def device_id(self):\n return self.unique_id",
"def id_text(self) -> str:\n return self.source_system + \" - \" + self.external_id + \" (\" + str(self.internal_id) + \")\"",
"def unique_id(self):\n return self._deviceId",
"def get_sys_name(self):\n\t\treturn call_sdk_function('PrlVmDev_GetSysName', self.handle)",
"def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")",
"def unique_id(self) -> Optional[str]:\n return self._device.device_id",
"def get_id(self):\n from ranger_performance_tool import perf_globals\n enabled_services = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"enabled_services\")\n service = random.choice(enabled_services)\n policy_list = self.remote_store.get_policy_list()[service]\n return random.choice(policy_list).id",
"def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id",
"def unique_id(self) -> str:\n return f\"{self._device.mac}_{self._router.config_entry.entry_id}\"",
"def standard_id(self):\n return self.get(\"standard_id\", decode=True)"
] | [
"0.83615786",
"0.7682273",
"0.7644981",
"0.7439022",
"0.72430104",
"0.7030237",
"0.70283455",
"0.7012562",
"0.68498755",
"0.6808678",
"0.6769043",
"0.6738126",
"0.6701721",
"0.6693738",
"0.6680318",
"0.66737354",
"0.66718143",
"0.6656414",
"0.6632786",
"0.6624779",
"0.66107714",
"0.65537167",
"0.65419406",
"0.65393394",
"0.6537017",
"0.65219325",
"0.65203273",
"0.65035427",
"0.65029866",
"0.6501405"
] | 0.9230369 | 0 |
Return the system model name. | def get_system_model_name(self):
return system.SystemManagement(self.client).get_system_model_name() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_model_name(self) -> str:\n return self._get_string(openvr.Prop_RenderModelName_String)",
"def get_model_name(self):\n\n model_name = []\n model_name.append(self.get_model_type_hash())\n model_name.append(str(int(time.time())))\n return \"_\".join(model_name) + \".model\"",
"def model_name(self) -> str:\n return self.profile_device.model_name",
"def model_name(self) -> str:\n return self._model_name",
"def get_model_name(self) -> str:\n raise NotImplementedError",
"def get_product_name(self):\n sushy_system = self._get_sushy_system()\n return sushy_system.model",
"def get_product_name(self):\n system = self._get_host_details()\n return system['Model']",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def _get_model(self) -> str:\n return str(self.hass.data[DOMAIN][self._config_entry.entry_id][ATTR_MODEL])",
"def model_name(self):\n setting = self.get_setting_definition(self.key, **self.get_kwargs())\n\n return setting.get('model', None)",
"def get_model_name():\n return 'Central Tendency'",
"def model_label(cls):\n return '{0}.{1}'.format(cls._meta.app_label, cls._meta.model_name)",
"def getName(self):\n return _libsbml.Model_getName(self)",
"def name(self) -> str:\n return self._name(self.model).decode(\"utf-8\")",
"def getName(self):\n return _libsbml.ModelCreator_getName(self)",
"def name(self) -> str: # pragma: no cover\n return self.model.__name__",
"def get_systemname(self) -> str:\n\n return self.send(self.cmd.GET_SYSTEMNAME)",
"def get_model_name(self) -> str:\n assert 'backbone' in self.model_cfg.model, 'backbone not in model '\n 'config'\n assert 'type' in self.model_cfg.model.backbone, 'backbone contains '\n 'no type'\n name = self.model_cfg.model.backbone.type.lower()\n return name",
"def get_model(self) -> str:\n return self._get_string(openvr.Prop_ModelNumber_String)",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def model(self) -> str:\n return self.properties[DBUS_ATTR_MODEL]",
"def model(self) -> Optional[str]:\n return pulumi.get(self, \"model\")",
"def name(self) -> str:\n return self._model.name",
"def model_name(self) -> str:\n return \"mock-model-name\"",
"def GetModelName(filename, model):\n\n is_srn_model = translator.IsSrnModel(model)\n if(is_srn_model):\n model_name = filename + \"SrnModel\"\n else:\n model_name = filename + \"CellCycleModel\"\n\n return model_name",
"def get_name(self):\n modelTree = self.model.find(xmlns + 'Model')\n return modelTree.attrib['name']",
"def get_model_name(self, pipette_id: str) -> str:\n return self.get_config(pipette_id).model",
"def get_model_title(self):\n pass",
"def get_sys_name(self):\n\t\treturn call_sdk_function('PrlVmDev_GetSysName', self.handle)",
"def get_model_name(instance):\n return '{}.{}'.format(\n instance._meta.app_label,\n instance.__name__ if inspect.isclass(instance) else instance.__class__.__name__)"
] | [
"0.8073972",
"0.7907462",
"0.7865882",
"0.7822895",
"0.77484596",
"0.77091956",
"0.7602069",
"0.7596205",
"0.7582411",
"0.75579596",
"0.74520415",
"0.73455113",
"0.7313186",
"0.72846437",
"0.7255404",
"0.7246455",
"0.72429657",
"0.72080094",
"0.7186412",
"0.71400344",
"0.7137875",
"0.71296495",
"0.7100846",
"0.70968753",
"0.7052772",
"0.69899595",
"0.6925585",
"0.69006914",
"0.6869713",
"0.68670565"
] | 0.9344253 | 0 |
Return the system service tag. | def get_system_service_tag(self):
return system.SystemManagement(self.client).get_system_service_tag() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def service(self) -> str:\n return pulumi.get(self, \"service\")",
"def system_service_type(self) -> pulumi.Input[Union[str, 'SystemServiceType']]:\n return pulumi.get(self, \"system_service_type\")",
"def name(self):\n return \"systemd Service\"",
"def tag(self) -> str:\n return pulumi.get(self, \"tag\")",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def getServiceName(self) -> str:\n ...",
"def system(self):\n return self['system']",
"def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def YumGetServiceName(vm):\n raise NotImplementedError",
"def service_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_id\")",
"def service_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_id\")",
"def tag(self) -> str:\n return self._tag",
"def service_kind(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_kind\")",
"def tag(self):\n return self._tag",
"def tag_name(self) -> str:\n return pulumi.get(self, \"tag_name\")",
"def service_id(self) -> str:\n return pulumi.get(self, \"service_id\")",
"def get_tag(self):\n return self.tag",
"def service(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service\")",
"def tag(self):\n return self.tag_",
"def system_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SystemServiceArgs']]]]:\n return pulumi.get(self, \"system_services\")",
"def _get_tag(self):\n return self.__tag",
"def get_tag(self) -> int:\n return self.tag",
"def tag(self):\n return self._tag",
"def get_systemname(self) -> str:\n\n return self.send(self.cmd.GET_SYSTEMNAME)",
"def get_system_id(self):\n return system.SystemManagement(self.client).get_system_id()",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system"
] | [
"0.6653975",
"0.6572798",
"0.65147954",
"0.6493566",
"0.6415243",
"0.6226118",
"0.6220162",
"0.6183548",
"0.6183548",
"0.61804986",
"0.61804986",
"0.61804986",
"0.61514616",
"0.6142467",
"0.6142467",
"0.6120087",
"0.6049482",
"0.6047134",
"0.6006878",
"0.6002862",
"0.59983057",
"0.59948283",
"0.59929657",
"0.5990298",
"0.59872013",
"0.5980902",
"0.59662175",
"0.59584135",
"0.5934534",
"0.5934374"
] | 0.9170598 | 0 |
Return true if the legacy, nonUEFI, boot protocol of a NIC is NONE, false otherwise. | def is_nic_legacy_boot_protocol_none(self, nic_id):
return self._nic_cfg.is_nic_legacy_boot_protocol_none(nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_pxe(nic_id)",
"def check_ethernet_network():\n default_iface = get_default_route()\n\n assert default_iface[1] == sc.conf.iface, \"incorrect sc.conf.iface\"\n iface_str = ''\n if sys.platform.startswith('win'):\n iface_info = sc.conf.iface\n iface_str = iface_info.guid\n else:\n iface_str = sc.conf.iface\n\n ifaddresses = netifaces.ifaddresses(str(iface_str))\n try:\n iface_mac = ifaddresses[netifaces.AF_LINK][0]['addr']\n except KeyError:\n return False\n return iface_mac != ''",
"def set_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'NONE')",
"def _is_boot_mode_uefi(self):\n boot_mode = self.get_current_boot_mode()\n if boot_mode == 'UEFI':\n return True\n else:\n return False",
"def no_afni():\n if Info.version() is None:\n return True\n return False",
"def is_bootable(self):\n return self.bootable_flag == 0x80",
"def is_logical(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsLogical', self.handle))",
"def is_dip(self):\n if (\n self.dip\n and not self.deleted\n and not self.replica\n and not self.aip\n and not self.sip\n ):\n return True\n return False",
"def is_http_boot_requested(node):\n http_boot_requested = (\n str(node.driver_info.get('enable_uefi_httpboot', 'false')).lower())\n return http_boot_requested == 'true'",
"def _is_valid_interface(device, switch, nos_driver):\n for key in device.keys():\n for (speed, interface) in device[key]:\n if not _is_valid_three_tupple(interface):\n return False\n if not _is_valid_interface_speed(speed):\n return False\n return True",
"def _has_nc_config():\n return _has_prog(\"nc-config\")",
"def isLoopbackEnabled(self):\n if DPxIsDoutDinLoopback() == 0:\n enable = False\n else:\n enable = True\n return enable",
"def is_if_oper_up(ifname):\n try:\n return open('/sys/class/net/' + ifname + '/carrier') \\\n .readline().strip() == '1'\n except:\n SysTools.logger.error(\"Failed to get carrier of %s\", ifname)\n return False",
"def pilotIsBootValid (self):\n return self.isBootValid()",
"def _is_ethernet_is_routed(self):\n E = data_element_maker()\n top = E.top(\n E.Ifmgr(\n E.Interfaces(\n E.Interface(\n E.IfIndex(self.iface_index)\n )\n )\n )\n )\n\n nc_get_reply = self.device.get(('subtree', top))\n reply_data = find_in_data('ifType', nc_get_reply.data_ele)\n\n routed_reply_data = find_in_data('PortLayer', nc_get_reply.data_ele)\n\n is_ethernet = False\n is_routed = False\n try:\n if reply_data.text == '6':\n is_ethernet = True\n except AttributeError:\n pass\n\n try:\n if routed_reply_data.text == '2':\n is_routed = True\n except AttributeError:\n pass\n\n return is_ethernet, is_routed",
"def CheckIfWirelessConnecting(self):\n if self.wifi.connecting_thread:\n return self.wifi.connecting_thread.is_connecting\n else:\n return False",
"def _is_network_type(self, name):\n nt = self.config[\"networks\"].get(name)\n return bool(nt)",
"def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False",
"def check_no_network():\n try:\n socket.gethostbyname(\"www.google.com\")\n return False\n except:\n return True",
"def _network_trunk_supported(self):\n if 'trunk' in self.network_extensions:\n return True\n return False",
"def IsWirelessUp(self):\n return self.wifi.IsUp()",
"def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP",
"def is_docker_user_defined_network(network): # type: (str) -> bool\n return bool(network) and network != 'bridge'",
"def is_network_appbase_ready(props):\n if \"HIVEIT_BLOCKCHAIN_VERSION\" in props:\n return False\n elif \"HIVE_BLOCKCHAIN_VERSION\" in props:\n return True",
"def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True",
"def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False",
"def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")",
"def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")",
"def is_available(cls):\n\n try:\n proc = subprocess.Popen(\n ['systemctl', 'status', 'NetworkManager'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n proc.communicate()\n return proc.returncode == 0\n except OSError:\n return False",
"def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)"
] | [
"0.66154706",
"0.63741845",
"0.6357392",
"0.61974496",
"0.6125806",
"0.59614533",
"0.5941091",
"0.5925817",
"0.591169",
"0.58899873",
"0.5883946",
"0.58777124",
"0.5874255",
"0.5867285",
"0.586479",
"0.5854184",
"0.583347",
"0.5828231",
"0.58214307",
"0.5816505",
"0.5798436",
"0.57429594",
"0.573465",
"0.57176965",
"0.5714416",
"0.57090986",
"0.57016796",
"0.57016796",
"0.5678814",
"0.5674039"
] | 0.7572183 | 0 |
Return true if the legacy, nonUEFI, boot protocol of a NIC is PXE, false otherwise. | def is_nic_legacy_boot_protocol_pxe(self, nic_id):
return self._nic_cfg.is_nic_legacy_boot_protocol_pxe(nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_boot_mode_uefi(self):\n boot_mode = self.get_current_boot_mode()\n if boot_mode == 'UEFI':\n return True\n else:\n return False",
"def pilotIsBootValid (self):\n return self.isBootValid()",
"def set_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'PXE')",
"def is_icmp(self) -> bool:\n return self.proto == ICMP",
"def is_virtual_network_host():\n return False",
"def on_powerpc():\n return processor() == 'powerpc' or machine().startswith('ppc')",
"def is_ip(self) -> bool:\n return self.typ == ETH_P_IP",
"def is_supported(self) -> bool:\n\n # TODO logging ?\n # TODO ICMP error if ttl is zero\n return self._version == 4 and self._ihl >= 5 and self._ttl != 0",
"def is_dip(self):\n if (\n self.dip\n and not self.deleted\n and not self.replica\n and not self.aip\n and not self.sip\n ):\n return True\n return False",
"def is_bootable(self):\n return self.bootable_flag == 0x80",
"def is_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_none(nic_id)",
"def is_logical(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsLogical', self.handle))",
"def has_efi():\n return os.path.exists(\"/sys/firmware/efi\")",
"def _has_nc_config():\n return _has_prog(\"nc-config\")",
"def check_ethernet_network():\n default_iface = get_default_route()\n\n assert default_iface[1] == sc.conf.iface, \"incorrect sc.conf.iface\"\n iface_str = ''\n if sys.platform.startswith('win'):\n iface_info = sc.conf.iface\n iface_str = iface_info.guid\n else:\n iface_str = sc.conf.iface\n\n ifaddresses = netifaces.ifaddresses(str(iface_str))\n try:\n iface_mac = ifaddresses[netifaces.AF_LINK][0]['addr']\n except KeyError:\n return False\n return iface_mac != ''",
"def is_http_boot_requested(node):\n http_boot_requested = (\n str(node.driver_info.get('enable_uefi_httpboot', 'false')).lower())\n return http_boot_requested == 'true'",
"def ipv6_native(self) -> bool:\n return pulumi.get(self, \"ipv6_native\")",
"def is_inet(inet):\n interface = False\n for i in netifaces.interfaces():\n if i == inet:\n interface = True\n return interface",
"def is_discover(pkt):\n dhcp_discover = 1\n try:\n dhcp_options = pkt['BOOTP']['DHCP options'].options\n message_type = filter(lambda x: x[0] == 'message-type',\n dhcp_options)\n message_type = message_type[0][1]\n return message_type == dhcp_discover\n except:\n return False",
"def is_if_oper_up(ifname):\n try:\n return open('/sys/class/net/' + ifname + '/carrier') \\\n .readline().strip() == '1'\n except:\n SysTools.logger.error(\"Failed to get carrier of %s\", ifname)\n return False",
"def is_connected():\r\n ipconfig_output = terminal('ipconfig | findstr /i gateway')\r\n if ipconfig_output != None:\r\n return any(i for i in ipconfig_output if i.isdigit())\r\n \r\n # Alternative way if ipconfig has error in some systems\r\n ## Slower than ipconfig workaround\r\n try:\r\n socket().connect(('8.8.8.8', 53))\r\n return True\r\n except:\r\n return False",
"def is_aip(self):\n if (\n self.aip\n and not self.deleted\n and not self.replica\n and not self.dip\n and not self.sip\n ):\n return True\n return False",
"def is_present(self):\n try:\n self.read_binary(0, 2)\n return True\n except:\n return False",
"def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP",
"def HasWiredDriver(self):\n if self.wired.driver:\n return True\n else:\n return False",
"def is_configure_with_dhcp(self):\n\t\treturn bool(call_sdk_function('PrlVmDevNet_IsConfigureWithDhcp', self.handle))",
"def isOnNao():\n szCpuInfo = \"/proc/cpuinfo\";\n if not os.path.exists( szCpuInfo ): # already done by the getFileContents\n return False;\n szAllFile = getFileContents( szCpuInfo, bQuiet = True );\n if( szAllFile.find( \"Geode\" ) == -1 and szAllFile.find( \"Intel(R) Atom(TM)\" ) == -1 ):\n return False;\n return True;",
"def olpc_xo_1():\n return os.path.exists('/etc/olpc-release') or \\\n os.path.exists('/sys/power/olpc-pm')",
"def is_openelec():\n if os.path.exists(\"/etc/openelec-release\"):\n return True\n osrelfile=\"/etc/os-release\"\n if os.path.exists(osrelfile) and \"openelec\" in open(osrelfile,'r').read().lower():\n return True\n return False",
"def sstcp_enabled():\n return common.POWER_CAP in SYSTEM_CAPS"
] | [
"0.6369349",
"0.61551464",
"0.6151524",
"0.58789355",
"0.5854465",
"0.5850924",
"0.58316624",
"0.58264965",
"0.5778411",
"0.5718265",
"0.56990516",
"0.56512976",
"0.56229997",
"0.5622764",
"0.5621647",
"0.5593441",
"0.55838615",
"0.55536574",
"0.5550629",
"0.5543813",
"0.553845",
"0.5529022",
"0.5521223",
"0.5503682",
"0.5501048",
"0.5471087",
"0.5469018",
"0.5468034",
"0.5459659",
"0.5455828"
] | 0.7476674 | 0 |
Return true if the link status of a NIC is up, false otherwise. | def is_nic_link_up(self, nic_id):
return self._nic_mgmt.is_nic_link_up(nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def IsLinkup(nic,timeout):\n nic = nic.strip()\n current = time.time()\n timeout += current\n while current < timeout:\n data = os.popen(\"ipconfig\").read().split(\"Ethernet adapter\")\n for item in data:\n if item.count(nic) and item.count(\"isconnected\") == 0: #Connected\n return 1\n time.sleep(0.5)\n current = time.time()\n return 0",
"def is_if_up(ifname):\n with open('/sys/class/net/' + ifname + '/carrier', 'r') as f:\n status = f.readline()\n return (status == '1')",
"def IsLinkdown(nic,timeout):\n nic = nic.strip()\n current = time.time()\n timeout += current\n while current < timeout:\n data = os.popen(\"ipconfig\").read().split(\"Ethernet adapter\")\n for item in data:\n if item.count(nic) and item.count(\"isconnected\"): #Disconnected\n return 1\n time.sleep(0.5)\n current = time.time()\n return 0",
"def IsWirelessUp(self):\n return self.wifi.IsUp()",
"def __CheckConnectStatus(self):\r\n if not self.tn:\r\n print \"Connection is down!\"\r\n return False\r\n else:\r\n print \"Connection is alive!\"\r\n return True",
"def wifi_connectivity_verify(self):\n self.sendline(\"iw %s link\" % self.iface_wifi)\n matched = self.expect([\"Connected\", \"Not connected\", pexpect.TIMEOUT])\n if matched == 0:\n return True\n else:\n return False",
"def isReachable(self):\n cmd = \"ping -c 1 %s\" % self.ip\n ping_output = commands.getoutput(cmd)\n logger.debug(cmd)\n logger.debug(ping_output)\n return re.search(\"1[\\s\\w]+received\", ping_output) is not None",
"def check_interface_status(conn_obj, interface, state, device=\"dut\"):\n interface_state = get_interface_status(conn_obj, interface, device=device)\n if interface_state != state:\n return False\n return True",
"def isconnected(self):\n return self._wlan.isconnected()",
"def is_connected():\n sta_if = network.WLAN(network.STA_IF)\n return sta_if.isconnected()",
"def is_available(cls):\n\n try:\n proc = subprocess.Popen(\n ['systemctl', 'status', 'NetworkManager'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n proc.communicate()\n return proc.returncode == 0\n except OSError:\n return False",
"def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")",
"def is_connected():\r\n ipconfig_output = terminal('ipconfig | findstr /i gateway')\r\n if ipconfig_output != None:\r\n return any(i for i in ipconfig_output if i.isdigit())\r\n \r\n # Alternative way if ipconfig has error in some systems\r\n ## Slower than ipconfig workaround\r\n try:\r\n socket().connect(('8.8.8.8', 53))\r\n return True\r\n except:\r\n return False",
"def is_alive(self):\n ret = subprocess.call(\n shlex.split(\"ping -c 1 -W 2 %s\" % self.ip_address),\n stdout=open('/dev/null', 'w'),\n stderr=subprocess.STDOUT,\n )\n \n if ret == 0:\n return True\n else:\n return False",
"def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False",
"def is_up(self):\n data = self.vxprint()\n return self.name in data and data[self.name].STATE == \"ACTIVE\"",
"def is_if_oper_up(ifname):\n try:\n return open('/sys/class/net/' + ifname + '/carrier') \\\n .readline().strip() == '1'\n except:\n SysTools.logger.error(\"Failed to get carrier of %s\", ifname)\n return False",
"def is_online(self) -> bool:\n return self.data[Attribute.ONLINE]",
"def validate_nic_down(self): \n\n pool = WorkerPool()\n\n try: \n for nic, hostname in self.nic_to_address_map:\n address = self.nic_to_address_map[(nic, hostname)]\n cmd = Ping('ping validation', address, ctxt=REMOTE, remoteHost='localhost')\n pool.addCommand(cmd)\n pool.join()\n\n for cmd in pool.getCompletedItems():\n results = cmd.get_results()\n if results.rc == 0:\n return False\n finally:\n pool.haltWork()\n pool.joinWorkers()\n pool.join()\n\n tinctest.logger.info(\"Successfully brought down nics ...\") \n return True",
"def available(self) -> bool:\n return self._device.is_online",
"def CheckWirelessConnectingMessage(self):\n if not self.wifi.connecting_thread == None:\n stat = self.wifi.connecting_thread.GetStatus()\n return stat\n else:\n return False",
"def check_status(self):\n try:\n self.server.ping()\n return True\n except Exception as e:\n return False",
"def CheckIfWirelessConnecting(self):\n if self.wifi.connecting_thread:\n return self.wifi.connecting_thread.is_connecting\n else:\n return False",
"def isonline():\n\n conn = httplib.HTTPConnection(\"www.google.com\", timeout=5)\n try:\n conn.request(\"HEAD\", \"/\")\n conn.close()\n return True\n except:\n conn.close()\n return False",
"def internet_on():\n try:\n urllib.request.urlopen('http://216.58.192.142', timeout=1)\n return True\n except urllib.error.URLError: \n return False",
"def get_vpnssl_status(iface):\n if iface in netifaces.interfaces():\n addr = netifaces.ifaddresses(iface)\n if len(addr) > 0: # vpn0 remains in the array even when gone, for whatever reason. So check if there is anything in there.\n return True\n\n return False",
"def is_on(self):\n return bool(self.arest.data.get('state'))",
"def check_up(addr, p):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((addr, p))\n sock.close()\n if result == 0:\n ans = True\n else:\n ans = False\n return ans",
"def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False",
"def isconnected(self) -> bool:"
] | [
"0.7716848",
"0.72441226",
"0.7074061",
"0.70293516",
"0.6758236",
"0.6741784",
"0.67188364",
"0.66291785",
"0.66074085",
"0.6605761",
"0.6487093",
"0.64246756",
"0.6413659",
"0.63578165",
"0.6322977",
"0.62656623",
"0.6259921",
"0.62360674",
"0.6233668",
"0.62157416",
"0.6215635",
"0.62154746",
"0.6204485",
"0.61927277",
"0.61381495",
"0.6120662",
"0.6100379",
"0.609207",
"0.6073204",
"0.60729766"
] | 0.78771025 | 0 |
Return the list of integrated NICs. | def list_integrated_nics(self, sort=False):
return self._nic_mgmt.list_integrated_nics(sort) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()",
"def list():\n\n\treturn netifaces.interfaces()",
"def list_nics(self, sort=False):\n return self._nic_mgmt.list_nics(sort)",
"def find_nic():\n result = subprocess.run([\"iw\", \"dev\"], capture_output=True).stdout.decode()\n network_interface_controllers = wlan_code.findall(result)\n return network_interface_controllers",
"def get_net_interfaces():\n import netifaces\n return netifaces.interfaces()",
"def getConnectedInterfaces(self):\n interfaces = self.connectedInterfaces[:] #make a copy\n interfaces.extend(self.getLinkedInterfaces())\n return interfaces",
"def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:\n return pulumi.get(self, \"network_interfaces\")",
"def getConnectedInterfacesOnly(self):\n return self.connectedInterfaces",
"def nics() -> List[str]:\n output = []\n\n if sys.platform == 'linux':\n try:\n # first we try ip addr command\n out = subprocess.Popen([\"ip\", \"addr\"],\n stdout=subprocess.PIPE)\n stdout, stderr = out.communicate()\n output = stdout.decode('utf-8').split(\"\\n\")\n except FileNotFoundError:\n # ip addr command failed so lets try ifconfig\n out = subprocess.Popen(\"ifconfig\",\n stdout=subprocess.PIPE)\n stdout, stderr = out.communicate()\n output = stdout.decode('utf-8').split(\"\\n\")\n elif sys.platform == 'darwin':\n return subprocess.call('ifconfig')\n elif sys.platform == 'win32':\n return subprocess.call('ipconfig')\n\n return output",
"def get_list_of_nets(self):\n return self.mfp.get_list_of_nets()",
"def getLogicalInterfaces(self):\n logicalinterfaces = []\n for interface in self.getServerStackInterfaces():\n if interface not in logicalinterfaces:\n logicalinterfaces.append(interface)\n if self not in logicalinterfaces:\n logicalinterfaces.append(self)\n for interface in self.getClientStackInterfaces():\n if interface not in logicalinterfaces:\n logicalinterfaces.append(interface)\n return logicalinterfaces",
"def interfaces(self):\n if self._interfaces is None:\n self._interfaces = list(x[\"interface\"] for x in self._interfaces_detailed_list())\n\n return self._interfaces",
"def _ifList(self):\n bNetworks = False\n for cmd in self.lstCmd[1:]:\n if cmd == 'networks' or cmd == 'n':\n bNetworks = True\n\n print 'enum interfaces ...'\n with self.wlan.enumInterfaces() as wlanIfData:\n # find each available network for each interface\n # for n,iface in enumerate(wlanIfData.ifaces):\n for n,iface in enumerate(wlanIfData):\n print \"%d : %-40s state:%s\" % (n,iface.strInterfaceDescription, iface.getState())\n if bNetworks:\n with self.wlan.getAvailableNetworks(iface) as wlanNetData:\n print ' %-15s %-30s %-15s %s' % ('Profile', 'SSID','Qual (dbm)','C:Connectable S:Secure P:Profile')\n print ' %-15s %-30s %-15s' % ('=======', '====','==========')\n for nw in wlanNetData:\n sConn = ' '\n sDesc = ''\n if nw.isConnectable():\n sDesc += 'C'\n if nw.isSecure():\n sDesc += 'S'\n if nw.isConnected():\n sConn = '*'\n if nw.hasProfile():\n sDesc += 'P'\n print ' %-15s %-30s %3d%% %.1f %s %s' % (nw.getProfileName(), nw.getSSID(), nw.getSignalQuality(), nw.getSignalQualityInDBM(), sConn, sDesc)",
"def list_all_sys_net_if():\n sys_net_path = glob.glob('/sys/class/net/*')\n # Now remove the /sys/class/net prefix, keep only the interface name\n p = re.compile('^/sys/class/net/')\n result = [ p.sub('', s) for s in sys_net_path ]\n \n return result",
"def getClientInterfaces(self):\n for adaptation in self.clientadaptations.values():\n if adaptation.hasActualClients(): # we assume only one adaptation can have client interfaces\n return adaptation.getClientInterfaces()\n return []",
"def monitoredInterfaceList(self):\n\n ifs = []\n confStr = self.config.linksToMonitor\n specLinks = parseConfStr(confStr)\n topo = self.net.topo\n topoLinks = topo.iterLinks()\n for s,d in specLinks:\n if (s,d) in topoLinks and topo.isSwitch(s) and topo.isSwitch(d):\n ifs.append('%s-eth%d' %(d, topo.port(s,d)[1]))\n else:\n info(\"**** [G2]:(%s,%s) is not a valid switch link in the topology; cannot be monitored\\n\" %(s,d))\n return ifs",
"def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]]:\n return pulumi.get(self, \"network_interfaces\")",
"def __get_scanning_range(self):\n if self.__network is not None:\n return [self.__network]\n networks = []\n interfaces = netifaces.interfaces()\n for data in interfaces:\n ips = netifaces.ifaddresses(data)\n for key, interface_data in ips.items():\n for item in interface_data:\n if item.get(\"netmask\", None) is not None and \\\n item.get(\"addr\", None) is not None and \\\n self.is_legal_ip(item[\"netmask\"]):\n if item.get(\"addr\") not in [\"127.0.0.1\", \"0.0.0.0\"]:\n network = \"{ip}/{cird}\".format(ip=item[\"addr\"],\n cird=IPAddress(item[\"netmask\"]).netmask_bits())\n if network not in networks:\n networks.append(network)\n return networks",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def interfaces(self):\n # TODO: make not a property\n int_list = self.get_interfaces()\n\n # Put loopbacks before physical interfaces\n type_index = {\"loopback\": 0, \"physical\": 1}\n # TODO: extend this based on medium category, etc\n\n int_list = sorted(int_list, key=lambda x: x.id)\n int_list = sorted(int_list, key=lambda x: type_index[x.category])\n return int_list",
"def getNets(self):\n\t\treturn NetLoader.listNetworks()",
"def list_networks(self):\n return self._get_names('SCLogicalNetwork')",
"def list(self, req, resp):\n interfaces = []\n for e in EntryPoints('tachyonic.element.interfaces'):\n interfaces.append({'id': e, 'name': e})\n return raw_list(req, interfaces)",
"def do_nic_list(cc, args):\n nics = cc.nic.list()\n names = ['%s (uuid) %s (mac)' % (nic.get('uuid'), nic.get('mac')) for nic in\n nics['nics']]\n cliutils.print_list(names, args.json)",
"def netlist(self):\n return self._netlist",
"def getDirectlySwitchedInterfaces(self):\n interfaces = self.switchedInterfaces[:]\n interfaces.extend(self.packetSwtInterfaces)\n interfaces.extend(self.circuitSwtInterfaces)\n return interfaces",
"def __get_network_interface_info(self):\n iface_list = []\n for i in netifaces.interfaces():\n addr = netifaces.ifaddresses(i)\n\n\n # clumsy way to filter which interfaces get added to list. If these elements raise KeyErrors, we skip\n try:\n iface_list.append( {i : { \n 'ip_address' : addr[netifaces.AF_INET][0]['addr'],\n 'mac' : addr[netifaces.AF_LINK][0]['addr']\n }})\n except KeyError,e:\n\t pass\n self.print_debug(\"Key not found - _get_network_interface_info - {0}\".format(addr))\n\n return iface_list",
"def getLocalInterfaces():\n SIOCGIFCONF = 0x8912\n MAXBYTES = 8096\n \n var1 = 32\n var2 = 32\n \n sock = socket(AF_INET, SOCK_DGRAM)\n names = array('B', '\\0' * MAXBYTES)\n outbytes = unpack('iL', ioctl(sock.fileno(), SIOCGIFCONF, pack('iL', MAXBYTES, names.buffer_info()[0]) ))[0]\n \n namestr = names.tostring()\n \n return [(namestr[i:i+var1].split('\\0', 1)[0], inet_ntoa(namestr[i+20:i+24])) for i in xrange(0, outbytes, var2)]",
"def getLinkedInterfaces(self):\n if self.linkedSegment:\n interfaces = self.linkedInterfaces[:] # make a copy, as we modify the list\n interfaces.extend(self.linkedSegment.getOtherInterfaces(self))\n return interfaces\n else:\n return self.linkedInterfaces",
"def all_interfaces():\n max_possible = 128 # arbitrary. raise if needed.\n number_of_bytes = max_possible * 32\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n names = array.array('B', '\\0' * number_of_bytes)\n outbytes = struct.unpack('iL', fcntl.ioctl(\n s.fileno(),\n 0x8912, # SIOCGIFCONF\n struct.pack('iL', number_of_bytes, names.buffer_info()[0])\n ))[0]\n namestr = names.tostring()\n interfaces = {}\n\n for i in range(0, outbytes, 40):\n name = namestr[i:i+16].split('\\0', 1)[0]\n ip = namestr[i+20:i+24]\n interfaces[name] = format_ip(ip)\n return interfaces"
] | [
"0.7046846",
"0.6910179",
"0.67962945",
"0.6772716",
"0.67553645",
"0.6754463",
"0.6702723",
"0.65841603",
"0.6415843",
"0.63678586",
"0.6367431",
"0.6342075",
"0.63199943",
"0.63144964",
"0.6306874",
"0.6298536",
"0.62208396",
"0.6210721",
"0.6205201",
"0.61946857",
"0.6175783",
"0.6150843",
"0.6088086",
"0.60497063",
"0.60466903",
"0.6036275",
"0.6024428",
"0.6014299",
"0.5978659",
"0.5965465"
] | 0.75222945 | 0 |
Return the list of attribute settings of a NIC. | def list_nic_settings(self, nic_id):
return self._nic_cfg.list_nic_settings(nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nic_attributes(cls, interface):\n return NodeNICInterfaceClusterPlugin.\\\n get_all_enabled_attributes_by_interface(interface)",
"def get_nic_settings(bmc):\n nic_settings = bmc.list_nics()\n return nic_settings",
"def get_attribute_list(self):\n return self.dp.get_attribute_list()",
"def network_settings(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['NetworkSettings']",
"def get_nic_setting(self, nic_id, attribute_name):\n return self._nic_cfg.get_nic_setting(nic_id, attribute_name)",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def list_idrac_settings(self):\n return self._idrac_cfg.list_idrac_settings()",
"def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)",
"def getAttributes(self, convertToString = False):\n d = self.__dict__\n list = []\n \n # loop through list given return values in proper format\n for item in self.defaultAttributes:\n if d.has_key(item):\n if convertToString:\n list.append(str(d[item]))\n else:\n list.append(d[item])\n return list",
"def get_bond_attributes(cls, bond):\n return NodeBondInterfaceClusterPlugin.\\\n get_all_enabled_attributes_by_bond(bond)",
"def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)",
"def ListAttributes(self):\n\n ListAttributes(self)",
"def get_attrs(self):\n return dir(self.config)",
"def get_attrs(self):\n return dir(self.config)",
"def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)",
"def nattributes(self):\n return self.host.nattributes",
"def device_state_attributes(self):\n if self._type == ATTR_CAQI:\n self._attrs[ATTR_CAQI_LEVEL] = self.data[ATTR_CAQI_LEVEL]\n if self._type == ATTR_PM25:\n self._attrs[ATTR_LIMIT] = self.data[ATTR_PM25_LIMIT]\n self._attrs[ATTR_PERCENT] = round(self.data[ATTR_PM25_PERCENT])\n if self._type == ATTR_PM10:\n self._attrs[ATTR_LIMIT] = self.data[ATTR_PM10_LIMIT]\n self._attrs[ATTR_PERCENT] = round(self.data[ATTR_PM10_PERCENT])\n return self._attrs",
"def zimbra_attributes():\n return [ 'displayName',\n 'zimbraAccountstatus',\n 'givenName',\n 'sn',\n 'zimbraIsAdminAccount',\n 'zimbraPrefMailForwardingAddress',\n 'zimbraPrefOutOfOfficeCacheDuration',\n 'zimbraPrefOutOfOfficeDirectAddress',\n 'zimbraPrefOutOfOfficeFromDate',\n 'zimbraPrefOutOfOfficeReply',\n 'zimbraPrefOutOfOfficeReplyEnabled',\n 'zimbraPrefOutOfOfficeUntilDate',\n 'zimbraPrefHtmlEditorDefaultFontColor',\n 'zimbraPrefHtmlEditorDefaultFontFamily',\n 'zimbraPrefHtmlEditorDefaultFontSize',\n 'zimbraPrefMessageViewHtmlPreferred',\n 'zimbraMailSieveScript',\n 'zimbraPrefComposeFormat',\n 'zimbraPrefGroupMailBy',\n 'zimbraSignatureName',\n 'zimbraSignatureId',\n 'zimbraPrefMailSignatureHTML',\n 'zimbraPrefMailSignature',\n 'zimbraPrefForwardReplySignatureId',\n 'zimbraPrefDefaultSignatureId',\n# 'zimbraMailForwardingAddress',\n 'userPassword' ]",
"def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})",
"def getAttributes(self):\n pass",
"def getRuntimeAttrs(ad):\n \n re_runtime = re.compile('^(.*)Runtime$')\n\n # some attributes should always be ignored\n re_ignore = re.compile('^DC(Socket|Pipe)')\n ignored_attrs = ['SCGetAutoCluster_cchit']\n\n attrs = []\n for key in ad.keys():\n match = re_runtime.match(key)\n if match:\n attr = match.groups()[0]\n if not (re_ignore.match(attr) or (attr in ignored_attrs)):\n attrs.append(attr)\n\n return attrs",
"def __get_network_interface_info(self):\n iface_list = []\n for i in netifaces.interfaces():\n addr = netifaces.ifaddresses(i)\n\n\n # clumsy way to filter which interfaces get added to list. If these elements raise KeyErrors, we skip\n try:\n iface_list.append( {i : { \n 'ip_address' : addr[netifaces.AF_INET][0]['addr'],\n 'mac' : addr[netifaces.AF_LINK][0]['addr']\n }})\n except KeyError,e:\n\t pass\n self.print_debug(\"Key not found - _get_network_interface_info - {0}\".format(addr))\n\n return iface_list",
"def get_attributes(cls):\r\n return [\r\n Attribute('size', '20'),\r\n Attribute('inline', False),\r\n Attribute('label', ''),\r\n ]",
"def device_state_attributes(self):\n if self.airly.data_available:\n if self.type == ATTR_CAQI_DESCRIPTION:\n self._attrs[ATTR_CAQI_ADVICE] = (self.airly.data\n [ATTR_CAQI_ADVICE])\n if self.type == ATTR_CAQI:\n self._attrs[ATTR_CAQI_LEVEL] = self.airly.data[ATTR_CAQI_LEVEL]\n if self.type == ATTR_PM25:\n self._attrs[ATTR_LIMIT] = self.airly.data[ATTR_PM25_LIMIT]\n self._attrs[ATTR_PERCENT] = (round(self.airly.data\n [ATTR_PM25_PERCENT]))\n if self.type == ATTR_PM10:\n self._attrs[ATTR_LIMIT] = self.airly.data[ATTR_PM10_LIMIT]\n self._attrs[ATTR_PERCENT] = (round(self.airly.data\n [ATTR_PM10_PERCENT]))\n return self._attrs",
"def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes",
"def __get_attributes(self, config_details):\n attributes = []\n\n if 'attribute' in config_details:\n if type(config_details['attribute']) == dict:\n attributes.append(config_details['attribute'])\n else:\n attributes = config_details['attribute']\n\n return attributes",
"def _ifList(self):\n bNetworks = False\n for cmd in self.lstCmd[1:]:\n if cmd == 'networks' or cmd == 'n':\n bNetworks = True\n\n print 'enum interfaces ...'\n with self.wlan.enumInterfaces() as wlanIfData:\n # find each available network for each interface\n # for n,iface in enumerate(wlanIfData.ifaces):\n for n,iface in enumerate(wlanIfData):\n print \"%d : %-40s state:%s\" % (n,iface.strInterfaceDescription, iface.getState())\n if bNetworks:\n with self.wlan.getAvailableNetworks(iface) as wlanNetData:\n print ' %-15s %-30s %-15s %s' % ('Profile', 'SSID','Qual (dbm)','C:Connectable S:Secure P:Profile')\n print ' %-15s %-30s %-15s' % ('=======', '====','==========')\n for nw in wlanNetData:\n sConn = ' '\n sDesc = ''\n if nw.isConnectable():\n sDesc += 'C'\n if nw.isSecure():\n sDesc += 'S'\n if nw.isConnected():\n sConn = '*'\n if nw.hasProfile():\n sDesc += 'P'\n print ' %-15s %-30s %3d%% %.1f %s %s' % (nw.getProfileName(), nw.getSSID(), nw.getSignalQuality(), nw.getSignalQualityInDBM(), sConn, sDesc)",
"def attributes(self):\n\n return list(self._attributes.values())",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings"
] | [
"0.7211066",
"0.66627526",
"0.6172818",
"0.61454195",
"0.5977508",
"0.57929236",
"0.5752779",
"0.57477176",
"0.57086796",
"0.56704056",
"0.5659287",
"0.5601022",
"0.55941725",
"0.55941725",
"0.5570813",
"0.5560112",
"0.55204666",
"0.5490301",
"0.547466",
"0.54474443",
"0.54402345",
"0.5438262",
"0.5437149",
"0.5422368",
"0.5416378",
"0.54150313",
"0.5411222",
"0.5409503",
"0.5391971",
"0.5391971"
] | 0.67747986 | 1 |
Return the list of NICs. | def list_nics(self, sort=False):
return self._nic_mgmt.list_nics(sort) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list():\n\n\treturn netifaces.interfaces()",
"def do_nic_list(cc, args):\n nics = cc.nic.list()\n names = ['%s (uuid) %s (mac)' % (nic.get('uuid'), nic.get('mac')) for nic in\n nics['nics']]\n cliutils.print_list(names, args.json)",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def list(cls, context, limit=None, sort_key=None, sort_dir=None):\n db_nics = cls.dbapi.get_nic_list(limit=limit,\n sort_key=sort_key,\n sort_dir=sort_dir)\n return cls._from_db_object_list(context, db_nics)",
"def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:\n return pulumi.get(self, \"network_interfaces\")",
"def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()",
"def nics() -> List[str]:\n output = []\n\n if sys.platform == 'linux':\n try:\n # first we try ip addr command\n out = subprocess.Popen([\"ip\", \"addr\"],\n stdout=subprocess.PIPE)\n stdout, stderr = out.communicate()\n output = stdout.decode('utf-8').split(\"\\n\")\n except FileNotFoundError:\n # ip addr command failed so lets try ifconfig\n out = subprocess.Popen(\"ifconfig\",\n stdout=subprocess.PIPE)\n stdout, stderr = out.communicate()\n output = stdout.decode('utf-8').split(\"\\n\")\n elif sys.platform == 'darwin':\n return subprocess.call('ifconfig')\n elif sys.platform == 'win32':\n return subprocess.call('ipconfig')\n\n return output",
"def get_net_interfaces():\n import netifaces\n return netifaces.interfaces()",
"def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]",
"def list_networks(self):\n return self._get_names('SCLogicalNetwork')",
"def list(self, req, resp):\n interfaces = []\n for e in EntryPoints('tachyonic.element.interfaces'):\n interfaces.append({'id': e, 'name': e})\n return raw_list(req, interfaces)",
"def find_nic():\n result = subprocess.run([\"iw\", \"dev\"], capture_output=True).stdout.decode()\n network_interface_controllers = wlan_code.findall(result)\n return network_interface_controllers",
"def getNets(self):\n\t\treturn NetLoader.listNetworks()",
"def netlist(self):\n return self._netlist",
"def _get_nics(vm_):\n nics = []\n if \"public_lan\" in vm_:\n firewall_rules = []\n # Set LAN to public if it already exists, otherwise create a new\n # public LAN.\n if \"public_firewall_rules\" in vm_:\n firewall_rules = _get_firewall_rules(vm_[\"public_firewall_rules\"])\n nic = NIC(\n lan=set_public_lan(int(vm_[\"public_lan\"])),\n name=\"public\",\n firewall_rules=firewall_rules,\n )\n if \"public_ips\" in vm_:\n nic.ips = _get_ip_addresses(vm_[\"public_ips\"])\n nics.append(nic)\n\n if \"private_lan\" in vm_:\n firewall_rules = []\n if \"private_firewall_rules\" in vm_:\n firewall_rules = _get_firewall_rules(vm_[\"private_firewall_rules\"])\n nic = NIC(\n lan=int(vm_[\"private_lan\"]), name=\"private\", firewall_rules=firewall_rules\n )\n if \"private_ips\" in vm_:\n nic.ips = _get_ip_addresses(vm_[\"private_ips\"])\n if \"nat\" in vm_ and \"private_ips\" not in vm_:\n nic.nat = vm_[\"nat\"]\n nics.append(nic)\n return nics",
"def get_list_of_nets(self):\n return self.mfp.get_list_of_nets()",
"def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def list_integrated_nics(self, sort=False):\n return self._nic_mgmt.list_integrated_nics(sort)",
"def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]]:\n return pulumi.get(self, \"network_interfaces\")",
"def show_networks():\n return get_networks()",
"def network_list(self, kwargs=None):\n try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))",
"def app_network_interface_list(self, **kwargs):\n return self._get(\n _name=APINames.Application,\n _method=\"networkInterfaceList\",\n response_class=NetworkInterfaceList,\n **kwargs\n )",
"def enumerate_network(arg):\n\n network = ip_network(arg, strict=False)\n data = list(map(str, network.hosts()))\n data.insert(0, str(network.network_address))\n if network.prefixlen != network.max_prefixlen:\n data.append(str(network.broadcast_address))\n return data",
"def _ifList(self):\n bNetworks = False\n for cmd in self.lstCmd[1:]:\n if cmd == 'networks' or cmd == 'n':\n bNetworks = True\n\n print 'enum interfaces ...'\n with self.wlan.enumInterfaces() as wlanIfData:\n # find each available network for each interface\n # for n,iface in enumerate(wlanIfData.ifaces):\n for n,iface in enumerate(wlanIfData):\n print \"%d : %-40s state:%s\" % (n,iface.strInterfaceDescription, iface.getState())\n if bNetworks:\n with self.wlan.getAvailableNetworks(iface) as wlanNetData:\n print ' %-15s %-30s %-15s %s' % ('Profile', 'SSID','Qual (dbm)','C:Connectable S:Secure P:Profile')\n print ' %-15s %-30s %-15s' % ('=======', '====','==========')\n for nw in wlanNetData:\n sConn = ' '\n sDesc = ''\n if nw.isConnectable():\n sDesc += 'C'\n if nw.isSecure():\n sDesc += 'S'\n if nw.isConnected():\n sConn = '*'\n if nw.hasProfile():\n sDesc += 'P'\n print ' %-15s %-30s %3d%% %.1f %s %s' % (nw.getProfileName(), nw.getSSID(), nw.getSignalQuality(), nw.getSignalQualityInDBM(), sConn, sDesc)",
"def network_interfaces():\n try:\n command = which('ipadm')\n args = ('show-addr', '-p', '-o', 'STATE,ADDR')\n pattern = r'ok:(\\d+\\.\\d+\\.\\d+\\.\\d+)'\n except CommandMissing:\n # Fall back to old command on old solaris releases.\n command = which('/usr/sbin/ifconfig')\n args = ('-a')\n pattern = r'inet (\\d+\\.\\d+\\.\\d+\\.\\d+)'\n addrs = []\n output = sh(command, *args)\n for line in output:\n match = re.match(pattern, line)\n if match:\n addr = match.group(1)\n if not addr.startswith(\"127.\"):\n addrs.append(addr)\n return addrs",
"def list_all_sys_net_if():\n sys_net_path = glob.glob('/sys/class/net/*')\n # Now remove the /sys/class/net prefix, keep only the interface name\n p = re.compile('^/sys/class/net/')\n result = [ p.sub('', s) for s in sys_net_path ]\n \n return result",
"def get_nic_settings(bmc):\n nic_settings = bmc.list_nics()\n return nic_settings",
"def list_networks():\n return __sets.keys()",
"def get_port_interfaces(self, oid):\n path = '/servers/%s/os-interface' % oid\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('List port interfaces for server %s: %s' % \n (oid, truncate(res)))\n nets = res[0]['interfaceAttachments']\n for item in nets:\n item[u'name'] = None\n return nets",
"def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips"
] | [
"0.753512",
"0.74959314",
"0.73965627",
"0.7247876",
"0.69917715",
"0.68175",
"0.67618614",
"0.67329895",
"0.6687937",
"0.6673624",
"0.66728854",
"0.66436124",
"0.66415584",
"0.65566283",
"0.6548407",
"0.6429445",
"0.6410612",
"0.63945276",
"0.637308",
"0.6348048",
"0.63194376",
"0.6283433",
"0.62829953",
"0.6225862",
"0.6221424",
"0.61943215",
"0.61889815",
"0.6179977",
"0.6161635",
"0.6124754"
] | 0.760934 | 0 |
Schedules jobs for execution in a specified order. | def schedule_job_execution(self, job_ids, start_time='TIME_NOW'):
return self._job_mgmt.schedule_job_execution(job_ids, start_time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def schedule(self, jobs):\n assert(isinstance(jobs, list) or jobs is None)\n with self.__lock:\n for job in jobs:\n while True:\n try:\n self.__queue.put(job, False)\n self.__lock.notify_all()\n break\n except Queue.Full:\n self.__lock.wait()",
"def set_task_order(self, order):\n for task in self.tasks:\n task.order = order",
"def _place_orders_onto_queue(self, order_list: List[OrderEvent]):\n for order_event in order_list:\n self._events.add_event(order_event)",
"def scheduler(self):\n\n while not self.stop.is_set():\n # Getting job from the schedule queue\n for job in self.job_gen():\n executor = threading.Thread(target=self.executor, args=(job,))\n executor.start()\n self.running_jobs.put((executor, job))\n\n time.sleep(SCHEDULER.FINEDELAY)",
"def test_equal_priority_jobs_run_in_submit_order(\n self, equal_priority_execute_events\n ):\n for i in range(1, NUM_JOBS):\n assert (\n JobID.from_job_event(equal_priority_execute_events[i]).proc\n > JobID.from_job_event(equal_priority_execute_events[i - 1]).proc\n )",
"def schedule(args):\n job_args = [(x[0], x[1]) for x in (y.split(\"=\", 1) for y in args.arg)]\n _projects = lib.get_projects(\n args.target, args.project, username=args.username, password=args.password\n )\n for project in _projects:\n _spiders = lib.get_spiders(\n args.target,\n project,\n args.spider,\n username=args.username,\n password=args.password,\n )\n for spider in _spiders:\n job_id = lib.schedule(\n args.target,\n project,\n spider,\n job_args,\n username=args.username,\n password=args.password,\n )\n print(f\"{project} / {spider} => {job_id}\")",
"def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)",
"def simulateArrivalOfJobs(env, processes, batchQ):\n for p in processes:\n batchQ.addToBq(p)",
"def run(self):\r\n db_conn = order_db_connector()\r\n db_conn.process_order(self.id)\r\n schedule.every(self.order_max_lifetime).seconds.do(self.order_finish).tag(self.id)\r\n #schedule.every(5).seconds.do(self.trading_main).tag(f'{self.id}_main',self.id)\r\n self.trading_main()\r\n logger.info(\"ENDED trading main\")\r\n \"\"\"\r\n Add order status \r\n \"\"\"\r\n\r\n #Clear scheduled task to avoid task stacking in scheduler\r",
"def do_jobs(self, job_list, wait_time=2, max_iters=100):\n for task_name in job_list:\n self.do_job(\n task_name,\n wait_time=wait_time,\n max_iters=max_iters\n )",
"def schedule(self):\n\n self.task_cls.original_apply_async(*self.task_args,\n **self.task_kwargs)",
"def step_impl(context):\n for executor in context.executors_to_run:\n now = datetime.now() + timedelta(seconds=2)\n trigger = DateTrigger(run_date=now)\n context.manager.update_executor_trigger(executor.id, trigger)\n\n # give time to execute the jobs\n wait_for_job_executed(context, executor.id)\n\n context.last_executor_id = executor.id",
"def execute(self):\n for move in self._queue:\n move.execute()",
"def run_pending(self):\n logger.debug(\"available jobs: \" + str(self.jobs))\n runnable_jobs = (job for job in self.jobs if job.should_run())\n logger.debug(\"runnable jobs: \" + str(self.jobs))\n for job in runnable_jobs:\n job.run()",
"def _schedule(self, when):\n sched = IScheduler(self.store)\n for scheduledAt in sched.scheduledTimes(self):\n # https://github.com/twisted/epsilon/issues/38\n if when._time < scheduledAt._time:\n sched.reschedule(self, scheduledAt, when)\n break\n else:\n sched.schedule(self, when)",
"def __run_schedules():\n while True:\n __scheduler.run()",
"def run_joblist(self):\n\n for message in self.message_list:\n self.run_job(message)",
"def start(self):\n\n while len(self.task_order) > 0:\n # Get the task to run, set it up, and run it\n task = self.task_order[0]\n\n # In the case of a sublist, we'll run all in parallel\n if type(task) is list:\n running_jobs = []\n job_handles = []\n print(\"Starting following tasks in parallel:\")\n for sub_task in task:\n # Add the job to a list to run. Note, each task has a\n # system object within it.\n running_jobs.append(self.task_list[sub_task])\n # If we want to keep using the same system as before\n # then assign it here.\n if running_jobs[-1].persist_system:\n running_jobs[-1].system = self.global_system\n running_jobs[-1].system.name = running_jobs[-1].task_name\n\n # Run all job\n job_handles.append(running_jobs[-1].run())\n print(\"\\t%s\" % sub_task)\n\n # Wait for jobs to finish\n for j in job_handles:\n j.wait()\n\n # Read in the data from each job\n self.data = []\n for j in running_jobs:\n j.read_results()\n self.data.append(j.data)\n\n # Check conditionals\n conditional_jobs = []\n for j in running_jobs:\n if j.conditional(j.data):\n conditional_jobs.append(j.conditional_sim_name)\n if len(conditional_jobs) > 0:\n if len(conditional_jobs) == 1:\n conditional_jobs = conditional_jobs[0]\n # Overwrite the previous task jobs and run conditionals\n self.task_order[0] = conditional_jobs\n continue\n\n # Check callbacks. Note, callbacks are only run if\n # conditionals are false.\n for j in running_jobs:\n if j.callback is not None:\n j.callback(self, j)\n\n # Remove the last simulation and continue\n del self.task_order[0]\n else:\n running_job = self.task_list[task]\n # Setup\n if running_job.persist_system:\n running_job.system = self.global_system\n running_job.system.name = running_job.task_name\n # Run\n print(\"Starting the following task:\")\n print(\"\\t%s\" % task)\n job_handle = running_job.run()\n\n job_handle.wait()\n\n # Read in the results of the simulation\n running_job.read_results()\n\n # If we have a conditional simulation to run, check and do so.\n # Note, in the case of a conditional, callback is not run!\n if running_job.conditional(running_job.data):\n self.task_order[0] = running_job.conditional_sim_name\n self.data = running_job.data\n continue\n\n # Store the data from the last simulation here\n self.data = running_job.data\n\n if running_job.callback is not None:\n running_job.callback(self, running_job)\n\n # Else, remove the finished simulation and continue\n del self.task_order[0]",
"def schedule_jobs(self):\n for species in self.species_dict.values():\n if species.initial_xyz is None and species.final_xyz is None and species.conformers \\\n and any([e is not None for e in species.conformer_energies]):\n # The species has no xyz, but has conformers and at least one of the conformers has energy.\n self.determine_most_stable_conformer(species.label)\n if species.initial_xyz is not None:\n if self.composite_method:\n self.run_composite_job(species.label)\n else:\n self.run_opt_job(species.label, fine=self.fine_only)\n self.run_conformer_jobs()\n self.spawn_ts_jobs() # If all reactants/products are already known (Arkane yml or restart), spawn TS searches.\n while self.running_jobs != {}:\n self.timer = True\n for label in self.unique_species_labels:\n if self.output[label]['convergence'] is False:\n # Skip unconverged species.\n if label in self.running_jobs:\n del self.running_jobs[label]\n continue\n # Look for completed jobs and decide what jobs to run next.\n self.get_server_job_ids() # updates ``self.server_job_ids``\n self.get_completed_incore_jobs() # updates ``self.completed_incore_jobs``\n if label not in self.running_jobs.keys():\n continue\n job_list = self.running_jobs[label]\n for job_name in job_list:\n if 'conformer' in job_name:\n i = get_i_from_job_name(job_name)\n job = self.job_dict[label]['conformers'][i]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # this is a completed conformer job\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n troubleshooting_conformer = self.parse_conformer(job=job, label=label, i=i)\n if troubleshooting_conformer:\n break\n # Just terminated a conformer job.\n # Are there additional conformer jobs currently running for this species?\n for spec_jobs in job_list:\n if 'conformer' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All conformer jobs terminated.\n # Check isomorphism and run opt on most stable conformer geometry.\n logger.info(f'\\nConformer jobs for {label} successfully terminated.\\n')\n if self.species_dict[label].is_ts:\n self.determine_most_likely_ts_conformer(label)\n else:\n self.determine_most_stable_conformer(label) # also checks isomorphism\n if self.species_dict[label].initial_xyz is not None:\n # if initial_xyz is None, then we're probably troubleshooting conformers, don't opt\n if not self.composite_method:\n self.run_opt_job(label, fine=self.fine_only)\n else:\n self.run_composite_job(label)\n self.timer = False\n break\n if 'tsg' in job_name:\n job = self.job_dict[label]['tsg'][get_i_from_job_name(job_name)]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n # This is a successfully completed tsg job. It may have resulted in several TSGuesses.\n self.end_job(job=job, label=label, job_name=job_name)\n if job.local_path_to_output_file.endswith('.yml'):\n for rxn in job.reactions:\n rxn.ts_species.process_completed_tsg_queue_jobs(yml_path=job.local_path_to_output_file)\n # Just terminated a tsg job.\n # Are there additional tsg jobs currently running for this species?\n for spec_jobs in job_list:\n if 'tsg' in spec_jobs and spec_jobs != job_name:\n break\n else:\n # All tsg jobs terminated. 
Spawn confs.\n logger.info(f'\\nTS guess jobs for {label} successfully terminated.\\n')\n self.run_conformer_jobs(labels=[label])\n self.timer = False\n break\n elif 'opt' in job_name:\n # val is 'opt1', 'opt2', etc., or 'optfreq1', optfreq2', etc.\n job = self.job_dict[label]['opt'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_opt_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'freq' in job_name:\n # this is NOT an 'optfreq' job\n job = self.job_dict[label]['freq'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_freq_job(label=label, job=job)\n self.timer = False\n break\n elif 'sp' in job_name:\n job = self.job_dict[label]['sp'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_sp_job(label=label, job=job)\n self.timer = False\n break\n elif 'composite' in job_name:\n job = self.job_dict[label]['composite'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n success = self.parse_composite_geo(label=label, job=job)\n if success:\n self.spawn_post_opt_jobs(label=label, job_name=job_name)\n self.timer = False\n break\n elif 'directed_scan' in job_name:\n job = self.job_dict[label]['directed_scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.check_directed_scan_job(label=label, job=job)\n if 'cont' in job.directed_scan_type and job.job_status[1]['status'] == 'done':\n # This is a continuous restricted optimization, spawn the next job in the scan.\n xyz = parser.parse_xyz_from_file(job.local_path_to_output_file) \\\n if not hasattr(job, 'opt_xyz') else job.opt_xyz\n self.spawn_directed_scan_jobs(label=label, rotor_index=job.rotor_index, xyz=xyz)\n if 'brute_force' in job.directed_scan_type:\n # Just terminated a brute_force directed scan job.\n # Are there additional jobs of the same type currently running for this species?\n self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs'] -= 1\n if not self.species_dict[label].rotors_dict[job.rotor_index]['number_of_running_jobs']:\n # All brute force scan jobs for these pivots terminated.\n logger.info(f'\\nAll brute force directed scan jobs for species {label} between '\n f'pivots {job.pivots} successfully terminated.\\n')\n self.process_directed_scans(label, pivots=job.pivots)\n shutil.rmtree(job.local_path, ignore_errors=True)\n self.timer = False\n break\n elif 'scan' in job_name and 'directed' not in job_name:\n job = self.job_dict[label]['scan'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = 
self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination \\\n and (job.directed_scan_type is None or job.directed_scan_type == 'ess'):\n self.check_scan_job(label=label, job=job)\n self.timer = False\n break\n elif 'irc' in job_name:\n job = self.job_dict[label]['irc'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n self.spawn_post_irc_jobs(label=label, job=job)\n self.timer = False\n break\n elif 'orbitals' in job_name:\n job = self.job_dict[label]['orbitals'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # copy the orbitals file to the species / TS output folder\n folder_name = 'rxns' if self.species_dict[label].is_ts else 'Species'\n orbitals_path = os.path.join(self.project_directory, 'output', folder_name, label,\n 'geometry', 'orbitals.fchk')\n if os.path.isfile(job.local_path_to_orbitals_file):\n try:\n shutil.copyfile(job.local_path_to_orbitals_file, orbitals_path)\n except shutil.SameFileError:\n pass\n self.timer = False\n break\n elif 'onedmin' in job_name:\n job = self.job_dict[label]['onedmin'][job_name]\n if not (job.job_id in self.server_job_ids and job.job_id not in self.completed_incore_jobs):\n successful_server_termination = self.end_job(job=job, label=label, job_name=job_name)\n if successful_server_termination:\n # Copy the lennard_jones file to the species output folder (TS's don't have L-J data).\n lj_output_path = os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat')\n if os.path.isfile(job.local_path_to_lj_file):\n try:\n shutil.copyfile(job.local_path_to_lj_file, lj_output_path)\n except shutil.SameFileError:\n pass\n self.output[label]['job_types']['onedmin'] = True\n self.species_dict[label].set_transport_data(\n lj_path=os.path.join(self.project_directory, 'output', 'Species', label,\n 'lennard_jones.dat'),\n opt_path=self.output[label]['paths']['geo'], bath_gas=job.bath_gas,\n opt_level=self.opt_level)\n self.timer = False\n break\n\n if not len(job_list):\n self.check_all_done(label)\n if not self.running_jobs[label]:\n # Delete the label only if it represents an empty entry.\n del self.running_jobs[label]\n\n if self.timer and len(job_list):\n time.sleep(30) # wait 30 sec before bugging the servers again.\n t = time.time() - self.report_time\n if t > 3600 and self.running_jobs:\n self.report_time = time.time()\n logger.info(f'Currently running jobs:\\n{pprint.pformat(self.running_jobs)}')\n\n # Generate a TS report:\n self.generate_final_ts_guess_report()",
"def execute_order(self, event):\n raise NotImplementedError(\"Should implement execute_order()\")",
"def putonqueue(self, nr, *args):\n self.outqueues[10-nr].put_nowait(args)\n self.tickqueue.put_nowait('go')",
"def execute_order(self, event):\n if isinstance(event, OrderEvent):\n signal = FillEvent(\n event.symbol,\n date.today(),\n event.quantity,\n event.direction,\n None\n )\n self.event_queue.put(signal)",
"def _run_tasks(self):\n next_tasks = self._job_queue.get_next_tasks()\n for task in next_tasks:\n sid = self._docker.start_task(task.identifier, task.image, task.name, task.args)\n self._job_queue.mark_task_started(task.identifier, task.name, sid)",
"def run_all(self, delay_seconds=0):\n logger.info('Running *all* %i jobs with %is delay inbetween',\n len(self.jobs), delay_seconds)\n for job in self.jobs[:]:\n job.run()\n time.sleep(delay_seconds)",
"def submit_jobs(commands, prefix):\r\n qiime_config = load_qiime_config()\r\n CLUSTER_JOBS_SCRIPT = qiime_config['cluster_jobs_fp']\r\n\r\n if not CLUSTER_JOBS_SCRIPT:\r\n raise ApplicationNotFoundError(\r\n \"cluster_jobs_fp not set in config file!\")\r\n if not (exists(CLUSTER_JOBS_SCRIPT) or which(CLUSTER_JOBS_SCRIPT)):\r\n raise ApplicationNotFoundError(\r\n \"cluster_jobs_fp not in $PATH or provided as full path!\")\r\n\r\n outfilename = join(get_qiime_temp_dir(), \"%s_commands.txt\" % prefix)\r\n fh = open(outfilename, \"w\")\r\n fh.write(\"\\n\".join(commands))\r\n fh.close()\r\n cmd = '%s -ms %s %s' % (CLUSTER_JOBS_SCRIPT, outfilename, prefix)\r\n system(cmd)\r\n remove(outfilename)",
"def equal_priority_execute_events(submit_equal_priority_jobs):\n jel = htcondor.JobEventLog(\"scheduler_priority-equal.log\")\n execute_events = []\n for event in jel.events(0):\n if event.type == htcondor.JobEventType.EXECUTE:\n execute_events.append(event)\n return execute_events",
"def run_jobs(delete_completed=False, ignore_errors=False, now=None):\n if ScheduledJob.objects.filter(status='running'):\n raise ValueError('jobs in progress found; aborting')\n if now is None:\n now = datetime.datetime.now()\n\n expire_jobs(now)\n schedule_sticky_jobs()\n start_scheduled_jobs(now, delete_completed, ignore_errors)",
"def test_unequal_priority_jobs_run_in_priority_order(\n self, unequal_priority_execute_events\n ):\n assert (\n sorted(\n unequal_priority_execute_events,\n key=lambda event: JobID.from_job_event(event),\n reverse=True,\n )\n == unequal_priority_execute_events\n )",
"def schedule(self, parcels, trucks, verbose=False):\n raise NotImplementedError",
"def _schedule(self, instruction_list, schedule_mode):\n if schedule_mode:\n scheduler = Scheduler(schedule_mode)\n scheduled_start_time = scheduler.schedule(instruction_list)\n time_ordered_pos = np.argsort(scheduled_start_time)\n instruction_list = [instruction_list[i] for i in time_ordered_pos]\n scheduled_start_time.sort()\n else: # no scheduling\n scheduled_start_time = [0.]\n for instruction in instruction_list[:-1]:\n scheduled_start_time.append(\n instruction.duration + scheduled_start_time[-1])\n return instruction_list, scheduled_start_time"
] | [
"0.64295745",
"0.5901927",
"0.58836645",
"0.5856689",
"0.5843526",
"0.5808002",
"0.574288",
"0.574288",
"0.5588306",
"0.554542",
"0.55356306",
"0.5499866",
"0.5479379",
"0.5449489",
"0.5427825",
"0.54005593",
"0.5362565",
"0.53619784",
"0.5353917",
"0.5340982",
"0.53276175",
"0.53189886",
"0.5312311",
"0.5311465",
"0.53108436",
"0.53028667",
"0.52979565",
"0.5294459",
"0.5275159",
"0.5268261"
] | 0.59230965 | 1 |
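A minimal usage sketch for the schedule_job_execution call in the row above, assuming a client object that exposes the method with the signature shown; the job identifiers are hypothetical.

    # Jobs are executed in the order of the list; 'TIME_NOW' (the default) starts them immediately.
    job_ids = ['JID_001234', 'JID_001235']  # hypothetical job IDs returned by earlier configuration calls
    client.schedule_job_execution(job_ids, start_time='TIME_NOW')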
Set the legacy, non-UEFI, boot protocol of a NIC. If successful, the pending value of the NIC's legacy boot protocol attribute is set. For the new value to be applied, a configuration job must be created and the node must be rebooted. | def set_nic_legacy_boot_protocol(self, nic_id, value):
return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'NONE')",
"def set_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'PXE')",
"def set_pending_boot_mode(self, boot_mode):\n boot_mode = boot_mode.lower()\n if boot_mode not in ['uefi', 'legacy']:\n msg = 'Invalid Boot mode specified'\n raise exception.IloInvalidInputError(msg)\n\n boot_properties = {'BootMode': boot_mode}\n\n if boot_mode == 'legacy':\n boot_properties['BootMode'] = 'LegacyBios'\n else:\n # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.\n boot_properties['UefiOptimizedBoot'] = \"Enabled\"\n\n # Change the Boot Mode\n self._change_bios_setting(boot_properties)",
"def get_nic_legacy_boot_protocol(self, nic_id):\n return self._nic_cfg.get_nic_legacy_boot_protocol(nic_id)",
"def set_protocol(cls, interface_name, proto='provision'): # pragma: no cover\n if proto not in cls.supported_proto:\n return\n try:\n ret = cls.get_logical_ifname(interface_name, proto)\n if not ret:\n return\n os.system('uci set network.%s.proto=%s' % (ret, proto))\n os.system('uci commit network')\n os.system('/etc/init.d/network reload')\n if proto == cls.supported_proto[1]:\n os.system('sysctl -w net.ipv6.conf.%s.autoconf=0' % interface_name)\n os.system('sysctl -w net.ipv6.conf.%s.use_tempaddr=2' % interface_name)\n cls.logger.debug(\"set %s[%s] DCHP protocol to %s\", interface_name, ret, proto)\n except OSError as e:\n cls.logger.error(\"Got exception:%s\" % str(e))",
"def set_boot_device(self, task, device, persistent=False):\n if (getattr(task.node, 'power_interface') == 'ipmitool'\n or task.node.driver_internal_info.get('irmc_ipmi_succeed')):\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified.\") % device)\n\n uefi_mode = (\n boot_mode_utils.get_boot_mode(task.node) == 'uefi')\n\n # disable 60 secs timer\n timeout_disable = \"0x00 0x08 0x03 0x08\"\n ipmitool.send_raw(task, timeout_disable)\n\n # note(naohirot):\n # Set System Boot Options : ipmi cmd '0x08', bootparam '0x05'\n #\n # $ ipmitool raw 0x00 0x08 0x05 data1 data2 0x00 0x00 0x00\n #\n # data1 : '0xe0' persistent + uefi\n # '0xc0' persistent + bios\n # '0xa0' next only + uefi\n # '0x80' next only + bios\n # data2 : boot device defined in the dict _BOOTPARAM5_DATA2\n\n bootparam5 = '0x00 0x08 0x05 %s %s 0x00 0x00 0x00'\n if persistent:\n data1 = '0xe0' if uefi_mode else '0xc0'\n else:\n data1 = '0xa0' if uefi_mode else '0x80'\n data2 = _BOOTPARAM5_DATA2[device]\n\n cmd8 = bootparam5 % (data1, data2)\n ipmitool.send_raw(task, cmd8)\n else:\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified. \"\n \"Current iRMC firmware condition doesn't support IPMI \"\n \"but Redfish.\") % device)\n super(ipmitool.IPMIManagement, self).set_boot_device(\n task, device, persistent)",
"def is_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_none(nic_id)",
"def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')",
"def _change_secure_boot_settings(self, property, value):\n system = self._get_host_details()\n # find the BIOS URI\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = (' \"SecureBoot\" resource or feature is not '\n 'supported on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # Change the property required\n new_secure_boot_settings = {}\n new_secure_boot_settings[property] = value\n\n # perform the patch\n status, headers, response = self._rest_patch(\n secure_boot_uri, None, new_secure_boot_settings)\n\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n\n # Change the bios setting as a workaround to enable secure boot\n # Can be removed when fixed for Gen9 snap2\n val = self._get_bios_setting('CustomPostMessage')\n val = val.rstrip() if val.endswith(\" \") else val+\" \"\n self._change_bios_setting({'CustomPostMessage': val})",
"def test_patch_bios_boot_mode(self):\n pass",
"def set_boot_mode(self, task, mode):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='set_boot_mode')",
"def _setBootable(self, bootable):\n if self.partedPartition:\n if arch.isS390():\n return\n if self.flagAvailable(parted.PARTITION_BOOT):\n if bootable:\n self.setFlag(parted.PARTITION_BOOT)\n else:\n self.unsetFlag(parted.PARTITION_BOOT)\n else:\n raise errors.DeviceError(\"boot flag not available for this partition\", self.name)\n\n self._bootable = bootable\n else:\n self.req_bootable = bootable",
"def is_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_pxe(nic_id)",
"def protocol_version_9():\n print('Setting protocol version to 9')\n upgrade('protocolversion', 'protocol_version', 9)",
"def _ApplyNicMods(self, idx, nic, params, private):\n changes = []\n\n for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:\n if key in params:\n changes.append((\"nic.%s/%d\" % (key, idx), params[key]))\n setattr(nic, key, params[key])\n\n new_net = params.get(constants.INIC_NETWORK, nic.network)\n new_net_uuid = self.cfg.LookupNetwork(new_net)\n if new_net_uuid != nic.network:\n changes.append((\"nic.network/%d\" % idx, new_net))\n nic.network = new_net_uuid\n\n if private.filled:\n nic.nicparams = private.filled\n\n for (key, val) in nic.nicparams.items():\n changes.append((\"nic.%s/%d\" % (key, idx), val))\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,\n constants.HOTPLUG_TARGET_NIC,\n nic, None, idx)\n changes.append((\"nic/%d\" % idx, msg))\n\n return changes",
"def test_update_bios_boot_mode(self):\n pass",
"def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()",
"def pxe_netboot(self, filename):\n new_port = {\n 'extra_dhcp_opts': [\n {'opt_name': 'bootfile-name', 'opt_value': 'http://192.0.2.240:8088/' + filename, 'ip_version': 4, },\n {'opt_name': 'tftp-server', 'opt_value': '192.0.2.240', 'ip_version': '4'},\n {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.240', 'ip_version': '4'}\n ]\n }\n self.neutron.update_port(self._provision_port_id, {'port': new_port})",
"def set_switch_config(self, config_flags, miss_send_len):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n self.logger.info(\"Setting config on switch \"\n \"dpid=%s to config_flags flag=%s and \"\n \"miss_send_len=%s bytes\",\n self.dpid, config_flags, miss_send_len)\n try:\n self.datapath.send_msg(parser.OFPSetConfig(\n self.datapath,\n config_flags,\n miss_send_len))\n except:\n #*** Log the error and return 0:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.logger.error(\"Failed to set switch config. \"\n \"Exception %s, %s, %s\",\n exc_type, exc_value, exc_traceback)\n return 0\n return 1",
"def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')",
"def set_802_3_ethernet(self, pardus_profile):\n\n if pardus_profile.connection_type == \"802-3-ethernet\":\n return _802_3_Ethernet(pardus_profile)\n else:\n return \"none\"",
"def set_boot_device(self, device, persistent=False):\n\n operation = \"set_boot_device\"\n try:\n self.sp_manager.create_boot_policy()\n self.sp_manager.set_boot_device(device)\n\n except UcsException as ex:\n raise exception.UcsOperationError(operation=operation, error=ex)",
"def set_ipaddress(modulo):\n\n print ('Configuring IP address...')\n\n modulo.write('AT+NETOPEN\\r\\n'.encode())\n\n if _valid_net(modulo): \n try:\n modulo.write('AT+IPADDR\\r\\n'.encode())\n time.sleep(0.1)\n except serial.SerialException:\n print ('... Whitout IP address, try again')\n if _valid_ip(modulo):\n print ('IP address configurated')\n else:\n print ('IP not configurated')\n else:\n print ('Net Already configurated')\n \n data = _read_line(modulo)\n return data",
"def set_http_boot_url(self, url):\n if(self._is_boot_mode_uefi() is True):\n self._change_bios_setting({'UefiShellStartupUrl': url})\n else:\n msg = 'set_http_boot_url is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def _update_persistent_boot(self, device_type=[], persistent=False,\n mac=None):\n tenure = 'Once'\n new_device = device_type[0]\n # If it is a standard device, we need to convert in RIS convention\n if device_type[0].upper() in DEVICE_COMMON_TO_RIS:\n new_device = DEVICE_COMMON_TO_RIS[device_type[0].upper()]\n\n if persistent:\n tenure = 'Continuous'\n\n systems_uri = \"/rest/v1/Systems/1\"\n # Need to set this option first if device is 'UefiTarget'\n if new_device is 'UefiTarget':\n if not mac:\n msg = ('Mac is needed for iscsi uefi boot')\n raise exception.IloInvalidInputError(msg)\n\n headers, bios_uri, bios_settings = self._check_bios_resource()\n # Get the Boot resource and Mappings resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n StructuredBootString = None\n\n for boot_setting in boot_settings['BootSources']:\n if(mac.upper() in boot_setting['UEFIDevicePath'] and\n 'iSCSI' in boot_setting['UEFIDevicePath']):\n StructuredBootString = boot_setting['StructuredBootString']\n break\n if not StructuredBootString:\n msg = ('MAC provided is Invalid \"%s\"' % mac)\n raise exception.IloInvalidInputError(msg)\n\n new_boot_settings = {}\n new_boot_settings['Boot'] = {'UefiTargetBootSourceOverride':\n StructuredBootString}\n status, headers, response = self._rest_patch(systems_uri, None,\n new_boot_settings)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n\n new_boot_settings = {}\n new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': tenure,\n 'BootSourceOverrideTarget': new_device}\n status, headers, response = self._rest_patch(systems_uri, None,\n new_boot_settings)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)",
"def test_update_firewall_rule_protocol(self):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.UpdateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--protocol', 'any'],\r\n {'protocol': None, })",
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def set_boot_order(profile_obj):\n status = True\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"### Testing the 'Boot Settings' session ###\")\n logger._log_to_console_and_log_file(\"- Select the 'Legacy BIOS' mode\")\n createprofile_elements = ProfileContainer(ProfileContainerType.ADD)\n __select_value_from_a_profile_combo_box(createprofile_elements.ID_COMBO_PROFILE_BOOT_MODE, createprofile_elements.ID_COMBO_PROFILE_BOOT_MODE_LIST % \"Legacy BIOS\")\n # Set invalid values\n logger._log_to_console_and_log_file(\"Testing using invalid values\")\n for profile in profile_obj:\n items = [[\"CD\", profile.cd], [\"USB\", profile.usb], [\"HardDisk\", profile.harddisk]]\n for data in items:\n ui_lib.wait_for_element_and_input_text(\"name=%s\" % data[0], data[1])\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_CREATE_SERVER_PROFILE_FORM)\n if data[0] == \"HardDisk\":\n data[0] = \"Hard Disk\"\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_BOOT_ORDER_POSITION % data[0], data[1], timeout=1):\n logger._log_to_console_and_log_file(\"- \" + \"'\" + data[0] + \"'\" + \" field was not cleared to the default value and persisted as '\" + str(data[1]) + \"'\")\n status = False\n else:\n logger._log_to_console_and_log_file(\"- \" + \"'\" + data[0] + \"'\" + \" field was correctly cleared to the default value\")\n return status",
"def set_type(self, nDevType):\n\t\tcall_sdk_function('PrlBootDev_SetType', self.handle, nDevType)",
"def set_secure_boot_mode(self, secure_boot_enable):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('SecureBootEnable',\n secure_boot_enable)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)"
] | [
"0.68213123",
"0.6571011",
"0.6390049",
"0.6221169",
"0.62201256",
"0.5706811",
"0.55207795",
"0.5489483",
"0.5489388",
"0.54601496",
"0.5429905",
"0.53446037",
"0.5271917",
"0.5240692",
"0.51808566",
"0.51247257",
"0.51149714",
"0.5068805",
"0.5055276",
"0.50475967",
"0.50206107",
"0.501777",
"0.50008357",
"0.4959942",
"0.49275893",
"0.4903041",
"0.48820028",
"0.48757467",
"0.48213995",
"0.4820358"
] | 0.79571086 | 0 |
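A hedged sketch of the workflow described in the row above: the call only stages a pending value, so a NIC configuration job has to be created and the node rebooted before the new protocol takes effect. The client object, the NIC id, and the create_nic_config_job helper are assumptions for illustration, not confirmed API.

    nic_id = 'NIC.Integrated.1-1-1'                      # hypothetical NIC identifier
    client.set_nic_legacy_boot_protocol(nic_id, 'PXE')   # stages the pending value only
    client.create_nic_config_job(nic_id, reboot=True)    # assumed helper: commit the pending settings and reboot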
Set the legacy, non-UEFI, boot protocol of a NIC to NONE. If successful, the pending value of the NIC's legacy boot protocol attribute is set. For the new value to be applied, a configuration job must be created and the node must be rebooted. | def set_nic_legacy_boot_protocol_none(self, nic_id):
return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'NONE') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_nic_legacy_boot_protocol(self, nic_id, value):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, value)",
"def is_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_none(nic_id)",
"def set_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'PXE')",
"def get_nic_legacy_boot_protocol(self, nic_id):\n return self._nic_cfg.get_nic_legacy_boot_protocol(nic_id)",
"def set_pending_boot_mode(self, boot_mode):\n boot_mode = boot_mode.lower()\n if boot_mode not in ['uefi', 'legacy']:\n msg = 'Invalid Boot mode specified'\n raise exception.IloInvalidInputError(msg)\n\n boot_properties = {'BootMode': boot_mode}\n\n if boot_mode == 'legacy':\n boot_properties['BootMode'] = 'LegacyBios'\n else:\n # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.\n boot_properties['UefiOptimizedBoot'] = \"Enabled\"\n\n # Change the Boot Mode\n self._change_bios_setting(boot_properties)",
"async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()",
"def ethernet_off(self):\n if not self.healthy:\n self.health_check()\n if not self._ethernet_switch:\n raise errors.CapabilityNotReadyError(\n device_name=self._device_name,\n msg=\"Not set up for ethernet switching.\")\n self._ethernet_switch.switch_power.power_off(self.ethernet_port_number)",
"def wifi_off(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE00\")\n time.sleep(100e-3)",
"def unset_iscsi_boot_info(self, mac):\n if(self._is_boot_mode_uefi() is True):\n iscsi_info = {'iSCSIBootEnable': 'Disabled'}\n self._change_iscsi_settings(mac.upper(), iscsi_info)\n else:\n msg = 'iscsi boot is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"def disable_switch_port(self, mgr, interface):\n confstr = snipp.CMD_NO_SWITCHPORT % (interface)\n confstr = self.create_xml_snippet(confstr)\n LOG.debug(\"NexusDriver: %s\" % confstr)\n mgr.edit_config(target='running', config=confstr)",
"def unconfigure_aaa_default_dot1x_methods(device):\n try:\n device.configure([\n \"no aaa authentication dot1x default\",\n \"no aaa authorization network default\",\n \"no aaa accounting dot1x default\",\n \"no aaa accounting network default\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure AAA dot1x default method'\n )",
"def unconfigure_aaa_login_method_none(device,servergrp):\n try:\n device.configure([\n \"line con 0\",\n \"no login authentication {servergrp}\".format(servergrp=servergrp),\n \"line vty 0 4\",\n \"no login authentication {servergrp}\".format(servergrp=servergrp),\n \"line vty 5 15\",\n \"no login authentication {servergrp}\".format(servergrp=servergrp),\n \"no aaa authentication login {servergrp} none\".format(servergrp=servergrp)\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure AAA login method none {servergrp}'.format(servergrp=servergrp)\n )",
"def test_patch_bios_boot_mode(self):\n pass",
"def abandon_pending_nic_changes(self, nic_id):\n self._job_mgmt.delete_pending_config(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id)",
"def setOff(self, command):\r\n self.setDriver('ST', 0)",
"def reset_config(modem, disable_auto_linking, monitor_mode, auto_led, deadman):\n modem.configuration[DISABLE_AUTO_LINKING].set_value(not disable_auto_linking)\n modem.configuration[MONITOR_MODE].set_value(not monitor_mode)\n modem.configuration[AUTO_LED].set_value(not auto_led)\n modem.configuration[DEADMAN].set_value(not deadman)",
"def reset_secure_boot_keys(self):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('ResetToDefaultKeys', True)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def set_protocol(cls, interface_name, proto='provision'): # pragma: no cover\n if proto not in cls.supported_proto:\n return\n try:\n ret = cls.get_logical_ifname(interface_name, proto)\n if not ret:\n return\n os.system('uci set network.%s.proto=%s' % (ret, proto))\n os.system('uci commit network')\n os.system('/etc/init.d/network reload')\n if proto == cls.supported_proto[1]:\n os.system('sysctl -w net.ipv6.conf.%s.autoconf=0' % interface_name)\n os.system('sysctl -w net.ipv6.conf.%s.use_tempaddr=2' % interface_name)\n cls.logger.debug(\"set %s[%s] DCHP protocol to %s\", interface_name, ret, proto)\n except OSError as e:\n cls.logger.error(\"Got exception:%s\" % str(e))",
"def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')",
"def is_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_pxe(nic_id)",
"def set_802_3_ethernet(self, pardus_profile):\n\n if pardus_profile.connection_type == \"802-3-ethernet\":\n return _802_3_Ethernet(pardus_profile)\n else:\n return \"none\"",
"def _nixie_disable():\n # type: () -> None\n GPIO.output(NIXIE_nOE, GPIO.HIGH)",
"def set_boot_device(self, task, device, persistent=False):\n if (getattr(task.node, 'power_interface') == 'ipmitool'\n or task.node.driver_internal_info.get('irmc_ipmi_succeed')):\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified.\") % device)\n\n uefi_mode = (\n boot_mode_utils.get_boot_mode(task.node) == 'uefi')\n\n # disable 60 secs timer\n timeout_disable = \"0x00 0x08 0x03 0x08\"\n ipmitool.send_raw(task, timeout_disable)\n\n # note(naohirot):\n # Set System Boot Options : ipmi cmd '0x08', bootparam '0x05'\n #\n # $ ipmitool raw 0x00 0x08 0x05 data1 data2 0x00 0x00 0x00\n #\n # data1 : '0xe0' persistent + uefi\n # '0xc0' persistent + bios\n # '0xa0' next only + uefi\n # '0x80' next only + bios\n # data2 : boot device defined in the dict _BOOTPARAM5_DATA2\n\n bootparam5 = '0x00 0x08 0x05 %s %s 0x00 0x00 0x00'\n if persistent:\n data1 = '0xe0' if uefi_mode else '0xc0'\n else:\n data1 = '0xa0' if uefi_mode else '0x80'\n data2 = _BOOTPARAM5_DATA2[device]\n\n cmd8 = bootparam5 % (data1, data2)\n ipmitool.send_raw(task, cmd8)\n else:\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified. \"\n \"Current iRMC firmware condition doesn't support IPMI \"\n \"but Redfish.\") % device)\n super(ipmitool.IPMIManagement, self).set_boot_device(\n task, device, persistent)",
"def reset(self,bootloader=False):\n self.send_packet('\\xff' if bootloader else '\\xfe')",
"def reset_protocol(self):\n #\n if self.debug > 1:\n print \"Resetting communication protocol...\"\n #\n if self.State != 1:\n print \"Can't reset protocol, reader not connected\"\n raise self.ErrorNotConnected(\"Can't reset reader\")\n else:\n if self.Device == \"ELM327\":\n self.ELM327_reset_protocol()\n else:\n raise self.ErrorReaderNotRecognized(\"Unknown OBD2 Reader device\")",
"def pibooth_reset(cfg, hard):",
"def UnsetWiredDefault(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile, \"default\"):\n if misc.to_bool(config.get(profile, \"default\")):\n config.set(profile, \"default\", False)\n config.write(open(self.wired_conf, \"w\"))\n self.SaveWiredNetworkProfile(profile)",
"def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret",
"def _ApplyNicMods(self, idx, nic, params, private):\n changes = []\n\n for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:\n if key in params:\n changes.append((\"nic.%s/%d\" % (key, idx), params[key]))\n setattr(nic, key, params[key])\n\n new_net = params.get(constants.INIC_NETWORK, nic.network)\n new_net_uuid = self.cfg.LookupNetwork(new_net)\n if new_net_uuid != nic.network:\n changes.append((\"nic.network/%d\" % idx, new_net))\n nic.network = new_net_uuid\n\n if private.filled:\n nic.nicparams = private.filled\n\n for (key, val) in nic.nicparams.items():\n changes.append((\"nic.%s/%d\" % (key, idx), val))\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,\n constants.HOTPLUG_TARGET_NIC,\n nic, None, idx)\n changes.append((\"nic/%d\" % idx, msg))\n\n return changes"
] | [
"0.7080708",
"0.67770797",
"0.6196294",
"0.5887492",
"0.5865701",
"0.57495344",
"0.5609847",
"0.55483353",
"0.5510577",
"0.5472218",
"0.54610366",
"0.5409589",
"0.5379189",
"0.5184566",
"0.51787657",
"0.5177618",
"0.5174813",
"0.5163825",
"0.5162536",
"0.51537764",
"0.5110679",
"0.50925523",
"0.5067691",
"0.5055314",
"0.50446934",
"0.50369054",
"0.50341344",
"0.5015084",
"0.49798974",
"0.49498925"
] | 0.8150516 | 0 |
Set the legacy, non-UEFI, boot protocol of a NIC to PXE. If successful, the pending value of the NIC's legacy boot protocol attribute is set. For the new value to be applied, a configuration job must be created and the node must be rebooted. | def set_nic_legacy_boot_protocol_pxe(self, nic_id):
return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'PXE') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_nic_legacy_boot_protocol(self, nic_id, value):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, value)",
"def set_pending_boot_mode(self, boot_mode):\n boot_mode = boot_mode.lower()\n if boot_mode not in ['uefi', 'legacy']:\n msg = 'Invalid Boot mode specified'\n raise exception.IloInvalidInputError(msg)\n\n boot_properties = {'BootMode': boot_mode}\n\n if boot_mode == 'legacy':\n boot_properties['BootMode'] = 'LegacyBios'\n else:\n # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.\n boot_properties['UefiOptimizedBoot'] = \"Enabled\"\n\n # Change the Boot Mode\n self._change_bios_setting(boot_properties)",
"def set_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'NONE')",
"def is_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_pxe(nic_id)",
"def pxe_netboot(self, filename):\n new_port = {\n 'extra_dhcp_opts': [\n {'opt_name': 'bootfile-name', 'opt_value': 'http://192.0.2.240:8088/' + filename, 'ip_version': 4, },\n {'opt_name': 'tftp-server', 'opt_value': '192.0.2.240', 'ip_version': '4'},\n {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.240', 'ip_version': '4'}\n ]\n }\n self.neutron.update_port(self._provision_port_id, {'port': new_port})",
"def set_protocol(cls, interface_name, proto='provision'): # pragma: no cover\n if proto not in cls.supported_proto:\n return\n try:\n ret = cls.get_logical_ifname(interface_name, proto)\n if not ret:\n return\n os.system('uci set network.%s.proto=%s' % (ret, proto))\n os.system('uci commit network')\n os.system('/etc/init.d/network reload')\n if proto == cls.supported_proto[1]:\n os.system('sysctl -w net.ipv6.conf.%s.autoconf=0' % interface_name)\n os.system('sysctl -w net.ipv6.conf.%s.use_tempaddr=2' % interface_name)\n cls.logger.debug(\"set %s[%s] DCHP protocol to %s\", interface_name, ret, proto)\n except OSError as e:\n cls.logger.error(\"Got exception:%s\" % str(e))",
"def set_boot_device(self, task, device, persistent=False):\n if (getattr(task.node, 'power_interface') == 'ipmitool'\n or task.node.driver_internal_info.get('irmc_ipmi_succeed')):\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified.\") % device)\n\n uefi_mode = (\n boot_mode_utils.get_boot_mode(task.node) == 'uefi')\n\n # disable 60 secs timer\n timeout_disable = \"0x00 0x08 0x03 0x08\"\n ipmitool.send_raw(task, timeout_disable)\n\n # note(naohirot):\n # Set System Boot Options : ipmi cmd '0x08', bootparam '0x05'\n #\n # $ ipmitool raw 0x00 0x08 0x05 data1 data2 0x00 0x00 0x00\n #\n # data1 : '0xe0' persistent + uefi\n # '0xc0' persistent + bios\n # '0xa0' next only + uefi\n # '0x80' next only + bios\n # data2 : boot device defined in the dict _BOOTPARAM5_DATA2\n\n bootparam5 = '0x00 0x08 0x05 %s %s 0x00 0x00 0x00'\n if persistent:\n data1 = '0xe0' if uefi_mode else '0xc0'\n else:\n data1 = '0xa0' if uefi_mode else '0x80'\n data2 = _BOOTPARAM5_DATA2[device]\n\n cmd8 = bootparam5 % (data1, data2)\n ipmitool.send_raw(task, cmd8)\n else:\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified. \"\n \"Current iRMC firmware condition doesn't support IPMI \"\n \"but Redfish.\") % device)\n super(ipmitool.IPMIManagement, self).set_boot_device(\n task, device, persistent)",
"def set_ipaddress(modulo):\n\n print ('Configuring IP address...')\n\n modulo.write('AT+NETOPEN\\r\\n'.encode())\n\n if _valid_net(modulo): \n try:\n modulo.write('AT+IPADDR\\r\\n'.encode())\n time.sleep(0.1)\n except serial.SerialException:\n print ('... Whitout IP address, try again')\n if _valid_ip(modulo):\n print ('IP address configurated')\n else:\n print ('IP not configurated')\n else:\n print ('Net Already configurated')\n \n data = _read_line(modulo)\n return data",
"def test_patch_bios_boot_mode(self):\n pass",
"def get_nic_legacy_boot_protocol(self, nic_id):\n return self._nic_cfg.get_nic_legacy_boot_protocol(nic_id)",
"def set_802_3_ethernet(self, pardus_profile):\n\n if pardus_profile.connection_type == \"802-3-ethernet\":\n return _802_3_Ethernet(pardus_profile)\n else:\n return \"none\"",
"def pxe_netboot(self, filename='boot.ipxe'):\n for bm_node in self.nodes:\n bm_node.pxe_netboot(filename)",
"def test_patch_pci_switch(self):\n pass",
"def set_boot_mode(self, task, mode):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='set_boot_mode')",
"def _change_secure_boot_settings(self, property, value):\n system = self._get_host_details()\n # find the BIOS URI\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = (' \"SecureBoot\" resource or feature is not '\n 'supported on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # Change the property required\n new_secure_boot_settings = {}\n new_secure_boot_settings[property] = value\n\n # perform the patch\n status, headers, response = self._rest_patch(\n secure_boot_uri, None, new_secure_boot_settings)\n\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n\n # Change the bios setting as a workaround to enable secure boot\n # Can be removed when fixed for Gen9 snap2\n val = self._get_bios_setting('CustomPostMessage')\n val = val.rstrip() if val.endswith(\" \") else val+\" \"\n self._change_bios_setting({'CustomPostMessage': val})",
"def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')",
"def _setBootable(self, bootable):\n if self.partedPartition:\n if arch.isS390():\n return\n if self.flagAvailable(parted.PARTITION_BOOT):\n if bootable:\n self.setFlag(parted.PARTITION_BOOT)\n else:\n self.unsetFlag(parted.PARTITION_BOOT)\n else:\n raise errors.DeviceError(\"boot flag not available for this partition\", self.name)\n\n self._bootable = bootable\n else:\n self.req_bootable = bootable",
"def protocol_version_9():\n print('Setting protocol version to 9')\n upgrade('protocolversion', 'protocol_version', 9)",
"def test_update_bios_boot_mode(self):\n pass",
"def is_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_none(nic_id)",
"def _get_physnet_patch(self, physnet, port):\n if (not CONF.processing.overwrite_existing\n or port.physical_network == physnet):\n return\n return {'op': 'add', 'path': '/physical_network', 'value': physnet}",
"def set_boot_device(self, device, persistent=False):\n\n operation = \"set_boot_device\"\n try:\n self.sp_manager.create_boot_policy()\n self.sp_manager.set_boot_device(device)\n\n except UcsException as ex:\n raise exception.UcsOperationError(operation=operation, error=ex)",
"def test_update_pci_switch(self):\n pass",
"def activate(self, ext_ip, ext_port):\n self.sql_manager.port_update(self.id, external_ip=ext_ip, external_port=ext_port)\n self.external_port = ext_port\n self.external_ip = ext_ip",
"def modif_network(self):\n print \"preparation du fichier network interfaces\"\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp %s/etc/sysconfig/network_scripts/ifcfg-eth0 %s/etc/sysconfig/network_scripts/ifcfg-eth0.pre.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n else:\n self.exec_cmd(\"cp %s/etc/network/interfaces %s/etc/network/interfaces.post.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"cp %s/etc/network/interfaces.pre.p2v %s/etc/network/interfaces\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))",
"def run_protocol(self, device, command, *argv, **kwarg):\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n ############# Implement me ################\n if command == \"start_protocols\":\n device.applog.info(\"Starting All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StartAllProtocols(Arg1=\"sync\")\n time.sleep(15)\n for ep in IxnetworkIxiaClientImpl.ip_eps:\n device.applog.info(\"Sending ARP on \" + ep.Name)\n ep.Start()\n ep.SendArp()\n time.sleep(5)\n device.applog.info(\"Generating Traffic\")\n for ti in IxnetworkIxiaClientImpl.tis:\n ti.Generate()\n device.applog.info(\"Applying Traffic\")\n IxnetworkIxiaClientImpl.ixnet.Traffic.Apply()\n elif command == \"stop_protocols\":\n device.applog.info(\"Stopping All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StopAllProtocols(Arg1=\"sync\")\n elif command == \"set_protocol\":\n params = kwarg[\"params\"]\n param = params[0]\n for ep in IxnetworkIxiaClientImpl.bgp_eps:\n if \"bgp_peer\" in param and param[\"bgp_peer\"] != ep.Name:\n continue\n enable = param[\"enable\"]\n IxnetworkIxiaClientImpl.bgp_eps\n ep.Active.Single(enable)\n IxnetworkIxiaClientImpl.ixnet.Globals.Topology.ApplyOnTheFly()\n return 0, \"\"",
"def test_iosxr_netconf_edit_config(nornir, iosxr_config_payload):\n nr = nornir.filter(name=DEVICE_NAME)\n result = nr.run(netconf_edit_config, config=iosxr_config_payload, target=\"candidate\", xmldict=True)\n assert not result[DEVICE_NAME].result[\"errors\"]\n assert result[DEVICE_NAME].result[\"ok\"]\n\n # print_result(result)\n\n # Commit Config\n result = nr.run(netconf_commit, xmldict=True)\n assert result[DEVICE_NAME].result[\"ok\"]\n print_result(result)",
"def _update_persistent_boot(self, device_type=[], persistent=False,\n mac=None):\n tenure = 'Once'\n new_device = device_type[0]\n # If it is a standard device, we need to convert in RIS convention\n if device_type[0].upper() in DEVICE_COMMON_TO_RIS:\n new_device = DEVICE_COMMON_TO_RIS[device_type[0].upper()]\n\n if persistent:\n tenure = 'Continuous'\n\n systems_uri = \"/rest/v1/Systems/1\"\n # Need to set this option first if device is 'UefiTarget'\n if new_device is 'UefiTarget':\n if not mac:\n msg = ('Mac is needed for iscsi uefi boot')\n raise exception.IloInvalidInputError(msg)\n\n headers, bios_uri, bios_settings = self._check_bios_resource()\n # Get the Boot resource and Mappings resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n StructuredBootString = None\n\n for boot_setting in boot_settings['BootSources']:\n if(mac.upper() in boot_setting['UEFIDevicePath'] and\n 'iSCSI' in boot_setting['UEFIDevicePath']):\n StructuredBootString = boot_setting['StructuredBootString']\n break\n if not StructuredBootString:\n msg = ('MAC provided is Invalid \"%s\"' % mac)\n raise exception.IloInvalidInputError(msg)\n\n new_boot_settings = {}\n new_boot_settings['Boot'] = {'UefiTargetBootSourceOverride':\n StructuredBootString}\n status, headers, response = self._rest_patch(systems_uri, None,\n new_boot_settings)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n\n new_boot_settings = {}\n new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': tenure,\n 'BootSourceOverrideTarget': new_device}\n status, headers, response = self._rest_patch(systems_uri, None,\n new_boot_settings)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)",
"def set_vm_status(self, device='FLOPPY',\n boot_option='BOOT_ONCE', write_protect='YES'):\n # CONNECT is a RIBCL call. There is no such property to set in RIS.\n if boot_option == 'CONNECT':\n return\n\n boot_option_map = {'BOOT_ONCE': True,\n 'BOOT_ALWAYS': False,\n 'NO_BOOT': False\n }\n\n if boot_option not in boot_option_map:\n msg = ('Virtualmedia boot option \"' + boot_option + '\" is '\n 'invalid.')\n raise exception.IloInvalidInputError(msg)\n\n response, vm_device_uri = self._get_vm_device_status(device)\n\n # Update required property\n vm_settings = {}\n vm_settings['Oem'] = (\n {'Hp': {'BootOnNextServerReset': boot_option_map[boot_option]}})\n\n # perform the patch operation\n status, headers, response = self._rest_patch(\n vm_device_uri, None, vm_settings)\n\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)",
"def test_patch_pci_device(self):\n pass"
] | [
"0.6869364",
"0.59294105",
"0.5910383",
"0.581359",
"0.5732103",
"0.5631665",
"0.56119615",
"0.54020804",
"0.5247397",
"0.5175284",
"0.5159106",
"0.5112706",
"0.5091555",
"0.50698566",
"0.5003087",
"0.49630225",
"0.4952853",
"0.49507043",
"0.49433762",
"0.48527044",
"0.48416305",
"0.4840797",
"0.48376152",
"0.4829113",
"0.4825338",
"0.47973937",
"0.47789574",
"0.47743535",
"0.47720575",
"0.47220695"
] | 0.7358659 | 0 |
Modify a setting of a NIC. If successful, the pending value of the attribute is set. For the new value to be applied, a configuration job must be created and the node must be rebooted. | def set_nic_setting(self, nic_id, attribute_name, value):
return self._nic_cfg.set_nic_setting(nic_id, attribute_name, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)",
"def _ApplyNicMods(self, idx, nic, params, private):\n changes = []\n\n for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:\n if key in params:\n changes.append((\"nic.%s/%d\" % (key, idx), params[key]))\n setattr(nic, key, params[key])\n\n new_net = params.get(constants.INIC_NETWORK, nic.network)\n new_net_uuid = self.cfg.LookupNetwork(new_net)\n if new_net_uuid != nic.network:\n changes.append((\"nic.network/%d\" % idx, new_net))\n nic.network = new_net_uuid\n\n if private.filled:\n nic.nicparams = private.filled\n\n for (key, val) in nic.nicparams.items():\n changes.append((\"nic.%s/%d\" % (key, idx), val))\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,\n constants.HOTPLUG_TARGET_NIC,\n nic, None, idx)\n changes.append((\"nic/%d\" % idx, msg))\n\n return changes",
"def set_nic_settings(self, nic_id, settings):\n return self._nic_cfg.set_nic_settings(nic_id, settings)",
"def modify_network_interface_attribute(\n name=None,\n network_interface_id=None,\n attr=None,\n value=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not (name or network_interface_id):\n raise SaltInvocationError(\n \"Either name or network_interface_id must be provided.\"\n )\n if attr is None and value is None:\n raise SaltInvocationError(\"attr and value must be provided.\")\n r = {}\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n result = _get_network_interface(conn, name, network_interface_id)\n if \"error\" in result:\n return result\n eni = result[\"result\"]\n info = _describe_network_interface(eni)\n network_interface_id = info[\"id\"]\n # munge attr into what the API requires\n if attr == \"groups\":\n _attr = \"groupSet\"\n elif attr == \"source_dest_check\":\n _attr = \"sourceDestCheck\"\n elif attr == \"delete_on_termination\":\n _attr = \"deleteOnTermination\"\n else:\n _attr = attr\n _value = value\n if info.get(\"vpc_id\") and _attr == \"groupSet\":\n _value = __salt__[\"boto_secgroup.convert_to_group_ids\"](\n value,\n vpc_id=info.get(\"vpc_id\"),\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not _value:\n r[\"error\"] = {\n \"message\": \"Security groups do not map to valid security group ids\"\n }\n return r\n _attachment_id = None\n if _attr == \"deleteOnTermination\":\n try:\n _attachment_id = info[\"attachment\"][\"id\"]\n except KeyError:\n r[\"error\"] = {\n \"message\": (\n \"No attachment id found for this ENI. The ENI must\"\n \" be attached before delete_on_termination can be\"\n \" modified\"\n )\n }\n return r\n try:\n r[\"result\"] = conn.modify_network_interface_attribute(\n network_interface_id, _attr, _value, attachment_id=_attachment_id\n )\n except boto.exception.EC2ResponseError as e:\n r[\"error\"] = __utils__[\"boto.get_error\"](e)\n return r",
"def put():\n json_data = request.get_json()\n\n # validate request\n try:\n schema, resolver = ConfigStore.load_json_schema('modify_rdt_iface.json')\n jsonschema.validate(json_data, schema, resolver=resolver)\n except (jsonschema.ValidationError, OverflowError) as error:\n raise BadRequest(\"Request validation failed - %s\" % (str(error)))\n\n if not json_data['interface'] in common.PQOS_API.supported_iface():\n raise BadRequest(\"RDT interface '%s' not supported!\" % (json_data['interface']))\n\n if common.CONFIG_STORE.is_any_pool_defined():\n return {'message': \"Please remove all Pools first!\"}, 409\n\n data = deepcopy(common.CONFIG_STORE.get_config())\n\n if 'rdt_iface' not in data:\n data['rdt_iface'] = {}\n\n data['rdt_iface']['interface'] = json_data['interface']\n CapsMbaCtrl.set_mba_ctrl_enabled(data, False)\n\n common.CONFIG_STORE.set_config(data)\n\n res = {'message': \"RDT Interface modified\"}\n return res, 200",
"def modify_attribute(self, attribute, value):\r\n return self.connection.modify_instance_attribute(self.id, attribute,\r\n value)",
"def set_attr(self, server, attribute, value):\n\t\tattribute = str(attribute)\n\t\tserver = valid_server(server)\n\t\treturn self._update_server_cfg(server, get_dict(attribute, value))",
"def change_setting(self, key, val):\n if isinstance(val, bool):\n payload = 'on' if val else 'off'\n else:\n payload = val\n return self._request('post',\n 'fifo_command.php?cmd={}%20{}'.format(key,\n payload))",
"def setIP( self, intf, ip, prefixLen=8 ):\n ipSub = '%s/%d' % ( ip, prefixLen )\n result = self.cmd( 'ifconfig', intf, ipSub, 'up' )\n self.ips[ intf ] = ip\n return result",
"def set_ipaddress(modulo):\n\n print ('Configuring IP address...')\n\n modulo.write('AT+NETOPEN\\r\\n'.encode())\n\n if _valid_net(modulo): \n try:\n modulo.write('AT+IPADDR\\r\\n'.encode())\n time.sleep(0.1)\n except serial.SerialException:\n print ('... Whitout IP address, try again')\n if _valid_ip(modulo):\n print ('IP address configurated')\n else:\n print ('IP not configurated')\n else:\n print ('Net Already configurated')\n \n data = _read_line(modulo)\n return data",
"def update_neutron_advanced_configuration(self, option, value):\n attributes = self.nailgun_client.get_cluster_attributes(\n self.cluster_id)\n nac_subdict = attributes['editable']['neutron_advanced_configuration']\n nac_subdict[option]['value'] = value\n self.nailgun_client.update_cluster_attributes(\n self.cluster_id, attributes)",
"def _platformix_set(self, context, fake_reply, prop, value):\r\n if hasattr(self.host, prop):\r\n if not callable(getattr(self.host, prop)):\r\n try:\r\n setattr(self.host, prop, value)\r\n except Exception as e:\r\n eprint(\"Platformix protocol: failed to set attribute {} of {} to value {} \"\r\n \"due to exception {}\".format(prop, self.host.name, value, e))\r\n exprint()\r\n self._reply(context, proto_failure(\r\n \"Failed to set attribute {} of {} to value {} \"\r\n \"due to exception {}\".format(prop, self.host.name, value, e)), fake_reply)\r\n return\r\n self._reply(context, proto_success(getattr(self.host, prop), prop), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Attribute {} of {} is a method\".format(\r\n prop, self.host.name)), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Property {} not found on {}\".format(prop, self.host.name)), fake_reply)",
"def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)",
"async def set_bit(self, instance, value):\n print(f\"Server: {'set_bit'} Got 'put' request from outside: new value is {value} and type {type(value)}\")\n if self.device is not None:\n self.device.set_bit_server(value)\n else:\n print('device is None')",
"async def changesetting(self, ctx:commands.Context, setting, new_value: int):\r\n\r\n settings = await self.config.guild(ctx.guild).settings()\r\n\r\n if not settings.get(setting):\r\n await ctx.send(f'{setting} is not a valid setting\\n```Valid settings:\\nfishing_delay: Length (seconds) between casts from a user\\nbucket_display_length: Amount of fish to display per page with /bucket```')\r\n return\r\n\r\n settings[setting] = new_value\r\n await self.config.guild(ctx.guild).settings.set(settings)\r\n await ctx.send(f'{setting} updated')",
"def set_attribute(self, attribute, value):\r\n return self.connection.set_queue_attribute(self, attribute, value)",
"def set_network(self, addr, netmask, value):\n\n if len(addr) == 4:\n ipset.ipmap_ipv4_set_network(self.map, addr, netmask, value)\n return\n\n elif len(addr) == 16:\n ipset.ipmap_ipv6_set_network(self.map, addr, netmask, value)\n return\n\n else:\n raise ValueError(\"Invalid address\")",
"def commit_pending_nic_changes(self, nic_id, reboot=False):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot)",
"def device_set_property_int(pnd, property, value):\n return _nfc.device_set_property_int(pnd, property, value)",
"def set_config(self, attr, value):\n setattr(self.config, attr, value)",
"def set_config(self, attr, value):\n setattr(self.config, attr, value)",
"def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))",
"def setNetwork(self, network):\n # type: (str)->None\n\n self._validator.validate_one(\n 'network', VALID_OPTS['network'], network)\n self._ifAttributes['network'] = network",
"def set_static_conn(nic, ip_addr, subnet_mask, default_gateway, dns_servers):\n if isinstance(ip_addr, str):\n ip_addr = [ip_addr,]\n if isinstance(subnet_mask, str):\n subnet_mask = [subnet_mask,]\n if isinstance(default_gateway, str):\n default_gateway = [default_gateway, ]\n\n # set defult gateway. return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.SetGateways(default_gateway)\n print 'Default Gateway updated (status %d)' % ret\n\n # Set IP adrress & subnet mask. return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.EnableStatic(IPAddress=ip_addr, SubnetMask=subnet_mask)\n print 'IP Address / Subnet Mask updated (status %d)' % ret\n\n # set dns servers\n if dns_servers:\n #assert 0 == nic.EnableDNS(DNSServerSearchOrder=dns_servers)\n # or \n ret = nic.SetDNSServerSearchOrder(dns_servers)\n print 'DNS Server updated (status %d)' % ret",
"def put():\n json_data = request.get_json()\n\n # validate request\n try:\n schema, resolver = ConfigStore.load_json_schema('modify_mba_ctrl.json')\n jsonschema.validate(json_data, schema, resolver=resolver)\n except (jsonschema.ValidationError, OverflowError) as error:\n raise BadRequest(\"Request validation failed - %s\" % (str(error)))\n\n if not caps.mba_bw_supported():\n return {'message': \"MBA CTRL not supported!\"}, 409\n\n if common.CONFIG_STORE.is_any_pool_defined():\n return {'message': \"Please remove all Pools first!\"}, 409\n\n data = deepcopy(common.CONFIG_STORE.get_config())\n\n CapsMbaCtrl.set_mba_ctrl_enabled(data, json_data['enabled'])\n\n common.CONFIG_STORE.set_config(data)\n\n return {'message': \"MBA CTRL status changed.\"}, 200",
"def setAttr(*args, alteredValue: bool=True, caching: bool=True, capacityHint: int=0,\n channelBox: bool=True, clamp: bool=True, keyable: bool=True, lock: bool=True, size:\n int=0, type: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def seti(self, node, new_int):\n\n self.daq.syncSetInt(f'/{self.device_id}/{node}', new_int)",
"def write_pin(self, attr):\n \n self.logging.debug(\"Setting \" + attr.label + \" to \" + str(attr.value) + \" on pin \" + str(attr.io_pin))\n GPIO.output(attr.io_pin, attr.value)",
"def set_permitted_ip(address=None, deploy=False):\n\n if not address:\n raise CommandExecutionError(\"Address option must not be empty.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip\",\n \"element\": \"<entry name='{}'></entry>\".format(address),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret",
"def SetWirelessInterface(self, interface):\n print \"setting wireless interface %s\" % (str(interface))\n self.wifi.wireless_interface = noneToBlankString(interface)\n self.wired.wireless_interface = noneToBlankString(interface)\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n config.set(\"Settings\",\"wireless_interface\", interface)\n configfile = open(self.app_conf, \"w\")\n config.write(configfile)"
] | [
"0.602123",
"0.58192426",
"0.5802508",
"0.5783846",
"0.5681156",
"0.56103724",
"0.5568371",
"0.55253685",
"0.5505217",
"0.5504358",
"0.54779464",
"0.54318684",
"0.5402808",
"0.539177",
"0.53857595",
"0.5385296",
"0.53848606",
"0.53735757",
"0.53488266",
"0.53465766",
"0.53465766",
"0.53264153",
"0.53262866",
"0.53115386",
"0.5293917",
"0.5291782",
"0.52868354",
"0.526775",
"0.5261677",
"0.52587754"
] | 0.7097443 | 0 |
Modify one or more settings of a NIC. If successful, the pending values of the attributes are set. For the new values to be applied, a configuration job must be created and the node must be rebooted. | def set_nic_settings(self, nic_id, settings):
return self._nic_cfg.set_nic_settings(nic_id, settings) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_nic_setting(self, nic_id, attribute_name, value):\n return self._nic_cfg.set_nic_setting(nic_id, attribute_name, value)",
"def _ApplyNicMods(self, idx, nic, params, private):\n changes = []\n\n for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:\n if key in params:\n changes.append((\"nic.%s/%d\" % (key, idx), params[key]))\n setattr(nic, key, params[key])\n\n new_net = params.get(constants.INIC_NETWORK, nic.network)\n new_net_uuid = self.cfg.LookupNetwork(new_net)\n if new_net_uuid != nic.network:\n changes.append((\"nic.network/%d\" % idx, new_net))\n nic.network = new_net_uuid\n\n if private.filled:\n nic.nicparams = private.filled\n\n for (key, val) in nic.nicparams.items():\n changes.append((\"nic.%s/%d\" % (key, idx), val))\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,\n constants.HOTPLUG_TARGET_NIC,\n nic, None, idx)\n changes.append((\"nic/%d\" % idx, msg))\n\n return changes",
"def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)",
"def put():\n json_data = request.get_json()\n\n # validate request\n try:\n schema, resolver = ConfigStore.load_json_schema('modify_rdt_iface.json')\n jsonschema.validate(json_data, schema, resolver=resolver)\n except (jsonschema.ValidationError, OverflowError) as error:\n raise BadRequest(\"Request validation failed - %s\" % (str(error)))\n\n if not json_data['interface'] in common.PQOS_API.supported_iface():\n raise BadRequest(\"RDT interface '%s' not supported!\" % (json_data['interface']))\n\n if common.CONFIG_STORE.is_any_pool_defined():\n return {'message': \"Please remove all Pools first!\"}, 409\n\n data = deepcopy(common.CONFIG_STORE.get_config())\n\n if 'rdt_iface' not in data:\n data['rdt_iface'] = {}\n\n data['rdt_iface']['interface'] = json_data['interface']\n CapsMbaCtrl.set_mba_ctrl_enabled(data, False)\n\n common.CONFIG_STORE.set_config(data)\n\n res = {'message': \"RDT Interface modified\"}\n return res, 200",
"def modify_network_interface_attribute(\n name=None,\n network_interface_id=None,\n attr=None,\n value=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not (name or network_interface_id):\n raise SaltInvocationError(\n \"Either name or network_interface_id must be provided.\"\n )\n if attr is None and value is None:\n raise SaltInvocationError(\"attr and value must be provided.\")\n r = {}\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n result = _get_network_interface(conn, name, network_interface_id)\n if \"error\" in result:\n return result\n eni = result[\"result\"]\n info = _describe_network_interface(eni)\n network_interface_id = info[\"id\"]\n # munge attr into what the API requires\n if attr == \"groups\":\n _attr = \"groupSet\"\n elif attr == \"source_dest_check\":\n _attr = \"sourceDestCheck\"\n elif attr == \"delete_on_termination\":\n _attr = \"deleteOnTermination\"\n else:\n _attr = attr\n _value = value\n if info.get(\"vpc_id\") and _attr == \"groupSet\":\n _value = __salt__[\"boto_secgroup.convert_to_group_ids\"](\n value,\n vpc_id=info.get(\"vpc_id\"),\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not _value:\n r[\"error\"] = {\n \"message\": \"Security groups do not map to valid security group ids\"\n }\n return r\n _attachment_id = None\n if _attr == \"deleteOnTermination\":\n try:\n _attachment_id = info[\"attachment\"][\"id\"]\n except KeyError:\n r[\"error\"] = {\n \"message\": (\n \"No attachment id found for this ENI. The ENI must\"\n \" be attached before delete_on_termination can be\"\n \" modified\"\n )\n }\n return r\n try:\n r[\"result\"] = conn.modify_network_interface_attribute(\n network_interface_id, _attr, _value, attachment_id=_attachment_id\n )\n except boto.exception.EC2ResponseError as e:\n r[\"error\"] = __utils__[\"boto.get_error\"](e)\n return r",
"def edit_config(self, parameter_type, parameter_value):\n\n assert isinstance(parameter_value, list), \"Parameter Value needs to be a list\"\n\n def change_interface_name():\n\n parameter_dictionary = {'a': 'config',\n parameter_type: [netconf_server_namespace, {parameter_value[0]:parameter_value[1]}]}\n xml, tags = dictToXML(parameter_dictionary, [root_namespace, netconf_server_namespace])\n config_data = wrap_tags(xml, tags)\n\n\n try:\n\n with manager.connect(host=netconf_server_ip,\n port=int(netconf_server_port),\n username=netconf_server_username,\n password=netconf_server_password) as m:\n\n assert(\":validate\" in m.server_capabilities)\n m.edit_config(target='running', config=config_data)\n return m.get_config(source='running').data_xml\n\n except:\n return \"Can not establish connection with the server, something went wrong\"\n\n\n def set_experimenter():\n parameter_dictionary = {'a': 'config',\n parameter_type: [netconf_server_namespace, {parameter_type[0]: parameter_value[1]}]}\n xml, tags = dictToXML(parameter_dictionary, [root_namespace, netconf_server_namespace])\n config_data = wrap_tags(xml, tags)\n\n try:\n with manager.connect(host=netconf_server_ip,\n port=int(netconf_server_port),\n username= netconf_server_username,\n password=netconf_server_password) as m:\n\n assert(\":validate\" in m.server_capabilities)\n m.edit_config(target='running', config=config_data)\n return m.get_config(source='running').data_xml\n except:\n return \"Can not establish connection with the server, something went wrong\"\n\n functions = {'change': change_interface_name,\n 'experimenter': set_experimenter}\n\n if parameter_type in ['interface', 'interfaces']:\n return functions['change']()\n\n if parameter_type in ['experimenter', 'experiment', 'properties']:\n return functions['experimenter']()",
"def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)",
"def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)",
"def apply_network_settings(**settings):\n if 'require_reboot' not in settings:\n settings['require_reboot'] = False\n if 'apply_hostname' not in settings:\n settings['apply_hostname'] = False\n\n hostname_res = True\n if settings['apply_hostname'] in _CONFIG_TRUE:\n if 'hostname' in settings:\n hostname_res = __salt__['network.mod_hostname'](settings['hostname'])\n else:\n log.warning(\n 'The network state sls is trying to apply hostname '\n 'changes but no hostname is defined.'\n )\n hostname_res = False\n\n res = True\n if settings['require_reboot'] in _CONFIG_TRUE:\n log.warning(\n 'The network state sls is requiring a reboot of the system to '\n 'properly apply network configuration.'\n )\n res = True\n else:\n res = __salt__['cmd.run']('/etc/netstart restart')\n\n return hostname_res and res",
"def updateNetworkSwitchSettings(self, networkId: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['switch', 'configure', 'settings'],\n 'operation': 'updateNetworkSwitchSettings',\n }\n resource = f'/networks/{networkId}/switch/settings'\n\n body_params = ['vlan', 'useCombinedPower', 'powerExceptions']\n payload = {k: v for (k, v) in kwargs.items() if k in body_params}\n\n return self._session.put(metadata, resource, payload)",
"def test_iosxr_netconf_edit_config(nornir, iosxr_config_payload):\n nr = nornir.filter(name=DEVICE_NAME)\n result = nr.run(netconf_edit_config, config=iosxr_config_payload, target=\"candidate\", xmldict=True)\n assert not result[DEVICE_NAME].result[\"errors\"]\n assert result[DEVICE_NAME].result[\"ok\"]\n\n # print_result(result)\n\n # Commit Config\n result = nr.run(netconf_commit, xmldict=True)\n assert result[DEVICE_NAME].result[\"ok\"]\n print_result(result)",
"def fusion_api_update_li_ethernet_settings(self, body=None, uri=None, api=None, headers=None):\n param = '/ethernetSettings'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)",
"def commit_pending_nic_changes(self, nic_id, reboot=False):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot)",
"def _change_bios_setting(self, properties):\n keys = properties.keys()\n # Check if the BIOS resource/property exists.\n headers, bios_uri, settings = self._check_bios_resource(keys)\n if not self._operation_allowed(headers, 'PATCH'):\n headers, bios_uri, _ = self._get_bios_settings_resource(settings)\n self._validate_if_patch_supported(headers, bios_uri)\n\n request_headers = self._get_bios_hash_password(self.bios_password)\n status, headers, response = self._rest_patch(bios_uri, request_headers,\n properties)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)",
"def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)",
"def set_static_conn(nic, ip_addr, subnet_mask, default_gateway, dns_servers):\n if isinstance(ip_addr, str):\n ip_addr = [ip_addr,]\n if isinstance(subnet_mask, str):\n subnet_mask = [subnet_mask,]\n if isinstance(default_gateway, str):\n default_gateway = [default_gateway, ]\n\n # set defult gateway. return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.SetGateways(default_gateway)\n print 'Default Gateway updated (status %d)' % ret\n\n # Set IP adrress & subnet mask. return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.EnableStatic(IPAddress=ip_addr, SubnetMask=subnet_mask)\n print 'IP Address / Subnet Mask updated (status %d)' % ret\n\n # set dns servers\n if dns_servers:\n #assert 0 == nic.EnableDNS(DNSServerSearchOrder=dns_servers)\n # or \n ret = nic.SetDNSServerSearchOrder(dns_servers)\n print 'DNS Server updated (status %d)' % ret",
"def SetWirelessInterface(self, interface):\n print \"setting wireless interface %s\" % (str(interface))\n self.wifi.wireless_interface = noneToBlankString(interface)\n self.wired.wireless_interface = noneToBlankString(interface)\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n config.set(\"Settings\",\"wireless_interface\", interface)\n configfile = open(self.app_conf, \"w\")\n config.write(configfile)",
"def set_bios_settings(self, data=None):\n\n if not data:\n raise exception.SDFlexError(\"Could not apply settings with\"\n \" empty data\")\n sushy_system = self._get_sushy_system()\n\n try:\n for key in data.keys():\n sushy_system.bios.set_attribute(key, data[key])\n except sushy.exceptions.SushyError as e:\n message_extended_info = e.body.get('@Message.ExtendedInfo')\n error_message = message_extended_info[0]['Message']\n\n msg = (self._(\"Setting the value of Bios attribute \"\n \"'%(atrribute)s' is not succesfull. \"\n \"Error: %(error)s\") %\n {'error': str(error_message), 'atrribute': key})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def update_zcs_settings(session, network, lowport, highport,\n return_type=None, **kwargs):\n verify_low_high_port(lowport, highport)\n\n body_values = {'network': network, 'lowport': lowport,\n 'highport': highport}\n\n path = '/api/settings/container_service.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)",
"def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))",
"def iface_config(self, iface, *args, **kwargs):\n if not set(kwargs).issubset({'intf_ip_addr', 'netns', 'adminMode'}):\n raise NotImplementedError(\"Method is not implemented for current kwargs.\")\n if kwargs.get('netns', False):\n # Create network namespaces for current iface\n self.create_namespaces(iface)\n del kwargs['netns']\n if 'intf_ip_addr' in kwargs:\n kwargs['ipAddr'] = \"{}/24\".format(kwargs['intf_ip_addr'])\n if iface in self.namespaces:\n self._lhost.ui.enter_namespace(self.namespaces[iface])\n self._lhost.ui.modify_ports([iface], **kwargs)\n if iface in self.namespaces:\n self._lhost.ui.exit_namespace()",
"def set_network(self, path, ip=\"\", netmask=\"255.255.255.0\", gateway=\"\"):\n\n with open(os.path.join(path, 'etc', 'network', 'interfaces'), 'w') \\\n as f:\n f.write(\"auto lo\\niface lo inet loopback\\n\\n\")\n\n if len(ip) <= 0:\n f.write(\"auto eth0\\niface eth0 inet dhcp\\n\")\n else:\n f.write(\"auto eth0\\niface eth0 inet static\\n\")\n f.write(\"\\taddress {0}\\n\\tnetmask {1}\\n\\tgateway {2}\\n\".\\\n format(ip, netmask, gateway))",
"def update_neutron_advanced_configuration(self, option, value):\n attributes = self.nailgun_client.get_cluster_attributes(\n self.cluster_id)\n nac_subdict = attributes['editable']['neutron_advanced_configuration']\n nac_subdict[option]['value'] = value\n self.nailgun_client.update_cluster_attributes(\n self.cluster_id, attributes)",
"def set_attributes(self, settings):\n\n for key, value in settings.items():\n self.__dict__[key] = value",
"def resetSettings(self):\n\n # it does this 4 times because for some reason it would not grab everything one time through. Investigate\n for i in range(4):\n\n networkNode = self.returnNetworkNode\n attrs = cmds.listAttr(networkNode, ud=True)\n\n for attr in attrs:\n attrType = str(cmds.getAttr(networkNode + \".\" + attr, type=True))\n\n if attrType == \"double\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n if attrType == \"bool\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, True, lock=True)\n\n if attrType == \"enum\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n # relaunch the UI\n self.updateSettingsUI()\n self.applyModuleChanges(self)",
"def change_settings(self, bio=None, public_images=None,\n messaging_enabled=None, album_privacy=None,\n accepted_gallery_terms=None):\n # NOTE: album_privacy should maybe be renamed to default_privacy\n # NOTE: public_images is a boolean, despite the documentation saying it\n # is a string.\n url = \"https://api.imgur.com/3/account/{0}/settings\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True, params=locals(),\n method='POST')\n return resp",
"def put():\n json_data = request.get_json()\n\n # validate request\n try:\n schema, resolver = ConfigStore.load_json_schema('modify_mba_ctrl.json')\n jsonschema.validate(json_data, schema, resolver=resolver)\n except (jsonschema.ValidationError, OverflowError) as error:\n raise BadRequest(\"Request validation failed - %s\" % (str(error)))\n\n if not caps.mba_bw_supported():\n return {'message': \"MBA CTRL not supported!\"}, 409\n\n if common.CONFIG_STORE.is_any_pool_defined():\n return {'message': \"Please remove all Pools first!\"}, 409\n\n data = deepcopy(common.CONFIG_STORE.get_config())\n\n CapsMbaCtrl.set_mba_ctrl_enabled(data, json_data['enabled'])\n\n common.CONFIG_STORE.set_config(data)\n\n return {'message': \"MBA CTRL status changed.\"}, 200",
"def set_ipaddress(modulo):\n\n print ('Configuring IP address...')\n\n modulo.write('AT+NETOPEN\\r\\n'.encode())\n\n if _valid_net(modulo): \n try:\n modulo.write('AT+IPADDR\\r\\n'.encode())\n time.sleep(0.1)\n except serial.SerialException:\n print ('... Whitout IP address, try again')\n if _valid_ip(modulo):\n print ('IP address configurated')\n else:\n print ('IP not configurated')\n else:\n print ('Net Already configurated')\n \n data = _read_line(modulo)\n return data",
"def update(\n self,\n ECI=None,\n Enabled=None,\n MCC=None,\n MNC=None,\n Name=None,\n ParentMme=None,\n ParentSgw=None,\n RAILAC=None,\n RAIMCC1=None,\n RAIMCC2=None,\n RAIMCC3=None,\n RAIMNC1=None,\n RAIMNC2=None,\n RAIMNC3=None,\n RAIRAC=None,\n TAC=None,\n ):\n # type: (str, bool, str, str, str, str, str, str, int, int, int, int, int, int, str, str) -> EgtpNbS5S8Range\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))",
"def changeSettings(self,instance,description, cur_memory, memory, cur_vcpu, vcpu):\n memory = int(memory) * 1024\n cur_memory = int(cur_memory) * 1024\n\n xml = instance.XMLDesc(1)\n tree = ElementTree.fromstring(xml)\n\n set_mem = tree.find('memory')\n set_mem.text = str(memory)\n set_cur_mem = tree.find('currentMemory')\n set_cur_mem.text = str(cur_memory)\n set_desc = tree.find('description')\n set_vcpu = tree.find('vcpu')\n set_vcpu.text = vcpu\n set_vcpu.set('current', cur_vcpu)\n\n if not set_desc:\n tree_desc = ElementTree.Element('description')\n tree_desc.text = description\n tree.insert(2, tree_desc)\n else:\n set_desc.text = description\n\n new_xml = ElementTree.tostring(tree)\n return self.defineXML(new_xml)"
] | [
"0.6353005",
"0.63099056",
"0.6132525",
"0.5717877",
"0.5647892",
"0.5579225",
"0.5547068",
"0.55100584",
"0.5498269",
"0.54694575",
"0.54531837",
"0.5437021",
"0.5431047",
"0.53944135",
"0.5393298",
"0.5388977",
"0.53056186",
"0.5285326",
"0.51902294",
"0.5187543",
"0.51482683",
"0.51276726",
"0.5085158",
"0.5080308",
"0.5073576",
"0.5063239",
"0.5047335",
"0.50268316",
"0.5010453",
"0.49904656"
] | 0.64070183 | 0 |
Applies all pending changes on a RAID controller by creating a config job. | def commit_pending_raid_changes(self, raid_controller, reboot=False,
start_time='TIME_NOW'):
return self._job_mgmt.create_config_job(
resource_uri=ironic_uris.DCIM_RAIDService,
cim_creation_class_name='DCIM_RAIDService',
cim_name='DCIM:RAIDService',
target=raid_controller,
reboot=reboot,
start_time=start_time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_config(dts, acg, xact, action, scratch):\n self.log.debug(\"Apply Config\")\n return rwtypes.RwStatus.SUCCESS",
"def apply_config(\n hostname: str, config: str, dry_run: bool, job_id: Optional[int] = None, scheduled_by: Optional[str] = None\n) -> NornirJobResult:\n logger = get_logger()\n\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n if not dev:\n raise Exception(\"Device {} not found\".format(hostname))\n elif not (dev.state == DeviceState.MANAGED or dev.state == DeviceState.UNMANAGED):\n raise Exception(\"Device {} is in invalid state: {}\".format(hostname, dev.state))\n\n nr = cnaas_init()\n nr_filtered, _, _ = inventory_selector(nr, hostname=hostname)\n\n try:\n nrresult = nr_filtered.run(task=push_static_config, config=config, dry_run=dry_run, job_id=job_id)\n except Exception as e:\n logger.exception(\"Exception in apply_config: {}\".format(e))\n else:\n if not dry_run:\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n dev.state = DeviceState.UNMANAGED\n dev.synchronized = False\n\n return NornirJobResult(nrresult=nrresult)",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def apply(self) -> None:\n _ba.apply_config()",
"def apply(self):\n changed = False\n job_schedule_exists = False\n results = netapp_utils.get_cserver(self.server)\n cserver = netapp_utils.setup_ontap_zapi(\n module=self.module, vserver=results)\n netapp_utils.ems_log_event(\"na_ontap_job_schedule\", cserver)\n job_details = self.get_job_schedule()\n if job_details:\n job_schedule_exists = True\n if self.state == 'absent': # delete\n changed = True\n elif self.state == 'present': # modify\n if job_details['job_minutes'] != str(self.job_minutes):\n changed = True\n else:\n if self.state == 'present': # create\n changed = True\n if changed:\n if self.module.check_mode:\n pass\n else:\n if self.state == 'present': # execute create\n if not job_schedule_exists:\n self.create_job_schedule()\n else: # execute modify minute\n self.modify_minute_job_schedule()\n elif self.state == 'absent': # execute delete\n self.delete_job_schedule()\n self.module.exit_json(changed=changed)",
"def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)",
"def reconfigure_with_new_fields(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config_controller = utils.get_config_template('new_fields_controller')\n structured_config = get_structured_config_dict(config_controller)\n self.fuel_web.client.upload_configuration(config_controller,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n service_list = ['neutron-server', 'neutron-dhcp-agent',\n 'neutron-l3-agent', 'neutron-metadata-agent',\n 'nova-scheduler', 'nova-novncproxy', 'nova-conductor',\n 'nova-api', 'nova-consoleauth', 'nova-cert']\n services_uptime = {}\n for service_name in service_list:\n services_uptime[service_name] = self.get_service_uptime(\n controllers, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n for service_name in service_list:\n self.check_service_was_restarted(\n controllers,\n services_uptime[service_name],\n service_name)\n\n self.show_step(6)\n self.check_config_on_remote(controllers, structured_config)\n\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n\n self.show_step(7)\n config_copmute = utils.get_config_template('new_fields_compute')\n structured_config = get_structured_config_dict(config_copmute)\n self.fuel_web.client.upload_configuration(config_copmute,\n cluster_id,\n role='compute')\n\n self.show_step(8)\n uptimes_nova = self.get_service_uptime(computes, 'nova-compute')\n\n self.show_step(9)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='compute')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(10)\n self.check_service_was_restarted(computes,\n uptimes_nova,\n 'nova-compute')\n\n self.show_step(11)\n self.check_config_on_remote(computes, structured_config)\n self.env.make_snapshot(\"reconfigure_with_new_fields\")",
"def commit_pending_idrac_changes(\n self,\n idrac_fqdd='iDRAC.Embedded.1',\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_iDRACCardService,\n cim_creation_class_name='DCIM_iDRACCardService',\n cim_name='DCIM:iDRACCardService',\n target=idrac_fqdd,\n reboot=reboot,\n start_time=start_time)",
"def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()",
"def _configure_all_tasks(self, config, job_exe, job_type):\n\n config.set_task_ids(job_exe.get_cluster_id())\n\n for task_type in config.get_task_types():\n # Configure env vars describing allocated task resources\n env_vars = {}\n nvidia_docker_label = None\n\n for resource in config.get_resources(task_type).resources:\n env_name = 'ALLOCATED_%s' % normalize_env_var_name(resource.name)\n env_vars[env_name] = '%.1f' % resource.value # Assumes scalar resources\n if resource.name == \"gpus\" and int(resource.value) > 0:\n gpu_list = GPUManager.get_nvidia_docker_label(job_exe.node_id, job_exe.job_id)\n nvidia_docker_label = DockerParameter('env','NVIDIA_VISIBLE_DEVICES={}'.format(gpu_list.strip(',')))\n\n # Configure env vars for Scale meta-data\n env_vars['SCALE_JOB_ID'] = unicode(job_exe.job_id)\n env_vars['SCALE_EXE_NUM'] = unicode(job_exe.exe_num)\n if job_exe.recipe_id:\n env_vars['SCALE_RECIPE_ID'] = unicode(job_exe.recipe_id)\n if job_exe.batch_id:\n env_vars['SCALE_BATCH_ID'] = unicode(job_exe.batch_id)\n\n # Configure workspace volumes\n workspace_volumes = {}\n for task_workspace in config.get_workspaces(task_type):\n logger.debug(self._workspaces)\n workspace_model = self._workspaces[task_workspace.name]\n # TODO: Should refactor workspace broker to return a Volume object and remove BrokerVolume\n if workspace_model.volume:\n vol_name = get_workspace_volume_name(job_exe, task_workspace.name)\n cont_path = get_workspace_volume_path(workspace_model.name)\n if workspace_model.volume.host:\n host_path = workspace_model.volume.remote_path\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=True, host_path=host_path)\n else:\n driver = workspace_model.volume.driver\n driver_opts = {}\n # TODO: Hack alert for nfs broker, as stated above, we should return Volume from broker\n if driver == 'nfs':\n driver_opts = {'share': workspace_model.volume.remote_path}\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=False, driver=driver,\n driver_opts=driver_opts)\n workspace_volumes[task_workspace.name] = volume\n\n config.add_to_task(task_type, env_vars=env_vars, wksp_volumes=workspace_volumes)\n\n # Labels for metric grouping\n job_id_label = DockerParameter('label', 'scale-job-id={}'.format(job_exe.job_id))\n job_execution_id_label = DockerParameter('label', 'scale-job-execution-id={}'.format(job_exe.exe_num))\n job_type_name_label = DockerParameter('label', 'scale-job-type-name={}'.format(job_type.name))\n job_type_version_label = DockerParameter('label', 'scale-job-type-version={}'.format(job_type.version))\n main_label = DockerParameter('label', 'scale-task-type=main')\n if nvidia_docker_label:\n nvidia_runtime_param = DockerParameter('runtime', 'nvidia')\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label, nvidia_docker_label, nvidia_runtime_param])\n else:\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label])\n\n if not job_type.is_system:\n pre_label = DockerParameter('label', 'scale-task-type=pre')\n post_label = DockerParameter('label', 'scale-task-type=post')\n config.add_to_task('pre', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, pre_label])\n config.add_to_task('post', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, post_label])\n\n # Configure tasks for logging\n if 
settings.LOGGING_ADDRESS is not None:\n log_driver = DockerParameter('log-driver', 'fluentd')\n fluent_precision = DockerParameter('log-opt', 'fluentd-sub-second-precision=true')\n log_address = DockerParameter('log-opt', 'fluentd-address=%s' % settings.LOGGING_ADDRESS)\n if not job_type.is_system:\n pre_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('pre'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('pre', docker_params=[log_driver, fluent_precision, log_address, pre_task_tag])\n post_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('post'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('post', docker_params=[log_driver, fluent_precision, log_address, post_task_tag])\n # TODO: remove es_urls parameter when Scale no longer supports old style job types\n\n # Post task needs ElasticSearch URL to grab logs for old artifact registration\n es_param = DockerParameter('env', 'ELASTICSEARCH_URL=%s' % settings.ELASTICSEARCH_URL)\n config.add_to_task('post', docker_params=[es_param])\n main_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('main'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('main', docker_params=[log_driver, fluent_precision, log_address, main_task_tag])",
"def preservation_config_after_reset_and_preconfigured_deploy(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_ml2_vlan_range\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n self.fuel_web.stop_reset_env_wait(cluster_id)\n\n self.show_step(3)\n config = utils.get_config_template('nova_cpu')\n structured_config_nova = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n config = utils.get_config_template('neutron')\n structured_config_neutron = get_structured_config_dict(config)\n\n self.show_step(4)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:4], timeout=10 * 60)\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(5)\n self.fuel_web.run_ostf(\n cluster_id=cluster_id)\n\n self.show_step(6)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n structured_config = {}\n structured_config.update(structured_config_neutron)\n structured_config.update(structured_config_nova)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(7)\n self.show_step(8)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.check_ml2_vlan_range(os_conn)\n\n self.show_step(9)\n self.show_step(10)\n self.check_overcommit_ratio(os_conn, cluster_id)\n\n snapshot = \"preservation_config_after_reset_and_preconfigured_deploy\"\n self.env.make_snapshot(snapshot, is_make=True)",
"def update(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.updateJobs(server, jobs)",
"def apply_config_for_node_with_multiple_role(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n target_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute', 'cinder'])\n config_for_compute_role = utils.get_config_template('nova_disk')\n config_for_compute_role['nova_config'].update(\n {'DEFAULT/debug': {'value': 'False'}})\n config_for_cinder_role = utils.get_config_template(\n 'nova_disk_cinder_role')\n\n self.show_step(2)\n self.fuel_web.client.upload_configuration(config_for_compute_role,\n cluster_id,\n role='compute')\n\n self.show_step(3)\n self.fuel_web.client.upload_configuration(config_for_cinder_role,\n cluster_id,\n role='cinder')\n\n # Configs are merging with ID-priority\n general_config = {}\n general_config.update(config_for_compute_role)\n general_config.update(config_for_cinder_role)\n structured_config = get_structured_config_dict(general_config)\n service_name = 'nova-compute'\n uptime = self.get_service_uptime(target_node, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_node[0]['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted(target_node,\n uptime,\n service_name)\n\n self.show_step(6)\n self.check_config_on_remote(target_node, structured_config)\n\n snapshot_name = \"apply_config_for_node_with_multiple_role\"\n self.env.make_snapshot(snapshot_name)",
"def config_func(tools, index, device_id, config_old: {}, config_new: {}):\n\n # This is an example of a firmware upgrade requiring a configuration migration\n\n # Firmware 01.03.XX to 01.04.XX configuration migration.\n\n # GENERAL section, no changes\n config_new[\"general\"] = config_old[\"general\"]\n\n # LOG section, error_frames added\n config_new[\"log\"] = config_old[\"log\"]\n config_new[\"log\"][\"error_frames\"] = {\"state\": 0}\n \n # RTC section, no changes\n config_new[\"rtc\"] = config_old[\"rtc\"]\n\n # SECONDARY PORT section, no changes\n config_new['secondaryport'] = config_old['secondaryport']\n\n # CAN sections, remote_frames added, filter moved\n for can_x in [\"can_1\", \"can_2\"]:\n config_new[can_x] = config_old[can_x]\n config_new[can_x][\"filter\"] = {\"remote_frames\": 0, \"id\": config_old[can_x][\"filter\"]}\n\n # LIN sections, before optional, now mandatory\n for lin_x in [\"lin_1\", \"lin_2\"]:\n if lin_x in config_old:\n config_new[lin_x] = config_old[lin_x]\n\n # CONNECT section, server->request_style now mandatory\n if \"connect\" in config_old:\n config_new[\"connect\"] = config_old[\"connect\"]\n\n # Add mandatory \"request_style\" if not already set\n if \"s3\" in config_new[\"connect\"]:\n if \"server\" in config_new[\"connect\"][\"s3\"]:\n if \"request_style\" not in config_new[\"connect\"][\"s3\"][\"server\"]:\n config_new[\"connect\"][\"s3\"][\"server\"][\"request_style\"] = 0\n\n return config_new",
"def backup(ctx):\n config_path = ctx.obj['config_path']\n logger = ctx.obj['logger']\n\n config = Config(config_path)\n scheduler = BlockingScheduler(\n executors={'default': ThreadPoolExecutor(max_workers=1)}\n )\n\n for job in config.jobs.values():\n logger.info(f'filesystem={job.filesystem} '\n f'cron=\"{job.cron}\" '\n 'msg=\"Adding job.\"')\n scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)\n\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n pass",
"def update_fcoe_configs(self):\n # Nothing to be done if no reordering has occurred.\n reordered = self.udev.reordered_devices\n if not reordered:\n return\n\n # Skip if we have already completed this stage\n if self.fcoe_confs:\n return\n\n # Generate candidate list of fcoe conf files, with\n # associated rule, that need to be processed\n reordered_files = tuple((r, os.path.join(self.syspaths.fcoe_dir,\n \"cfg-%s\" % r['from']))\n for r in reordered)\n\n # At this stage changes have been prepared but are not yet\n # committed to disk\n self._fcoe_confs = self._process_candidate_conf_files(reordered_files)",
"def apply_config(self, responsible, paths, arg=None):\n self.warning(\"Reconfiguring NTP server (called with paths %s)\" % paths)\n return self.updateRunningConf(responsible)",
"def _on_config_changed(self, _):\n self._configure_pod()",
"def reconfigure_nova_quota(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('nova_quota')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n uptimes = self.get_service_uptime(controllers, 'nova-api')\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 'nova-api')\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_nova_quota(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_quota\")",
"def perform_distributed_cloud_config(dbapi, mgmt_iface_id):\n\n system = dbapi.isystem_get_one()\n if system.distributed_cloud_role == \\\n constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:\n # Add routes to get from this controller to all the existing subclouds.\n # Do this by copying all the routes configured on the management\n # interface on the mate controller (if it exists).\n mgmt_iface = dbapi.iinterface_get(mgmt_iface_id)\n controller_hosts = dbapi.ihost_get_by_personality(constants.CONTROLLER)\n mate_controller_id = None\n for controller in controller_hosts:\n if controller.id != mgmt_iface.forihostid:\n # Found the mate controller\n mate_controller_id = controller.id\n break\n else:\n LOG.info(\"Mate controller for host id %d not found. Routes not \"\n \"added.\" % mgmt_iface.forihostid)\n return\n\n mate_interfaces = dbapi.iinterface_get_all(\n forihostid=mate_controller_id)\n for interface in mate_interfaces:\n if constants.NETWORK_TYPE_MGMT in interface.networktypelist:\n mate_mgmt_iface = interface\n break\n else:\n LOG.error(\"Management interface for host id %d not found.\" %\n mate_controller_id)\n return\n\n routes = dbapi.routes_get_by_interface(mate_mgmt_iface.id)\n for route in routes:\n new_route = {\n 'family': route.family,\n 'network': route.network,\n 'prefix': route.prefix,\n 'gateway': route.gateway,\n 'metric': route.metric\n }\n try:\n dbapi.route_create(mgmt_iface_id, new_route)\n except exception.RouteAlreadyExists:\n LOG.info(\"DC Config: Attempting to add duplicate route \"\n \"to system controller.\")\n pass\n\n LOG.info(\"DC Config: Added route to subcloud: \"\n \"%s/%s gw:%s on mgmt_iface_id: %s\" %\n (new_route['network'], new_route['prefix'],\n new_route['gateway'], mgmt_iface_id))\n\n elif system.distributed_cloud_role == \\\n constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD:\n # Add the route back to the system controller.\n # Assumption is we do not have to do any error checking\n # for local & reachable gateway etc, as config_subcloud\n # will have already done these checks before allowing\n # the system controller gateway into the database.\n try:\n # Prefer admin network\n sc_network = dbapi.network_get_by_type(\n constants.NETWORK_TYPE_ADMIN)\n cc_gtwy_addr_name = '%s-%s' % (\n constants.SYSTEM_CONTROLLER_GATEWAY_IP_NAME,\n constants.NETWORK_TYPE_ADMIN)\n except exception.NetworkTypeNotFound:\n sc_network = dbapi.network_get_by_type(\n constants.NETWORK_TYPE_MGMT)\n cc_gtwy_addr_name = '%s-%s' % (\n constants.SYSTEM_CONTROLLER_GATEWAY_IP_NAME,\n constants.NETWORK_TYPE_MGMT)\n pass\n\n try:\n cc_gtwy_addr = dbapi.address_get_by_name(\n cc_gtwy_addr_name)\n except exception.AddressNotFoundByName:\n LOG.warning(\"DC Config: Failed to retrieve central \"\n \"cloud gateway ip address\")\n return\n\n try:\n cc_network = dbapi.network_get_by_type(\n constants.NETWORK_TYPE_SYSTEM_CONTROLLER)\n except exception.NetworkTypeNotFound:\n LOG.warning(\"DC Config: Failed to retrieve central \"\n \"cloud network\")\n return\n\n cc_network_addr_pool = dbapi.address_pool_get(\n cc_network.pool_uuid)\n\n route = {\n 'family': cc_network_addr_pool.family,\n 'network': cc_network_addr_pool.network,\n 'prefix': cc_network_addr_pool.prefix,\n 'gateway': cc_gtwy_addr.address,\n 'metric': 1\n }\n\n for ifnet in dbapi.interface_network_get_by_interface(mgmt_iface_id):\n if ifnet.network_id == sc_network.id:\n break\n else:\n LOG.warning(\"DC Config: Failed to retrieve network for interface \"\n \"id %s\", mgmt_iface_id)\n return\n\n try:\n dbapi.route_create(mgmt_iface_id, 
route)\n except exception.RouteAlreadyExists:\n LOG.info(\"DC Config: Attempting to add duplicate route \"\n \"to system controller.\")\n pass\n\n LOG.info(\"DC Config: Added route to system \"\n \"controller: %s/%s gw:%s on mgmt_iface_id: %s\" %\n (cc_network_addr_pool.network, cc_network_addr_pool.prefix,\n cc_gtwy_addr.address, mgmt_iface_id))",
"def configure(self, config):\n log.debug(\"Syncing with R2RM server\")\n yield self._r2rm_client.until_synced(2)\n self._icom_id = config[\"icom_id\"]\n self._firmware = config[\"firmware\"]\n log.debug(\"Trying to force deconfiguring board\")\n response = yield self._r2rm_client.req.force_deconfigure_board(self._icom_id)\n if not response.reply.reply_ok():\n self.log.warning(\"Unable to deconfigure ROACH2 board: {}\".format(\n response.reply.arguments[1]))\n log.debug(\"Sending configure request to R2RM server\")\n response = yield self._r2rm_client.req.configure_board(self._icom_id, EDD_R2RM_USER, self._firmware, timeout=20)\n if not response.reply.reply_ok():\n self.log.error(\"Error on configure request: {}\".format(\n response.reply.arguments[1]))\n raise EddRoach2ProductError(response.reply.arguments[1])\n _, firmware_ip, firmware_port = response.reply.arguments\n log.debug(\"Connecting client to activated firmware server @ {}:{}\".format(\n firmware_ip, firmware_port))\n firmware_client = KATCPClientResource(dict(\n name=\"firmware-client\",\n address=(firmware_ip, firmware_port),\n controlled=True))\n firmware_client.start()\n log.debug(\"Syncing with firmware client\")\n yield firmware_client.until_synced(2)\n for command, args in config[\"commands\"]:\n log.debug(\n \"Sending firmware server request '{}' with args '{}'\".format(command, args))\n response = yield firmware_client.req[command](*args, timeout=20)\n if not response.reply.reply_ok():\n self.log.error(\n \"Error on {}->{} request: {}\".format(command, args, response.reply.arguments[1]))\n raise EddRoach2ProductError(response.reply.arguments[1])\n log.debug(\"Stopping client connection to firmware server\")\n firmware_client.stop()",
"def run_morf_job(job_config, no_cache = False, no_morf_cache = False):\n combined_config_filename = \"config.properties\"\n logger = set_logger_handlers(module_logger, job_config)\n logger.info(\"running job id: {}\".format(job_config.morf_id))\n controller_script_name = \"controller.py\"\n docker_image_name = \"docker_image\"\n s3 = job_config.initialize_s3()\n # create temporary directory in local_working_directory from server.config\n with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:\n # copy config file into new directory\n shutil.copy(combined_config_filename, working_dir)\n os.chdir(working_dir)\n # from job_config, fetch and download the following: docker image, controller script, cached config file\n if not no_morf_cache:\n update_raw_data_cache(job_config)\n # from client.config, fetch and download the following: docker image, controller script\n try:\n fetch_file(s3, working_dir, job_config.docker_url, dest_filename=docker_image_name, job_config=job_config)\n fetch_file(s3, working_dir, job_config.controller_url, dest_filename=controller_script_name, job_config=job_config)\n if not no_cache: # cache job files in s3 unless no_cache parameter set to true\n cache_job_file_in_s3(job_config, filename = docker_image_name)\n cache_job_file_in_s3(job_config, filename = controller_script_name)\n except KeyError as e:\n cause = e.args[0]\n logger.error(\"[Error]: field {} missing from client.config file.\".format(cause))\n sys.exit(-1)\n # change working directory and run controller script with notifications for initialization and completion\n job_config.update_status(\"INITIALIZED\")\n send_email_alert(job_config)\n subprocess.call(\"python3 {}\".format(controller_script_name), shell = True)\n job_config.update_status(\"SUCCESS\")\n # push image to docker cloud, create doi for job files in zenodo, and send success email\n docker_cloud_path = cache_to_docker_hub(job_config, working_dir, docker_image_name)\n setattr(job_config, \"docker_cloud_path\", docker_cloud_path)\n zenodo_deposition_id = upload_files_to_zenodo(job_config, upload_files=(job_config.controller_url, job_config.client_config_url))\n setattr(job_config, \"zenodo_deposition_id\", zenodo_deposition_id)\n send_success_email(job_config)\n return",
"def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")",
"def commit_pending_nic_changes(self, nic_id, reboot=False):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot)",
"def refresh_config(self):\n\t\treturn Job(SDK.PrlVm_RefreshConfig(self.handle)[0])",
"def get_config(ctx):\n global HISTORY_LOGS, EXPERIMENT_ID #Ugly hack, make it better at some point, may be ;)\n id = ctx.job.id\n EXPERIMENT_ID = hash(id)\n\n import montezuma_env\n\n ctx.job.register_action(\"Set starting point procssor:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.STARTING_POINT_SELECTOR))\n ctx.job.register_action(\"Set rewards:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.REWARDS_FILE))\n\n logger.auto_set_dir(suffix=id)\n\n # (self, parameters, number_of_actions, input_shape)\n\n M = EXPERIMENT_MODEL\n\n name_base = str(uuid.uuid1())[:6]\n PIPE_DIR = os.environ.get('TENSORPACK_PIPEDIR_{}'.format(id), '.').rstrip('/')\n namec2s = 'ipc://{}/sim-c2s-{}-{}'.format(PIPE_DIR, name_base, id)\n names2c = 'ipc://{}/sim-s2c-{}-{}'.format(PIPE_DIR, name_base, id)\n procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]\n ensure_proc_terminate(procs)\n start_proc_mask_signal(procs)\n\n master = MySimulatorMaster(namec2s, names2c, M)\n dataflow = BatchData(DataFromQueue(master.queue), BATCH_SIZE)\n\n # My stuff - PM\n neptuneLogger = NeptuneLogger.get_instance()\n lr = tf.Variable(0.001, trainable=False, name='learning_rate')\n tf.scalar_summary('learning_rate', lr)\n num_epochs = get_atribute(ctx, \"num_epochs\", 100)\n\n rewards_str = get_atribute(ctx, \"rewards\", \"5 1 -200\")\n with open(montezuma_env.REWARDS_FILE, \"w\") as file:\n file.write(rewards_str)\n\n\n if hasattr(ctx.params, \"learning_rate_schedule\"):\n schedule_str = str(ctx.params.learning_rate_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting learing rate schedule:{}\".format(schedule_str))\n learning_rate_scheduler = ScheduledHyperParamSetter('learning_rate', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"entropy_beta_schedule\"):\n schedule_str = str(ctx.params.entropy_beta_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting entropy beta schedule:{}\".format(schedule_str))\n entropy_beta_scheduler = ScheduledHyperParamSetter('entropy_beta', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"explore_factor_schedule\"):\n schedule_str = str(ctx.params.explore_factor_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 2], [100, 3], [120, 4], [140, 5]]\"\n logger.info(\"Setting explore factor schedule:{}\".format(schedule_str))\n explore_factor_scheduler = ScheduledHyperParamSetter('explore_factor', json.loads(schedule_str))\n\n\n\n return TrainConfig(\n dataset=dataflow,\n optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),\n callbacks=Callbacks([\n StatPrinter(), ModelSaver(),\n learning_rate_scheduler, entropy_beta_scheduler, explore_factor_scheduler,\n HumanHyperParamSetter('learning_rate'),\n HumanHyperParamSetter('entropy_beta'),\n HumanHyperParamSetter('explore_factor'),\n NeputneHyperParamSetter('learning_rate', ctx),\n NeputneHyperParamSetter('entropy_beta', ctx),\n NeputneHyperParamSetter('explore_factor', ctx),\n master,\n StartProcOrThread(master),\n PeriodicCallback(Evaluator(EVAL_EPISODE, ['state'], ['logits'], neptuneLogger, HISTORY_LOGS), 1),\n neptuneLogger,\n ]),\n session_config=get_default_sess_config(0.5),\n model=M,\n step_per_epoch=STEP_PER_EPOCH,\n max_epoch=num_epochs,\n )",
"def updateRunningConf(self, responsible):\n stop_command, start_command = map(\n self.gen_actionCommand, (\"stop\", \"start\")\n )\n\n #runCommandAsRootAndCheck raises in case of an error (perfect!)\n yield deferToThread(self.runCommandAsRootAndCheck, stop_command)\n\n ntpservers = self.CONFIG['ntpservers'].split(' ')\n responsible.feedback(\n tr(\"Reference servers: %(NTPSERVERS)s\"),\n NTPSERVERS=', '.join(ntpservers)\n )\n\n ha_status = None\n context = Context.fromComponent(self)\n if EDENWALL and self.core.hasComponent(context, 'ha'):\n try:\n ha_status = yield self.core.callService(context, 'ha', 'getHAMode')\n except Exception, err:\n self.error(exceptionAsUnicode(err))\n peers = _ntppeers(responsible, ha_status)\n\n template_variables = {\n 'ntpservers' : self.CONFIG['ntpservers'].split(' '),\n 'peers': peers,\n }\n self.generate_configfile(template_variables)\n\n yield deferToThread(self.runCommandAsRootAndCheck, start_command)",
"def _apply_workload_filling(self):\n\n # call functions corresponding to fill_in types\n for filling_strategy_config in self.config.model[\"workload_filling\"].get('operations', []):\n\n # select the appropriate workload_filling strategy\n filler = job_filling_types[filling_strategy_config[\"type\"]](self.workload_set.workloads)\n\n # configure the strategy with specific config + user defined functions (if present)\n if self.config.model[\"workload_filling\"].get('user_functions'):\n user_functions = self.config.model[\"workload_filling\"]['user_functions']\n filling_strategy_config.update({\"user_functions\": user_functions})\n\n filler.apply(filling_strategy_config)",
"def postConf(conf):\n\n rootbconf = conf.bconfManager.root\n btypeDir = rootbconf.selectedBuildTypeDir\n rootdir = rootbconf.rootdir\n\n for taskParams in conf.allOrderedTasks:\n\n features = taskParams['features']\n cmdArgs = taskParams.get('run', None)\n\n if 'runcmd' not in features:\n if cmdArgs is not None:\n features.append('runcmd')\n else:\n continue\n\n if cmdArgs is None:\n cmdArgs = {}\n elif not isinstance(cmdArgs, maptype):\n cmdArgs = { 'cmd' : cmdArgs }\n\n cmdArgs.update({\n 'name' : taskParams['name'],\n 'timeout': cmdArgs.get('timeout', None),\n 'env' : cmdArgs.get('env', {}),\n 'repeat' : cmdArgs.get('repeat', 1),\n })\n\n taskParams['run'] = cmdArgs\n\n cwd = cmdArgs.get('cwd', None)\n if cwd:\n try:\n cwd = cwd.abspath()\n except AttributeError:\n startdir = cmdArgs.get('startdir', taskParams['$bconf'].startdir)\n cwd = PathsParam(cwd, startdir, rootdir).abspath()\n else:\n cwd = btypeDir\n cmdArgs['cwd'] = cwd\n\n cmdArgs['$type'] = ''\n cmd = cmdArgs.get('cmd', None)\n if cmd and callable(cmd):\n # it's needed because a function cannot be saved in a file as is\n cmdArgs['cmd'] = cmd.__name__\n cmdArgs['shell'] = False\n cmdArgs['$type'] = 'func'",
"def run(params):\n jobs_config_file = os.path.join(CONFIG_PATH, 'jobs.yaml')\n\n jenkins_config_file = os.path.join(RESOURCE_PATH, 'jobs', 'config')\n\n jobs_path = os.path.join(RESOURCE_PATH, 'jobs')\n\n jobs = list(JobGenerator(jobs_config_file).jobs())\n\n if params.jobs:\n jobs = [job for job in jobs if fnmatch.fnmatch(job.name, params.jobs)]\n yaml_obj = [job.get_object() for job in jobs]\n if params.config:\n yaml_file = open(params.config, 'w')\n yaml_path = params.config\n else:\n yaml_file = tempfile.NamedTemporaryFile(\n prefix='libvirt_ci-jobs-', suffix='.yaml',\n dir=jobs_path, delete=False)\n yaml_path = yaml_file.name\n try:\n yaml.dump(yaml_obj, stream=yaml_file, indent=4,\n default_flow_style=False)\n yaml_file.close()\n\n if params.only_config:\n return\n\n cmd = \"jenkins-jobs\"\n cmd += \" --conf %s\" % jenkins_config_file\n if params.test:\n cmd += \" test\"\n else:\n cmd += \" update\"\n\n cmd += \" -r %s\" % jobs_path\n if params.jobs:\n cmd += \" %s\" % params.jobs\n # Ignore standard output of jenkins-job-builder\n cmd += \" > /dev/null\"\n\n utils.run(cmd, debug=True, ignore_fail=False, timeout=3600)\n finally:\n if params.only_config:\n LOGGER.info('Keep job file %s', yaml_path)\n else:\n try:\n LOGGER.info('Removing job file %s', yaml_path)\n os.remove(yaml_path)\n except (OSError, IOError) as details:\n LOGGER.warning('Failed to remove job file %s: %s',\n yaml_file.name, details)"
] | [
"0.59766585",
"0.57936853",
"0.577992",
"0.5772036",
"0.57508814",
"0.5709033",
"0.5448869",
"0.5360297",
"0.5341171",
"0.52653414",
"0.5233503",
"0.52317417",
"0.51950896",
"0.5162699",
"0.5160941",
"0.51412576",
"0.50951284",
"0.5054649",
"0.5042832",
"0.50163287",
"0.49999997",
"0.49800107",
"0.4939681",
"0.4933833",
"0.49311903",
"0.49206558",
"0.4896609",
"0.48741153",
"0.48337904",
"0.4828956"
] | 0.6687969 | 0 |
Generate weightmaps for the images using the binary masks | def create_weightmaps(path,
folders,
w0=10.,
sigma=3.,
thresh_fn=lambda x:x>0,
name_weights_folder=True):
# set up some pipelines
w_pipe = ImageWeightMap2(w0=w0, sigma=sigma)
for d in folders:
r_dir = os.path.join(path, d)
f_labels = os.listdir(os.path.join(r_dir,'label/'))
f_labels = [l for l in f_labels if l.endswith('.tif')]
w_dir_base = 'weights'
if name_weights_folder:
w_dir_base += '_w0-{0:2.2f}_sigma-{1:2.2f}'.format(w0, sigma)
w_dir = os.path.join(r_dir, w_dir_base)
utils.check_and_makedir(w_dir)
for f in f_labels:
print 'Calculating weights for {0:s} in folder \'{1:s}\''.format(f,d)
w_label = re.match('([a-zA-Z0-9()]+)_([a-zA-Z0-9()]+_)*', f).group(0)
w_label += 'weights.tif'
label_filename = os.path.join(r_dir,'label/',f)
im_label = ImageLabels(label_filename).labels()
im_weights = np.squeeze(w_pipe(im_label.astype('bool')))
t.imsave(os.path.join(w_dir, w_label), im_weights.astype('float32')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_weight_map(masks, w0=10, sigma=5, longest_max_size=-1):\n nrows, ncols = masks.shape[1:]\n\n if longest_max_size > 0:\n old_rows, old_cols = nrows, ncols\n max_size = max(nrows, ncols)\n new_rows, new_cols = longest_max_size * nrows // max_size, longest_max_size * ncols // max_size\n\n resized_masks = []\n for mask in masks:\n resized_masks.append(cv2.resize(mask, (new_cols, new_rows), interpolation=0))\n masks = np.stack(resized_masks)\n nrows, ncols = new_rows, new_cols\n\n masks = (masks > 0).astype(int)\n distMap = np.zeros((nrows * ncols, masks.shape[0]))\n X1, Y1 = np.meshgrid(np.arange(nrows), np.arange(ncols))\n X1, Y1 = np.c_[X1.ravel(), Y1.ravel()].T\n for i, mask in enumerate(masks):\n # find the boundary of each mask,\n # compute the distance of each pixel from this boundary\n bounds = find_boundaries(mask, mode='inner')\n X2, Y2 = np.nonzero(bounds)\n xSum = (X2.reshape(-1, 1) - X1.reshape(1, -1)) ** 2\n ySum = (Y2.reshape(-1, 1) - Y1.reshape(1, -1)) ** 2\n distMap[:, i] = np.sqrt(xSum + ySum).min(axis=0)\n ix = np.arange(distMap.shape[0])\n if distMap.shape[1] == 1:\n d1 = distMap.ravel()\n border_loss_map = w0 * np.exp((-1 * (d1) ** 2) / (2 * (sigma ** 2)))\n else:\n if distMap.shape[1] == 2:\n d1_ix, d2_ix = np.argpartition(distMap, 1, axis=1)[:, :2].T\n else:\n d1_ix, d2_ix = np.argpartition(distMap, 2, axis=1)[:, :2].T\n d1 = distMap[ix, d1_ix]\n d2 = distMap[ix, d2_ix]\n border_loss_map = w0 * np.exp((-1 * (d1 + d2) ** 2) / (2 * (sigma ** 2)))\n xBLoss = np.zeros((nrows, ncols))\n xBLoss[X1, Y1] = border_loss_map\n # class weight map\n loss = np.zeros((nrows, ncols))\n w_1 = 1 - masks.sum() / loss.size\n w_0 = 1 - w_1\n loss[masks.sum(0) == 1] = w_1\n loss[masks.sum(0) == 0] = w_0\n ZZ = xBLoss + loss\n\n if longest_max_size > 0:\n ZZ = cv2.resize(ZZ, (old_cols, old_rows))\n return ZZ",
"def convertMaskToWeights(mask):\n vals = np.unique(mask)\n for i in range(len(vals)):\n mask[mask==vals[i]]=i\n mask = mask.astype(int)\n w = mask.ravel() \n return w",
"def weight_images(im_dir, wt_dir, weight_dir, im_weight_dir, wt_weight_dir, imtype='intbgsub', wttype='rrhr'):\n im_suff, wt_suff = '*-{}.fits'.format(imtype), '*-{}.fits'.format(wttype)\n imfiles = sorted(glob.glob(os.path.join(im_dir, im_suff)))\n wtfiles = sorted(glob.glob(os.path.join(wt_dir, wt_suff))) \n\n # weight each image\n for i in range(len(imfiles)):\n # read in the data\n imfile = imfiles[i]\n wtfile = os.path.join(os.path.dirname(wtfiles[i]), os.path.basename(imfile).replace(imtype, wttype))\n im, hdr = astropy.io.fits.getdata(imfile, header=True)\n rrhr, rrhrhdr = astropy.io.fits.getdata(wtfile, header=True)\n\n # weight the data by the exposure time\n wt = rrhr\n newim = im * wt\n\n # write data to new files and copy the *_area.fits files created by Montage to have the same naming convention\n newfile = os.path.join(im_weight_dir, os.path.basename(imfile))\n astropy.io.fits.writeto(newfile, newim, hdr)\n old_area_file = imfile.replace('.fits', '_area.fits')\n if os.path.exists(old_area_file):\n new_area_file = newfile.replace('.fits', '_area.fits')\n shutil.copy(old_area_file, new_area_file)\n\n weightfile = os.path.join(wt_weight_dir, os.path.basename(wtfile))\n astropy.io.fits.writeto(weightfile, wt, rrhrhdr)\n old_area_file = wtfile.replace('.fits', '_area.fits')\n if os.path.exists(old_area_file):\n new_area_file = weightfile.replace('.fits', '_area.fits')\n shutil.copy(old_area_file, new_area_file)",
"def detectron_weight_mapping(self):\n detectron_weight_mapping = {\n 'block.0.weight': 'blockConv1_w',\n 'block.0.bias': 'blockConv2_b',\n 'block.1.weight': 'blockBN1_w',\n 'block.1.running_mean': 'blockBN1_rm',\n 'block.1.running_var': 'blockBN1_rv',\n 'block.1.bias': 'blockBN1_b',\n 'block.3.weight': 'blockConv2_w',\n 'block.3.bias': 'blockConv2_b',\n 'block.4.weight': 'blockBN2_w',\n 'block.4.bias': 'blockBN2_b',\n 'block.4.running_mean': 'blockBN4_rm',\n 'block.4.running_var': 'blockBN4_rv',\n }\n orphan_in_detectron = []\n self.mapping_to_detectron = detectron_weight_mapping\n self.orphans_in_detectron = orphan_in_detectron\n return self.mapping_to_detectron, self.orphans_in_detectron",
"def detectron_weight_mapping(self):\n d_wmap = {}\n d_orphan = []\n d_wmap['gen_base.0.weight'] = 'baseConv1_w'\n d_wmap['gen_base.0.bias'] = 'baseConv1_b'\n d_wmap['gen_base.2.weight'] = 'baseConv2_w'\n d_wmap['gen_base.2.bias'] = 'baseConv2_b'\n\n for name, m_child in self.named_children():\n if name in ['gen_base']: # skip gen_base\n continue\n if list(m_child.parameters()): # if module has any parameter\n child_map, child_orphan = m_child.detectron_weight_mapping()\n d_orphan.extend(child_orphan)\n for key, value in child_map.items():\n new_key = name + '.' + key\n d_wmap[new_key] = value\n self.mapping_to_detectron = d_wmap\n self.orphans_in_detectron = d_orphan\n return self.mapping_to_detectron, self.orphans_in_detectron",
"def compute_probability_weights(indexing, \n counts, \n image, \n binary_set_mappings): \n S_w_cardinalities = np.zeros_like(indexing)\n\n countsgeq2 = sum(c>=2 for c in counts) # compute amount of indices that have count>=2\n countseq1 = [v for v in range(indexing.shape[0]) if counts[indexing[v]]==1]\n K_cardinalities = np.zeros_like(indexing)\n for card,w in enumerate(countseq1[::-1]):\n K_cardinalities[w] = card\n\n for w,index in enumerate(indexing):\n if counts[index] >= 3:\n S_w_cardinalities[w] = len(image)\n elif counts[index] == 2:\n offset = 1 if w==binary_set_mappings[index] else 0\n S_w_cardinalities[w] = len(image) - 1 + offset\n elif counts[index] == 1:\n S_w_cardinalities[w] = countsgeq2 + K_cardinalities[w]\n\n return S_w_cardinalities/np.sum(S_w_cardinalities)",
"def build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x",
"def calculate_MAP(self):\n testing_images = open('./digitdata/testimages', 'r')\n with testing_images as ti:\n data = list(csv.reader(ti))\n data = [i for i in data if i]\n count = 0\n #loop through all the test images\n for j in range(0,1000):\n classification_dict = {0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0} \n for l in range(0,28):\n coord = count + l\n for w in range(0,28):\n if data[coord][0][w] == \"+\":\n #iterate through each class. z is the class [0-9]\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][0]) \n elif data[coord][0][w] == \"#\":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][1])\n elif data[coord][0][w] == \" \":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][2])\n count += 28\n self.solutions.append(max(classification_dict, key=classification_dict.get))",
"def get_mask_dictionary(train_names):\n masks={}\n for name in train_names:\n masks[name]=cv.imread(\"../dataset/masks/\"+name+\".png\",cv.IMREAD_GRAYSCALE)\n \n return masks",
"def mask_weights(self, mask, weights):\n new_weights = list()\n for idx, layer in enumerate(self.model.layers):\n if len(layer.get_weights())>0:\n new_weights.append(weights[idx]*mask[idx])\n new_weights.append(layer.get_weights()[1])\n else:\n continue\n return new_weights",
"def write_weights_images(self):\n for weight_name, weight in self._weights.items():\n self._write_weight_image_to_tensorboard(\n name=f\"{self._Sections.WEIGHTS}/{weight_name}\",\n weight=weight,\n step=self._epochs,\n )",
"def _mappings(self, inputs):\n return self.mapbias + tensor.dot(\n self._factorsX(inputs) * self._factorsY(inputs), self.whf_in.T)",
"def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)",
"def weight_compression(weights, bits, axis=0, quantizer=None):\n assert bits <= 8\n n = 2**bits\n index_table = []\n codebook_table = np.zeros((weights.shape[axis], n))\n km_models = [None] * weights.shape[axis]\n\n for i, w in tqdm(enumerate(np.split(weights, weights.shape[axis], axis))):\n original_shape = w.shape\n w = w.ravel()\n km = KMeans(n)\n km.fit(w.reshape(-1, 1))\n if quantizer:\n km.cluster_centers_ = quantizer(km.cluster_centers_).numpy()\n km.cluster_centers_.sort(axis=0)\n\n km_models[i] = km\n codebook_table[i, :] = km.cluster_centers_.flatten()\n preds = km.predict(w.reshape(-1, 1))\n index_table.append(preds.reshape(original_shape))\n\n index_table = np.concatenate(index_table, axis)\n return index_table, codebook_table",
"def fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = modellib.PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(1, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n # Duplicate output for fg/bg detections\n x = KL.Concatenate(axis=-1)([x for i in range(num_classes)])\n return x",
"def _all_labels_to_bitmasks(all_labels):\n l_dict = {}\n for i, label in enumerate(all_labels):\n l_dict[label.name] = 1<<i\n return l_dict",
"def calculate_binaries(dict_data):\n list_all_preprocessed_binaries = []\n for index_patient, patient in enumerate(dict_data):\n # pick and convert image\n image = dict_data[patient][1]\n image = image.astype(\"uint8\")\n # blur image\n image_blurred = cv2.medianBlur(image, 29)\n # segment image using k-means segmentation\n image_segmented = run_kmean_on_single_image(image_blurred, k=10,\n precision=10000, max_iterations=1000)\n # find lower threshold for binarizing images\n \"\"\" the idea i had here was that all the electrodes always occupy the same area on each picture.\n this function basically returns the pixel value, at which we need to threshold in our binary\n function, so that all pixels that have a higher intensity will collectively make up at least \n \"fraction_of_image_threshold\" percent of the picture - electrodes seem to take up about 5-10% of each\n image\"\"\"\n lower_threshold = intelligent_get_threshold(image_segmented,\n fraction_of_image_threshold=0.08)\n # binarize image\n image_binary = binarize_image(image_segmented, \n lower_threshold=lower_threshold, upper_threshold=255)\n list_all_preprocessed_binaries.append(image_binary)\n return list_all_preprocessed_binaries",
"def image_mask(kmeans_labels, img_gray_orig):\n\n\tmask_img = np.zeros((img_gray_orig.shape[0], img_gray_orig.shape[1]))\n\n\tkmeans_labels_arr = kmeans_labels.reshape(img_gray_orig.shape[0],\n\t\t\t\t\t\t\t\t\t\t\t img_gray_orig.shape[1])\n\n\tsort_labels = sorted(pd.Series(kmeans_labels).unique(),\n\t\t\t\t\t\t\t\t\t\t\t\t\treverse = True)\n\tjust_bone = ()\n\n\tif (np.sum(kmeans_labels_arr==sort_labels[0])) > 8000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[0])\n\t mask_img[just_bone] = 1\n\t\t \n\tif (np.sum(kmeans_labels_arr==sort_labels[1])) > 8000 and\\\n\t\t\t\t (np.sum(kmeans_labels_arr==sort_labels[1])) < 60000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[1])\n\t mask_img[just_bone] = 1\n\t\n\tif (np.sum(kmeans_labels_arr==sort_labels[2]))>8000 and\\\n\t\t\t\t (np.sum(kmeans_labels_arr==sort_labels[2])) < 70000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[2])\n\t mask_img[just_bone] = 1\n\t\n\tif (np.sum(kmeans_labels_arr==sort_labels[3]))>8000 and\\\n\t\t\t\t(np.sum(kmeans_labels_arr==sort_labels[3])) < 70000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[3])\n\t mask_img[just_bone] = 1\n\t\n\tif not just_bone:\n\t\tjust_bone = np.where(kmeans_labels_arr==sort_labels[1]) \n\t\tmask_img[just_bone] = 1\n\n\treturn just_bone, mask_img",
"def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps",
"def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}",
"def create_GT_masks(root_dir, background_dir, intrinsic_matrix,classes):\n list_all_images = load_obj(root_dir + \"all_images_adr\")\n training_images_idx = load_obj(root_dir + \"train_images_indices\")\n for i in range(len(training_images_idx)):\n img_adr = list_all_images[training_images_idx[i]]\n label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]\n regex = re.compile(r'\\d+')\n idx = regex.findall(os.path.split(img_adr)[1])[0]\n\n if i % 1000 == 0:\n print(str(i) + \"/\" + str(len(training_images_idx)) + \" finished!\")\n\n image = cv2.imread(img_adr)\n ID_mask = np.zeros((image.shape[0], image.shape[1]))\n U_mask = np.zeros((image.shape[0], image.shape[1]))\n V_mask = np.zeros((image.shape[0], image.shape[1]))\n\n ID_mask_file = root_dir + label + \\\n \"/ground_truth/IDmasks/color\" + str(idx) + \".png\"\n U_mask_file = root_dir + label + \\\n \"/ground_truth/Umasks/color\" + str(idx) + \".png\"\n V_mask_file = root_dir + label + \\\n \"/ground_truth/Vmasks/color\" + str(idx) + \".png\"\n\n tra_adr = root_dir + label + \"/data/tra\" + str(idx) + \".tra\"\n rot_adr = root_dir + label + \"/data/rot\" + str(idx) + \".rot\"\n rigid_transformation = get_rot_tra(rot_adr, tra_adr)\n\n # Read point Point Cloud Data\n ptcld_file = root_dir + label + \"/object.xyz\"\n pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))\n ones = np.ones((pt_cld_data.shape[0], 1))\n homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)\n\n # Perspective Projection to obtain 2D coordinates for masks\n homogenous_2D = intrinsic_matrix @ (rigid_transformation @ homogenous_coordinate.T)\n coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]\n coord_2D = ((np.floor(coord_2D)).T).astype(int)\n x_2d = np.clip(coord_2D[:, 0], 0, 639)\n y_2d = np.clip(coord_2D[:, 1], 0, 479)\n ID_mask[y_2d, x_2d] = classes[label]\n\n if i % 100 != 0: # change background for every 99/100 images\n background_img_adr = background_dir + random.choice(os.listdir(background_dir))\n background_img = cv2.imread(background_img_adr)\n background_img = cv2.resize(background_img, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)\n background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]\n background_adr = root_dir + label + \"/changed_background/color\" + str(idx) + \".png\"\n mpimg.imsave(background_adr, background_img)\n\n # Generate Ground Truth UV Maps\n centre = np.mean(pt_cld_data, axis=0)\n length = np.sqrt((centre[0]-pt_cld_data[:, 0])**2 + (centre[1] -\n pt_cld_data[:, 1])**2 + (centre[2]-pt_cld_data[:, 2])**2)\n unit_vector = [(pt_cld_data[:, 0]-centre[0])/length, (pt_cld_data[:,\n 1]-centre[1])/length, (pt_cld_data[:, 2]-centre[2])/length]\n U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0])/(2*np.pi))\n V = 0.5 - (np.arcsin(unit_vector[1])/np.pi)\n U_mask[y_2d, x_2d] = U\n V_mask[y_2d, x_2d] = V\n\n # Saving ID, U and V masks after using the fill holes function\n ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)\n cv2.imwrite(ID_mask_file, ID_mask)\n mpimg.imsave(U_mask_file, U_mask, cmap='gray')\n mpimg.imsave(V_mask_file, V_mask, cmap='gray')",
"def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)",
"def apply_mask(binary, mask_dict):\n result = \"\"\n for i, val in enumerate(binary):\n if mask_dict[i] in ('X', '1'):\n result += mask_dict[i]\n else:\n result += binary[i]\n return result",
"def pz_weight(cat,mask,bins,binnum=100,pdf=False):\n\n if pdf:\n print 'transfer pdf support'\n return\n else:\n if hasattr(cat,'pzstore'):\n nz = cat.pzstore.pz_full\n else:\n nz = cat.pz_full\n mask1=mask\n e1,e2,w,m1,m2=lin.linear_methods.get_lin_e_w_ms(cat,mask=mask1,xi=True)\n if cat.wt:\n weights = w * (m1+m2)/2.\n else:\n weights = (m1+m2)/2.*np.ones(np.sum(mask))\n h0,b0=np.histogram(nz[mask],bins=binnum,weights=weights)\n w=np.ones(len(nz))\n print 'w0',len(w)\n for j in range(cat.sbins):\n binmask=(bins==j)&mask\n h,b=np.histogram(nz[binmask],bins=b0,weights=weights[bins[mask]==j])\n for k in range(binnum):\n binmask2=(nz>b[k])&(nz<=b[k+1])\n mask_=binmask&binmask2\n if h[k]<0.01*h0[k]:\n w[mask_]=0.\n else:\n w[mask_]=0.5*h0[k]/h[k]\n\n print 'max/min/mean weight', k,np.max(w),np.min(w),np.mean(w[binmask])\n\n return w,weights",
"def add_character_others(image, weight_map, weight_val, bbox):\n\n\tif not Polygon(bbox.reshape([4, 2]).astype(np.int32)).is_valid:\n\t\treturn image\n\n\ttop_left = np.array([np.min(bbox[:, 0]), np.min(bbox[:, 1])]).astype(np.int32)\n\tif top_left[1] > image.shape[0] or top_left[0] > image.shape[1]:\n\t\treturn image, weight_map\n\tbbox -= top_left[None, :]\n\ttransformed = four_point_transform(gaussian_heatmap.copy(), bbox.astype(np.float32))\n\n\tstart_row = max(top_left[1], 0) - top_left[1]\n\tstart_col = max(top_left[0], 0) - top_left[0]\n\tend_row = min(top_left[1] + transformed.shape[0], image.shape[0])\n\tend_col = min(top_left[0] + transformed.shape[1], image.shape[1])\n\timage[max(top_left[1], 0):end_row, max(top_left[0], 0):end_col] += \\\n\t\ttransformed[\n\t\tstart_row:end_row - top_left[1],\n\t\tstart_col:end_col - top_left[0]]\n\n\tweight_map[max(top_left[1], 0):end_row, max(top_left[0], 0):end_col] += \\\n\t\tnp.float32(transformed[\n\t\t\tstart_row:end_row - top_left[1],\n\t\t\tstart_col:end_col - top_left[0]] != 0)*weight_val\n\n\treturn image, weight_map",
"def convolve(kerns, kshp, nkern, images, imgshp, step=(1, 1), bias=None,\r\n mode='valid', flatten=True):\r\n N = numpy\r\n # start by computing output dimensions, size, etc\r\n kern_size = N.int64(N.prod(kshp))\r\n\r\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\r\n # in the first case, default nfeatures to 1\r\n if N.size(imgshp) == 2:\r\n imgshp = (1,) + imgshp\r\n\r\n # construct indices and index pointers for sparse matrix, which,\r\n # when multiplied with input images will generate a stack of image\r\n # patches\r\n indices, indptr, spmat_shape, sptype, outshp = \\\r\n convolution_indices.conv_eval(imgshp, kshp, step, mode)\r\n\r\n # build sparse matrix, then generate stack of image patches\r\n csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,\r\n indptr, spmat_shape)\r\n patches = (sparse.structured_dot(csc, images.T)).T\r\n\r\n # compute output of linear classifier\r\n pshape = tensor.stack(images.shape[0] * tensor.as_tensor(N.prod(outshp)),\\\r\n tensor.as_tensor(imgshp[0] * kern_size))\r\n patch_stack = tensor.reshape(patches, pshape, ndim=2)\r\n\r\n # kern is of shape: nkern x ksize*number_of_input_features\r\n # output is thus of shape: bsize*outshp x nkern\r\n output = tensor.dot(patch_stack, kerns.T)\r\n\r\n # add bias across each feature map (more efficient to do it now)\r\n if bias is not None:\r\n output += bias\r\n\r\n # now to have feature maps in raster order ...\r\n # go from bsize*outshp x nkern to bsize x nkern*outshp\r\n newshp = tensor.stack(images.shape[0],\\\r\n tensor.as_tensor(N.prod(outshp)),\\\r\n tensor.as_tensor(nkern))\r\n tensout = tensor.reshape(output, newshp, ndim=3)\r\n output = tensor.DimShuffle((False,) * tensout.ndim, (0, 2, 1))(tensout)\r\n if flatten:\r\n output = tensor.flatten(output, 2)\r\n\r\n return output, N.hstack((nkern, outshp))",
"def compute_gradient_saliency_maps(samples: torch.tensor,\n true_labels: torch.tensor,\n model: nn.Module):\n \"\"\"INSERT YOUR CODE HERE, overrun return.\"\"\"\n return torch.rand(6, 256, 256)",
"def init_weights(num_tilings, tiles_per_dim, num_dims, num_actions):\n weights = np.zeros((num_tilings*tiles_per_dim**num_dims*num_actions))\n return weights",
"def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array",
"def make_conv_weight_image(all_weights, limit=144):\n import vtool as vt\n # Try to infer if use_color should be shown\n num, channels, height, width = all_weights.shape\n # Try to infer if use_color should be shown\n use_color = (channels == 3)\n # non-use_color features need to be flattened\n if not use_color:\n all_weights_ = all_weights.reshape(num * channels, height, width, 1)\n else:\n # convert from theano to cv2 BGR\n all_weights_ = utils.convert_theano_images_to_cv2_images(all_weights)\n # convert from BGR to RGB\n all_weights_ = all_weights_[..., ::-1]\n #cv2.cvtColor(all_weights_[-1], cv2.COLOR_BGR2RGB)\n\n # Limit all_weights_\n #num = all_weights_.shape[0]\n num, height, width, channels = all_weights_.shape\n if limit is not None and num > limit:\n all_weights_ = all_weights_[:limit]\n num = all_weights_.shape[0]\n\n # Convert weight values to image values\n normalize_individually = False\n if normalize_individually:\n # Normalize each feature individually\n all_max = vt.multiaxis_reduce(np.amax, all_weights_, startaxis=1)\n all_min = vt.multiaxis_reduce(np.amin, all_weights_, startaxis=1)\n all_domain = all_max - all_min\n extra_dims = (None,) * (len(all_weights_.shape) - 1)\n broadcaster = (slice(None),) + extra_dims\n all_features = ((all_weights_ - all_min[broadcaster]) *\n (255.0 / all_domain[broadcaster])).astype(np.uint8)\n else:\n # Normalize jointly across all filters\n _max = all_weights_.max()\n _min = all_weights_.min()\n _domain = _max - _min\n all_features = ((all_weights_ - _min) * (255.0 / _domain)).astype(np.uint8)\n\n #import scipy.misc\n # resize feature, give them a border, and stack them together\n new_height, new_width = max(32, height), max(32, width)\n nbp_ = 1 # num border pixels\n _resized_features = np.array([\n cv2.resize(img, (new_width, new_height),\n interpolation=cv2.INTER_NEAREST)\n for img in all_features\n ])\n resized_features = _resized_features.reshape(\n num, new_height, new_width, channels)\n border_shape = (num, new_height + (nbp_ * 2),\n new_width + (nbp_ * 2), channels)\n bordered_features = np.zeros(border_shape, dtype=resized_features.dtype)\n bordered_features[:, nbp_:-nbp_, nbp_:-nbp_, :] = resized_features\n #img_list = bordered_features\n stacked_img = vt.stack_square_images(bordered_features)\n return stacked_img"
] | [
"0.6640836",
"0.6275805",
"0.60981536",
"0.6081133",
"0.6068288",
"0.60453105",
"0.6001575",
"0.5891197",
"0.58610183",
"0.5838815",
"0.57847476",
"0.57843643",
"0.5777125",
"0.5762508",
"0.5748882",
"0.5746224",
"0.574566",
"0.57455665",
"0.5738557",
"0.5715211",
"0.5675463",
"0.5667382",
"0.5663564",
"0.56318",
"0.5601039",
"0.55911916",
"0.55900025",
"0.5588349",
"0.5576355",
"0.5572406"
] | 0.7141074 | 0 |
Updates current data with dataframe, stores old data in history | def update_current_data(self, data):
if self.current_data is not None:
current_results = self.get_results()
self._history.append((self.current_data, current_results))
self.current_data = data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_data(self, data):\n start_time = data.index[-1].strftime(\"%Y-%m-%dT%H:%M:%S.000000Z\")\n temp_data = self.gather_data(start=start_time)\n temp_data = self._list_to_df(temp_data)\n if (len(temp_data) > 1):\n # temp_data[0] is the same as data[-1]\n out_data = data.append(temp_data[1:])\n return out_data",
"def do(self, market_data):\r\n self.data.history = self.data.history + market_data",
"def set_data(self, df):\n self.df = df",
"def updateStockDF(self): # Client stock dataframe\n self.all_items = self.stock_df['Item'].unique()\n self.stock_df = pd.read_csv('data/menu.csv')\n self.categories = self.stock_df['Category'].unique()\n for category in self.categories_data:\n self.categories_data[category] = self.stock_df[self.stock_df['Category'] == category]",
"def refresh_history(self):\n\n self.old_jobs = self.secretary_bot.history_bullshit_filter(self.old_jobs)\n self.jobs_save(self.old_jobs, 'overwrite')",
"def update(self, updated_dataframe):\n original_dataframe = pd.read_csv(self.file_location)\n new_dataframe = original_dataframe.append(updated_dataframe)\n\n with open(self.file_location, 'w+') as appendable_file:\n new_dataframe.to_csv(\n appendable_file,\n header=True,\n )",
"def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')",
"def reframe_df(previous_df, processed_data):\n idx = previous_df.index\n col = previous_df.columns\n df = pd.DataFrame(data=processed_data, index=idx, columns=col)\n return df",
"def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def refresh(self):\n new = self.table.records_updated_since(self.updated.max()).set_index(\"operator\")\n new = new.rename(columns={\"operator_alias\": \"alias\", \"fscore\": \"confidence\"})\n\n if not new.empty: # TODO: this is clunky. need to fix later\n self.update(new)\n for idx, values in new.iterrows():\n try:\n self.loc[\n idx\n ] # try to lookup the index. Insert record if the lookup fails.\n except KeyError:\n self.loc[idx] = values",
"def refresh(self):\n new = self.table.records_updated_since(self.updated.max()).set_index(\"operator\")\n new = new.rename(columns={\"operator_alias\": \"alias\", \"fscore\": \"confidence\"})\n\n if not new.empty: # TODO: this is clunky. need to fix later\n self.update(new)\n for idx, values in new.iterrows():\n try:\n self.loc[\n idx\n ] # try to lookup the index. Insert record if the lookup fails.\n except KeyError:\n self.loc[idx] = values",
"def add_data(self, df):\n # TODO: improve merging code\n self.data = self.data.append(df, ignore_index=False)\n self.data = self.data[~self.data.index.duplicated(keep='first')]",
"def update_historical_data():\n print('updating historical data')\n for sp in SupplyPoint.objects.filter(supplypointwarehouserecord__isnull=True).exclude(type__code=SupplyPointCodes.ZONE):\n update_historical_data_for_supply_point(sp)",
"def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'",
"def search_history_update(self, query):\n df = self.read_history()\n\n if query not in df[\"query\"].values:\n df.loc[len(df)] = [query, 1]\n else:\n num = df[df['query'] == query][\"times\"].values[0]\n df.loc[df['query'] == query, \"times\"] = num + 1\n\n df.to_csv(self.HISTORY_FILE_PATH, index=False)",
"def refresh(self) -> pd.DataFrame:\n log.info(f\"Refreshing {self.fqtable}\")\n df = db.db_to_df(fqtable=self.fqtable, ids=self.ids)\n io.df_to_parquet(df=df, path=self.path)\n return df",
"def update_original_data(self):\n pass",
"def update_data():\n etf_prices = get_prices(start=START_DATE, end=END_DATE)\n etf_returns = compute_returns(etf_prices)\n merged_etf_data = etf_prices.merge(etf_returns, right_index=True, left_index=True)\n indicators = compute_indicators(merged_etf_data) # this uses the \"ta\" lib, but it does not need\n # to be imported\n merged_etf_data = merged_etf_data.merge(indicators, right_index=True, left_index=True)\n vix_data = get_vix()\n data = merged_etf_data.merge(vix_data, right_index=True, left_index=True)\n data.to_csv('Data/database.csv')\n return",
"def _update(self, results: dict):\n new_row = pd.DataFrame.from_dict([results])\n self._reload()\n # No values\n if not len(self.data):\n self.data = new_row\n\n # Existing runs, they may have different status eg done, started, unknown\n existing = self._query_df(results)\n\n # Check number we have finished\n if len(existing[existing['status'] == 'done']):\n print(f\"[Warning Found {len(existing)} existing runs adding anyway]\")\n\n # Overwrite the one we started if possible else concat\n started = existing[existing['status'] == 'started']\n if len(started):\n # Delete the first started one...\n self.data = self.data.drop(index=started.index[0])\n\n self.data = pd.concat([self.data, new_row], sort=False)\n self._write()",
"def evaluate_history(self, price_info):\n\n historic_stocks = util.historic_stocks(self)\n historic_df = pd.DataFrame(columns=historic_stocks)\n\n for record in self.history:\n update_row = util.update_row(historic_df, record)\n historic_df.loc[record.date] = update_row\n print(historic_df)\n\n start_date = self.history[0].date\n end_date = self.history[-1].date\n\n price_info = price_info.loc[(price_info.index >= start_date) & (price_info.index <= end_date)][historic_stocks]\n historic_stocks = price_info.merge(historic_df,\n how=\"left\", left_index=True, right_index=True,\n suffixes=(\"_price\", \"_amount\"))\n return historic_stocks",
"def update_data():\n pass",
"def Update(self):\n print(f\"Updating {self.name} from yfinance API...\")\n import yfinance as yf\n import datetime\n stock = yf.Ticker(self._symbol)\n if (self.name == None or self.name == self.symbol) and stock.info is not None:\n if \"shortName\" in stock.info:\n self.name = stock.info['shortName']\n yhistory = stock.history(period=\"max\")\n print(yhistory)\n\n dividends = []\n for date, row in yhistory.iterrows():\n dividend_today = row['Dividends']\n dividends.append((date, dividend_today))\n if dividend_today != 0.:\n while date - dividends[0][0] > datetime.timedelta(days=360):\n dividends.remove(dividends[0])\n else:\n while date - dividends[0][0] > datetime.timedelta(days=370):\n dividends.remove(dividends[0])\n\n annualDividend = 0.\n for dividend in dividends:\n annualDividend += dividend[1]\n \n self.AddSnapshot(price=row['Open'], date=date, dividend=dividend_today, annualDividend=annualDividend)\n #self.AddSnapshot(price=row['Close'], date=date, annualDividend=annualDividend)\n\n try:\n self.short_percent_of_float = stock.info['shortPercentOfFloat']\n except(KeyError):\n self.short_percent_of_float = 0.\n try:\n self.pe_ratio = stock.info['forwardPE']\n except(KeyError, TypeError):\n self.pe_ratio = float('inf')\n\n print(f\"History for {self.name} updated.\")",
"def _reload(self):\n if os.path.exists(self.filename):\n self.data = pd.read_csv(self.filename)\n else:\n self.data = pd.DataFrame(columns=self.unique_keys)\n\n # Set these default values\n # if 'weight_rescale' not in self.data.columns:\n # self.data['weight_rescale'] = 'none'\n # if 'norm' not in self.data.columns:\n # self.data['norm'] = 'softmax'\n # if 'update' not in self.data.columns:\n # self.data['update'] = 'all'\n # if 'replay' not in self.data.columns:\n # self.data['replay'] = False\n if 'debug' not in self.data.columns:\n self.data['debug'] = False\n\n # if 'tie' not in self.data.columns:\n # self.data['tie'] = False\n\n if 'update_length' not in self.data.columns:\n self.data['update_length'] = 0\n # for key in self.unique_keys:\n # self.data[key] = np.nan\n # Remaining set to None\n # for k in self.check_keys:\n # if k not in self.data.columns:\n # self.data[k] = None",
"def update_data(self):\n self._model.update()\n self.__refresh()",
"def update_which_sde_data(\n current_sde_df,\n latest_esi_df,\n index_key\n):\n pass",
"def join_historic_data(df):\n\n prev_hist = pd.read_csv('Historic_data.csv') # get previous data\n prev_date = prev_hist.time.values[0]\n df = df[(df.time > prev_date)] # only select the records that have a date higher than the previous date\n\n df = pd.concat([df, prev_hist])\n df.to_csv('Historic_data.csv', index=False)\n return df",
"def df_update(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.update, inplace=True)(\n self, other=other, **kwargs\n )",
"def get_updated_dataframe():\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.io import fetch_paginated_data\n records = fetch_paginated_data(constants.API_RECORDS_ENDPOINT.value)\n df = parse_records_to_dataframe(records) # pylint: disable=invalid-name\n return df",
"def setup_df(ticker):\n stock = yf.Ticker(ticker)\n df = stock.history(period=\"max\")\n del df['Dividends']\n del df['Stock Splits']\n return df"
] | [
"0.6701856",
"0.6649733",
"0.6549324",
"0.6426941",
"0.6328103",
"0.6311179",
"0.6242696",
"0.6232504",
"0.6194989",
"0.6187145",
"0.61294377",
"0.61294377",
"0.6119569",
"0.6101014",
"0.6090818",
"0.60787976",
"0.6045064",
"0.60259575",
"0.60231996",
"0.60224056",
"0.6018149",
"0.6007953",
"0.6004822",
"0.59978145",
"0.5959492",
"0.59511393",
"0.59470963",
"0.5933574",
"0.5915693",
"0.58977944"
] | 0.6706491 | 0 |
Aggregated history, i.e. in two single dataframes corresponding to "current data" attributes and results | def agg_history(self):
cd_list, cr_list = zip(*self._history)
return pd.concat(cd_list), pd.concat(cr_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_history(self, df: DataFrame) -> DataFrame:\n\n base_columns = {\"op\", \"id\", \"ts\"}\n data_columns = list(set(df.schema.names) - base_columns)\n\n window_spec = Window.partitionBy(\"id\").orderBy(\"ts\")\n agg_columns = [last(column, ignorenulls=True).over(window_spec).alias(column)\n for column in data_columns]\n\n return df.select([col(column) for column in base_columns] + agg_columns)",
"def evaluate_history(self, price_info):\n\n historic_stocks = util.historic_stocks(self)\n historic_df = pd.DataFrame(columns=historic_stocks)\n\n for record in self.history:\n update_row = util.update_row(historic_df, record)\n historic_df.loc[record.date] = update_row\n print(historic_df)\n\n start_date = self.history[0].date\n end_date = self.history[-1].date\n\n price_info = price_info.loc[(price_info.index >= start_date) & (price_info.index <= end_date)][historic_stocks]\n historic_stocks = price_info.merge(historic_df,\n how=\"left\", left_index=True, right_index=True,\n suffixes=(\"_price\", \"_amount\"))\n return historic_stocks",
"def combined_df(self) -> pd.DataFrame:\n return pd.concat([self.data, self.latest_data.reset_index()], ignore_index=True)",
"def all_data(self):\n return pd.concat([self.historic_data, self.dayahead_data])",
"def fit_history(self) -> FitResultHelper:\n pass",
"def get_history_df(self) -> DataFrame:\n history_df = DataFrame(columns=[\"player_id\", \"date\", \"rating\"])\n history_df[\"rating\"] = history_df[\"rating\"].astype(float)\n\n players = [player for player in self.player_df[\"player\"]]\n for player in players:\n # check if there are any missing dates after the first entry (the initial rating)\n if any([x[0] is None for x in player.rating_history[1:]]):\n warnings.warn(f\"WARNING: possible missing dates in history for Player {player.id}\")\n\n player_history_df = DataFrame(player.rating_history, columns=[\"date\", \"rating\"])\n player_history_df = player_history_df[~player_history_df[\"date\"].isna()]\n player_history_df[\"player_id\"] = player.id\n history_df = pd.concat([history_df, player_history_df], sort=False)\n\n return history_df.reset_index(drop=True)",
"def account_df_history(self, improve=False):\n return(self.account_df('history', improve))",
"def join_historic_data(df):\n\n prev_hist = pd.read_csv('Historic_data.csv') # get previous data\n prev_date = prev_hist.time.values[0]\n df = df[(df.time > prev_date)] # only select the records that have a date higher than the previous date\n\n df = pd.concat([df, prev_hist])\n df.to_csv('Historic_data.csv', index=False)\n return df",
"def _history(self, target, phases=None, with_actual=True, y0_dict=None):\n # Include actual data or not\n with_actual = with_actual and target in self.VALUE_COLUMNS\n # Get tracking data\n df = self.track(phases=phases, with_actual=with_actual, y0_dict=y0_dict)\n if target not in df.columns:\n col_str = \", \".join(list(df.columns))\n raise KeyError(f\"@target must be selected from {col_str}, but {target} was applied.\")\n # Select the records of target variable\n return df.pivot_table(\n values=target, index=self.DATE, columns=self.SERIES, aggfunc=\"last\")",
"def history_to_df(self):\n records = [pe.to_dict() for pe in self.history]\n return pd.DataFrame.from_records(\n records, columns=[\n \"date\", \"type\", \"description\", \"debit\", \"credit\", \"balance\"\n ]\n ).set_index(keys=[\"date\"])",
"def test_get_derived_metric_history(self):\n pass",
"def history():",
"def run(self):\r\n history = self.extracter.load_user_history()\r\n self.plot_history(history)\r\n \r\n pp_history = self.analyser.preprocess_history(history)\r\n part_worths, attribute_importance, relative_importance = self.analyser.conjoint_analysis(pp_history)\r\n self.plot_analysis(part_worths, relative_importance)\r\n \r\n return history, pp_history, part_worths, relative_importance",
"def hist_data(self):\n # database address\n jydb_address = \"117.122.223.35\"\n jydb_user_id = \"zpy\"\n jydb_user_pwd = \"Z1pe1y1@zpy\"\n jydb_db_name = \"jydb02\"\n # preallocate data set\n back_testing_data = {}\n \n # on-line mode\n conn = pymysql.connect(jydb_address, jydb_user_id, jydb_user_pwd, jydb_db_name)\n # not using moving average\n if not self.moving_average: \n # iterate on all ism packages\n for _i, ism_pack in enumerate(tqdm(self.ism_data)):\n ismcode = ism_pack[0][0]\n ProRunDate = ism_pack[0][5]\n ProStopDate = ism_pack[0][6]\n MaxStoreSum = ism_pack[0][7]\n MinInvestShare = ism_pack[0][8]\n InnerCode_ls = flatten(list(map(lambda x: [y[0] for y in x[1:]], ism_pack[1:])))\n SecuCode_ls = flatten(list(map(lambda x: [y[1] for y in x[1:]], ism_pack[1:])))\n PriorType_ls = flatten(list(map(lambda x: [y[2] for y in x[1:]], ism_pack[1:])))\n # collect data from source conn\n flag_run = \"SELECT InnerCode, OpenPrice, ClosePrice FROM QT_DailyQuote WHERE \" + \\\n \"InnerCode IN (\" + \",\".join(InnerCode_ls) + \") AND \" + \\\n \"TradingDay=\\'\" + ProRunDate + \"\\'\"\n flag_stop = \"SELECT InnerCode, OpenPrice, ClosePrice FROM QT_DailyQuote WHERE \" + \\\n \"InnerCode IN (\" + \",\".join(InnerCode_ls) + \") AND \" + \\\n \"TradingDay=\\'\" + ProStopDate + \"\\'\"\n run_price = pd.read_sql(flag_run, conn)\n stop_price = pd.read_sql(flag_stop, conn)\n back_testing_data[ismcode] = pd.merge(run_price, stop_price, on='InnerCode', \n suffixes=('_run', '_stop'))\n else: # using moving average \n # iterate on all ism packages\n for _i, ism_pack in enumerate(self.ism_data):\n ismcode = ism_pack[0][0]\n ProRunDate = ism_pack[0][5]\n TradingDay_begin = former_market_date(ProRunDate, self.L_shift, conn)\n TradingDay_end = future_market_date(ProRunDate, self.R_shift, conn)\n InnerCode_ls = flatten(list(map(lambda x: [y[0] for y in x[1:]], ism_pack[1:])))\n SecuCode_ls = flatten(list(map(lambda x: [y[1] for y in x[1:]], ism_pack[1:])))\n PriorType_ls = flatten(list(map(lambda x: [y[2] for y in x[1:]], ism_pack[1:])))\n flag = \"SELECT InnerCode, TradingDay, OpenPrice, ClosePrice FROM QT_DailyQuote WHERE \" + \\\n \"InnerCode IN (\" + \",\".join(InnerCode_ls) + \") AND \" + \\\n \"TradingDay BETWEEN \\'\" + TradingDay_begin + \"\\' AND \\'\" + TradingDay_end + \"\\'\"\n back_testing_data[ismcode] = pd.read_sql(flag, conn)\n \n # close sql connection\n conn.close()\n \n return back_testing_data",
"def update_history(self, start_training, start_epoch, epoch, res_trackers_dict):\n self.history.get('epoch_index', []).append(epoch+1)\n self.history.get('time_epoch', []).append(time.time()-start_epoch)\n self.history.get('time_cumulative', []).append(time.time()-start_training)\n\n self.history.get('train', {}).get('gen_loss', []).append(res_trackers_dict['loss_tracker_train_gen'])\n self.history.get('train', {}).get('gen_mae', []).append(res_trackers_dict['metric_tracker_train_gen'])\n self.history.get('train', {}).get('disc_loss', []).append(res_trackers_dict['loss_tracker_train_disc'])\n self.history.get('train', {}).get('disc_acc', []).append(res_trackers_dict['metric_tracker_train_disc'])\n\n self.history.get('val', {}).get('gen_loss', []).append(res_trackers_dict['loss_tracker_val_gen'])\n self.history.get('val', {}).get('gen_mae', []).append(res_trackers_dict['metric_tracker_val_gen'])\n self.history.get('val', {}).get('disc_loss', []).append(res_trackers_dict['loss_tracker_val_disc'])\n self.history.get('val', {}).get('disc_acc', []).append(res_trackers_dict['metric_tracker_val_disc'])",
"def performance_history(self, request, pk=None, **kwargs):\n # Get the goal even though we don't need it (we could just use the pk)\n # so we can ensure we have permission to do so.\n goal = self.get_object()\n\n # - Get all the transaction with this goal involved that are of reason 'Execution'.\n # We want the volume, ticker id, date ordered by date. [(date, {ticker: vol}, ...]\n qs = Transaction.objects.filter(Q(to_goal=goal) | Q(from_goal=goal),\n reason=Transaction.REASON_EXECUTION).order_by('executed')\n txs = qs.values_list('execution_distribution__execution__executed',\n 'execution_distribution__execution__asset__id',\n 'execution_distribution__volume')\n ts = []\n entry = (None,)\n aids = set()\n # If there were no transactions, there can be no performance\n if len(txs) == 0:\n return Response([])\n\n # Because executions are stored with timezone, but other things are just as date, we need to make datetimes\n # naive before doing date arithmetic on them.\n bd = timezone.make_naive(txs[0][0]).date()\n ed = timezone.make_naive(timezone.now()).date()\n for tx in txs:\n aids.add(tx[1])\n txd = timezone.make_naive(tx[0]).date()\n if txd == entry[0]:\n entry[1][tx[1]] += tx[2]\n else:\n if entry[0] is not None:\n ts.append(entry)\n entry = (txd, defaultdict(int))\n entry[1][tx[1]] = tx[2]\n ts.append(entry)\n\n # - Get the time-series of prices for each instrument from the first transaction date until now.\n # Fill empty dates with previous value [(date, {ticker: price}, ...]\n pqs = DailyPrice.objects.filter(date__range=(bd, ed),\n instrument_content_type=ContentType.objects.get_for_model(Ticker).id,\n instrument_object_id__in=aids)\n prices = pqs.to_timeseries(fieldnames=['price', 'date', 'instrument_object_id'],\n index='date',\n storage='long',\n pivot_columns='instrument_object_id',\n values='price')\n # Remove negative prices and fill missing values\n # We replace negs with None so they are interpolated.\n prices[prices <= 0] = None\n prices = prices.reindex(pd.date_range(bd, ed), method='ffill').fillna(method='bfill')\n\n # For each day, calculate the performance\n piter = prices.itertuples()\n res = []\n # Process the first day - it's special\n row = next(piter)\n p_m1 = row[1:]\n vols_m1 = [0] * len(prices.columns)\n tidlocs = {tid: ix for ix, tid in enumerate(prices.columns)}\n for tid, vd in ts.pop(0)[1].items():\n vols_m1[tidlocs[tid]] += vd\n res.append((dt2ed(row[0]), 0)) # First day has no performance as there wasn't a move\n # Process the rest\n for row in piter:\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n if ts and row[0].date() == ts[0][0]:\n vols = vols_m1.copy()\n dtrans = ts.pop(0)[1] # The transactions for the current processed day.\n for tid, vd in dtrans.items():\n vols[tidlocs[tid]] += vd\n # The exposed assets for the day. 
These are the assets we know for sure were exposed for the move.\n pvol = list(map(min, vols, vols_m1))\n else:\n vols = vols_m1\n pvol = vols\n pdelta = list(map(operator.sub, row[1:], p_m1)) # The change in price from yesterday\n impact = sum(map(operator.mul, pvol, pdelta)) # The total portfolio impact due to price moves for exposed assets.\n b_m1 = sum(map(operator.mul, pvol, p_m1)) # The total portfolio value yesterday for the exposed assets.\n perf = 0 if b_m1 == 0 else impact / b_m1\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n res.append((dt2ed(row[0]), decimal.Decimal.from_float(perf).quantize(decimal.Decimal('1.000000'))))\n p_m1 = row[1:]\n vols_m1 = vols[:]\n\n return Response(res)",
"def factor_ret(self):\n factor_ret_all = pd.DataFrame([])\n for i in range(len(self.trade_date) - self.timelog):\n date = self.trade_date.iloc[i,0]\n date_lag = self.trade_date.iloc[i + self.timelog,0]\n factor_ret = get_factor_ret(date,date_lag)\n factor_ret_all = pd.concat([factor_ret_all,pd.DataFrame(factor_ret).T],axis = 0)\n print(i)\n cumulative_factor_ret = factor_ret_all.cumsum(axis = 0)\n factor_ret_all.index = self.trade_date.iloc[:len(self.trade_date) - self.timelog,0]\n cumulative_factor_ret.index = self.trade_date.iloc[:len(self.trade_date) -self.timelog,0]\n return factor_ret_all,cumulative_factor_ret",
"def accumulateSubgridMassHistory(self,q):\n pass",
"def store_predictions(df):\n ts = df[df.columns[1]]\n base = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted '+ df.columns[1])\n base.index = df['year']\n base = base.append(pd.DataFrame(preds), sort = True)\n for col in df.columns[2:]:\n ts = df[col]\n temp = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted ' + col)\n temp.index = df['year']\n temp = temp.append(pd.DataFrame(preds), sort = True)\n base = base.join(temp)\n return base",
"def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]",
"def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict",
"def __build_history(self, obj: Object) -> dict:\n previous_history = obj.history\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}",
"def _get_outputdf(self):\n keys = self.info_df['Trace'].values.tolist()\n frame = deepcopy([line.df for line in self.info_df['Line'].values.tolist()])\n for i in range(len(frame)):\n df = frame[i]\n num = list(range(len(df)))\n angle_gr = list(map(deg2grad,df['Angle Horizontal'].values))\n df.insert(0,'Number', ['s' + '-'.join(x) + 'gr' for x in zip(map(str,num),map(str,map(int,angle_gr)))])\n df.insert(1, 'Name', keys[i])\n return pd.concat(frame, keys=keys, join='inner', ignore_index=True)",
"def gather_data(self, *args, **kwargs):\n instrument_arg = kwargs.get('instrument', 'EUR_USD')\n granularity_arg = kwargs.get('granularity', 'M1')\n candle_format = kwargs.get('candleFormat', 'bidask')\n start_time = kwargs.get('start', '2014-10-01T00:00:00.000000Z')\n count_arg = kwargs.get('count', 5000)\n out_data = []\n data_complete = False\n while(not data_complete):\n response = self.oanda.get_history(instrument=instrument_arg,\n granularity=granularity_arg,\n candleFormat=candle_format,\n start=start_time,\n count=count_arg)\n raw_data = response['candles']\n if (len(out_data) == 0):\n out_data = out_data + raw_data\n elif (len(out_data) > 1):\n # raw_data[0] is already in out_data as raw_data[-1] from last\n # iteration\n out_data = out_data + raw_data[1:]\n start_time = raw_data[-1]['time']\n if (len(raw_data) < 5000):\n data_complete = True\n\n out_data = self._list_to_df(out_data)\n return out_data",
"def __build_history(self, obj: Object) -> dict:\n previous_history = dict(obj.history)\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}",
"def get_recent_history(session=None): \n from model_old_schema.reference import Reference, RefBad\n\n def f(session):\n min_date = datetime.date.today() - datetime.timedelta(days=10)\n refs = session.query(Reference).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n refbads = session.query(RefBad).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n \n history = {}\n today = datetime.date.today()\n for i in range(10):\n new_date = today - datetime.timedelta(days=i)\n history[new_date] = HistoryEntry(new_date)\n \n for ref in refs:\n if ref.date_created in history:\n history[ref.date_created].inc_ref_count()\n \n for refbad in refbads:\n if refbad.date_created in history:\n history[refbad.date_created].inc_refbad_count()\n \n return history\n \n return f if session is None else f(session)",
"def history(df, team, opponent):\n if opponent:\n games = df[(df.team == team) & (df.opponent == opponent)]#team_games(df, team)\n else:\n games = df[df.team == team]#team_games(df, team)\n\n games['dragkills'] = (games['teamdragkills'] + games['oppdragkills'])\n games['turrkills'] = (games['teamtowerkills'] + games['opptowerkills'])\n result = games[['team', 'opponent', 'player', 'champion', 'fb', 'fd', 'ft', 'fbaron', 'result', 'turrkills', 'dragkills', 'gamelength']]\n\n result = result[result.player == 'Team'].sort_values('gamelength')\n\n return result.round(2)",
"def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make historical prices columns a multi-index. This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data",
"def fetch_history(*args, **kwargs):\n return collect_history(*args, **kwargs)",
"def __add_current_fen_to_history(self):\n self.history = np.hstack((self.history, self.fen()))"
] | [
"0.68992007",
"0.6481204",
"0.62190855",
"0.60813546",
"0.60175997",
"0.6004348",
"0.5911996",
"0.58084685",
"0.58043",
"0.5802159",
"0.57765096",
"0.5699813",
"0.56859416",
"0.5673143",
"0.56662226",
"0.56241435",
"0.56107074",
"0.56097555",
"0.55994374",
"0.55979264",
"0.5578164",
"0.5555178",
"0.5543194",
"0.55356574",
"0.55320007",
"0.5504235",
"0.549595",
"0.5494538",
"0.5483056",
"0.5457977"
] | 0.74194807 | 0 |
This function partly overrides the standard QFileSystemModel data function to return custom file and folder icons | def data(self, index, role):
fileInfo = self.getFileInfo(index)[4]
if role == QtCore.Qt.DecorationRole:
if fileInfo.isDir():
return QtGui.QPixmap(os.path.join(ICONS_L, 'Folder.png'))
elif fileInfo.isFile():
return QtGui.QPixmap(os.path.join(ICONS_L, 'airfoil.png'))
# return QtWidgets.QFileSystemModel.data(self, index, role)
return super().data(index, role) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_icon(self):\r\n raise NotImplementedError",
"def icon(self):",
"def get_icon(self):\n raise NotImplementedError",
"def _icons(self):",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return None",
"def icon(self):\n return None",
"def om_icons(self):\n icons = ({'path': 'misc_/DataTemplates/ic-xml.gif',\n 'alt': self.meta_type, 'title': self.meta_type},)\n if not self._v_cooked:\n self._cook()\n if self._v_errors:\n icons = icons + ({'path': 'misc_/PageTemplates/exclamation.gif',\n 'alt': 'Error',\n 'title': 'This template has an error'},)\n return icons",
"def button_icons(self):\n self.ui_mkdir.setText(\"\")\n self.ui_mkdir.setIcon(QIcon(icons(\"folder-new.png\")))\n self.ui_other_fs.setText(\"\")\n self.ui_other_fs.setIconSize(QtCore.QSize(64, 64))\n self.ui_other_fs.setIcon(QIcon(icons(\"fs_logo.png\", origin=\"fs\")))",
"def icon(self):\n return self.ICON",
"def icon(self):\n return self.ICON",
"def processIconFilename(self):\n\t\tself.iconFilename = self._getVal(64, 2)",
"def icon(self):\n return DEFAULT_ICON",
"def icon(self):\n return self._metadata[2]",
"def icon(self):\n return self.var_icon",
"def icon(self):\n return ICON_BUS",
"def icon(self):\n\n # look for icon one level up from this hook's folder in \"icons\" folder\n return os.path.join(\n self.disk_location,\n os.pardir,\n \"icons\",\n \"review.png\"\n )",
"def get_icon(self):\r\n return get_icon(self.ICON)",
"def icon(self):\n return self.__icon",
"def _leadingIcons(self):",
"def getIconPath(self):\n try:\n return self.primaryAq().zIcon\n except AttributeError:\n return '/zport/dmd/img/icons/noicon.png'"
] | [
"0.6583041",
"0.6500342",
"0.6448918",
"0.62393034",
"0.6156974",
"0.6156974",
"0.6156974",
"0.6156974",
"0.6156974",
"0.6156974",
"0.6156974",
"0.6156974",
"0.6156974",
"0.6156974",
"0.61385536",
"0.61385536",
"0.61254543",
"0.6075093",
"0.5986688",
"0.5986688",
"0.59493023",
"0.58703774",
"0.5838602",
"0.5818366",
"0.58008116",
"0.5790349",
"0.5781261",
"0.5779007",
"0.57691735",
"0.5764041"
] | 0.78969663 | 0 |
The external fixed IPs of the router. | def external_fixed_ips(self) -> Sequence['outputs.GetRouterExternalFixedIpResult']:
return pulumi.get(self, "external_fixed_ips") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_floating_ips(self):\n return self.router.get(l3_constants.FLOATINGIP_KEY, [])",
"def ip_addresses(self):\n try:\n return socket.gethostbyaddr(self.fqdn)[-1]\n except socket.error as _:\n return ['127.0.0.1']",
"def external_IP(self):\r\n return self._external_ip",
"def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips",
"def inetVisibleIP(self):\n def handle(results):\n ips = [ result[1][0] for result in results if result[0] ]\n self.log.debug(\"other nodes think our ip is %s\" % str(ips))\n return ips\n\n ds = []\n for neighbor in self.bootstrappableNeighbors():\n ds.append(self.protocol.stun(neighbor))\n d = defer.gatherResults(ds)\n d.addCallback(handle)\n d.addErrback(self.onError)\n return d",
"def _update_ips(self):\n self.ip_others = []\n ips = self.mesh.ipaddr()\n self.rloc16 = self.mesh.rloc()\n for line in ips:\n if line.startswith('fd'):\n # Mesh-Local unicast IPv6\n try:\n addr = int(line.split(':')[-1], 16)\n except Exception:\n continue\n if addr == self.rloc16:\n # found RLOC\n # RLOC IPv6 has x:x:x:x:0:ff:fe00:RLOC16\n self.rloc = line\n elif ':0:ff:fe00:' not in line:\n # found Mesh-Local EID\n self.ip_eid = line\n elif line.startswith('fe80'):\n # Link-Local\n self.ip_link = line\n else:\n self.ip_others.append(line)",
"def neighbors_ip(self):\n neighbors = self.neighbors()\n nei_list = []\n net_ip = self._rloc_ip_net_addr()\n if neighbors is not None:\n for nei_rec in neighbors:\n nei_ip = net_ip + hex(nei_rec.rloc16)[2:]\n nei_list.append(nei_ip)\n return nei_list",
"def get_local_host_ip(self) -> str:",
"def get_ip_address_filter(self):\n return self.mycam.devicemgmt.GetIPAddressFilter()",
"def discovered_ips(self) -> Sequence[str]:\n return pulumi.get(self, \"discovered_ips\")",
"def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer",
"def get_ip(self):",
"def public_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"public_ip_addresses\")",
"def public_ip_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"public_ip_addresses\")",
"def publicIP(self):\n return self.query('https://plex.tv/:/ip')",
"def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'",
"def internal_IP(self):\r\n return self._internal_ip",
"def internalIP(self):\r\n return self._internalIP",
"def get_externalip(self):\n\n myip = \"\"\n for i in range(5):\n myip = self.fetch(random.choice(self.server_list))\n if myip != \"\":\n return myip\n else:\n continue\n return \"\"",
"def get_ips(self, instances):\n public_ips = []\n for instance in instances:\n public_ips.append(instance.public_dns_name)\n return public_ips",
"def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()",
"def floating_ips(self):\n return self.get('floating_ips')",
"def get_port_fixed_ips(self, port):\n if hasattr(port, 'fixed_ips') and port.fixed_ips:\n fixed_ips = port.fixed_ips\n else:\n fixed_ips = None\n return fixed_ips",
"def public_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"public_ips\")",
"def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]",
"def dns_server_ips(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"dns_server_ips\")",
"def ip(self):\n if not self._ip:\n if 'ip' in self.config:\n ip = self.config['ip']\n else:\n ip = self.protocol.transport.get_extra_info('sockname')[0]\n ip = ip_address(ip)\n if ip.version == 4:\n self._ip = ip\n else: # pragma: no cover\n response = urlopen('http://ipv4.icanhazip.com/')\n ip = response.read().strip().decode()\n ip = ip_address(ip)\n self._ip = ip\n return self._ip",
"def get_host_ip_addr():\n return nova_conf.my_ip",
"def floating_ips(self):\n return self.tree.get('floating_ips', {})",
"def get_host_ipaddress(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetHostIPAddress', self.handle)"
] | [
"0.73665017",
"0.72443134",
"0.6921729",
"0.6901388",
"0.68353254",
"0.6818246",
"0.68095595",
"0.6784862",
"0.67622966",
"0.67588675",
"0.6741735",
"0.673702",
"0.6674886",
"0.6626563",
"0.6607832",
"0.6575119",
"0.6543172",
"0.6542807",
"0.65271246",
"0.6526337",
"0.6521113",
"0.6506455",
"0.6499497",
"0.64959747",
"0.64505845",
"0.64345413",
"0.6433185",
"0.6427063",
"0.64031005",
"0.64000744"
] | 0.82707417 | 0 |
The network UUID of an external gateway for the router. | def external_network_id(self) -> str:
return pulumi.get(self, "external_network_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_network_id(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkId', self.handle)",
"def get_device_id(self, network):\n # There could be more than one dhcp server per network, so create\n # a device id that combines host and network ids\n\n host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())\n return 'dhcp%s-%s' % (host_uuid, network.id)",
"def private_network_uuid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_network_uuid\")",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network_id\")",
"def network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_id\")",
"def network(self) -> str:\n return pulumi.get(self, \"network\")",
"def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()",
"def unique_id(self):\n return f\"{self._mac_address}:{self._device_id}:{self._zone_id}:switch\"",
"def private_network_uuid(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_network_uuid\")",
"def private_network_uuid(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_network_uuid\")",
"def _external_network(self):\n try:\n router = next(self._connection.network.routers.all())\n except StopIteration:\n raise errors.ImproperlyConfiguredError('Could not find tenancy router.')\n return self._connection.network.networks.get(router.external_gateway_info['network_id'])",
"def attached_network_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"attached_network_id\")",
"def unique_id(self):\n return self._light.address",
"def local_gateway_id(self) -> Optional[str]:\n return pulumi.get(self, \"local_gateway_id\")",
"def network_address(self):\n\n return self._network_address",
"def virtual_network_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"virtual_network_id\")",
"def ipv6_gateway_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ipv6_gateway_id\")",
"def vpn_gateway_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vpn_gateway_id\")",
"def transit_gateway_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"transit_gateway_id\")",
"def device_type(self) -> str:\n return \"urn:schemas-upnp-org:device:InternetGatewayDevice:1\"",
"def gateway_name(self) -> str:\n return pulumi.get(self, \"gateway_name\")",
"def get_network(self):\n return self.get_ip_network()[-1]",
"def getnetwork(ipaddr):\n return '192.168.1.0/24'",
"def _get_network_id(self):\n pubnet = self.conn.network.find_network('public')\n net = self.conn.network.find_network(self.net_conf['net_name'])\n subnet = self.conn.network.find_subnet(self.net_conf['subnet_name'])\n # TODO: Add support for security group\n\n self.network_id = {\n 'public': pubnet.id,\n 'net': net.id,\n 'subnet': subnet.id\n }",
"def unique_id(self) -> str:\n return f\"{self._device.mac}_{self._router.config_entry.entry_id}\"",
"def unique_id(self) -> str:\n return self.tahoma_device.url",
"def get_default_gateway_linux():\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n continue\n\n return socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))",
"def gateway_name(self):\n return self.msg.gateway_name"
] | [
"0.69283426",
"0.68110025",
"0.67746633",
"0.668923",
"0.668923",
"0.668923",
"0.6625821",
"0.6577762",
"0.65629625",
"0.6535451",
"0.651444",
"0.651444",
"0.64417446",
"0.6367765",
"0.63646585",
"0.6359441",
"0.6353521",
"0.63178164",
"0.63145465",
"0.62964016",
"0.6283311",
"0.62819654",
"0.6266267",
"0.6247259",
"0.62064314",
"0.6190699",
"0.6187951",
"0.61717427",
"0.61567926",
"0.6154649"
] | 0.7114002 | 0 |
Test if attribute is a property. | def isprop(v):
return isinstance(v, property) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isproperty(object):\n return isinstance(object, property)",
"def _is_propertyable(\n names, # type: List[str]\n attrs, # type: Dict[str, Any]\n annotations, # type: Dict[str, type]\n attr, # Dict[str, Any]\n):\n # type: (...) -> bool\n return (\n attr in annotations\n and not attr.startswith(\"_\")\n and not attr.isupper()\n and \"__{}\".format(attr) not in names\n and not isinstance(getattr(attrs, attr, None), types.MethodType)\n )",
"def isAttribute(self, p_int): # real signature unknown; restored from __doc__\n return False",
"def has_attribute(self, name):\n\n pass",
"def is_attribute(self):\r\n return conf.lib.clang_isAttribute(self)",
"def hasAttribute(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def check_attr(obj, member, default=None):\n cls = type(obj)\n if member in cls.__dict__ and isinstance(getattr(cls, member), property):\n # We don't do anything with properties.\n return getattr(obj, member)\n try:\n if member in obj.__dict__:\n return getattr(obj, member)\n except AttributeError:\n # Slots?\n try:\n return getattr(obj, member)\n except AttributeError:\n pass\n setattr(obj, member, default)\n return getattr(obj, member)",
"def has_attribute(self, attribute):\n return (attribute in self.attribute_list)",
"def _property(attr):\n name = '_' + attr\n @property\n def prop(self):\n \"\"\"Get the specific attribute.\"\"\"\n return getattr(self, name)\n @prop.setter\n def prop(self, value):\n \"\"\"Set the attribute.\"\"\"\n setattr(self, name, value)\n if isa(self, Pair):\n self.update_str()\n return prop",
"def has_attribute(self, key):\n return key in self.__dict",
"def check_property(self, descriptor): # pylint: disable=unused-argument\r\n raise SkipTest(\"check_property not defined\")",
"def hasProp(self, name):\n ret = libxml2mod.xmlHasProp(self._o, name)\n if ret is None:return None\n __tmp = xmlAttr(_obj=ret)\n return __tmp",
"def has_attribute(self, attribute: str) -> bool:\n return any([\n key_node.value == attribute for key_node, _ in self.yaml_node.value\n ])",
"def is_element_attribute(element, attribute_name):\n return element.get(attribute_name) is not None",
"def property( self, prop ):\n raise NotImplementedError(\"property\")",
"def has_attribute(*attrs):\n\n @meta\n def check(cls):\n return all(hasattr(cls, a) for a in attrs)\n\n return check",
"def hasAttribute(self, attrib):\n return self._dqa(attrib) in self.attributes",
"def checkattr(name):\n\n def check(obj):\n try:\n attrgetter(name)(obj)\n return True\n except AttributeError:\n return False\n\n return check",
"def _static_hasattr(value, attr):\n try:\n object.__getattribute__(value, attr)\n except AttributeError:\n return False\n else:\n return True",
"def has_attribute(self, name):\n return name in self.schema",
"def process_property(self, prop):\n NifLog.warn(f\"Unknown property block found : {prop.name}\")\n NifLog.warn(f\"This type isn't currently supported: {type(prop)}\")",
"def _check_property_on_test_context(\n context: \"HookContext\", attr_str: str, user_facing_name: str, param_on_builder: str\n):\n value = getattr(context, attr_str)\n if value is None:\n raise DagsterInvalidPropertyError(\n f\"Attribute '{user_facing_name}' was not provided when \"\n f\"constructing context. Provide a value for the '{param_on_builder}' parameter on \"\n \"'build_hook_context'. To learn more, check out the testing hooks section of Dagster's \"\n \"concepts docs: https://docs.dagster.io/concepts/ops-jobs-graphs/op-hooks#testing-hooks\"\n )\n else:\n return value",
"def do_get_property(self, spec):\n attribute = self.find_attribute(spec.name)\n if attribute is not None and isinstance(attribute, property):\n return attribute.fget(self)\n else:\n raise ValueError(\"No such property\", spec.name)",
"def is_attribute(method, name=None):\n if name is None:\n name = method.__name__\n method.is_attribute = True\n method.name = name\n return method",
"def check_property(prop, name, **kwargs):\n\n checkers = {\n 'color': check_color,\n 'alpha': check_alpha,\n 'size': check_size,\n 'thickness': check_thickness,\n 'index': check_index,\n 'coordinates': check_coordinates,\n 'colormap': check_colormap,\n 'bins': check_bins,\n 'spec': check_spec\n }\n\n if name in checkers:\n return checkers[name](prop, **kwargs)\n elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):\n return check_1d(prop, name)\n else:\n return prop",
"def test_name_properties_on_attribute():\n assert not Attribute(name=\"b\", path=\"a.b\", file_path=\"a.py\").name_properties\n assert \"private\" in Attribute(name=\"_b\", path=\"a._b\", file_path=\"a.py\").name_properties\n assert \"class-private\" in Attribute(name=\"__b\", path=\"a.__b\", file_path=\"a.py\").name_properties\n assert \"special\" in Attribute(name=\"__b__\", path=\"a.__b__\", file_path=\"a.py\").name_properties",
"def is_property_available(self, name):\n if name in self.properties and not (isinstance(self.properties[name], dict)\n and '__deferred' in self.properties[name]):\n return True\n return False",
"def matches_property_name(fun):\n return callable(fun) and getattr(fun, annotation, None) == value",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)",
"def has_value(self, attribute_name):\n return hasattr(self, '%s__' % attribute_name)"
] | [
"0.7569112",
"0.71270937",
"0.65027094",
"0.64971614",
"0.645277",
"0.63821",
"0.6280971",
"0.62516636",
"0.62270504",
"0.6196012",
"0.6137933",
"0.6086928",
"0.60693777",
"0.6069171",
"0.60603017",
"0.59751207",
"0.59495217",
"0.58659714",
"0.5848836",
"0.58300596",
"0.5807404",
"0.58037776",
"0.57887787",
"0.57631767",
"0.5737818",
"0.5696852",
"0.569572",
"0.56857586",
"0.56556946",
"0.56556946"
] | 0.7676597 | 0 |
calculates beam xy and other parameters. | def calculate_beam_xy(self):
info = []
# Import relevant info
pixel_size = self.info.pixel_size
for i in [j.final for j in self.final_objects]:
try:
info.append(
[
i,
i["beamX"],
i["beamY"],
i["wavelength"],
i["distance"],
(i["a"], i["b"], i["c"], i["alpha"], i["beta"], i["gamma"]),
]
)
except IOError as e:
print("IOTA ANALYSIS ERROR: BEAMXY failed! ", e)
pass
# Calculate beam center coordinates and distances
beamX = [i[1] for i in info]
beamY = [j[2] for j in info]
beam_dist = [
math.hypot(i[1] - np.median(beamX), i[2] - np.median(beamY)) for i in info
]
beam_dist_std = np.std(beam_dist)
img_list = [
[i[0], i[1], i[2], i[3], i[4], i[5], j]
for i, j in list(zip(info, beam_dist))
]
# Separate out outliers
outliers = [i for i in img_list if i[3] > 2 * beam_dist_std]
clean = [i for i in img_list if i[3] <= 2 * beam_dist_std]
cbeamX = [i[1] for i in clean]
cbeamY = [j[2] for j in clean]
obeamX = [i[1] for i in outliers]
obeamY = [j[2] for j in outliers]
# Calculate median wavelength, detector distance and unit cell params from
# non-outliers only
wavelengths = [i[3] for i in clean]
distances = [i[4] for i in clean]
cells = [i[5] for i in clean]
wavelength = np.median(wavelengths)
det_distance = np.median(distances)
a = np.median([i[0] for i in cells])
b = np.median([i[1] for i in cells])
c = np.median([i[2] for i in cells])
# Calculate predicted L +/- 1 misindexing distance for each cell edge
aD = det_distance * math.tan(2 * math.asin(wavelength / (2 * a)))
bD = det_distance * math.tan(2 * math.asin(wavelength / (2 * b)))
cD = det_distance * math.tan(2 * math.asin(wavelength / (2 * c)))
return (
beamX,
beamY,
cbeamX,
cbeamY,
obeamX,
obeamY,
beam_dist,
[i[4] for i in info],
aD,
bD,
cD,
pixel_size,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xy(self):\n ...",
"def beam(xb,yb,zb,wx,wy,wavelen):\n\n zRx = np.pi * wx**2 / wavelen\n zRy = np.pi * wy**2 / wavelen \n \n sqrtX = np.sqrt( 1 + np.power(zb/zRx,2) ) \n sqrtY = np.sqrt( 1 + np.power(zb/zRy,2) ) \n intensity = np.exp( -2.*( np.power(xb/(wx*sqrtX ),2) \\\n + np.power(yb/(wy*sqrtY),2) )) / sqrtX / sqrtY\n return intensity",
"def beam_positions(closepack=False):\n \n x_pos, y_pos = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,2):\n y += 0.2\n x_pos.append(x+(0.05 if closepack else 0))\n y_pos.append(y)\n y += 0.2\n x_pos.append(x)\n y_pos.append(y)\n\n return x_pos, y_pos",
"def execute(self):\n self.W = self.X+self.y+self.a\n self.Z = 2*self.W",
"def fCalc(self):\n # A dictionary composed of all internal and boundary points\n allPoints = dict(self.internalPoints.items() + self.boundaryPoints.items())\n\n for pointLabel in allPoints.keys():\n # Compute fE, fW, fN and fW only for internal mesh points\n if allPoints[pointLabel].type.lower() == 'internal':\n xLabel = pointLabel[0]\n yLabel = pointLabel[1]\n x = self.internalPoints[(xLabel,yLabel)].x\n y = self.internalPoints[(xLabel,yLabel)].y\n xE = allPoints[(xLabel + 1,yLabel)].x\n xW = allPoints[(xLabel - 1,yLabel)].x\n yN = allPoints[(xLabel,yLabel + 1)].y\n yS = allPoints[(xLabel,yLabel - 1)].y\n \n if (xE - x)/self.h < -0.000001 or (xE - x)/self.h > 1.000001:\n errorMessage = '**Error! (xE - x)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (xE - x)/h = ' + str((xE - x)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fE = (xE - x)/self.h\n\n # Note that in the following we use -0.000001 and 1.000001 \n # instead of 0 and 1, respectively, to avoid problems with\n # with very small fractions. For example if the fractions is\n # greater than one by 2.22e-16 the condition (x - xW)/self.h > 1\n # will be false and the code returns an error\n if (x - xW)/self.h < -0.000001 or (x - xW)/self.h > 1.000001:\n errorMessage = '**Error! (x - xW)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (x - xW)/h = ' + str((x - xW)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fW = (x - xW)/self.h\n \n if (yN - y)/self.h < -0.000001 or (yN - y)/self.h > 1.000001:\n errorMessage = '**Error! (yN - y)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (yN - y)/h = ' + str((yN - y)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fN = (yN - y)/self.h\n \n if (y - yS)/self.h < -0.000001 or (y - yS)/self.h > 1.000001:\n errorMessage = '**Error! (y - yS)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (y - yS)/h = ' + str((y - yS)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fS = (y - yS)/self.h\n \n # Calculate the coeeficients requried to compute the Laplacian \n self.internalPoints[(xLabel,yLabel)].LapCoeffCalc()",
"def GetParametricCoords(self):\n ...",
"def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)",
"def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy",
"def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)",
"def BeamPosition():\n \n XPOS, YPOS = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,1):\n y += 0.2\n XPOS.append(x)\n YPOS.append(y)\n\n return XPOS, YPOS",
"def cal_pt(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for pt routine)')\n\n self.pt =math.sqrt(self.px**2+self.py**2)",
"def getAllPoints(self):\n self.getOrigin()\n self.computesWingsMeshPoints()\n self.computesFuselageMeshPoints()\n self.readsMaterials()\n self.assembleMatrices()\n self.computesWingConnexions()\n\n logger.debug(self.aircraftTotalMass)\n # self.plotSectionsPoints() # for debugging",
"def Initialize():\n # --- Set four-character run id, comment lines, user's name.\n top.pline2 = \"Example 3D beam in a FODO lattice\"\n top.pline1 = \"S-G cigar beam. 64x64x256\"\n top.runmaker = \"David P. Grote\"\n\n # --- Invoke setup routine - it is needed to created a cgm file for plots\n setup()\n\n # --- Create the beam species\n beam = Species(type=Potassium,charge_state=+1,name=\"Beam species\")\n\n # --- Set input parameters describing the beam, 72 to 17.\n beam.b0 = 15.358933450767e-3\n beam.a0 = 8.6379155933081e-3\n beam.x0 = 3.*mm\n beam.emit = 51.700897052724e-6\n beam.ap0 = 0.e0\n beam.bp0 = 0.e0\n beam.ibeam = 2.e-03\n beam.vbeam = 0.e0\n beam.ekin = 80.e3\n beam.aion = beam.type.A\n beam.zion = beam.charge_state\n top.lrelativ = false\n top.derivqty()\n beam.vthz = .5e0*beam.vbeam*beam.emit/sqrt(beam.a0*beam.b0) # Vthz ~ Vthperp\n\n # +++ Set up arrays describing lattice.\n # --- Set temp variables.\n hlp = 36.0e-2 # half lattice period length\n piperad = 3.445e-2 # pipe radius\n quadlen = 11.e-2 # quadrupole length\n gaplen = 4.*cm\n rodlen = quadlen + gaplen\n dbdx = .949/quadlen\n\n # --- Set general lattice variables.\n top.tunelen = 2.e0*hlp\n env.zl = -hlp*2\n env.zu = -env.zl\n env.dzenv = top.tunelen/100.e0\n\n # --- Set up quadrupoles\n addnewquad(zs= -quadlen/2.,\n ze= +quadlen/2.,\n db=-dbdx,ap=piperad)\n addnewquad(zs=hlp - quadlen/2.,\n ze=hlp + quadlen/2.,\n db=+dbdx,ap=piperad)\n addnewquad(zs=2.*hlp - quadlen/2.,\n ze=2.*hlp + quadlen/2.,\n db=-dbdx,ap=piperad)\n top.zlatstrt = 0.\n top.zlatperi = 2.e0*hlp\n\n # +++ Set input parameters describing the 3d simulation.\n w3d.nx = 64/2\n w3d.ny = 64/2\n w3d.nz = 256/2\n steps_p_perd = 50\n top.dt = (top.tunelen/steps_p_perd)/beam.vbeam\n\n # --- Set to finite beam.\n top.pbound0 = top.pboundnz = periodic\n top.pboundxy = absorb\n w3d.xmmin = -piperad\n w3d.xmmax = piperad\n w3d.ymmin = -piperad\n w3d.ymmax = piperad\n w3d.zmmin = -hlp*2\n w3d.zmmax = +hlp*2\n top.prwall = piperad\n\n # --- Set pulse length.\n beam.zimin = w3d.zmmin*.95/2.\n beam.zimax = w3d.zmmax*.95/2.\n\n # --- Load Semi-Gaussian cigar beam.\n top.npmax = 20000\n w3d.distrbtn = \"semigaus\"\n w3d.cigarld = true\n w3d.xrandom = \"digitrev\"\n w3d.vtrandom = \"digitrev\"\n w3d.vzrandom = \"digitrev\"\n w3d.ldprfile = \"polar\"\n w3d.cylinder = false\n top.straight = .8\n\n # --- set up field solver\n w3d.l4symtry = true\n w3d.bound0 = periodic\n w3d.boundnz = periodic\n w3d.boundxy = dirichlet\n\n solver = MultiGrid3D()\n registersolver(solver)\n\n pipe = ZCylinderOut(piperad,4.,voltage=0.)\n installconductors(pipe,dfill=largepos)\n\n # --- Run the envelope solver to provide data used to initialize particles.\n package(\"env\")\n generate()\n step()\n\n # --- Generate the PIC code (allocate storage, load ptcls, t=0 plots, etc.).\n package(\"w3d\")\n generate()\n return",
"def __call__( self, X, Y, Z):\n xb,yb,zb = self.transform( X,Y,Z)\n \n gauss = beam( xb,yb,zb, self.w[0], self.w[1], self.l)\n intensity = (2/np.pi)* self.mW/1000. /self.w[0]/self.w[1] *gauss # W um^-2\n\n cosSq = np.power(np.cos(2*np.pi/self.l * zb/self.scale ),2)\n \n lattice = cosSq *4*np.sqrt(self.retro*self.alpha)\\\n + ( 1 + self.retro - 2*np.sqrt(self.retro*self.alpha) )\n \n return uL(self.l)*intensity*lattice",
"def coordinates(self):",
"def main_beam_eff(beam=1, ZA=0, frequency=1400.): # apply the frequency at 1400\n D = 300 # m\n n_R = 1.0\n lam = 299792458./(1e6*frequency)\n theta = beam_size(beam=beam,frequency=frequency)/60. * np.pi/180.\n ape_eff = aperture_eff(beam=beam, ZA=ZA, frequency=frequency) \n mb_eff = 0.8899 * ape_eff / n_R * theta**2 * D**2 / lam**2\n\n return mb_eff",
"def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )",
"def calculate_coordinates(self):\n # get coordinates for lef side of equation\n self._calculate_for_one_side(self.left_side)\n\n # set process glyph x coordinate\n self.process_glyph_x = self.x_limit + 150\n\n self._calculate_for_one_side(self.right_side, side=\"right_side\")\n\n self.x_limit, self.y_limit = self._generate_real_coordinates_according_to_compartment()\n\n # set process glyph y coordinate\n self.process_glyph_y = self.y_limit / 2\n\n # set final image width, height\n self.x_limit += 10\n self.y_limit += 20",
"def compute_vel(self, state, goal):\n\n \"\"\"\n Unicycle model control law:\n [v;w] = [kp 0 0; 0 ka kb]*[p;a;b]\n v = commanded linear velocity of robot\n w = commanded rotational velcoity of robot\n kp = gain parameter where kp > 0\n ka = gain parameter where ka - kp > 0\n kb = gain parameter where kb < 0\n p = distance from robot to goal\n a = angle between current robot heading and heading to goal\n b = error between current heading to goal and target end heading\n \"\"\"\n \n #print('state,goal,v,w')\n #print(state)\n #print(goal)\n\n xr = state[0][0] # m in world frame\n yr = state[1][0] # m in world frame\n thetar = state[2][0] #rads\n\n xg = goal[0] # m in world frame\n yg = goal[1] # m in world frame\n\n dy = yg - yr\n dx = xg - xr\n\n #print('')\n #print(state)\n #print(goal)\n \n # Calculate a\n a = -1*thetar + math.atan2(dy,dx)\n\n #print(a)\n\n if a > math.pi:\n a = a - 2*math.pi\n\n if a < -1*math.pi:\n a = a + 2*math.pi\n\n #print(a)\n\n # Set omega according to control law\n omega = self.ka*a\n if math.fabs(omega) > self.MAX_OMEGA:\n if omega > 0:\n omega = self.MAX_OMEGA\n else:\n omega = -1*self.MAX_OMEGA\n\n # Calculate P\n p = math.sqrt(dy*dy + dx*dx)\n\n # Set v \n v = self.kp*p\n if v > self.MAX_SPEED:\n v = self.MAX_SPEED\n\n # set the done value\n done = (p <= self.done_distance)\n\n #print(v)\n #print(omega)\n\n out_tuple = (v, omega, done)\n \n return out_tuple",
"def __getxyB(x, y):\n\t\treturn x*3+y",
"def get_initial_params(self, x, y, yerr):\n estimated_height = max(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(max(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_height])\n return p0",
"def get_initial_params(self, x, y, yerr):\n estimated_height = max(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(max(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_height])\n return p0",
"def __ComputeApproximateVals(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results",
"def __init__(self):\n self.eyepoint = np.array([*self.eyepoint], dtype=np.float32)\n self.lookat = np.array([*self.lookat], dtype=np.float32)\n self.up = np.array([*self.up], dtype=np.float32)",
"def __init__(self,E,px,py,pz):\n Particle.__init__(self)\n self.E=float(E)\n self.px=float(px)\n self.py=float(py)\n self.pz=float(pz)\n self.cal_pt()\n self.cal_phi()\n self.cal_eta()\n #self.cal_mass()\n #print self.E,self.px,self.py,self.pz\n #print self.pt,self.phi,self.eta",
"def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if (self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ",
"def beamcentery(self) -> ErrorValue:\n return ErrorValue(self._data['BeamPosY'], self._data.setdefault('BeamPosYError',0.0))",
"def plot_beam_xy(self, write_files=False, return_values=False, threeD=False):\n\n import matplotlib.pyplot as plt\n\n # Get values\n (\n beamX,\n beamY,\n cbeamX,\n cbeamY,\n obeamX,\n obeamY,\n beam_dist,\n distances,\n aD,\n bD,\n cD,\n pixel_size,\n ) = self.calculate_beam_xy()\n\n # Plot figure\n if threeD:\n fig = plt.figure(figsize=(8, 8))\n ax1 = fig.add_subplot(111, projection=\"3d\")\n else:\n fig = plt.figure(figsize=(9, 13))\n gsp = gridspec.GridSpec(2, 1, height_ratios=[3, 1])\n ax1 = fig.add_subplot(gsp[0, :], aspect=\"equal\")\n\n # Calculate axis limits of beam center scatter plot\n ax1_delta = np.ceil(np.max(beam_dist))\n xmax = round(np.median(beamX) + ax1_delta)\n xmin = round(np.median(beamX) - ax1_delta)\n ymax = round(np.median(beamY) + ax1_delta)\n ymin = round(np.median(beamY) - ax1_delta)\n zmax = round(np.ceil(np.max(distances)))\n zmin = round(np.floor(np.min(distances)))\n\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(ymin, ymax)\n if threeD:\n ax1.set_zlim(zmin, zmax)\n\n # Plot beam center scatter plot\n if threeD:\n ax1.scatter(beamX, beamY, distances, alpha=1, s=20, c=\"grey\", lw=1)\n ax1.plot(\n [np.median(beamX)],\n [np.median(beamY)],\n [np.median(distances)],\n markersize=8,\n marker=\"o\",\n c=\"yellow\",\n lw=2,\n )\n else:\n ax1.scatter(cbeamX, cbeamY, alpha=1, s=20, c=\"grey\", lw=1)\n ax1.scatter(obeamX, obeamY, alpha=1, s=20, c=\"red\", lw=1)\n ax1.plot(\n np.median(beamX),\n np.median(beamY),\n markersize=8,\n marker=\"o\",\n c=\"yellow\",\n lw=2,\n )\n\n # Plot projected mis-indexing limits for all three axes\n circle_a = plt.Circle(\n (np.median(beamX), np.median(beamY)),\n radius=aD,\n color=\"r\",\n fill=False,\n clip_on=True,\n )\n circle_b = plt.Circle(\n (np.median(beamX), np.median(beamY)),\n radius=bD,\n color=\"g\",\n fill=False,\n clip_on=True,\n )\n circle_c = plt.Circle(\n (np.median(beamX), np.median(beamY)),\n radius=cD,\n color=\"b\",\n fill=False,\n clip_on=True,\n )\n ax1.add_patch(circle_a)\n ax1.add_patch(circle_b)\n ax1.add_patch(circle_c)\n\n # Set labels\n ax1.set_xlabel(\"BeamX (mm)\", fontsize=15)\n ax1.set_ylabel(\"BeamY (mm)\", fontsize=15)\n if threeD:\n ax1.set_zlabel(\"Distance (mm)\", fontsize=15)\n ax1.set_title(\"Beam XYZ Coordinates\")\n else:\n ax1.set_title(\"Beam XY Coordinates\")\n\n if not threeD:\n # Plot histogram of distances to each beam center from median\n ax2 = fig.add_subplot(gsp[1, :])\n ax2_n, ax2_bins, ax2_patches = plt.hist(\n beam_dist, 20, facecolor=\"b\", alpha=0.75, histtype=\"stepfilled\"\n )\n ax2_height = (np.max(ax2_n) + 9) // 10 * 10\n ax2.axis([0, np.max(beam_dist), 0, ax2_height])\n ax2.set_xlabel(\"Distance from median (mm)\", fontsize=15)\n ax2.set_ylabel(\"No. of images\", fontsize=15)\n\n if write_files:\n fig.savefig(self.xy_file, format=\"pdf\", bbox_inches=0)\n else:\n plt.show()\n\n if return_values:\n return np.median(beamX), np.median(beamY), pixel_size",
"def _evaluate_xyz(self,x,y,z=0.):\n return -np.pi * self._rhoc_M /(self.n+1.)*self.a**3*self._b*self._c * \\\n _potInt(x, y, z, self._a2, self._b2*self._a2, self._c2*self._a2, self.n)",
"def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel"
] | [
"0.6129466",
"0.60761815",
"0.6049173",
"0.5987054",
"0.59085387",
"0.58142376",
"0.58065397",
"0.5801077",
"0.5798147",
"0.57459855",
"0.57192415",
"0.57188255",
"0.5706152",
"0.5704091",
"0.5699249",
"0.5691896",
"0.5674206",
"0.5672045",
"0.5594236",
"0.5586325",
"0.5564337",
"0.5564337",
"0.55561405",
"0.5550695",
"0.55446434",
"0.55436474",
"0.5535642",
"0.5528498",
"0.5524297",
"0.548697"
] | 0.73167294 | 0 |
Calls unit cell analysis module, which uses hierarchical clustering (Zeldin, et al, Acta D, 2015) to split integration results according to detected morphological groupings (if any). Most useful with preliminary integration without target unit cell specified. | def unit_cell_analysis(self):
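        # Overview of the branches below (descriptive comments added for clarity):
        #   - no integrated results: log that a unit cell cannot be determined
        #   - exactly one result: report its unit cell and point group directly
        #   - multiple results: either run hierarchical unit-cell clustering
        #     (Cluster.ab_cluster) or compute a median "average" cell, then
        #     report the most populous unit cell and its Bravais lattice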
# Will not run clustering if only one integration result found or if turned off
if not self.info.categories["integrated"]:
util.main_log(
self.info.logfile, "\n\n{:-^80}\n".format(" UNIT CELL ANALYSIS "), True
)
util.main_log(self.info.logfile, "\n UNIT CELL CANNOT BE DETERMINED!", True)
elif len(self.info.categories["integrated"]) == 1:
            unit_cell = self.info.cluster_iterable[0][:6]
point_group = self.info.cluster_iterable[0][6]
util.main_log(
self.info.logfile, "\n\n{:-^80}\n".format(" UNIT CELL ANALYSIS "), True
)
uc_line = (
"{:<6} {:^4}: {:<6.2f}, {:<6.2f}, {:<6.2f}, {:<6.2f}, "
"{:<6.2f}, {:<6.2f}".format(
"(1)",
point_group,
unit_cell[0],
unit_cell[1],
unit_cell[2],
unit_cell[3],
unit_cell[4],
unit_cell[5],
)
)
util.main_log(self.info.logfile, uc_line, True)
self.info.best_pg = str(point_group)
self.info.best_uc = unit_cell
else:
uc_table = []
uc_summary = []
if self.params.analysis.clustering.flag_on:
# run hierarchical clustering analysis
from xfel.clustering.cluster import Cluster
counter = 0
self.info.clusters = []
threshold = self.params.analysis.clustering.threshold
cluster_limit = self.params.analysis.clustering.limit
                final_pickles = self.info.categories["integrated"][0]
                pickles = []
                if self.params.analysis.clustering.n_images:
                    # cluster a random subset of n_images integration results
                    import random
                    for i in range(self.params.analysis.clustering.n_images):
                        random_number = random.randrange(0, len(final_pickles))
                        # re-draw until an image that has not been picked yet is found
                        while final_pickles[random_number] in pickles:
                            random_number = random.randrange(0, len(final_pickles))
                        pickles.append(final_pickles[random_number])
                else:
                    pickles = final_pickles
# Cluster from files (slow, but will keep for now)
ucs = Cluster.from_files(pickle_list=pickles)
# Do clustering
clusters, _ = ucs.ab_cluster(
threshold=threshold,
log=False,
write_file_lists=False,
schnell=False,
doplot=False,
)
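                # Each returned cluster carries its member list (cluster.members),
                # median Niggli cell (cluster.medians) and point-group composition
                # (cluster.pg_composition), which are summarized below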
uc_table.append("\n\n{:-^80}\n" "".format(" UNIT CELL ANALYSIS "))
# extract clustering info and add to summary output list
                if cluster_limit is None:
                    # default cluster-size cutoff: 10% of the clustered images, capped at 10
                    cluster_limit = min(10, len(pickles) // 10)
for cluster in clusters:
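                    # Point-group composition of this cluster, most frequent first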
sorted_pg_comp = sorted(
cluster.pg_composition.items(), key=lambda x: -1 * x[1]
)
pg_nums = [pg[1] for pg in sorted_pg_comp]
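                    # Consensus point group: the most frequent (point group, count) pair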
cons_pg = sorted_pg_comp[np.argmax(pg_nums)]
if len(cluster.members) > cluster_limit:
counter += 1
# Write to file
cluster_filenames = [j.path for j in cluster.members]
if self.params.analysis.clustering.write_files:
output_file = os.path.join(
self.info.int_base, "uc_cluster_{}.lst".format(counter)
)
for fn in cluster_filenames:
with open(output_file, "a") as scf:
scf.write("{}\n".format(fn))
mark_output = os.path.basename(output_file)
else:
mark_output = "*"
output_file = None
else:
mark_output = ""
output_file = None
# Populate clustering info for GUI display
uc_init = uctbx.unit_cell(cluster.medians)
symmetry = crystal.symmetry(
unit_cell=uc_init, space_group_symbol="P1"
)
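                    # Find the highest-symmetry Bravais setting compatible with the
                    # median cell (metric_subgroups, max angular deviation of 3 degrees)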
groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)
top_group = groups.result_groups[0]
best_sg = str(groups.lattice_group_info()).split("(")[0]
best_uc = top_group["best_subsym"].unit_cell().parameters()
# best_sg = str(top_group['best_subsym'].space_group_info())
uc_no_stdev = (
"{:<6.2f} {:<6.2f} {:<6.2f} "
"{:<6.2f} {:<6.2f} {:<6.2f} "
"".format(
best_uc[0],
best_uc[1],
best_uc[2],
best_uc[3],
best_uc[4],
best_uc[5],
)
)
cluster_info = {
"number": len(cluster.members),
"pg": best_sg,
"uc": uc_no_stdev,
"filename": mark_output,
}
self.info.clusters.append(cluster_info)
# format and record output
# TODO: How to propagate stdevs after conversion from Niggli?
# uc_line = "{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), "\
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), "\
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) "\
# "{}".format('({})'.format(len(cluster.members)), cons_pg[0],
# cluster.medians[0], cluster.stdevs[0],
# cluster.medians[1], cluster.stdevs[1],
# cluster.medians[2], cluster.stdevs[2],
# cluster.medians[3], cluster.stdevs[3],
# cluster.medians[4], cluster.stdevs[4],
# cluster.medians[5], cluster.stdevs[5],
# mark_output)
# uc_table.append(uc_line)
uc_table.append(
"{:<6}: {} {}".format(
len(cluster.members), uc_no_stdev, mark_output
)
)
lattices = ", ".join(
["{} ({})".format(i[0], i[1]) for i in sorted_pg_comp]
)
# uc_info = [len(cluster.members), cons_pg[0], cluster.medians,
# output_file, uc_line, lattices]
uc_info = [
len(cluster.members),
best_sg,
best_uc,
output_file,
uc_no_stdev,
lattices,
]
uc_summary.append(uc_info)
else:
# generate average unit cell
uc_table.append(
"\n\n{:-^80}\n" "".format(" UNIT CELL AVERAGING (no clustering) ")
)
uc_a, uc_b, uc_c, uc_alpha, uc_beta, uc_gamma, uc_sg = list(
zip(*self.info.cluster_iterable)
)
cons_pg = Counter(uc_sg).most_common(1)[0][0]
all_pgs = Counter(uc_sg).most_common()
unit_cell = (
np.median(uc_a),
np.median(uc_b),
np.median(uc_c),
np.median(uc_alpha),
np.median(uc_beta),
np.median(uc_gamma),
)
# Populate clustering info for GUI display
uc_init = uctbx.unit_cell(unit_cell)
symmetry = crystal.symmetry(unit_cell=uc_init, space_group_symbol="P1")
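            # As above, find the highest-symmetry Bravais setting compatible with
            # the median cell (3-degree tolerance)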
groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)
top_group = groups.result_groups[0]
best_sg = str(groups.lattice_group_info()).split("(")[0]
best_uc = top_group["best_subsym"].unit_cell().parameters()
# best_sg = str(top_group['best_subsym'].space_group_info())
uc_no_stdev = (
"{:<6.2f} {:<6.2f} {:<6.2f} "
"{:<6.2f} {:<6.2f} {:<6.2f} "
"".format(
best_uc[0],
best_uc[1],
best_uc[2],
best_uc[3],
best_uc[4],
best_uc[5],
)
)
cluster_info = {
"number": len(self.info.cluster_iterable),
"pg": best_sg,
"uc": uc_no_stdev,
"filename": None,
}
self.info.clusters.append(cluster_info)
# uc_line = "{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), " \
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), " \
# "{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) " \
# "{}".format('({})'.format(len(self.final_objects)), cons_pg,
# np.median(uc_a), np.std(uc_a),
# np.median(uc_b), np.std(uc_b),
# np.median(uc_c), np.std(uc_c),
# np.median(uc_alpha), np.std(uc_alpha),
# np.median(uc_beta), np.std(uc_beta),
# np.median(uc_gamma), np.std(uc_gamma), '')
#
# uc_table.append(uc_line)
uc_table.append(uc_no_stdev)
lattices = ", ".join(["{} ({})".format(i[0], i[1]) for i in all_pgs])
# uc_info = [len(self.final_objects), cons_pg, unit_cell, None,
# uc_line, lattices]
uc_info = [
len(self.info.cluster_iterable),
best_sg,
best_uc,
None,
uc_no_stdev,
lattices,
]
uc_summary.append(uc_info)
uc_table.append("\nMost common unit cell:\n")
# select the most prevalent unit cell (most members in cluster)
uc_freqs = [i[0] for i in uc_summary]
uc_pick = uc_summary[np.argmax(uc_freqs)]
uc_table.append(uc_pick[4])
uc_table.append(
"\nBravais Lattices in Biggest Cluster: {}" "".format(uc_pick[5])
)
self.info.best_pg = str(uc_pick[1])
self.info.best_uc = uc_pick[2]
if uc_pick[3] is not None:
self.prime_data_path = uc_pick[3]
for item in uc_table:
util.main_log(self.info.logfile, item, False)
self.info.update(uc_table=uc_table)
if self.gui_mode:
return self.info.clusters | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(argv):\n args = process_command_line(argv)\n name = job_string(args)\n #That feel when no torison ;_;\n if args.dihed:\n raise Exception(\"Dihed is not supported right now\")\n #SDFS!\n if args.sdf:\n handle_sdf(args)\n #Conversion, pruning\n pybel_mols = convert_to_pybel(args.files, args.format)\n if args.pruneStart:\n pybel_mols = prune(pybel_mols, args.pruneStart)\n print \"Total number of molecules to process is\", len(pybel_mols)\n #Division\n if args.division:\n grouped_pybels = molecule_grouping.main(args.division, pybel_mols)\n else:\n grouped_pybels = [pybel_mols]\n #Run algorithm\n groups_reps, weights = run_smrs(grouped_pybels, args.dihed, args.nonH, args.energy,\n args.alpha, args.delCoordCSV, args.delCoefCSV, name)\n prune_finished = False\n #Pruning representatives\n if args.pruneFinish:\n all_reps = []\n for group in groups_reps:\n all_reps += group\n all_reps = prune(all_reps, args.pruneFinish)\n prune_finished = True\n #Save all groups into one folder\n folder_name = 'rep_' + name\n if args.folder:\n #folder creation\n while True:\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n break\n else:\n folder_name = folder_name + 'c'\n #copying\n if prune_finished:\n for mol in all_reps:\n shutil.copy(mol.title, os.getcwd() + \"/\" + folder_name)\n else:\n for group in groups_reps:\n for mol in group:\n shutil.copy(mol.title, os.getcwd() + \"/\" + folder_name)\n print \"Coeficient matrix results\"\n for i in range(len(grouped_pybels)):\n for j in range(len(grouped_pybels[i])):\n print grouped_pybels[i][j].title, weights[i][j]\n print \"\"\n print \"Rep mols\"\n for group in groups_reps:\n for mol in group:\n print mol.title\n return groups_reps",
"def unit_cell_info(sub_clusters):\n from libtbx.utils import plural_s\n # 3. print out some information that is useful.\n out_str = \"\\n\\n{:<16} {:<8} {:<13} {:<13} {:<13} {:<12} {:<12} {:<12}{:<8}\\n\".format(\n \"Cluster_id\",\n \"N_xtals\",\n \"Med_a\", \"Med_b\", \"Med_c\",\n \"Med_alpha\", \"Med_beta\", \"Med_gamma\",\"Delta(deg)\")\n singletons = []\n for cluster in sub_clusters:\n if len(cluster.members) != 1:\n # New approach, takes niggli setting of the cluster median and converts\n # back to reference setting for cluster report. Fixes cctbx#97.\n from cctbx import crystal\n from cctbx.uctbx import unit_cell\n from cctbx.sgtbx.lattice_symmetry import metric_subgroups\n\n input_symmetry = crystal.symmetry(\n unit_cell=unit_cell(cluster.medians[0:6]),\n space_group_symbol=\"P 1\")\n groups = metric_subgroups(input_symmetry, 3.00,\n enforce_max_delta_for_generated_two_folds=True)\n group = groups.result_groups[0]\n print(\" Unit cell:\", group['best_subsym'].unit_cell())\n uc_params_conv = group['best_subsym'].unit_cell().parameters()\n\n sorted_pg_comp = sorted(list(cluster.pg_composition.items()),\n key=lambda x: -1 * x[1])\n pg_strings = [\"{} in {}\".format(pg[1], pg[0])\n for pg in sorted_pg_comp]\n point_group_string = \", \".join(pg_strings) + \".\"\n out_str += point_group_string\n out_str += (\"\\n{:<16} {:<8} {:<6.2f}({:<5.2f}) {:<6.2f}({:<5.2f})\"\n \" {:<6.2f}({:<5.2f}) {:<6.2f}({:<4.2f}) {:<6.2f}\"\n \"({:<4.2f}) {:<6.2f}({:<4.2f})\").format(\n cluster.cname,\n len(cluster.members),\n cluster.medians[0], cluster.stdevs[0],\n cluster.medians[1], cluster.stdevs[1],\n cluster.medians[2], cluster.stdevs[2],\n cluster.medians[3], cluster.stdevs[3],\n cluster.medians[4], cluster.stdevs[4],\n cluster.medians[5], cluster.stdevs[5])\n out_str += (\"\\n{:>24} {:<6.2f}{:<7} {:<6.2f}{:<7}\"\n \" {:<6.2f}{:<7} {:<6.2f}{:<6} {:<6.2f}\"\n \"{:<6} {:<6.2f}{:<6} {:<6.2}\").format(\n group['best_subsym'].space_group_info().symbol_and_number(),\n uc_params_conv[0], \"\",\n uc_params_conv[1], \"\",\n uc_params_conv[2], \"\",\n uc_params_conv[3], \"\",\n uc_params_conv[4], \"\",\n uc_params_conv[5], \"\",\n group[\"max_angular_difference\"]) + \"\\n\\n\"\n\n else:\n singletons.append(\"\".join([(\"{:<14} {:<11.2f} {:<11.2f} {:<11.2f}\"\n \"{:<12.1f} {:<12.1f} {:<12.1f}\").format(\n list(cluster.pg_composition.keys())[0],\n cluster.members[0].uc[0], cluster.members[0].uc[1],\n cluster.members[0].uc[2], cluster.members[0].uc[3],\n cluster.members[0].uc[4], cluster.members[0].uc[5]),\n '\\n']))\n out_str += \"\\nStandard deviations are in brackets.\"\n explanation = \"\"\"\\nEach cluster:\nInput lattice count, with integration Bravais setting space group.\nCluster median with Niggli cell parameters (std dev in brackets).\nHighest possible metric symmetry and unit cell using LePage (J Appl Cryst 1982, 15:255) method, maximum delta 3deg.\"\"\"\n out_str += explanation\n singleton_str = \"\\n%i singleton%s:\" %plural_s(len(singletons))\n singleton_str += \"\\n\\n{:<14} {:<11} {:<11} {:<11}{:<12} {:<12} {:<12}\\n\".format(\n \"Point group\",\n \"a\", \"b\", \"c\", \"alpha\", \"beta\", \"gamma\")\n singleton_str += \"\".join(singletons)\n n_clusters = len(sub_clusters) - len(singletons)\n out_str = \"\\n%i cluster%s:\" %plural_s(n_clusters) + out_str\n return singleton_str + out_str",
"def test(dist_param, picker_param, iters):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(4, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n None,\n tfidf_name('merged.stem{}.tfidf', name_tag),\n 10,\n 0,\n None)\n execute(tf_conf)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n None,\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n None,\n None,\n None)\n execute(tf_conf)\n #input, out, picker, distance, iterations,\n clust_cfg = configs.ClusteringConfig(\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n tfidf_name('merged.stem{}.stop.clustered.t', name_tag),\n picker_param,\n dist_param,\n iters,\n None\n )\n execute(clust_cfg)\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n var, norm = variation_of_information(cl_orig, clust2)\n print(\"**** FOR var {} VOI is {}\".format(name_tag, norm))",
"def run(self):\n import sacc\n import healpy\n import treecorr\n # Load the different pieces of data we need into\n # one large dictionary which we accumulate\n data = {}\n self.load_tomography(data)\n self.load_shear_catalog(data)\n self.load_random_catalog(data)\n # This one is optional - this class does nothing with it\n self.load_lens_catalog(data)\n # Binning information\n self.read_nbin(data)\n\n # Calculate metadata like the area and related\n # quantities\n meta = self.calculate_metadata(data)\n\n # Choose which pairs of bins to calculate\n calcs = self.select_calculations(data)\n\n sys.stdout.flush()\n \n # This splits the calculations among the parallel bins\n # It's not necessarily the most optimal way of doing it\n # as it's not dynamic, just a round-robin assignment,\n # but for this case I would expect it to be mostly fine\n results = []\n for i,j,k in self.split_tasks_by_rank(calcs):\n results += self.call_treecorr(data, i, j, k)\n\n # If we are running in parallel this collects the results together\n results = self.collect_results(results)\n\n # Save the results\n if self.rank==0:\n self.write_output(data, meta, results)",
"def main():\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-10\n level_params['dt'] = 0.1\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = 3\n\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.1 # diffusion coefficient\n problem_params['freq'] = 4 # frequency for the test value\n problem_params['nvars'] = 1023 # number of degrees of freedom\n problem_params['bc'] = 'dirichlet-zero' # boundary conditions\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 20\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['log_to_file'] = True\n controller_params['fname'] = 'data/step_2_C_out.txt'\n\n # Fill description dictionary for easy hierarchy creation\n description = dict()\n description['problem_class'] = heatNd_forced\n description['problem_params'] = problem_params\n description['sweeper_class'] = imex_1st_order\n description['sweeper_params'] = sweeper_params\n description['level_params'] = level_params\n description['step_params'] = step_params\n\n Path(\"data\").mkdir(parents=True, exist_ok=True)\n\n # instantiate the controller\n controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)\n\n # set time parameters\n t0 = 0.1\n Tend = 0.3 # note that we are requesting 2 time steps here (dt is 0.1)\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # compute exact solution and compare\n uex = P.u_exact(Tend)\n err = abs(uex - uend)\n\n f = open('data/step_2_C_out.txt', 'a')\n out = 'Error after SDC iterations: %8.6e' % err\n f.write(out)\n print(out)\n f.close()\n\n assert err <= 2e-5, \"ERROR: controller doing IMEX SDC iteration did not reduce the error enough, got %s\" % err",
"def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return",
"def mi_from_dm_alt_hq(distance_matrix, ns, nh, spike_train_list=None):\n \n print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n #nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n nearest_neighbours = np.array([np.array(hq.nsmallest(nh, r)) for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n print \"finished sorting\"\n return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I",
"def undulations(**kwargs):\n\n\t#---parameters\n\tsn = kwargs['sn']\n\twork = kwargs['workspace']\n\tcalc = kwargs['calc']\n\tupname = 'lipid_abstractor'\n\tgrid_spacing = calc['specs']['grid_spacing']\n\tvecs = datmerge(kwargs,upname,'vecs')\n\tnframes = int(np.sum(datmerge(kwargs,upname,'nframes')))\n\ttrajectory = datmerge(kwargs,upname,'points')\n\tattrs,result = {},{}\n\t#---! hacking through error with monolayer separation\n\ttry: monolayer_indices = kwargs['upstream'][upname+'0']['monolayer_indices']\n\texcept: monolayer_indices = kwargs['upstream'][upname]['monolayer_indices']\n\t#---choose grid dimensions\n\tgrid = np.array([round(i) for i in np.mean(vecs,axis=0)/grid_spacing])[:2]\n\t#---! removed timeseries from result for new version of omnicalc\n\t#---parallel\n\tmesh = [[],[]]\n\tfor mn in range(2):\n\t\tstart = time.time()\n\t\tmesh[mn] = Parallel(n_jobs=work.nprocs,verbose=0,require='sharedmem')(\n\t\t\tdelayed(makemesh_regular)(\n\t\t\t\ttrajectory[fr][np.where(monolayer_indices==mn)],vecs[fr],grid)\n\t\t\tfor fr in framelooper(nframes,start=start,text='monolayer %d, frame'%mn))\n\tchecktime()\n\n\t#---pack\n\tresult['mesh'] = np.array(mesh)\n\tresult['grid'] = np.array(grid)\n\tresult['nframes'] = np.array(nframes)\n\tresult['vecs'] = vecs\n\tattrs['grid_spacing'] = grid_spacing\n\treturn result,attrs",
"def main():\n subjectlist = ['hel{}'.format(i) for i in range(1, 20) if i is not 9]\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'randomise_setup_fslmerge'))\n logfile.info('Setup for randomise.')\n logfile.info('Making a 4D data set by combining images')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n for subclust_n in range(1, 4):\n outfilename = os.path.join(outdir,\n 'knnward_clst1_subclust{}_4Dfile'.format(\n subclust_n))\n mergefsl(logfile, make_file_list(subjectlist, subclust_n), outfilename)",
"def _run(evaluation_dir_name, smoothing_radius_grid_cells,\n score_colour_map_name, num_ex_colour_map_name, max_colour_percentile,\n output_dir_name):\n\n if smoothing_radius_grid_cells <= 0:\n smoothing_radius_grid_cells = None\n\n score_colour_map_object = pyplot.get_cmap(score_colour_map_name)\n num_ex_colour_map_object = pyplot.get_cmap(num_ex_colour_map_name)\n error_checking.assert_is_geq(max_colour_percentile, 90.)\n error_checking.assert_is_leq(max_colour_percentile, 100.)\n\n grid_metafile_name = grids.find_equidistant_metafile(\n directory_name=evaluation_dir_name, raise_error_if_missing=True\n )\n\n print('Reading grid metadata from: \"{0:s}\"...'.format(grid_metafile_name))\n grid_metadata_dict = grids.read_equidistant_metafile(grid_metafile_name)\n print(SEPARATOR_STRING)\n\n num_grid_rows = len(grid_metadata_dict[grids.Y_COORDS_KEY])\n num_grid_columns = len(grid_metadata_dict[grids.X_COORDS_KEY])\n\n auc_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n csi_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n pod_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n far_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n num_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n num_positive_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n\n for i in range(num_grid_rows):\n for j in range(num_grid_columns):\n this_eval_file_name = model_eval.find_file(\n directory_name=evaluation_dir_name, grid_row=i, grid_column=j,\n raise_error_if_missing=False)\n\n if not os.path.isfile(this_eval_file_name):\n warning_string = (\n 'Cannot find file (this may or may not be a problem). '\n 'Expected at: \"{0:s}\"'\n ).format(this_eval_file_name)\n\n warnings.warn(warning_string)\n continue\n\n print('Reading data from: \"{0:s}\"...'.format(this_eval_file_name))\n this_evaluation_dict = model_eval.read_evaluation(\n this_eval_file_name)\n\n num_examples_matrix[i, j] = len(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n num_positive_examples_matrix[i, j] = numpy.sum(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n\n this_evaluation_table = this_evaluation_dict[\n model_eval.EVALUATION_TABLE_KEY]\n\n auc_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.AUC_KEY].values\n )\n csi_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.CSI_KEY].values\n )\n pod_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.POD_KEY].values\n )\n far_matrix[i, j] = 1. 
- numpy.nanmean(\n this_evaluation_table[model_eval.SUCCESS_RATIO_KEY].values\n )\n\n print(SEPARATOR_STRING)\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n if smoothing_radius_grid_cells is not None:\n print((\n 'Applying Gaussian smoother with e-folding radius of {0:.1f} grid '\n 'cells...'\n ).format(\n smoothing_radius_grid_cells\n ))\n\n orig_num_examples_matrix = num_examples_matrix + 0\n num_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_examples_matrix = numpy.round(num_examples_matrix).astype(int)\n num_examples_matrix[orig_num_examples_matrix == 0] = 0 # HACK\n\n num_positive_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_positive_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_positive_examples_matrix = (\n numpy.round(num_positive_examples_matrix).astype(int)\n )\n num_positive_examples_matrix[num_examples_matrix == 0] = 0\n\n auc_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(auc_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n csi_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(csi_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n pod_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(pod_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n far_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(far_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n panel_file_names = []\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # Plot number of examples.\n this_data_matrix = numpy.maximum(numpy.log10(num_examples_matrix), 0.)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=0., max_colour_value=max_colour_value,\n plot_cbar_min_arrow=False, plot_cbar_max_arrow=True, log_scale=True)\n\n axes_object.set_title(r'Number of examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(a)')\n\n panel_file_names.append('{0:s}/num_examples.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot number of positive examples.\n this_data_matrix = num_positive_examples_matrix.astype(float)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n this_data_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=True)\n\n axes_object.set_title('Number of tornadic examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(b)')\n\n panel_file_names.append(\n '{0:s}/num_positive_examples.jpg'.format(output_dir_name)\n )\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot AUC.\n max_colour_value = numpy.nanpercentile(auc_matrix, max_colour_percentile)\n min_colour_value = numpy.maximum(\n numpy.nanpercentile(auc_matrix, 100. - max_colour_percentile),\n 0.5\n )\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=auc_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('AUC (area under ROC curve)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(c)')\n\n panel_file_names.append('{0:s}/auc.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot CSI.\n max_colour_value = numpy.nanpercentile(csi_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n csi_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=csi_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('CSI (critical success index)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(d)')\n\n panel_file_names.append('{0:s}/csi.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot POD.\n max_colour_value = numpy.nanpercentile(pod_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n pod_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=pod_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('POD (probability of detection)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(e)')\n\n panel_file_names.append('{0:s}/pod.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot FAR.\n max_colour_value = numpy.nanpercentile(far_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n far_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=far_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('FAR (false-alarm ratio)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(f)')\n\n panel_file_names.append('{0:s}/far.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Concatenate panels.\n concat_file_name = '{0:s}/spatially_subset_evaluation.jpg'.format(\n output_dir_name)\n print('Concatenating panels to: \"{0:s}\"...'.format(concat_file_name))\n\n imagemagick_utils.concatenate_images(\n input_file_names=panel_file_names, output_file_name=concat_file_name,\n num_panel_rows=NUM_PANEL_ROWS, num_panel_columns=NUM_PANEL_COLUMNS)\n\n imagemagick_utils.resize_image(\n input_file_name=concat_file_name, output_file_name=concat_file_name,\n output_size_pixels=CONCAT_FIGURE_SIZE_PX)",
"def mi_from_dm_alt(distance_matrix, ns, nh, spike_train_list=None):\n \n #print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n #print \"finished sorting\"\n #return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n \n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I",
"def batchAnalysis(groupfil):\n groups = []\n with open(groupfil, 'r') as fIn:\n for line in fIn:\n groups.append(line.strip().split(','))\n \n checks = ['maxV', 'maxDerivV', 'maxDerivdV', 'minDerivV',\n 'minDerivdV', 'preMinV', 'postMinV', 'preMaxCurveV',\n 'preMaxCurveK', 'postMaxCurveV', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n props = {ch: {gr: {} for gr in list(set([g[1] for g in groups]))}\n for ch in checks} # A dict of dicts\n # props [properties] [group name] [cell name]\n cells = [f[0].split('/')[-1].split('_')[0] for f in groups]\n \n # Add a few more keys\n props['activity'] = {gr: {} for gr in list(set([g[1] for g in groups]))}\n \n # Assign all the properties to the props dict\n for g in groups:\n df = pd.read_csv(g[0])\n df = df.drop('Unnamed: 33', 1) # Garbage\n df = df.drop('freq', 1) # These are downsampled\n df = df.dropna() # Dropna\n \n # If there are multiple clusters, add them in order\n if max(df.clust_inds) == 1: # Two clusters\n numClusts = int(max(df.clust_inds)+1)\n for ch in checks:\n for clust in range(numClusts):\n try:\n props[ch][g[1]][cells[groups.index(g)]].append(df[df['clust_inds']==clust][ch].dropna().values)\n except:\n props[ch][g[1]][cells[groups.index(g)]] = [df[df['clust_inds']==clust][ch].dropna().values]\n else: # Just one cluster\n for ch in checks:\n props[ch][g[1]][cells[groups.index(g)]] = [df[ch].dropna().values]\n # Get activity profile\n tIn, cBouts = timeInClusters(df)\n props['activity'][g[1]][cells[groups.index(g)]] = [tIn, cBouts]\n \n return props",
"def main(args):\n\n\t##############################################################################\n\t######## Pass user command line arguments to setup.py which will #############\n\t############# initialise some parameters for the analysis ###################\n\t##############################################################################\n\tinit_ = setup.initialise_user_input(args)\n\n\t##############################################################################\n\t######## Define system_ which is the object, of class nanoCISC, ##############\n\t######## which contains all relevant information about your nanoparticle ####\n\t##############################################################################\n\tsystem_ = nano_cisc.nanoCISC(init_.nano_particle, init_.anchors, init_.beta, init_.calcrange, \n init_.curves, init_.targetinc, init_.density) \n\t# initialise system_ as nanoCISC class here ^^^\n\n\t# If density is being calculated, define grid from grid class\n\tif args['density']:\n\t\tgrid=grids.grid(system_)\n\n\n\t##############################################################################\n\t################ Process trajectory, frame by frame ##########################\n\t##############################################################################\n\n\tfor ts in init_.u.trajectory: # loop through trajectory frames here \n\t\tprint \"Processing snapshot %d \" % (ts.frame)\n\n\t\t# Array for calculating intrinsic density is initialised to {0}\n\t\tintrinsic_count=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Array that stores the instantaneous volume of each spatial interval is initialised to {0}\n\t\tvolume_at_dist=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Centre of mass position is updated\n\t\tsystem_.update_com()\n\n\t\t# Vectors describing the anchor points are updated \n\t\tsystem_.update_anchors() \n\n\t\t# Nanoparticle depth values are updated\n\t\tsystem_.update_surface() \t\n\n\t\tif args['XYZsurface']:\n\t\t\tsystem_.write_surface(init_.f_visualise_surface) # write micelle surface to xyz file\n \n \t\tif args['density']: \n \t\t\tgrid.update_volume_estimate(volume_at_dist, system_) # volume estimate is updated for snapshot\n\t\t\tsystem_.calculate_density(intrinsic_count, volume_at_dist) # calculate density here\n\n\t\tsystem_.frames_processed += 1\n\n\t##################################\n\t##### Print results to files #####\n\t##################################\n\tif args['density']:\n\t\tsystem_.print_intrinsic_density(init_.f_intrinsic_density_out)\n\t\tsystem_.print_radial_density()\n\n\n\tprint \"Program finished successfully!!!\\n\"",
"def main():\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn AgglomerativeClustering method. \", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the clustered dataset. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the clustering plot. Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n agglomerative_clustering(input_dataset_path=args.input_dataset_path,\n output_results_path=args.output_results_path,\n output_plot_path=args.output_plot_path,\n properties=properties)",
"def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n 
args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))",
"def __init__(self, device=\"cuda:0\", *args, **kwargs):\n source_file_wtsd = \"/g/kreshuk/data/leptin/sourabh_data_v1/Segmentation_results_fused_tp_1_ch_0_Masked_WatershedBoundariesMergeTreeFilter_Out1.tif\"\n source_file_wtsd = \"/g/kreshuk/hilt/projects/data/leptin_fused_tp1_ch_0/Masked_WatershedBoundariesMergeTreeFilter_Out1.h5\"\n # wtsd = torch.from_numpy(np.array(imread(source_file_wtsd).astype(np.long))).to(device)\n wtsd = torch.from_numpy(h5py.File(source_file_wtsd, \"r\")[\"data\"][:].astype(np.long)).to(device)\n slices = [0, 157, 316]\n label_1 = [1359, 886, 1240]\n label_2 = [1172, 748, 807]\n label_3 = [364, 1148, 1447]\n m1, m2, m3, m4, m5 = [], [], [], [], []\n self.outer_cntr_ds, self.inner_cntr_ds, self.celltype_1_ds, self.celltype_2_ds, self.celltype_3_ds = [], [], [], [], []\n for slc, l1, l2, l3 in zip(slices, label_1, label_2, label_3):\n bg = wtsd[:, slc, :] == 1\n bg_cnt = find_contours(bg.cpu().numpy(), level=0)\n cnt1 = bg_cnt[0] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[1]\n cnt2 = bg_cnt[1] if bg_cnt[0].shape[0] > bg_cnt[1].shape[0] else bg_cnt[0]\n for m, cnt in zip([m1, m2], [cnt1, cnt2]):\n mask = torch.zeros_like(wtsd[:, slc, :]).cpu()\n mask[np.round(cnt[:, 0]), np.round(cnt[:, 1])] = 1\n m.append(torch.from_numpy(binary_fill_holes(mask.long().cpu().numpy())).to(device).sum().item())\n\n mask = wtsd[:, slc, :] == l1\n m3.append(mask.long().sum().item())\n cnt3 = find_contours(mask.cpu().numpy(), level=0)[0]\n mask = wtsd[:, slc, :] == l2\n m4.append(mask.long().sum().item())\n cnt4 = find_contours(mask.cpu().numpy(), level=0)[0]\n mask = wtsd[:, slc, :] == l3\n m5.append(mask.long().sum().item())\n cnt5 = find_contours(mask.cpu().numpy(), level=0)[0]\n\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt1[:, 0], cnt1[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt2[:, 0], cnt2[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt3[:, 0], cnt3[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt4[:, 0], cnt4[:, 1]] = 1\n # plt.imshow(img);plt.show()\n # img = torch.zeros_like(wtsd[:, slc, :]).cpu()\n # img[cnt5[:, 0], cnt5[:, 1]] = 1\n # plt.imshow(img);plt.show()\n\n self.outer_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt1, tolerance=1.2)).to(device)))\n self.inner_cntr_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt2, tolerance=1.2)).to(device)))\n self.celltype_1_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt3, tolerance=1.2)).to(device)))\n self.celltype_2_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt4, tolerance=1.2)).to(device)))\n self.celltype_3_ds.append(Polygon2d(torch.from_numpy(approximate_polygon(cnt5, tolerance=1.2)).to(device)))\n\n self.masses = [np.array(m1).mean(), np.array(m2).mean(), np.array(m3 + m4 + m5).mean()]\n self.fg_shape_descriptors = self.celltype_1_ds + self.celltype_2_ds + self.celltype_3_ds",
"def test_run_jackknifed_beta_diversity_parallel(self):\r\n\r\n run_jackknifed_beta_diversity(\r\n self.test_data['biom'][0],\r\n self.test_data['tree'][0],\r\n 20,\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n self.test_data['map'][0],\r\n parallel=True,\r\n status_update_callback=no_status_updates)\r\n\r\n weighted_unifrac_upgma_tree_fp = join(self.test_out,\r\n 'weighted_unifrac',\r\n 'upgma_cmp', 'jackknife_named_nodes.tre')\r\n unweighted_unifrac_upgma_tree_fp = join(\r\n self.test_out, 'unweighted_unifrac', 'upgma_cmp',\r\n 'jackknife_named_nodes.tre')\r\n weighted_unifrac_emperor_index_fp = join(\r\n self.test_out, 'weighted_unifrac', 'emperor_pcoa_plots',\r\n 'index.html')\r\n unweighted_unifrac_emperor_index_fp = join(\r\n self.test_out, 'unweighted_unifrac', 'emperor_pcoa_plots',\r\n 'index.html')\r\n\r\n input_file_basename = splitext(split(self.test_data['biom'][0])[1])[0]\r\n unweighted_unifrac_dm_fp = join(self.test_out,\r\n 'unweighted_unifrac_%s.txt' % input_file_basename)\r\n weighted_unifrac_dm_fp = join(self.test_out,\r\n 'weighted_unifrac_%s.txt' % input_file_basename)\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(weighted_unifrac_upgma_tree_fp) > 0)\r\n self.assertTrue(getsize(unweighted_unifrac_upgma_tree_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_emperor_index_fp) > 0)\r\n self.assertTrue(getsize(unweighted_unifrac_emperor_index_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)",
"def run(self,kRange=None,sigmaRange=None,chunks=None):\n\n ## run spectral clustering parameter search\n totalCores = cpu_count()\n totalCores = totalCores - 1\n\n ## specify the ranges\n if not kRange:\n kRange = np.array([int(round(i)) for i in np.linspace(20,500,15)])\n elif type(kRange) == type([]):\n kRange = np.array(kRange)\n\n ## different sigma ranges are appropriate for different GO aspects\n if sigmaRange:\n pass\n elif self.aspect == 'biological_process':\n sigmaRange = np.linspace(0.01,1.0,15)\n elif self.aspect == 'molecular_function':\n sigmaRange = np.linspace(1.0,2.0,15)\n elif self.aspect == 'cellular_component':\n sigmaRange = np.linspace(0.05,1.0,15)\n else:\n raise Exception(\"invalid aspect provided\")\n\n ## prepare outfiles\n outFid1 = open(self.resultsPath1,'wa')\n self.writer1 = csv.writer(outFid1)\n header1 = ['k','sigma','silvalue']\n self.writer1.writerow(header1)\n \n outFid2 = open(self.resultsPath2,'wa')\n self.writer2 = csv.writer(outFid2)\n header2 = ['k','sigma']+range(kRange.max())\n self.writer2.writerow(header2)\n\n ## limit each iteration to keep memory usage down \n if chunks:\n pass\n else:\n chunks = int(round((np.log(self.M.shape[0]))))\n print(\"chunks = %s\"%chunks)\n\n toRun = []\n for k in kRange:\n toRun += [(k,sigma,self.distancePath,self.dtype) for sigma in sigmaRange]\n\n stopPoints = np.arange(0,len(toRun),chunks)\n if stopPoints[-1] < len(toRun):\n stopPoints = np.hstack([stopPoints[1:],np.array([len(toRun)])])\n\n begin = 0\n\n if chunks == 1:\n self._run_sc(toRun)\n else:\n for i,chunk in enumerate(range(stopPoints.size)):\n stop = stopPoints[chunk]\n print('...running %s-%s/%s'%(begin,stop,len(toRun)))\n self.run_sc(toRun,begin,stop)\n begin = stop\n\n print(\"complete.\")\n outFid1.close()\n outFid2.close()",
"def local_vs_global_gene_mm_scan(y, sd, file_prefix='/tmp/temp', radius=20000, kinship_method='ibd',\n global_k=None, tair_ids=None, plot_gene_trees=False, ets=None):\n print 'Starting Mixed model, local vs. global kinship scan...'\n import gwaResults as gr\n import dataParsers as dp\n if global_k == None:\n if kinship_method == 'ibd':\n K = sd.get_ibd_kinship_matrix()\n elif kinship_method == 'ibs':\n K = sd.get_ibs_kinship_matrix()\n else:\n raise NotImplementedError\n else:\n K = global_k\n lmm0 = LinearMixedModel(Y=y)\n lmm0.add_random_effect(K)\n eig_L = lmm0._get_eigen_L_()\n h0_res = lmm0.get_estimates(eig_L)\n\n gene_dict = dp.parse_tair_gff_file()\n if tair_ids == None:\n tair_ids = gene_dict.keys()\n tair_ids.sort()\n chromosomes = []\n positions = []\n pvals = []\n perc_variances1 = []\n perc_variances2 = []\n h1_heritabilities = []\n mapped_tair_ids = []\n# chunk_i = 0\n for i, tair_id in enumerate(tair_ids):\n gd = gene_dict[tair_id]\n chrom = gd['chromosome']\n if chrom not in ['1', '2', '3', '4', '5']:\n continue\n chrom = int(chrom)\n start_pos = gd['start_pos'] - radius\n stop_pos = gd['end_pos'] + radius\n mean_pos = (start_pos + stop_pos) / 2\n d = sd.get_local_n_global_kinships(chrom=chrom, start_pos=start_pos, stop_pos=stop_pos,\n global_kinship=K, kinship_method=kinship_method)\n if d['local_k'] != None and d['global_k'] != None:\n local_k = kinship.scale_k(d['local_k'])\n global_k = kinship.scale_k(d['global_k'])\n # print \"Chromosome=%d, position=%d\" % (chrom, focal_pos)\n res_dict = local_vs_global_mm(y, local_k, global_k, K, h0_res=h0_res)\n chromosomes.append(chrom)\n positions.append(mean_pos)\n perc_variances1.append(res_dict['perc_var1'])\n perc_variances2.append(res_dict['perc_var2'])\n h1_heritabilities.append(res_dict['pseudo_heritability1'])\n pvals.append(res_dict['pval'])\n mapped_tair_ids.append(tair_id)\n if plot_gene_trees and ets != None:\n tree_file = file_prefix + '_%s_%d_tree.pdf' % (tair_id, radius)\n y_strs = map(lambda x: '%0.2f' % x, y)\n snpsdata.plot_tree(local_k, tree_file, ets, verbose=True, label_values=y_strs)\n continue\n\n # print 'H0: pseudo_heritability=%0.2f' % (res_dict['h0_res']['pseudo_heritability'])\n # print 'H1: pseudo_heritability=%0.2f, perc_var1=%0.2f, perc_var2=%0.2f' % \\\n # (res_dict['h1_res']['pseudo_heritability'],\n # res_dict['h1_res']['perc_var1'],\n # res_dict['h1_res']['perc_var2'])\n if (i + 1) % int(len(tair_ids) / 100) == 0: # Print dots\n sys.stdout.write('.')\n sys.stdout.flush()\n\n pval_res = gr.Result(scores=pvals, positions=positions, chromosomes=chromosomes)\n pval_res.neg_log_trans()\n pval_res.plot_manhattan(png_file=file_prefix + '_lrt_pvals.png', percentile=0, plot_bonferroni=True)\n perc_var_res = gr.Result(scores=perc_variances2, positions=positions, chromosomes=chromosomes)\n perc_var_res.plot_manhattan(png_file=file_prefix + '_perc_var_explained.png', percentile=0,\n ylab='% of variance explained')\n return {'pvals':pvals, 'perc_variances2':perc_variances2, 'perc_variances1':perc_variances1,\n 'h0_heritability':h0_res['pseudo_heritability'], 'h1_heritabilities':h1_heritabilities,\n 'chromosomes':chromosomes, 'positions':positions, 'tair_ids':mapped_tair_ids}",
"def run(self):\n self.membershipFunction()\n self.interpretingMF()\n self.rules()\n self.standardComposition_Min()\n self.standardComposition_Max()\n self.defuzzification()",
"def test_main_split_cluster(self):\r\n\r\n command = \" \".join([\"denoiser.py\",\r\n \"-S\", \"--force\", '-c', '-n 2',\r\n \"-i\", \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME,\r\n \"-f\", \"%s/qiime/support_files/denoiser/TestData/test_set_seqs.fna\" % PROJECT_HOME,\r\n \"-o\", self.test_dir])\r\n\r\n result = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT).stdout.read()\r\n self.result_dir = self.test_dir\r\n\r\n for subdir in [\"0/\", \"1/\"]:\r\n observed = \"\".join(\r\n list(open(self.result_dir + subdir + \"centroids.fasta\")))\r\n self.assertEqual(observed, expected_centroids[subdir])\r\n\r\n observed = \"\".join(\r\n list(open(self.result_dir + subdir + \"denoiser_mapping.txt\")))\r\n self.assertEqual(observed, expected_map_string_on_cluster[subdir])",
"def main(args):\n # setting numpy error handling\n np.seterr(invalid='warn')\n \n # making BD2DBL index \n DBL_index = BD2DBL_index(r_min = float(args['--r_min']),\n r_max = float(args['--r_max']),\n D = float(args['-D']),\n B = float(args['-B']),\n w = float(args['-w']),\n tube_diam = float(args['--tube_diam']),\n tube_height = float(args['--tube_height']),\n BD_min = float(args['--BD_min']),\n BD_max = float(args['--BD_max']),\n vertical = args['--vertical'])\n\n\n #--debug--#\n #DBL_index = _fake_DBL_index(BD_min = float(args['--BD_min']),\n # BD_max = float(args['--BD_max']))\n\n ## writing DBL_index\n if args['--DBL_out']:\n write_DBL_index(DBL_index, args['--DBL_out'])\n\n # comm file (if provided)\n try:\n comm = CommTable.from_csv(args['--comm'], sep='\\t')\n except ValueError:\n comm = None\n \n # loading fragment KDEs of each genome\n kde2d = Utils.load_kde(args['<fragment_kde>'])\n\n # making new KDEs {libID:{taxon:kde}}\n KDEs = {}\n if comm is not None:\n for libID in comm.get_unique_libIDs(): \n tmp = KDE_by_lib(DBL_index, kde2d,\n n = int(args['-n']),\n frac_abs = float(args['--frac_abs']),\n bw_method = args['--bw'],\n nprocs = int(args['--np']),\n debug = args['--debug'],\n comm = comm,\n commx = float(args['--commx']),\n libID=libID)\n if args['-o'].lower() == 'none': \n KDEs[libID] = {taxon:KDE for taxon,KDE in tmp}\n else:\n KDEs[libID] = Utils.write_lib_kde({taxon:KDE for taxon,KDE in tmp},\n args['-o'], \n libID) \n tmp = None \n \n else:\n libID = '1'\n tmp = KDE_by_lib(DBL_index, kde2d,\n n = int(args['-n']),\n frac_abs = float(args['--frac_abs']),\n bw_method = args['--bw'],\n nprocs = int(args['--np']),\n debug = args['--debug'])\n KDEs[libID] = {taxon:KDE for taxon,KDE in tmp}\n tmp = None\n \n # pickling output\n if args['-o'].lower() == 'none':\n dill.dump(KDEs, sys.stdout) \n else:\n with open(args['-o'], 'wb') as outFH:\n dill.dump(KDEs, outFH)",
"def test_main_split_cluster(self):\n \n command = \" \".join( [\"%s/denoiser.py\" % get_qiime_scripts_dir(),\n \"-S\", \"--force\", '-c', '-n 2',\n \"-i\", \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME,\n \"-f\", \"%s/qiime/support_files/denoiser/TestData/test_set_seqs.fna\" % PROJECT_HOME,\n \"-o\", self.test_dir] )\n\n result = Popen(command,shell=True,universal_newlines=True,\\\n stdout=PIPE,stderr=STDOUT).stdout.read()\n self.result_dir = self.test_dir\n\n for subdir in [\"0/\",\"1/\"]:\n observed = \"\".join(list(open(self.result_dir+ subdir+\"centroids.fasta\")))\n self.assertEqual(observed, expected_centroids[subdir])\n\n observed = \"\".join(list(open(self.result_dir+ subdir+\"denoiser_mapping.txt\")))\n self.assertEqual(observed, expected_map_string_on_cluster[subdir])",
"def postExecution(self):\n\n casalog.origin(\"ParallelDataHelper\") \n if self._msTool:\n self._msTool.close()\n \n # We created a data directory and many SubMSs,\n # now build the reference MS. The outputList is a\n # dictionary of the form:\n # {'path/outputvis.data/SUBMSS/outputvis.0000.ms':True,\n # 'path/outuputvis.data/SUBMSS/outputvis.0001.ms':False}\n outputList = {}\n \n# if (ParallelTaskHelper.getBypassParallelProcessing()==1):\n if (self._cluster == None):\n # This is the list of output SubMSs\n outputList = self._sequential_return_list\n self._sequential_return_list = {}\n elif (self._cluster != None):\n command_response_list = self._cluster.get_command_response(self._command_request_id_list,True,True)\n # Format list in the form of vis dict\n for command_response in command_response_list:\n outvis = command_response['parameters']['outputvis']\n outputList[outvis] = command_response['ret']\n \n \n # List of failed MSs. TBD\n nFailures = []\n \n subMSList = []\n\n nFailures = [v for v in outputList.values() if v == False]\n \n for subMS in outputList:\n # Only use the successful output MSs\n if outputList[subMS]:\n subMSList.append(subMS)\n \n subMSList.sort()\n\n if len(subMSList) == 0:\n casalog.post(\"Error: no subMSs were successfully created.\", 'WARN')\n return False\n \n # When separationaxis='scan' there is no need to give ddistart. \n # The tool looks at the whole spw selection and\n # creates the indices from it. After the indices are worked out, \n # it applies MS selection. We do not need to consolidate either.\n \n # If axis is spw, give a list of the subMSs\n # that need to be consolidated. This list is pre-organized\n # inside the separation functions above.\n \n # Only when input is MS or MS-like and createmms=True\n # Only partition and mstransform have the createmms parameter\n if self._arg.has_key('createmms') and self._arg['createmms'] == True and self._arg['separationaxis'] == 'spw':\n# if (self._arg['separationaxis'] == 'spw' or \n# self._arg['separationaxis'] == 'auto'): \n# if (self._arg['separationaxis'] == 'spw'): \n \n casalog.post('Consolidate the sub-tables')\n \n toUpdateList = self.__ddidict.values()\n \n toUpdateList.sort()\n casalog.post('List to consolidate %s'%toUpdateList,'DEBUG')\n \n # Consolidate the spw sub-tables to take channel selection\n # or averages into account.\n mtlocal1 = mttool()\n try: \n mtlocal1.mergespwtables(toUpdateList)\n mtlocal1.done()\n except Exception, instance:\n mtlocal1.done()\n casalog.post('Cannot consolidate spw sub-tables in MMS','SEVERE')\n return False\n\n if len(nFailures) > 0:\n casalog.post('%s subMSs failed to be created. 
This is not an error, if due to selection when creating a Multi-MS'%len(nFailures))\n # need to rename/re-index the subMSs\n newList = copy.deepcopy(subMSList)\n idx = 0\n for subms in newList:\n suffix = re.findall(r\".\\d{4}.ms\",subms)\n# newms = subms.rpartition(suffix[-1])[0] \n newms = subms[:-len(suffix[-1])]\n newms = newms+'.%04d.ms'%idx\n os.rename(subms,newms)\n newList[idx] = newms\n idx += 1\n\n \n if len(subMSList) == len(newList):\n subMSList = newList\n \n # Get the first subMS to be the reference when\n # copying the sub-tables to the other subMSs \n mastersubms = subMSList[0]\n\n # Get list of all subtables in a subms\n thesubtables = ph.getSubtables(mastersubms)\n \n # Remove the SOURCE and HISTORY tables, which will be the only copied.\n # All other sub-tables will be linked to first subms\n thesubtables.remove('SOURCE')\n thesubtables.remove('HISTORY')\n\n subtabs_to_omit = thesubtables\n \n # Parallel axis to write to table.info of MMS\n # By default take the one from the input MMS\n parallel_axis = ph.axisType(self.__args['vis'])\n if self._arg.has_key('createmms') and self._arg['createmms'] == True:\n parallel_axis = self._arg['separationaxis']\n\n if parallel_axis == 'auto' or parallel_axis == 'both':\n parallel_axis = 'scan,spw'\n \n # Copy sub-tables from first subMS to the others. The tables in\n # subtabs_to_omit are linked instead of copied.\n casalog.post(\"Finalizing MMS structure\")\n ph.makeMMS(self._arg['outputvis'], subMSList,\n True, # copy subtables (will copy only the SOURCE and HISTORY tables)\n subtabs_to_omit, # omitting these\n parallel_axis\n )\n \n thesubmscontainingdir = os.path.dirname(subMSList[0].rstrip('/'))\n \n shutil.rmtree(thesubmscontainingdir)\n \n # Sanity check on the just created MMS\n # check for broken symlinks\n try:\n with open(os.devnull, 'w') as null:\n p = subprocess.Popen(['find', '-L', self._arg['outputvis'], '-type', 'l'],\n universal_newlines=True, stdout=subprocess.PIPE, stderr=null)\n o, e = p.communicate()\n if o:\n casalog.post('The new MMS contain broken symlinks. Please verify', 'SEVERE')\n casalog.post(o, 'SEVERE')\n return False\n except:\n pass\n\n return True",
"def setup(self):\n igd = self.options['input_grid_data']\n ogd = self.options['output_grid_data']\n output_subset = self.options['output_subset']\n\n if ogd is None:\n ogd = igd\n\n # Build the interpolation matrix which maps from the input grid to the output grid.\n # Rather than a single phase-wide interpolating polynomial, map each segment.\n # To do this, find the nodes in the output grid which fall in each segment of the input\n # grid. Then build a Lagrange interpolating polynomial for that segment\n L_blocks = []\n output_nodes_ptau = list(ogd.node_ptau[ogd.subset_node_indices[output_subset]])\n\n for iseg in range(igd.num_segments):\n i1, i2 = igd.segment_indices[iseg]\n iptau_segi = np.take(igd.node_ptau, (i1, i2-1))\n istau_segi = np.take(igd.node_stau, (i1, i2-1))\n\n # The indices of the output grid that fall within this segment of the input grid\n if ogd is igd:\n optau_segi = iptau_segi\n else:\n ptau_hi = igd.segment_ends[iseg+1]\n if iseg < igd.num_segments - 1:\n idxs_in_iseg = np.where(output_nodes_ptau <= ptau_hi)[0]\n else:\n idxs_in_iseg = np.arange(len(output_nodes_ptau))\n optau_segi = np.asarray(output_nodes_ptau)[idxs_in_iseg]\n # Remove the captured nodes so we don't accidentally include them again\n output_nodes_ptau = output_nodes_ptau[len(idxs_in_iseg):]\n\n # Now get the output nodes which fall in iseg in iseg's segment tau space.\n ostau_segi = 2.0 * (optau_segi - iptau_segi[0]) / (iptau_segi[-1] - iptau_segi[0]) - 1\n\n # Create the interpolation matrix and add it to the blocks\n L, _ = lagrange_matrices(istau_segi, ostau_segi)\n L_blocks.append(L)\n\n self.interpolation_matrix = block_diag(*L_blocks)\n r, c = np.nonzero(self.interpolation_matrix)\n\n output_num_nodes, input_num_nodes = self.interpolation_matrix.shape\n\n for (name, kwargs) in self._timeseries_outputs:\n\n input_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n input_name = 'input_values:{0}'.format(name)\n self.add_input(input_name,\n shape=(input_num_nodes,) + kwargs['shape'],\n **input_kwargs)\n\n output_name = name\n output_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n output_kwargs['shape'] = (output_num_nodes,) + kwargs['shape']\n self.add_output(output_name, **output_kwargs)\n\n self._vars.append((input_name, output_name, kwargs['shape']))\n\n size = np.prod(kwargs['shape'])\n val_jac = np.zeros((output_num_nodes, size, input_num_nodes, size))\n\n for i in range(size):\n val_jac[:, i, :, i] = self.interpolation_matrix\n\n val_jac = val_jac.reshape((output_num_nodes * size, input_num_nodes * size),\n order='C')\n\n val_jac_rows, val_jac_cols = np.where(val_jac != 0)\n\n rs, cs = val_jac_rows, val_jac_cols\n self.declare_partials(of=output_name,\n wrt=input_name,\n rows=rs, cols=cs, val=val_jac[rs, cs])",
"def main():\n parser = argparse.ArgumentParser(description=\"Process the results of an experiment.\")\n parser.add_argument(\"experiment\")\n arguments = parser.parse_args()\n path = f\"experiments/{arguments.experiment}\"\n if not os.path.exists(path):\n raise SystemExit(f\"Path {path} does not exists.\")\n\n # For efficiency, one should generate the results from the parts without merging them.\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n frames = []\n for file in files:\n device, experiment, _ = file.split(\".\")\n frame = pandas.read_csv(\n os.path.join(path, file),\n index_col=\"variable\",\n usecols=[\"variable\", \"group_index\", \"value_i\"], dtype={\"value_i\": \"Int64\"}\n )\n frame[\"board\"] = device\n frame[\"experiment\"] = experiment\n frames.append(frame)\n dataframe = pandas.concat(frames)\n frames = None\n\n current_grouping = dataframe.groupby([\"group_index\", \"variable\"])\n \n data = current_grouping.agg([\n numpy.median,\n _percentile_factory(95),\n numpy.mean,\n numpy.std,\n \"count\"\n ])\n\n print(data)\n \n data = data.droplevel([0], axis=1)\n data = data.unstack()\n data.columns = data.columns.map('_'.join)\n data.to_csv(f\"{arguments.experiment}.csv\")",
"def _build_integration_grid(self):\n pass",
"def abs_units(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n\n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n print 'Using vanadium mass: ',van_mass\n print ' sample mass: ',samp_mass \n print ' sample_rmm : ',samp_rmm \n # check if mono-vanadium is provided as multiple files list or just put in brackets ocasionally\n if isinstance(mono_van,list):\n if len(mono_van)>1:\n raise IOError(' Can currently work only with single monovan file but list supplied')\n else:\n mono_van = mono_van[0];\n\n \n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', 
kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n map_file = \"\"\n print 'one2one selected'\n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file = map_file+'.map'\n reducer.map_file = map_file;\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integration range to: ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n\n \n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n if (kwargs.get('hardmaskOnly')): \n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n else:\n specs=\"\"\n \n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking =mtd['mask_wksp']\n else:\n print '########### Run diagnose for sample run ##############'\n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking) \n print 'first Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n fail_list=get_failed_spectra_list(masking) \n else:\n print '########### Run diagnose for monochromatic vanadium run ##############'\n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2 \n reducer.spectra_masks=total_mask \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(total_mask)\n #fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n \n \n #Run the conversion first on the sample\n deltaE_wkspace_sample = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n\n \n if kwargs.has_key('mono_correction_factor'):\n absnorm_factor=kwargs.get('mono_correction_factor')\n print 'Using supplied correction factor for absolute units'\n else:\n print '##### Evaluate the integral from the monovan run and calculate the correction factor ######'\n print ' Using absolute units vanadion integration range : ', reducer.monovan_integr_range \n #now on the mono_vanadium run swap the mapping file\n reducer.map_file = monovan_mapfile \n deltaE_wkspace_monovan = reducer.convert_to_energy(mono_van, ei_guess, wb_mono)\n \n (absnorm_factorL,absnorm_factorSS,absnorm_factorP,absnorm_factTGP) = getAbsNormalizationFactor(deltaE_wkspace_monovan.getName(),str(reducer.monovan_integr_range[0]),str(reducer.monovan_integr_range[1])) \n \n print 'Absolute correction factor S^2 =',absnorm_factorSS,' Libisis: ',absnorm_factorL,' Puasonian: ',absnorm_factorP, ' TGP : ',absnorm_factTGP\n 
CreateSingleValuedWorkspace(OutputWorkspace='AbsFactor',DataValue=absnorm_factTGP)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace_sample.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found for sample run ',ei,' meV'\n print 'Incident energy found for mono vanadium run ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace_sample,OutputWorkspace=results_name)\n if results_name != wksp_out:\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n Divide(LHSWorkspace=wksp_out,RHSWorkspace='AbsFactor',OutputWorkspace=wksp_out)\n DeleteWorkspace(Workspace='AbsFactor')\n return mtd[wksp_out]",
"def _trainBySegments(self, divisions, trainingSet):\n # subdivide domain and train subdomain ROMs, as with the segmentation\n ## TODO can we increase the inheritance more here, or is this the minimum cutset?\n counter, remainder = divisions\n # store delimiters\n if len(remainder):\n self.raiseADebug('\"{}\" division(s) are being excluded from clustering consideration.'.format(len(remainder)))\n ## train ROMs for each segment\n roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)\n # collect ROM features (basic stats, etc)\n clusterFeatures = self._gatherClusterFeatures(roms, counter, trainingSet)\n # future: requested metrics\n ## TODO someday\n # store clustering info, unweighted\n self._clusterInfo['features'] = {'unscaled': copy.deepcopy(clusterFeatures)}\n # weight and scale data\n ## create hierarchy for cluster params\n features = sorted(clusterFeatures.keys())\n hierarchFeatures = defaultdict(list)\n for feature in features:\n _, metric, ident = feature.split('|', 2)\n # the same identifier might show up for multiple targets\n if ident not in hierarchFeatures[metric]:\n hierarchFeatures[metric].append(ident)\n ## weighting strategy, TODO make optional for the user\n weightingStrategy = 'uniform'\n clusterFeatures = self._weightAndScaleClusters(features, hierarchFeatures, clusterFeatures, weightingStrategy)\n self._clusterInfo['features']['scaled'] = copy.deepcopy(clusterFeatures)\n # perform clustering\n labels = self._classifyROMs(self._divisionClassifier, features, clusterFeatures)\n uniqueLabels = sorted(list(set(labels))) # note: keep these ordered! Many things hinge on this.\n self.raiseAMessage('Identified {} clusters while training clustered ROM \"{}\".'.format(len(uniqueLabels), self._romName))\n # if there were some segments that won't compare well (e.g. leftovers), handle those separately\n if len(remainder):\n unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)\n else:\n unclusteredROMs = []\n # make cluster information dict\n self._clusterInfo['labels'] = labels\n ## clustered\n self._clusterInfo['map'] = dict((label, roms[labels == label]) for label in uniqueLabels)\n ## unclustered\n self._clusterInfo['map']['unclustered'] = unclusteredROMs\n # TODO what about the unclustered ones? We throw them out in truncated representation, of necessity.\n self._roms = list(self._clusterInfo['map'][label][0] for label in uniqueLabels)",
"def main():\n\n house_path = '../../Data/wijk1_huizen.csv'\n battery_path = '../../Data/wijk1_batterijen.txt'\n\n houses, batteries = read_data(house_path, battery_path)\n\n smart_wijk = SmartGrid(51,51)\n smart_wijk.add_house_dictionaries(houses)\n smart_wijk.add_battery_dictionaries(batteries)\n\n for element in houses:\n smart_wijk.create_house(element['position'], element['output'])\n for element in batteries:\n smart_wijk.create_battery(element['position'], element['capacity'])\n\n solution_reader(smart_wijk, '../../Results/best_brabo_solution.csv')"
] | [
"0.5610221",
"0.5556858",
"0.5512909",
"0.546379",
"0.54304254",
"0.5370964",
"0.536435",
"0.53357214",
"0.5281515",
"0.5277882",
"0.52746207",
"0.5239652",
"0.52363443",
"0.5225614",
"0.52088296",
"0.52050596",
"0.5199139",
"0.5171439",
"0.5166146",
"0.51584786",
"0.51337284",
"0.51055896",
"0.5099281",
"0.50943357",
"0.5093252",
"0.50917387",
"0.50806475",
"0.5077881",
"0.5066935",
"0.50654095"
] | 0.75939274 | 0 |
Imports default PRIME input parameters, modifies the appropriate entries, and prints out a starting PHIL file to be used with PRIME. | def make_prime_input(self, filename="prime.phil", run_zero=False):
assert self.info
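        # Pull the pixel size and high/low resolution statistics gathered during processing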
pixel_size = self.info.pixel_size
hres = self.info.stats["res"]
lres = self.info.stats["lres"]
# If symmetry / unit cell were not overridden from GUI, set from INFO
if not self.best_pg:
try:
self.best_pg = self.info.best_pg.replace(" ", "")
except AttributeError as e:
print("PRIME INPUT ERROR, SPACE GROUP: ", e)
self.best_pg = "P1"
if not self.best_uc:
self.best_uc = self.info.best_uc
# Determine crystal system from crystal symmetry
sym = crystal.symmetry(space_group_symbol=self.best_pg)
crystal_system = str(sym.space_group().crystal_system())
# Determine number of images for indexing ambiguity resolution
        # Default: 1/2 of the images or 300, whichever is smaller
if len(self.info.categories["integrated"]) >= 600:
idx_ambiguity_sample = 300
idx_ambiguity_selected = 100
else:
idx_ambiguity_sample = int(
round(len(self.info.categories["integrated"]) / 2)
)
idx_ambiguity_selected = int(round(idx_ambiguity_sample / 3))
# Set run number to 000 if running LivePRIME
out_dir = os.path.join(os.path.dirname(self.prime_data_path), "prime")
if run_zero:
run_path = os.path.join(out_dir, "000")
else:
run_path = util.set_base_dir(out_dir=out_dir)
# Populate pertinent data parameters
prime_params = mod_input.master_phil.extract()
prime_params.run_no = run_path
prime_params.data = [self.prime_data_path]
prime_params.title = "Auto-generated by IOTA v{} on {}" "".format(
iota_version, now
)
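        # Resolution limits for scaling, post-refinement and merging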
prime_params.scale.d_min = hres["mean"]
prime_params.scale.d_max = 8
prime_params.postref.scale.d_min = hres["mean"]
prime_params.postref.scale.d_max = lres["max"]
prime_params.postref.crystal_orientation.d_min = hres["mean"]
prime_params.postref.crystal_orientation.d_max = lres["max"]
prime_params.postref.reflecting_range.d_min = hres["mean"]
prime_params.postref.reflecting_range.d_max = lres["max"]
prime_params.postref.unit_cell.d_min = hres["mean"]
prime_params.postref.unit_cell.d_max = lres["max"]
prime_params.postref.allparams.d_min = hres["mean"]
prime_params.postref.allparams.d_max = lres["max"]
prime_params.merge.d_min = hres["mean"]
prime_params.merge.d_max = lres["max"]
prime_params.target_unit_cell = uctbx.unit_cell(self.best_uc)
prime_params.target_space_group = self.best_pg
prime_params.target_crystal_system = crystal_system
prime_params.pixel_size_mm = pixel_size
prime_params.n_residues = 500
prime_params.indexing_ambiguity.n_sample_frames = idx_ambiguity_sample
prime_params.indexing_ambiguity.n_selected_frames = idx_ambiguity_selected
# Determine which queue to run on (i.e. match IOTA queue)
        # Modify specific options based on IOTA settings
# Queue options
if self.params.mp.method == "lsf" and self.params.mp.queue is not None:
prime_params.queue.mode = "bsub"
prime_params.queue.qname = self.params.mp.queue
        # Number of processors (automatically set to 1/2 of the IOTA processor count)
prime_params.n_processors = int(self.params.mp.n_processors / 2)
# Generate PRIME param PHIL
prime_phil = mod_input.master_phil.format(python_object=prime_params)
prime_file = os.path.join(self.info.int_base, filename)
with open(prime_file, "w") as pf:
pf.write(prime_phil.as_str())
return prime_phil | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Usage():\r\n print \"Correct Usage:\"\r\n print \"python primesBelow.py <integer>\"",
"def pr(form, *args):\n # variables\n global lPr, lMaPr\n\n if lPr < lMaPr:\n for l in range(lPr + 1):\n sys.stdout.write('-')\n if len(args) == 0:\n print form\n if logFile is not None:\n logging.info(form)\n else:\n print form % args\n if logFile is not None:\n logging.info(form % args)",
"def start():\n\n global input, frameList, processPageList\n\n print \"\\n******************************************************************\"\n print \"\\tThis is a simulation for LRU replacement.\"\n print \"\\tAt any time you may choose to quit('q') this program.\"\n print \"******************************************************************\"\n\n # Open File\n with open(\"input.txt\") as f:\n original = f.readlines()\n\n # Remove \":\", \"\\n\"\n for line in original:\n input = line.replace(\":\", \"\")\n input = input.replace('\\n', \"\")\n\n # Split by tab, add to processList\n output = input.partition('\\t')\n\n # convert process to number\n pid = output[0]\n proc = int(pid[1])\n\n # convert binary page # to decimal\n page = int(str(output[2]), 2)\n\n # add process and page # to list\n processPageList.append([proc, page])",
"def preprocess_main():",
"def main():\n # Get Params\n parser = argparse.ArgumentParser(description=\"PolyplopiaModule script\")\n add_args(parser)\n params = vars(parser.parse_args()) # dict()\n\n # Execute Tray\n summary = RunI3Tray(params, configure_tray, \"PolyplopiaModule\",\n inputfilenamelist=[params['gcdfile'], params['inputfile']],\n outputfile=params['outputfile'],\n seed=params['seed'],\n nstreams=params['nproc'],\n streamnum=params['procnum'],\n usegslrng=params['usegslrng'])",
"def main(args):\n parser = argparse.ArgumentParser(prog='pyprepfnt')\n parser.add_argument('fontfile', help='Input font file')\n parser.add_argument('output_dir', help='Output directory')\n parser.add_argument('--force', default=False, action='store_true',\n help='Force preprocessing even if the timestamps indicate'\n ' it is not necessary')\n parser.add_argument('--hinting', default=False, action='store_true',\n help='Retain hinting if set, else strip hinting')\n parser.add_argument('--reuse_clean', default=False, action='store_true',\n help='Reuse the \"clean\" file if possible')\n parser.add_argument('--log', default='WARNING',\n help='Set the logging level; eg, --log=INFO')\n parser.add_argument('--verbose', default=False, action='store_true',\n help='Report internal operations')\n\n cmd_args = parser.parse_args(args)\n\n loglevel = getattr(logging, cmd_args.log.upper(), None)\n if not isinstance(loglevel, int):\n raise ValueError('Invalid log level: %s' % loglevel)\n\n log = logging.getLogger()\n logging_handler = logging.StreamHandler(sys.stdout)\n logging_handler.setLevel(loglevel)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n logging_handler.setFormatter(formatter)\n log.addHandler(logging_handler)\n log.setLevel(loglevel)\n verbose = cmd_args.verbose\n\n force_preprocessing = cmd_args.force\n log.debug('force_preprocessing = ' + str(force_preprocessing))\n\n fontfile = cmd_args.fontfile\n fonttime = os.path.getmtime(fontfile)\n # TODO(bstell) use Logger\n basename = os.path.basename(fontfile)\n log.info('preprocess %s = %d bytes' % (cmd_args.fontfile, \n os.path.getsize(cmd_args.fontfile)))\n filename, extension = os.path.splitext(basename)\n cur_time = datetime.datetime.now()\n build_dir = 'tmp-%s' % filename\n if not cmd_args.reuse_clean:\n build_dir = ('%s-%04d-%02d-%02d-%02d-%02d-%02d.%d' %\n (build_dir, cur_time.year, cur_time.month, cur_time.day,\n cur_time.hour, cur_time.minute, cur_time.second, os.getpid()))\n output_dir = cmd_args.output_dir\n log.debug('JAR file: ' + output_dir)\n try:\n os.makedirs(build_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n log.error('failed to create build_dir (' + build_dir + ')')\n raise\n\n log.debug('if reuse_clean then we should compare the source font and final jar')\n cleanfile = filename + '_clean' + extension\n cleanfilepath = build_dir + '/' + cleanfile\n # Decide if we are building the cleaned up version of the font.\n rebuild_clean = not cmd_args.reuse_clean\n cleanfile_exists = os.path.isfile(cleanfilepath)\n if force_preprocessing or not cleanfile_exists:\n rebuild_clean = True\n else:\n cleantime = os.path.getmtime(cleanfilepath)\n if cleantime <= fonttime:\n rebuild_clean = True\n log.debug('rebuild_clean = ' + str(rebuild_clean))\n if rebuild_clean:\n log.debug('cleaned version: ' + cleanfilepath)\n cleanup.cleanup(fontfile, cmd_args.hinting, cleanfilepath, verbose)\n closure.dump_closure_map(cleanfilepath, build_dir)\n else:\n log.debug('reuse cleaned up version: ' + cleanfilepath)\n # Get the latest cleaned up font timestamp.\n cleantime = os.path.getmtime(cleanfilepath)\n\n # Decide if we are rebuilding the jar file.\n tachyfont_file = filename + '.TachyFont.jar'\n jarfilepath = build_dir + '/' + tachyfont_file\n rebuild_jar = False\n jarfile_exists = os.path.isfile(jarfilepath)\n log.debug('file %s exists: %s' % (jarfilepath, jarfile_exists))\n if force_preprocessing or not jarfile_exists:\n rebuild_jar = True\n else:\n jartime = 
os.path.getmtime(jarfilepath)\n if jartime <= cleantime:\n rebuild_jar = True\n log.debug('rebuild_jar = ' + str(rebuild_jar))\n if rebuild_jar:\n log.debug('start proprocess')\n preprocess = Preprocess(cleanfilepath, build_dir, verbose)\n log.debug('build base')\n preprocess.base_font()\n log.debug('dump cmap')\n preprocess.cmap_dump()\n log.debug('build glyph data')\n preprocess.serial_glyphs()\n log.debug('write sha-1 fingerprint')\n preprocess.sha1_fingerprint()\n\n log.debug('create jar file')\n sub_files = ('base closure_data closure_idx codepoints gids glyph_data '\n 'glyph_table sha1_fingerprint')\n jar_cmd = 'cd %s; jar cf %s %s' % (build_dir, tachyfont_file, sub_files)\n log.debug('jar_cmd: ' + jar_cmd)\n status = os.system(jar_cmd)\n log.debug('jar command status: ' + str(status))\n if status:\n log.error('jar command status: ' + str(status))\n return status\n else:\n log.debug('no need to rebuild intermediate jar file: ' + jarfilepath)\n # Get the latest cleaned up jar timestamp.\n jartime = os.path.getmtime(jarfilepath)\n\n\n # Decide if we are copying over the jar file.\n copy_jar = False\n jarcopy_filepath = output_dir + '/' + tachyfont_file\n jarcopy_exists = os.path.isfile(jarcopy_filepath)\n if force_preprocessing or not jarcopy_exists:\n copy_jar = True\n else:\n jarcopytime = os.path.getmtime(jarcopy_filepath)\n if jarcopytime <= jartime:\n copy_jar = True\n log.debug('copy_jar = ' + str(copy_jar))\n if copy_jar:\n log.debug('cp the files to the output directory')\n log.info('cleaned: %s = %d' % (cleanfile, os.path.getsize(cleanfilepath)))\n log.info('Jar: %s/%s' % (output_dir, tachyfont_file))\n cp_cmd = ('cp %s/%s %s/%s %s' %\n (build_dir, tachyfont_file, build_dir, cleanfile, output_dir))\n log.debug('cp_cmd: ' + cp_cmd)\n status = os.system(cp_cmd)\n log.debug('cp status ' + str(status))\n if status:\n log.error('cp status = ' + str(status))\n return status\n else:\n log.debug('the existing jar file is up to date: ' + jarfilepath)\n\n\n if cmd_args.reuse_clean:\n log.debug('leaving the build directory: ' + build_dir)\n status = 0\n else:\n log.debug('cleanup the build directory')\n rm_cmd = ('rm -rf %s' % build_dir)\n log.debug('rm_cmd: ' + rm_cmd)\n status = os.system(rm_cmd)\n log.debug('rm status ' + str(status))\n if status:\n log.error('rm status = ' + str(status))\n return status\n\n log.debug('command status = ' + str(status))\n if status != 0:\n log.info('preprocessing FAILED')\n return status",
"def main():\n parser = argparse.ArgumentParser(description = 'Arguments for running the different modules of iLiner')\n\n parser.add_argument('module', choices=['ilash', 'ibd_depth', 'stats'], help='module choice')\n parser.add_argument('--sample', '-s', type=str, required=False, help='Sample file with file path')\n parser.add_argument('--haps', '-hp', type=str, required=False, help='Phased haplotype file with file path')\n parser.add_argument('--genetic_map', '-gm', type=str, required=False, help='Genetic map file with file path')\n parser.add_argument('--mapfile', '-mf', type=str, required=False, help='Mapfile made by the ilash module')\n parser.add_argument('--outfile_prefix', '-op', type=str, help='Prefix for all of the files produced from this module with file path')\n parser.add_argument('--ilash', '-i', type=str, required=False, help='File path to ilash')\n parser.add_argument('--ilash_output', '-io', type=str, required=False, help='Ilash output, one chromosome for ibd_depth module and all chromosomes for stats module')\n parser.add_argument('--population_file', '-pf', type=str, required=False, help='File with individual ids and population [ID]tab[Population]')\n parser.add_argument('--vcf', '-v', type=str, required=False, help='Phased VCF with file path')\n parser.add_argument('--no_indel', '-ni', action='store_true', required=False, help='Pass if you would like to remove indels from your vcf or haplotype file')\n\n args = parser.parse_args()\n log = Logger()\n if args.module == 'ilash':\n args_dict = vars(args)\n log.logger.info('You have selected ilash')\n if args_dict['sample'] != None:\n log.logger.info(\"Parsing your sample and haplotype files\")\n samp_file = LoadFiles.load_sample(args.sample)\n log.logger.info('Sample file has been loaded')\n haps_file = LoadFiles.load_haps(args.haps)\n log.logger.info('Haplotype file has been loaded')\n ilash_obj = LoadFiles(samp_file, haps_file)\n FileParser.check_sample_file(ilash_obj)\n log.logger.info('Your sample file has been validated')\n FileParser.check_same_sample(ilash_obj)\n log.logger.info('Your sample file and haplotype file populations are the same')\n if args_dict['no_indel'] == True:\n FileParser.remove_indels(ilash_obj)\n else:\n pass\n FileParser.check_alleles(ilash_obj.haps_file.iloc[:,[3]].values, ilash_obj.haps_file.iloc[:,[4]].values)\n nucfunc = np.vectorize(FileParser.nucleotide_test)\n log_array_ref = nucfunc(ilash_obj.haps_file[[3]])\n log_array_alt = nucfunc(ilash_obj.haps_file[[4]])\n FileParser.validate_nucleotides(log_array_ref, 'Reference Allele')\n FileParser.validate_nucleotides(log_array_alt, 'Alternative Allele')\n log.logger.info('Your reference and alternative alleles have been validated')\n FileParser.validate_binary(ilash_obj)\n log.logger.info('Your haplotypes have been validated')\n log.logger.info(\"Making your map and pedigree files\")\n pedfile_start = FileConvert.start_pedfile(ilash_obj)\n pedlen = int(len(pedfile_start))\n hapmap = open(args.genetic_map)\n mapfile_str = args.outfile_prefix + \".map\"\n pedfile_str = args.outfile_prefix + \".ped\"\n mapfile = open(mapfile_str, 'w')\n FileConvert.make_gpos_mapfile(ilash_obj, hapmap, mapfile)\n log.logger.info('Your map file has been made')\n hapslist = FileConvert.convert_haps(ilash_obj)\n final_array = FileConvert.make_pedfile(hapslist, pedlen, pedfile_start)\n final_list = final_array.tolist()\n pedfile = open(pedfile_str, 'w')\n for line in range(len(final_list)):\n pedfile.write(' '.join(final_list[line]) + '\\n')\n pedfile.close()\n 
log.logger.info('Your pedigree file has been made')\n log.logger.info('Launching iLASH')\n RunIlash.make_param_file(mapfile_str, pedfile_str, args.ilash)\n if args_dict['vcf'] != None:\n log.logger.info('Your VCF is being parsed')\n vcf_file, comments = VcfReader.read_vcf(args.vcf)\n vcf_obj = VcfReader(vcf_file, comments)\n vcf_header = vcf_file.columns.values.tolist()\n VcfParser.validate_header(vcf_header)\n VcfParser.validate_chrom(vcf_obj)\n if args_dict['no_indels'] == True:\n VcfParser.remove_indels(vcf_ojb)\n else:\n pass\n VcfParser.diff_alt_ref(vcf_obj.vcf['REF'], vcf_obj.vcf['ALT'])\n log.logger.info('Your VCF has been parsed')\n nucfunc = np.vectorize(VcfParser.nucleotide_test)\n log_array_ref = nucfunc(vcf_obj.vcf['REF'])\n log_array_alt = nucfunc(vcf_obj.vcf['ALT'])\n VcfParser.validate_nucleotides(vcf_obj, log_array_ref, 'Reference Allele')\n VcfParser.validate_nucleotides(vcf_obj, log_array_alt, 'Alternative Allele')\n binary_array = np.isin(vcf_obj.vcf.iloc[:,9:], ['0|0', '0|1', '1|0', '1|1'])\n VcfParser.validate_binary(vcf_obj, binary_array)\n people = VcfMakeFiles.get_people(vcf_header)\n pedfile_start = VcfMakeFiles.start_pedfile(people)\n hapslist = VcfMakeFiles.convert_haps(vcf_obj)\n pedlen = int(len(haplist[0]/2))\n final_array = VcfMakeFiles.make_pedfile(hapslist, pedlen, pedfile_start)\n final_list = final_array.tolist()\n mapfile_str = args.outfile_prefix + \".map\"\n pedfile_str = args.outfile_prefix + \".ped\"\n mapfile = open(mapfile_str, 'w')\n pedfile = open(pedfile_str, 'w')\n hapmap = open(args.genetic_map)\n VcfMakeFiles.make_gpos_mapfile(vcf_obj, hapmap, mapfile)\n log.logger.info('Your map file has been made')\n for line in range(len(final_list)):\n pedfile.write(' '.join(final_list[line]) + '\\n')\n pedfile.close()\n log.logger.info('Your pedigree file has been made')\n log.logger.info('Launching iLASH')\n RunIlash.make_param_file(mapfile_str, pedfile_str, args.ilash)\n elif args.module == 'ibd_depth':\n log.logger.info('You have selected ibd_depth')\n mapfile = IBDDepth.load_files(args.mapfile)\n log.logger.info('Your map file has been loaded')\n ilash_output, snps = IBDDepth.load_ilash(args.ilash_output)\n log.logger.info('Your ilash output file has been loaded')\n mapfile = IBDDepth.find_ibd(mapfile, snps)\n four_up, four_down = IBDDepth.get_stdev(mapfile)\n IBDDepth.make_depth_plot(mapfile, args.outfile_prefix, four_up, four_down)\n log.logger.info('Your IBD depth figures have been made')\n log.logger.info('Removing outliers and creating an outlier free file')\n IBDDepth.remove_outlier(mapfile, ilash_output, args.outfile_prefix, four_up, four_down)\n elif args.module == 'stats':\n log.logger.info('You have selected stats')\n ilash_results = IBDStats.load_ilash(args.ilash_output)\n log.logger.info('Your iLASH output has been loaded')\n ilash_pops = IBDStats.load_popfile(args.population_file)\n log.logger.info('Your population file has been loaded')\n stats_obj = IBDStats(ilash_results, ilash_pops)\n IBDStats.remove_self_ibd(stats_obj)\n IBDStats.stratify_by_pop(stats_obj)\n IBDStats.make_dot_plot(stats_obj, args.outfile_prefix)\n log.logger.info('Your pair plot has been made')\n IBDStats.make_violin_plot(stats_obj, args.outfile_prefix)\n log.logger.info('Your violin plot has been made')\n IBDStats.get_ibd_stats(stats_obj, log)\n H, pval = (IBDStats.get_kw_h(stats_obj))\n log.logger.info('Kruskal-Wallis Test')\n log.logger.info('H-statistic: ' + str(H))\n log.logger.info('P-Values: ' + str(pval))\n if pval < 0.05:\n log.logger.info('Reject the null 
hypothesis - A significant differences exists between groups.')\n log.logger.info('Post-hoc Wilcoxon Rank Sum Test')\n IBDStats.get_ranksum(stats_obj, log)\n else:\n log.logger.info('Fail to reject the hypothesis - A discernable difference does not exist between the groups')\n pop_heatmap = IBDStats.fraction_ibd_sharing(stats_obj)\n fig = pop_heatmap.get_figure()\n fig.set_size_inches(13.0, 13.0)\n fig.savefig(args.outfile_prefix + '_heatmap.png')\n log.logger.info('Your heatmap has been made')",
"def main():\n # Remove the funny -psn_xxx_xxx argument (from py2app)\n if len(sys.argv) > 1 and sys.argv[1][:4] == '-psn':\n del sys.argv[1]\n\n if len(sys.argv) <= 1:\n phoshare.phoshare_ui.main()\n else:\n phoshare.phoshare_main.main()",
"def main():\n args = get_args()\n char_arg = args.character\n min_arg = args.minimum\n scale_arg = args.scale\n # flag_arg = args.flag\n pos_arg = args.positional\n\n \n pair = {}\n #print(pos_arg)\n for i in pos_arg :\n i = int(i)\n if i >= min_arg :\n his = char_arg\n i_s = i/scale_arg\n #print(i_s)\n for n in range(1,int(i_s)):\n his = his + char_arg\n# print('{} {}'.format(i,his))\n pair[i] = his\n# print(pair)\n for mmm in sorted(pair.keys()):\n print('{:3} {}'.format(mmm, pair[mmm]))",
"def pymol_pocket_cmdline(protein=None, ligand=None, prot_file=None, lig_file=None, min_rad=1.4, max_rad=3.4, constrain_radii=True, mode=\"largest\", coordinates=None, residue=None, resid=None, lig_excl_rad=None, lig_incl_rad=None, min_volume=200, subdivide=False, max_clusters=None, min_subpocket_rad=1.7, max_subpocket_rad=3.4, min_subpocket_surf_rad=1.0, radial_sampling=0.1, inclusion_radius_buffer=1.0, min_cluster_size=50, project_dir=None, output_dir=None, prefix=None, logger_stream_level=\"INFO\", logger_file_level=\"DEBUG\", protein_only=False, display_mode=\"solid\", alpha=1.0, palette=None):\n\n opts = {\n \"protein\": protein,\n \"ligand\": ligand,\n \"prot_file\": prot_file,\n \"lig_file\": lig_file,\n \"min_rad\": min_rad,\n \"max_rad\": max_rad,\n \"constrain_radii\": constrain_radii,\n \"mode\": mode,\n \"residue\": residue,\n \"resid\": resid,\n \"coordinates\": coordinates,\n \"lig_excl_rad\": lig_excl_rad,\n \"lig_incl_rad\": lig_incl_rad,\n \"min_volume\": min_volume,\n \"subdivide\": subdivide,\n \"max_clusters\": max_clusters,\n \"min_subpocket_rad\": min_subpocket_rad,\n \"max_subpocket_rad\": max_subpocket_rad,\n \"min_subpocket_surf_rad\": min_subpocket_surf_rad,\n \"radial_sampling\": radial_sampling,\n \"inclusion_radius_buffer\": inclusion_radius_buffer,\n \"min_cluster_size\": min_cluster_size,\n \"project_dir\": project_dir,\n \"output_dir\": output_dir,\n \"prefix\": prefix,\n \"logger_stream_level\": logger_stream_level,\n \"logger_file_level\": logger_file_level,\n \"protein_only\": protein_only,\n \"display_mode\": display_mode,\n \"alpha\": alpha,\n \"palette\": palette\n }\n\n pymol_pocket(**opts)",
"def main(argv):\n parsed = parse_args(argv)\n instream = sys.stdin\n name = parsed.name\n if parsed.input_file != \"-\":\n instream = open(parsed.input_file, 'r')\n name = parsed.input_file.split('.')[1]\n print pfm_as_meme_str(parse_scer_pfm(instream, handle_passed=True), name)",
"def main():\r\n\r\n # contents = ['ATGGCCATGGCCCCCAGAACTGAGATCAATAGTACCCGTATTAACGGGTGA', 'MA'] # sample input\r\n contents = []\r\n for line in sys.stdin:\r\n contents.append(line.strip())\r\n myPeptide = GenomeEncoding(contents[0], contents[1])\r\n myPeptide.getCodonSeqs()\r\n myPeptide.getRevCodonSeqs()\r\n myPeptide.printEncodePep()",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('pdf_file')\n parser.add_argument('--pages', type=int, nargs='*')\n parser.add_argument('--width', type=int, nargs='?')\n parser.add_argument('--height', type=int, nargs='?')\n parser.add_argument('--resolution', type=int, nargs='?')\n parser.add_argument(\n '--angle', type=int, nargs='?',\n help='Angle of rotation (between -90 and 90 degrees)',\n )\n parser.add_argument('--offset_x', type=int, nargs='?')\n parser.add_argument('--offset_y', type=int, nargs='?')\n parser.add_argument('--spacing_x', type=int, nargs='?')\n parser.add_argument('--spacing_y', type=int, nargs='?')\n parser.add_argument('--zoom', type=float, nargs='?')\n parser.add_argument('--reverse', action='store_true', default=None)\n parser.add_argument('--border', type=int, nargs='?')\n parser.add_argument('--colour', nargs='?')\n parser.add_argument('--shadow', action='store_true', default=None)\n parser.add_argument('--affine', action='store_true', default=None)\n parser.add_argument('--use_convert', action='store_true', default=None)\n parser.add_argument(\n '--reuse', action='store_true', default=None,\n help='Re-use page PNG files between runs. If True, you need to clear '\n 'up after yourself, but multiple runs on the same input will be '\n 'much faster.',\n )\n parser.add_argument(\n '--delete', action='store_true', default=None,\n help='Delete the output file before running. If False, and the file '\n 'exists, an exception will be raised and nothing will happen.',\n )\n parser.add_argument('--config', help='path to config.ini')\n parser.add_argument('--preset', help='load parameters from a named preset')\n parser.add_argument(\n '--parallel', type=int, nargs='?',\n help='If set to a value > 1, we use that number of processes when '\n 'applying borders & shadow to individual pages.',\n )\n parser.add_argument('--verbose', action='store_true', default=None)\n args = parser.parse_args()\n\n name, ext = os.path.splitext(args.pdf_file)\n if ext != '.pdf':\n raise Exception(\"Input file doesn't look like a PDF\")\n\n kwargs = dict(args._get_kwargs())\n print(assemble(args.pdf_file, **kwargs))",
"def loadParams(self):\n\n if len(self.filParams) < 3:\n return\n\n if not os.access(self.filParams, os.R_OK):\n return\n\n print(\"Priors.loadParams INFO: loading priors from %s\" \\\n % (self.filParams))\n\n # This is a little bit painful without just using something\n # more mature like astropy.table or pandas:\n hypers = np.genfromtxt(self.filParams, usecols=(1,2))\n\n # Convert the angular arguments to radians\n hypers[4] = np.radians(hypers[4])\n hypers[5] = np.radians(hypers[5])\n hypers[7] = np.radians(hypers[7])\n\n # transpose into hyperparams\n self.hyper = np.transpose(hypers)\n\n # now we need to read in the function names. This only really\n # has meaning for the mixed prior...\n strNames = np.genfromtxt(self.filParams, usecols=(0), dtype='str')\n self.mixedNames = list(strNames)\n\n # Finally, read in the name of the function\n with open(self.filParams, 'r') as rObj:\n for sLine in rObj:\n if sLine.find('#') < 0:\n continue\n if sLine.find('NAME') < 0:\n continue\n\n vLine = sLine.strip().split()\n self.namePrior = vLine[-1]",
"def main():\n\n parser = argparse.ArgumentParser(description=\"Prints the contents of a NiPoPoW\")\n parser.add_argument(\"--blocks\", required=True, type=int, help=\"Number of blocks\")\n parser.add_argument(\n \"--output\", default=\"proof.pkl\", type=str, help=\"Name of exported proof\"\n )\n args = parser.parse_args()\n blocks = args.blocks\n output = args.output\n if output.find(\".pkl\") == -1:\n output += \".pkl\"\n\n # Create blockchain\n header, headers_map, interlink_map = create_blockchain(blocks=blocks)\n print_headers(headers_map)\n print_interlinks(headers_map, interlink_map)\n\n # Create proof\n proof = make_proof(header, headers_map, interlink_map)\n print_proof(proof, headers_map)\n\n ### Start spoiling proof\n\n # remove_genesis(proof)\n # proof = change_interlink_hash(proof, 0)\n # proof = skip_blocks(proof, -2)\n # proof = replace_block(proof, headers_map, interlink_map, int(len(proof)/2))\n # print_proof(proof, headers_map)\n # verify_proof(Hash(proof[0][0]), proof)\n\n ### Stop spoiling proof\n\n proof_tool = ProofTool(\"../../data/proofs/\")\n p, f, lca = proof_tool.create_proof_and_forkproof(blocks, forkindex, forkblocks)\n print(p, f, lca)\n\n fixed_fork_proof = proof_tool.fetch_proof(f)\n verify_proof(Hash(fixed_fork_proof[0][0]), fixed_fork_proof)\n\n # proof_tool = ProofTool(\"../../data/proofs/\")\n # proof_tool.export_proof(fixed_fork_proof, f)",
"def main():\r\n if len(sys.argv) == 2:\r\n if sys.argv[1] == 'branch_name':\r\n print branch_name()\r\n elif sys.argv[1] == 'plat_id':\r\n print plat_id()\r\n else:\r\n print plat_id()\r\n print branch_name()\r\n return",
"def print_usage():\n print(\"usage: MILP.py -p <parameter file> -i <payoff file> -o <output file>\")\n print(\"-p, --params\\t sets the parameter file\")\n print(\"-i, --payoff\\t sets the payoff file\")\n print(\"-o, --output\\t sets the output file. Defaults to out.csv\")\n print(\"-d, --delimiter\\t sets the delimiter of ALL files. Defaults to csv\")",
"def process_input(args, phil_args, paramfile=None, gui=False, write_files=False):\n\n working_phil, bad_args = get_input_phil(\n phil_args=phil_args, ha14=args.ha14, paramfile=paramfile, gui=gui\n )\n\n # Perform command line check and modify params accordingly\n params = working_phil.extract()\n if args.ha14:\n params.advanced.processing_backend = \"ha14\"\n\n if not params.description:\n from iota import now\n\n params.description = \"IOTA parameters auto-generated on {}\".format(now)\n\n if not params.output:\n params.output = os.path.abspath(os.curdir)\n\n # Check for -r option and set random subset parameter\n if args.random > 0:\n params.data_selection.random_sample.flag_on = True\n params.data_selection.random_sample.number = args.random[0]\n\n if args.range:\n params.data_selection.image_range.flag_on = True\n params.data_selection.image_range.range = args.range\n\n # Set temporary folder path\n if args.tmp is not None:\n params.advanced.temporary_output_folder = args.tmp[0]\n\n # Check for -n option and set number of processors override\n # (for parallel map only, for now)\n from multiprocessing import cpu_count\n\n max_proc = int(cpu_count() * 3 / 4)\n nproc = args.nproc[0] if isinstance(args.nproc, list) else args.nproc\n if nproc != 0:\n if nproc >= max_proc:\n params.mp.n_processors = max_proc\n else:\n params.mp.n_processors = nproc\n elif params.mp.method == \"multiprocessing\":\n if params.mp.n_processors >= max_proc or params.mp.n_processors == 0:\n params.mp.n_processors = int(max_proc / 2)\n\n working_phil = working_phil.format(python_object=params)\n\n if write_files:\n write_defaults(\n os.path.abspath(os.curdir),\n working_phil.as_str(),\n params.advanced.processing_backend,\n )\n\n return working_phil, bad_args",
"def main():\n\tif len(sys.argv) == 1:\n\t\tsys.argv.append(\"-h\")\n\n\t# the try-except is needed so -h doesnt exit at the parsing here\n\ttry:\n\t\tparser = argparse.ArgumentParser(description=\"Merlin, a plotting tool derived\\\n\t\t\tfrom HarryPlotter. Please have a look at the documentation for Excalibur\\\n\t\t\t(https://github.com/KIT-CMS/Excalibur/blob/master/README.md)\\\n\t\t\tand for HarryPlotter (https://github.com/KIT-CMS/Artus/blob/master/HarryPlotter/README.md).\")\n\t\tparser.add_argument('--python', nargs='+', default=[None],\n\t\t\thelp=\"execute python function(s). Available functions can be listed with --list-functions\")\n\t\tknown_args, unknown_args = parser.parse_known_args()\n\n\t\t# call python config function\n\t\tif known_args.python != [None]:\n\t\t\tlogger.initLogger()\n\t\t\tfor function in known_args.python:\n\t\t\t\ttoolsZJet.call_python_function(function, tools.get_environment_variable(\"PLOTCONFIGS\"), unknown_args)\n\t\telse:\n\t\t\tharryZJet.HarryPlotterZJet(list_of_args_strings=\" \".join(unknown_args))\n\n\texcept SystemExit, e:\n\t\tif '-h' in sys.argv or '--help' in sys.argv:\n\t\t\tharryZJet.HarryPlotterZJet()",
"def printSetup(self):\n if not self.rank:\n logging.info('Setting up printing options')\n\n freq, args = self.pargs['print'][0], self.pargs['print'][1:]\n\n self.lmp.command('thermo_style custom' + (' {}' * len(args)).format(*args))\n self.lmp.command('thermo {}'.format(freq))\n self.lmp.command('thermo_modify norm no lost ignore')",
"def setup_load_process(self, lib, cell, hist_name, outputs, precision):\n # type: (str, str, str, Dict[str, str], int) -> ProcInfo\n return '', '', None, None, ''",
"def main(self):\n self.parse_command_line()\n\n if self.args.unshuffle:\n suffix = \"-unshuffled\"\n else:\n suffix = \"-shuffled\"\n\n if self.args.input is None:\n self.guess_input(suffix)\n\n if self.args.output is None:\n inpath = self.args.input\n self.args.output = inpath.with_stem(f\"{inpath.stem}{suffix}\")\n\n print(f\"{self.args.input} -> {self.args.output}\")\n\n indoc = fitz.Document(self.args.input)\n n_pages = indoc.page_count\n if n_pages < 2:\n print(f\"Not enough pages ({n_pages}) in document\")\n return\n\n if self.args.pages or self.args.unshuffle:\n chaps = pd.DataFrame.from_records(\n [\n {\n \"first_1\": n_page,\n \"title\": self.get_title(n_page, page),\n \"has_text\": self.has_text(page),\n }\n for n_page, page in enumerate(indoc, 1)\n ]\n )\n if not self.args.keep_empty_pages:\n chaps = chaps.query(\"has_text\")\n else:\n # In most PyMuPDF APIs, page numbers are 0-based,\n # but in the toc they're 1-based\n toc = [\n {\"first_1\": entry[2], \"title\": entry[1]}\n for entry in indoc.get_toc(simple=False)\n if entry[3].get(\"to\", TOP_OF_PAGE) == TOP_OF_PAGE\n ]\n if len(toc) < 1:\n print(\"No relevant bookmarks in document\")\n return\n\n toc.append({\"first_1\": n_pages + 1, \"title\": \"END\"})\n chaps = pd.DataFrame.from_records(toc, index=\"first_1\")\n if 1 not in chaps.index:\n chaps.loc[1, \"title\"] = f\"{self.args.input.name}:1\"\n\n chaps = chaps.sort_index().reset_index()\n\n if (n_chaps := len(chaps)) < 2:\n print(f\"Not enough bookmarks in {self.args.input}\")\n return\n\n first_idx = self.num_to_idx(self.args.first_chapter, n_chaps)\n last_idx = self.num_to_idx(self.args.last_chapter, n_chaps)\n\n if not 0 <= first_idx <= last_idx < n_chaps:\n print(f\"Document has {n_chaps} chapters, doens't fit\")\n return\n\n chaps = chaps.iloc[first_idx:last_idx].copy()\n\n chaps[\"next_1\"] = chaps.first_1.shift(-1, fill_value=n_pages + 1)\n chaps[\"length\"] = chaps.next_1 - chaps.first_1\n chaps[\"first_0\"] = chaps.first_1 - 1\n chaps[\"last_0\"] = chaps.next_1 - 2\n\n chaps = chaps.loc[chaps.length >= self.args.min]\n if self.args.max:\n chaps = chaps.loc[chaps.length <= self.args.max]\n\n if self.args.no_shuffle:\n pass # Leave as-is\n elif self.args.reverse:\n chaps = chaps[::-1]\n elif self.args.unshuffle:\n chaps.sort_values(\"title\", inplace=True)\n else:\n rng = self.randomize()\n chaps = chaps.sample(frac=1, random_state=rng.bit_generator)\n\n outdoc = fitz.Document()\n outtoc = []\n\n for _, chap in chaps.iterrows():\n outtoc.append((1, chap.title, outdoc.page_count + 1))\n outdoc.insert_pdf(indoc, chap.first_0, chap.last_0, final=False)\n if not self.args.unshuffle:\n outdoc.set_toc(outtoc)\n\n if not self.args.keep_empty_pages:\n for pno in range(outdoc.page_count - 1, 0, -1):\n if not self.has_text(outdoc[pno]):\n outdoc.delete_page(pno)\n\n if outdoc.page_count < indoc.page_count:\n print(f\"Number of pages in {self.args.input}: {indoc.page_count}\")\n print(f\"Number of pages in {self.args.output}: {outdoc.page_count}\")\n outdoc.ez_save(self.args.output)",
"def main(args):\n g = parse_pubchem()\n add_lots(g)",
"def main():\n\n args = get_args()\n codons = {\n 'A': 4, 'C': 2, 'D': 2, 'E': 2, 'F': 2, 'G': 4, 'H': 2, 'I': 3,\n 'K': 2, 'L': 6, 'M': 1, 'N': 2, 'P': 4, 'Q': 2, 'R': 6, 'S': 6,\n 'T': 4, 'V': 4, 'W': 1, 'Y': 2, '*': 3,\n }\n print(product(map(codons.get, args.protein + '*')) % args.modulo)",
"def main():\n parser = ArgumentParser(\n description=\"Find the prime factors of a given number.\")\n parser.add_argument(\"number\",\n help='The number to find the prime factor of.',\n type=int)\n args = parser.parse_args()\n num_to_factor = args.number\n primes = read_from_file()\n factors = get_all_prime_factors(num_to_factor, primes)\n print_factorization(str(num_to_factor), factors)",
"def parseargs(p):\n p.set_defaults(func=func)\n p.description = \"Print machine architecture.\"\n return p",
"def piprot():\n cli_parser = argparse.ArgumentParser(\n epilog=\"Here's hoping your requirements are nice and fresh!\"\n )\n cli_parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"verbosity, can be supplied more than once \"\n \"(enabled by default, use --quiet to disable)\",\n )\n cli_parser.add_argument(\n \"-l\",\n \"--latest\",\n action=\"store_true\",\n help=\"print the lastest available version for out \" \"of date requirements\",\n )\n cli_parser.add_argument(\n \"-x\",\n \"--verbatim\",\n action=\"store_true\",\n help=\"output the full requirements file, with \"\n \"added comments with potential updates\",\n )\n cli_parser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_true\",\n help=\"be a little less verbose with the output \" \"(<0.3 behaviour)\",\n )\n cli_parser.add_argument(\n \"-o\", \"--outdated\", action=\"store_true\", help=\"only list outdated requirements\"\n )\n\n cli_parser.add_argument(\n \"-g\",\n \"--github\",\n help=\"Test the requirements from a GitHub repo. \"\n \"Requires that a `requirements.txt` file \"\n \"exists in the root of the repository.\",\n )\n\n cli_parser.add_argument(\n \"-b\",\n \"--branch\",\n help=\"The branch to test requirements from, used with \"\n \"the Github URL support.\",\n )\n\n cli_parser.add_argument(\n \"-t\",\n \"--token\",\n help=\"Github personal access token to be used with \" \"the Github URL support.\",\n )\n\n cli_parser.add_argument(\n \"-p\", \"--path\", help=\"Path to requirements file in remote repository.\"\n )\n\n cli_parser.add_argument(\n \"-d\",\n \"--delay\",\n help=\"Delay before an outdated package triggers an error.\"\n \"(in days, default to 1).\",\n )\n\n cli_parser.add_argument(\"-u\", \"--url\", help=\"URL to requirements file.\")\n\n # if there is a requirements.txt file, use it by default. Otherwise print\n # usage if there are no arguments.\n nargs = \"+\"\n\n if (\n \"--github\" in sys.argv\n or \"-g\" in sys.argv\n or \"-u\" in sys.argv\n or \"--url\" in sys.argv\n ):\n nargs = \"*\"\n\n default = None\n if os.path.isfile(\"requirements.txt\"):\n nargs = \"*\"\n default = [open(\"requirements.txt\")]\n\n cli_parser.add_argument(\n \"file\",\n nargs=nargs,\n type=argparse.FileType(),\n default=default,\n help=\"requirements file(s), use \" \"`-` for stdin\",\n )\n\n cli_args = cli_parser.parse_args()\n\n if len(cli_args.file) > 1 and cli_args.verbatim:\n sys.exit(\"--verbatim only allowed for single requirements files\")\n\n verbose = True\n if cli_args.quiet:\n verbose = False\n elif cli_args.verbatim:\n verbose = False\n\n # call the main function to kick off the real work\n main(\n req_files=cli_args.file,\n verbose=verbose,\n outdated=cli_args.outdated,\n latest=cli_args.latest,\n verbatim=cli_args.verbatim,\n repo=cli_args.github,\n branch=cli_args.branch,\n path=cli_args.path,\n token=cli_args.token,\n url=cli_args.url,\n delay=cli_args.delay,\n )",
"def main():\n # Description string will show up in help.\n # Here is how you reuse the doc string from above.\n DESC_STR = main.__doc__\n # Create an epilogue string to further describe the input file\n # The epilogue will show up at the bottom of the help\n EPL_STR = \"\"\"More info shown at the bottom of the help.\"\"\"\n\n # **** argument parsing\n # define the arguments\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=DESC_STR,\n epilog=EPL_STR,\n )\n parser.add_argument(\"posArg1\", help=\"Positional Argument 1\")\n parser.add_argument(\"posArg2\", help=\"Positional Argument 2\")\n # degree must be an integer >= 1\n parser.add_argument(\n \"--degree\",\n default=1,\n type=intDegree,\n metavar=\"\",\n help=\"Polynomial degree used to \\\n curve fit the data. Default value is 1 for linear curve fit.\",\n )\n parser.add_argument(\n \"-c\",\n \"--configFile\",\n default=\"config.ini\",\n metavar=\"\",\n help=\"Configuration file. Default is config.ini.\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n default=False,\n help=\"Verbose output, usually used for troubleshooting.\",\n )\n parser.add_argument(\n \"-optArg1\",\n \"--optionalArgument1\",\n default=None,\n metavar=\"\",\n help=\"Optional Argument 1\",\n )\n parser.add_argument(\n \"-optArg2\",\n \"--optionalArgument2\",\n default=\"optArg2\",\n metavar=\"\",\n help='Optional Argument 2. How to escape a special \\\n character (\", \").How to escape the %% character.',\n )\n parser.add_argument(\n \"-optTFArg1\",\n action=\"store_true\",\n default=False,\n help=\"True/False argument set to default to False, and is \\\n set to True if the argument is specified. The argument takes no values.\",\n )\n # add a mutually exclusive required group\n typegroup = parser.add_mutually_exclusive_group(required=True)\n typegroup.add_argument(\n \"-me1\", action=\"store_true\", default=False, help=\"Mutually Exclusive choice 1\"\n )\n typegroup.add_argument(\n \"-me2\", action=\"store_true\", default=False, help=\"Mutually Exclusive choice 2\"\n )\n typegroup.add_argument(\n \"-me3\", action=\"store_true\", default=False, help=\"Mutually Exclusive choice 3\"\n )\n # parse the arguments\n args = parser.parse_args()\n\n # At this point, the arguments will be:\n # Argument Type Default Notes\n # args.posArg1 string\n # args.posArg2 string\n # args.degree integer >= 1\n # args.configFile string 'config.ini'\n # args.verbose True/False default False\n # args.optionalArgument1 string\n # args.optionalArgument2 string\n # args.optTFArg1 True/False\n # args.me1 True/False\n # args.me2 True/False\n # args.me3 True/False\n\n # Put the begin mark here, after the arg parsing, so argument problems are\n # reported first.\n print(\"**** Begin Processing ****\")\n # get start processing time\n procStart = datetime.now()\n print(\" Process start time: \" + procStart.strftime(\"%m/%d/%Y %H:%M:%S\"))\n\n # bring in config data from config.ini by default or from file specified\n # with -c argument\n config = configparser.ConfigParser()\n cfgFile = config.read(args.configFile)\n # bail out if no config file was read\n if not cfgFile:\n print(\n \"\\nERROR: The configuration file: \"\n + args.configFile\n + \" was not found. 
Exiting.\"\n )\n quit()\n # if we get here, we have config data\n if args.verbose:\n print(\"\\nThe config file(s) used are:\")\n print(cfgFile)\n print(\"\\nThe resulting configuration has these settings:\")\n for section in config:\n print(section)\n for key in config[section]:\n print(\" \", key, \":\", config[section][key])\n\n if args.verbose:\n print(\"\\nThe following arguments were parsed:\")\n print(args)\n\n # Process the arguments\n if args.posArg1 is not None:\n print(args.posArg1)\n else:\n # arg is none, so print a message.\n # Not actually possible, since this is a positional argument.\n # Included here so we can see how to process arguments.\n print(\"No value for posArg1.\")\n\n if args.posArg2 is not None:\n print(args.posArg2)\n else:\n # arg is none, so print a message.\n # Not actually possible, since this is a positional argument.\n # Included here so we can see how to process arguments.\n print(\"No value for posArg2.\")\n\n if args.degree is not None:\n print(args.degree)\n else:\n # arg is none, so print a message.\n print(\"No value for degree.\")\n\n if args.optionalArgument1 is not None:\n print(args.optionalArgument1)\n else:\n # arg is none, so print a message.\n print(\"No value for optArg1.\")\n\n if args.optionalArgument2 is not None:\n print(args.optionalArgument2)\n else:\n # arg is none, so print a message.\n print(\"No value for optArg1.\")\n\n if args.optTFArg1 is not None:\n print(args.optTFArg1)\n else:\n # arg is none, so print a message.\n print(\"No value for optTFArg1.\")\n\n if args.me1 is not None:\n print(args.me1)\n else:\n # arg is none, so print a message.\n print(\"No value for me1.\")\n\n if args.me2 is not None:\n print(args.me2)\n else:\n # arg is none, so print a message.\n print(\"No value for me2.\")\n\n if args.me3 is not None:\n print(args.me3)\n else:\n # arg is none, so print a message.\n print(\"No value for me3.\")\n\n # get end processing time\n procEnd = datetime.now()\n print(\"\\n**** End Processing ****\")\n print(\" Process end time: \" + procEnd.strftime(\"%m/%d/%Y %H:%M:%S\"))\n print(\" Duration: \" + str(procEnd - procStart) + \"\\n\")",
"def po(args):\n starttime = datetime.datetime.now()\n # Init, load and builds\n root_logger = init_logging(args.loglevel.upper(), logfile=args.logfile)\n \n # Only load optimus stuff after the settings module name has been retrieved\n os.environ['OPTIMUS_SETTINGS_MODULE'] = args.settings\n from optimus.conf import settings\n from optimus.utils import display_settings\n \n display_settings(settings, ('DEBUG', 'PROJECT_DIR','SOURCES_DIR','TEMPLATES_DIR','LOCALES_DIR'))\n \n i18n = I18NManager(root_logger, settings)\n \n # NOTE: Should we do this automatically to prevent error on missing files\n # OR should we only do checking before and abort on the first missing file ?\n if args.init or args.update or args.compile:\n i18n.init_locales_dir()\n i18n.extract(force=args.update)\n i18n.init_catalogs()\n \n if args.update:\n i18n.update_catalogs()\n \n if args.compile:\n i18n.compile_catalogs()\n \n endtime = datetime.datetime.now()\n root_logger.info('Done in %s', str(endtime-starttime))",
"def main():\r\n\t# If using the command line, parse it into a dictionary; otherwise save as None\r\n\tif len(sys.argv) > 1:\r\n\t\topt_dict = parse_command_line(sys.argv[1:])\r\n\telse:\r\n\t\topt_dict = None\r\n\t\r\n\t# Get module from user or from command line\r\n\tif opt_dict == None:\r\n\t\tprint(\"===== MENU ===== \"\r\n\t\t\t \"\\n1. make new .lab files (relabel) \"\r\n\t\t\t \"\\n2. reformat existing .lab files (clean) \"\r\n\t\t\t \"\\n3. make a dictionary from existing .lab file text \"\r\n\t\t\t \"\\nPlease enter the number for the option you would like to select:\")\r\n\t\tvalue = input(\"> \")\r\n\t\tassert value in [\"1\", \"2\", \"3\"]\r\n\telse:\r\n\t\tvalue = opt_dict[\"module\"] # stored in opt_dict\r\n\t\r\n\t# For each module, check dictionary and pass on\r\n\tif value == \"1\":\r\n\t\t# get language\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nmake new .lab files (relabel)\")\r\n\t\t\t\r\n\t\t\tprint(\"\\nWhat language are your .lab files in? Please use two-character language code\")\r\n\t\t\tprint(\"\\nRecognised codes are en, en-celex, en-timit, fr, de, nl\")\r\n\t\t\tprint(\"\\nIf your language is not listed, please leave this field blank\")\r\n\t\t\tlang = input(\"> \").lower()\r\n\t\telse:\r\n\t\t\tlang = opt_dict[\"lang\"]\r\n\t\t\r\n\t\t# get tab-delimited text files\r\n\t\tif opt_dict == None or opt_dict[\"text file\"] == None:\r\n\t\t\tprint(\"\\nWhat are the tab-delimited text files that contain the lab file text (experiment files)? (must include directory) \"\r\n\t\t\t\t \"\\nYou can drag and drop the files into the Terminal window to fill out this space \"\r\n\t\t\t\t \"\\nWARNING: No individual directory should include a space chacter\"\r\n\t\t\t\t \"\\nIf so, please go back and replace any spaces with underscores\")\r\n\t\t\texp_files = input(\"> \")\r\n\t\t\tif exp_files[-1] == \" \":\r\n\t\t\t\texp_files = exp_files[:-1]\r\n\t\telse:\r\n\t\t\texp_files = opt_dict[\"text file\"]\r\n\t\t\r\n\t\t# get column names\"amli\"\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat are the columns that identify the lab text (id columns of the experiment file)? \"\r\n\t\t\t\t \"\\nSeparate all id columns with an underscore \"\r\n\t\t\t\t \"\\nDefault is: experiment_item_condition Press enter to use default\")\r\n\t\t\texp_cols = str(input(\"> \"))\r\n\t\t\tif exp_cols == \"\":\r\n\t\t\t\texp_cols = \"experiment_item_condition\"\r\n\t\telse:\r\n\t\t\texp_cols = opt_dict[\"columns\"]\r\n\t\t\r\n\t\t# get file directory\r\n\t\tif opt_dict == None or opt_dict[\"file dir\"] == None:\r\n\t\t\tprint(\"\\nWhat is the sound directory? \"\r\n\t\t\t\t \"\\nYou can drag and drop the file into the Terminal window to fill out this space\")\r\n\t\t\tfile_dir = input(\"> \")\r\n\t\t\tfile_dir = name_check(file_dir)\r\n\t\telse:\r\n\t\t\tfile_dir = opt_dict[\"file dir\"]\r\n\t\t\r\n\t\t# get file name format\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat format are the sound file names in? (What are the id columns of the sound file?) \"\r\n\t\t\t\t \"\\nDefault is: experiment_participant_item_condition Press enter to use default\")\r\n\t\t\tformat = str(input(\"> \"))\r\n\t\t\tif format == '':\r\n\t\t\t\tformat = \"experiment_participant_item_condition\"\r\n\t\telse:\r\n\t\t\tformat = opt_dict[\"format\"]\r\n\t\t\r\n\t\t# get old directory name\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat would you like to call the directory for the old lab files? 
\"\r\n\t\t\t\t \"\\nDefault is: 0_old_labfile_relabel/ Press enter to use default\")\r\n\t\t\told_dir = input(\"> \")\r\n\t\t\tif old_dir == '':\r\n\t\t\t\told_dir = \"0_old_labfile_relabel/\"\r\n\t\t\told_dir = name_check(old_dir)\r\n\t\telse:\r\n\t\t\told_dir = opt_dict[\"old dir\"]\r\n\t\t\r\n\t\t# get dictionary option\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWould you like to create a dictionary from the lab file text?\")\r\n\t\t\tprint(\"Please answer 'y' or 'yes' if so\")\r\n\t\t\td_choice = input(\"> \").lower()\r\n\t\t\tif d_choice == \"y\" or d_choice == \"yes\":\r\n\t\t\t\tdict = True\r\n\t\t\telse:\r\n\t\t\t\tdict = False\r\n\t\telse:\r\n\t\t\tdict = opt_dict[\"dict\"]\r\n\t\t\r\n\t\t# pass on\r\n\t\trelabel_module(exp_files, exp_cols, file_dir, format, old_dir, lang, dict)\r\n\t\tsys.exit(0)\r\n\t\r\n\telif value == \"2\":\r\n\t\t# get language\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nreformat existing .lab files (clean)\")\r\n\t\t\t\r\n\t\t\tprint(\"\\nWhat language are your .lab files in? Please use the two-character language code\")\r\n\t\t\tprint(\"Recognised codes are en, en-celex, en-timit, fr, de, nl\")\r\n\t\t\tprint(\"If your language is not listed, please leave this field blank\")\r\n\t\t\tlang = input(\"> \").lower()\r\n\t\telse:\r\n\t\t\tlang = opt_dict[\"lang\"]\r\n\t\t\r\n\t\t# get file directory\r\n\t\tif opt_dict == None or opt_dict[\"file dir\"] == None:\r\n\t\t\tprint(\"\\nWhat is the sound directory? \"\r\n\t\t\t\t \"\\nYou can drag and drop the file into the Terminal window to fill out this space\")\r\n\t\t\tfile_dir = input(\"> \")\r\n\t\t\tfile_dir = name_check(file_dir)\r\n\t\telse:\r\n\t\t\tfile_dir = opt_dict[\"file dir\"]\r\n\t\t\t\r\n\t\t# get the name of the old directory\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWhat would you like to call the directory for old lab files? Default is: 0_old_labfile_clean/ Press enter to use default\")\r\n\t\t\told_dir = input(\"> \")\r\n\t\t\tif old_dir == '':\r\n\t\t\t\told_dir = \"0_old_labfile_clean/\"\r\n\t\t\told_dir = name_check(old_dir)\r\n\t\telse:\r\n\t\t\told_dir = opt_dict[\"old dir\"]\r\n\t\t\r\n\t\t# see if the user wants to use a dictionary\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nWould you like to create a dictionary from the lab file text?\")\r\n\t\t\tprint(\"Please answer 'y' or 'yes' if so\")\r\n\t\t\td_choice = input(\"> \").lower()\r\n\t\t\tif d_choice == \"y\" or d_choice == \"yes\":\r\n\t\t\t\tdict = True\r\n\t\t\telse:\r\n\t\t\t\tdict = False\r\n\t\telse:\r\n\t\t\tdict = opt_dict[\"dict\"]\r\n\t\t\r\n\t\t# pass on\r\n\t\tclean_module(file_dir, old_dir, lang, dict)\r\n\t\tsys.exit(0)\r\n\t\t\r\n\telif value == \"3\":\r\n\t\tif opt_dict == None:\r\n\t\t\tprint(\"\\nmake a dictionary from existing .lab file text\")\r\n\t\t\r\n\t\tif opt_dict == None or opt_dict[\"file dir\"] == None:\r\n\t\t\tprint(\"\\nWhat is the .lab file directory? \"\r\n\t\t\t\t \"\\nYou can drag and drop the file into the Terminal window to fill out this space\")\r\n\t\t\tfile_dir = input(\"> \")\r\n\t\t\tfile_dir = name_check(file_dir)\r\n\t\telse:\r\n\t\t\tfile_dir = opt_dict[\"file dir\"]\r\n\t\t\r\n\t\tcreate_dictionary(file_dir)\r\n\t\tsys.exit(0)"
] | [
"0.57706285",
"0.5497428",
"0.5485556",
"0.5466297",
"0.54553556",
"0.542836",
"0.5421417",
"0.53975254",
"0.53920454",
"0.53487873",
"0.5346001",
"0.5343639",
"0.53339195",
"0.5314539",
"0.53113174",
"0.5310713",
"0.5290864",
"0.52821314",
"0.52764744",
"0.5268802",
"0.5243064",
"0.5239778",
"0.52361465",
"0.52288896",
"0.5209176",
"0.5209173",
"0.52070975",
"0.520311",
"0.516221",
"0.5148126"
] | 0.6344126 | 0 |
This main function allows you to run the backup manually. | def main():
  parser = init_parser()
  args = parser.parse_args()
  # Set up logging.
  level = logging.INFO
  if args.debug:
    level = logging.DEBUG
  logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \
    '%(lineno)s %(message)s ', level=level)
  logging.info("Logging started")
  message = "Backing up "
  if args.source_code:
    message += "source and "
  message += "data for: {0}".format(args.app_id)
  logging.info(message)
  zk_connection_locations = appscale_info.get_zk_locations_string()
  zookeeper = zk.ZKTransaction(host=zk_connection_locations)
  db_info = appscale_info.get_db_info()
  table = db_info[':table']
  skip_list = args.skip
  if not skip_list:
    skip_list = []
  logging.info("Will skip the following kinds: {0}".format(sorted(skip_list)))
  ds_backup = DatastoreBackup(args.app_id, zookeeper, table,
    source_code=args.source_code, skip_list=sorted(skip_list))
  try:
    ds_backup.run()
  finally:
    zookeeper.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_backup():\n host = re.search(\"([\\w.-]+)[:]?\", env.host).group()\n date = time.strftime('%Y%m%d%H%M%S')\n fname = '%(host)s-backup-%(date)s.gz' % {'date': date, 'host': host}\n green(\"Ingrese la contraseña de la clave privada local.\")\n sudo(\"pg_dump kine | gzip > /tmp/%s\" % fname, user=\"postgres\")\n get(\"/tmp/%s\" % fname, os.path.join(backup_dir, fname))\n sudo(\"rm /tmp/%s\" % fname, user=\"postgres\")",
"def run_backup():\n\n from common.models import InvenTreeSetting\n\n if not InvenTreeSetting.get_setting('INVENTREE_BACKUP_ENABLE', False, cache=False):\n # Backups are not enabled - exit early\n return\n\n interval = int(InvenTreeSetting.get_setting('INVENTREE_BACKUP_DAYS', 1, cache=False))\n\n # Check if should run this task *today*\n if not check_daily_holdoff('run_backup', interval):\n return\n\n logger.info(\"Performing automated database backup task\")\n\n call_command(\"dbbackup\", noinput=True, clean=True, compress=True, interactive=False)\n call_command(\"mediabackup\", noinput=True, clean=True, compress=True, interactive=False)\n\n # Record that this task was successful\n record_task_success('run_backup')",
"def test_simple_backup(self):\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--no-sudo', '--ionice=idle',\n '--disable-notifications',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure the backup was created.\n self.verify_destination(latest_directory)\n # Make sure a snapshot was created.\n assert len(find_snapshots(destination)) == 1",
"def run(self):\n try:\n print \"# Obiba backup started (%s)\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.__loadConfig()\n self.__setup()\n self.__backupRemoteProjects()\n self.__backupProjects()\n except Exception, e:\n print '*' * 80\n print \"* ERROR\"\n print\n print traceback.format_exc()\n print '*' * 80\n finally:\n print \"# Obiba backup completed (%s)\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S')",
"def run(self):\n type = self.config.get('type', DEFAULT_BACKUP_TYPE)\n backup_dir = self.config.get('backup_dir', self.default_ongoing_backup_dir)\n archive = self.config.get('archive', False)\n only_postprocess = self.config.get('only_postprocess', False)\n compress = self.config.get('compress', False)\n rotate = self.config.get('rotate', False)\n threads = self.config.get('threads', DEFAULT_BACKUP_THREADS)\n\n # find or generate the backup file/dir\n if only_postprocess:\n if self.name.startswith('/'): # if passed an absolute path as section name\n # basedir doesn't work as intended if passed /a/path/like/this/\n backup_dir = os.path.normpath(os.path.join(self.name, '..'))\n self.parse_backup_file()\n else:\n self.find_backup_file(backup_dir)\n if self.file_name is None:\n msg = 'Problem while trying to find the backup files at %s'\n self.logger.error(msg, backup_dir)\n return 10\n else:\n self.generate_file_name(backup_dir)\n\n output_dir = os.path.join(backup_dir, self.dir_name)\n if type == 'dump':\n backup = MyDumperBackup(self.config, self)\n elif type == 'snapshot':\n backup = MariaBackup(self.config, self)\n elif type == 'null':\n backup = NullBackup(self.config, self)\n else:\n self.logger.error('Unrecognized backup format: %s', type)\n return 11\n\n # get the backup command\n if not only_postprocess:\n cmd = backup.get_backup_cmd(backup_dir)\n\n # start status monitoring\n if 'statistics' in self.config: # Enable statistics gathering?\n source = self.config.get('host', 'localhost') + \\\n ':' + \\\n str(self.config.get('port', DEFAULT_PORT))\n stats = DatabaseBackupStatistics(dir_name=self.dir_name, section=self.name,\n type=type, config=self.config.get('statistics'),\n backup_dir=output_dir, source=source)\n else:\n stats = DisabledBackupStatistics()\n\n stats.start()\n\n if not only_postprocess:\n # run backup command\n self.logger.debug(cmd)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if backup.errors_on_output(out, err):\n stats.fail()\n return 3\n\n # Check log for errors\n if backup.errors_on_log():\n self.logger.error('Error log found at %s', self.log_file)\n stats.fail()\n return 4\n\n # Check medatada file exists and containg the finish date\n if backup.errors_on_metadata(backup_dir):\n self.logger.error('Incorrect metadata file')\n stats.fail()\n return 5\n\n # Backups seems ok, prepare it for recovery and cleanup\n try:\n cmd = backup.get_prepare_cmd(backup_dir)\n except BackupException as ex:\n self.logger.error(str(ex))\n stats.fail()\n return 13\n if cmd != '':\n self.logger.debug(cmd)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if backup.errors_on_prepare(out, err):\n self.logger.error('The mariabackup prepare process did not complete successfully')\n stats.fail()\n return 6\n\n # get file statistics\n stats.gather_metrics()\n\n if archive:\n backup.archive_databases(output_dir, threads)\n\n if compress:\n # no consolidation per-db, just compress the whole thing\n result = self.tar_and_remove(backup_dir, self.file_name, [self.dir_name, ],\n compression='/usr/bin/pigz -p {}'.format(threads))\n if result != 0:\n self.logger.error('The compression process failed')\n stats.fail()\n return 11\n\n if rotate:\n # perform rotations\n # move the old latest one to the archive, and the current as the latest\n # then delete old backups of the same section, according to the retention\n # config\n result = 
self.move_backups(self.name, self.default_final_backup_dir,\n self.default_archive_backup_dir, self.name_regex)\n if result != 0:\n self.logger.warning('Archiving backups failed')\n result = self.os_rename(os.path.join(backup_dir, self.file_name),\n os.path.join(self.default_final_backup_dir, self.file_name))\n if result != 0:\n self.logger.error('Moving backup to final dir failed')\n stats.fail()\n return 12\n result = self.purge_backups()\n if result != 0:\n self.logger.warning('Purging old backups failed')\n\n # we are done\n stats.finish()\n return 0",
"def test_backup_only(self):\n # Check that by default a backup is performed and a snapshot is created.\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--backup', '--no-sudo',\n '--disable-notifications',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure the backup was created.\n self.verify_destination(latest_directory)\n # Make sure no snapshot was created.\n assert len(find_snapshots(destination)) == 0",
"def main(opts):\n\n if arguments['--generate-pigz']:\n gen_pigz_thread_helper()\n sys.exit(0)\n\n if arguments['--full']:\n cmd, cmd_hide, backup_path, backup_base, top_backup_base = build_full(arguments)\n clean.clean_backups(top_backup_base, int(arguments['--keep']), False)\n check_space(top_backup_base)\n succ = run_backup(cmd, cmd_hide)\n print('Backup ended {0}'.format(('Error', 'Successfully')[succ]))\n if not succ: raise BackupErrorBackupFailed('Backup', backup_path)\n if succ and not opts['--no-prepare']:\n cmd = build_full_prepare(opts, backup_path)\n succ = run_backup(cmd, cmd_hide)\n print('Prepare ended {0}'.format(('Error', 'Successfully')[succ]))\n if not succ: raise BackupErrorBackupFailed('Prepare', backup_path)\n if succ and (opts['--compress'] or int(opts['--compress-threads'])>0):\n threads = check_pigz_treads(opts['--compress-threads'])\n tar_file = tar_dir(backup_path, threads, check=not opts['--no-check'])\n if opts['--enc']:\n encrypt(tar_file, config.pass_phrase)\n elif arguments['--inc']:\n build_inc(arguments)",
"def __makeBackup(self):\n pass #FIXME!!!",
"def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')",
"def setup():\r\n thread = threading.Thread(target = backup)\r\n thread.start()",
"def backup_database():\n logger.info(\"start database_backup\")\n management.call_command('dbbackup', compress=True)\n logger.info(\"end database_backup\")",
"def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)",
"def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")",
"def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)",
"def test_main_noargs():\n startdir = os.getcwd()\n d = tempfile.mkdtemp()\n os.chdir(d)\n parser = argparse.ArgumentParser()\n parser.add_argument('--glob',\n default=os.path.join(os.getcwd(),\"backup*.gz.gpg\"))\n args=parser.parse_args()\n r.main(args)\n os.chdir(startdir)\n shutil.rmtree(d)",
"def backup_database(self):\n backup_file = \"{}-{}.sql\".format(\n config.DATABASE_NAME, datetime.today().strftime(\"%Y-%m-%d--%H%M\")\n )\n backup_uri = \"{}/{}\".format(config.DATABASE_BACKUP_BUCKET, backup_file)\n step = \"Backing Up Database:\\nbackup={}\".format(backup_uri)\n try:\n self.slacker.send_thread_reply(step)\n backup_command = [\n \"gcloud\",\n \"sql\",\n \"export\",\n \"sql\",\n config.DATABASE_INSTANCE_NAME,\n backup_uri,\n \"--database={}\".format(config.DATABASE_NAME),\n \"--verbosity=debug\",\n ]\n subprocess.run(backup_command, check=True)\n except Exception as e:\n self.raise_step_error(step=step, error=e)",
"async def module_command_backup(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n file = parsed.args[\"name\"]\n file = file.with_suffix(f\"{file.suffix}.sqlite\")\n await self.database_create_backup(file)\n await ctx.core_command_backup(parsed, file)",
"def do_backup(infile, simulate=False):\n\n # parse the input file\n cp = cparse.ConfigParser()\n cp.optionxform = str\n cp.read(infile)\n\n\n # store the list of files and directories we will backup\n\n # in each dictionary, the key is the root directory to copy from and the\n # list it indexes is the list of files/directories under that root to copy\n dirs = {}\n files = {}\n\n for sec in cp.sections():\n\n if sec == \"main\":\n\n # defaults\n root = \"/backup\"\n prefix = \"my-backup-\"\n nstore = 3\n email_sender = \"root\"\n email_receiver = \"root\"\n\n for opt in cp.options(\"main\"):\n if opt == \"root\":\n root = cp.get(sec, opt)\n elif opt == \"prefix\":\n prefix = cp.get(sec, opt)\n elif opt == \"nstore\":\n nstore = cp.get(sec, opt)\n elif opt == \"email_sender\":\n email_sender = cp.get(sec, opt)\n elif opt == \"email_receiver\":\n email_receiver = cp.get(sec, opt)\n else:\n sys.exit(\"invalid option in [main]\")\n\n bo = Backup(root, prefix, nstore,\n email_sender, email_receiver)\n else:\n\n for opt in cp.options(sec):\n value = cp.get(sec, opt)\n\n if opt == \"files\":\n flist = [f.strip() for f in value.split(',')]\n files[sec] = flist\n\n if opt == \"dirs\":\n dlist = [d.strip() for d in value.split(',')]\n dirs[sec] = dlist\n\n\n # log the output\n out_msg = f\"Output from backup-machine.py, inputs file: {infile}\\n\"\n\n blog = Log(out_msg)\n\n # make sure that the output directory exists and if so, get all the\n # subdirectories in it\n try:\n old_dirs = os.listdir(bo.root)\n except:\n blog.log(\"destination directory is not readable/doesn't exist\\n\")\n report(blog.ostr, SUBJECT_FAIL, bo.sender, bo.receiver)\n sys.exit(\"directory not readable\")\n\n\n # how many existing backups are in that directory?\n backup_dirs = [o for o in old_dirs if o.startswith(bo.prefix) and\n os.path.isdir(f\"{bo.root}/{o}\")]\n\n backup_dirs.sort()\n backup_dirs.reverse()\n\n\n # backup_dirs now contains a list of all the currently stored backups.\n # The most recent backups are at the start of the list.\n print(\"currently stored backups: \")\n for bdir in backup_dirs:\n print(bdir)\n\n # get ready for the new backups\n backup_dest = os.path.normpath(bo.root) + '/' + bo.prefix + bo.date\n\n if not simulate:\n try:\n os.mkdir(backup_dest)\n except:\n blog.log(\"error making directory\\n\")\n report(blog.ostr, SUBJECT_FAIL, bo.sender, bo.receiver)\n sys.exit(\"Error making dir\")\n else:\n blog.log(f\"mkdir {backup_dest}\\n\")\n\n\n blog.log(f\"writing to: {backup_dest}\\n\\n\")\n\n failure = 0\n\n # backup all the directories\n for root_dir in dirs:\n for d in dirs[root_dir]:\n\n mydir = os.path.normpath(root_dir + '/' + d)\n if not os.path.isdir(mydir):\n blog.log(f\"WARNING: directory {mydir} does not exist... skipping.\\n\")\n continue\n\n blog.log(f\"copying {mydir} ...\\n\")\n\n if not simulate:\n try:\n shutil.copytree(mydir,\n os.path.normpath(backup_dest) + '/' + d,\n symlinks=True)\n except:\n blog.log(f\"ERROR copying {mydir}\\n\")\n blog.log(\"aborting\\n\")\n failure = 1\n break\n\n blog.log(\"done with directories\\n\\n\")\n\n # backup all the files\n for root_dir in files.keys():\n for f in files[root_dir]:\n\n myfile = os.path.normpath(root_dir + '/' + f)\n if not os.path.isfile(myfile):\n blog.log(f\"WARNING: file {myfile} does not exist... 
skipping.\\n\")\n continue\n\n blog.log(f\"copying {root_dir}/{f} ...\\n\")\n\n if not simulate:\n try:\n shutil.copy(myfile,\n os.path.normpath(backup_dest) + '/' + f)\n except:\n blog.log(\"ERROR copying\\n\")\n blog.log(\"aborting\\n\")\n failure = 1\n break\n\n blog.log(\"done with individual files\\n\\n\")\n\n # if we were successful, then remove any old backups, as necessary\n if not failure:\n\n # keep in mind that we just stored another backup\n if len(backup_dirs) > bo.nstore-1:\n for n in range(bo.nstore-1, len(backup_dirs)):\n rm_dir = bo.root + '/' + backup_dirs[n]\n\n blog.log(f\"removing old backup: {rm_dir}\\n\")\n\n if not simulate:\n try:\n shutil.rmtree(rm_dir)\n except:\n blog.log(f\"ERROR removing {rm_dir}\\n\")\n\n subject = f\"summary from backup-machine.py, infile: {infile}\"\n if simulate:\n subject = \"[simulate] \" + subject\n else:\n subject = f\"ERROR from backup-machine.py, infile: {infile}\"\n\n\n report(blog.ostr, subject, bo.sender, bo.receiver)",
"def main():\n\tlogger.warn(\"leprechaun rsync started.\")\n\n\ttime.sleep(1)\n\tts_rsync()\n\tsqlite_rsync()\n\n\twhile True:\n\t\ttry:\n\t\t\t# schedule crash guard:\n\t\t\timportlib.reload(schedule)\n\t\t\tschedule.every(1301).seconds.do(ts_rsync)\n\t\t\tschedule.every(300).seconds.do(sqlite_rsync)\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tschedule.run_pending()\n\t\t\t\texcept Exception as ex:\n\t\t\t\t\tlogger.exception(ex)\n\t\t\t\t\ttime.sleep(2)\n\t\texcept Exception as ex:\n\t\t\tlogger.exception(ex)\n\t\t\ttime.sleep(2)",
"def backup_command(server, output):\n # Stop saving chunks\n server.save_off()\n # Run the external save program\n subprocess.call(CONFIG['backup_command']['script'].split())\n # Start saving chunks again\n server.save_on()\n return",
"def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)",
"def test_backup_restore_misc(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backupset.name = \"!@#$%^&\"\n output, error = self.backup_create()\n self.assertTrue(\"Backup `!@#$%^` created successfully\" in output[0],\n \"Backup could not be created with special characters\")\n self.log.info(\"Backup created with special characters\")\n self.backupset.name = \"backup\"\n self.backup_create()\n self.backup_cluster()\n conn = RemoteMachineShellConnection(self.backupset.backup_host)\n command = \"ls -tr {0}/{1}/{2} | tail\".format(self.backupset.directory, self.backupset.name, self.backups[0])\n o, e = conn.execute_command(command)\n data_dir = o[0]\n conn.execute_command(\"dd if=/dev/zero of=/tmp/entbackup/backup/\" +\n str(self.backups[0]) +\n \"/\" + data_dir + \"/data/shard_0.sqlite\" +\n \" bs=1024 count=100 seek=10 conv=notrunc\")\n output, error = self.backup_restore()\n self.assertTrue(\"Restore failed due to an internal issue, see logs for details\" in output[-1],\n \"Expected error not thrown when file is corrupt\")\n self.log.info(\"Expected error thrown when file is corrupted\")\n conn.execute_command(\"mv /tmp/entbackup/backup /tmp/entbackup/backup2\")\n conn.disconnect()\n output, error = self.backup_restore()\n self.assertTrue(\"Backup Repository `backup` not found\" in output[-1], \"Expected error message not thrown\")\n self.log.info(\"Expected error message thrown\")",
"def backup(backupName, full, verify, verifyIncrementally = False, doTheBackup = True):\n testRestoreDir = localenv.backups.testRestoreDir\n backupDetails = localenv.backups.backups[backupName]\n backupMap = getBackupMap(backupName)\n BackupOperations.doBackup (backupDetails.source, backupMap, testRestoreDir, full = full, \n verify = verify, verifyIncrementally = verifyIncrementally, \n doTheBackup = doTheBackup, \n recordTrigger = localenv.backups.recordTrigger)",
"async def main():\n \n # workflow status\n global status\n\n # Mode says which objects must be archived: DB dump, source files or both.\n try:\n mode=sys.argv[1]\n except IndexError:\n mode = 'all'\n\n # queue of files to be archived\n files_to_upload = deque()\n \n logger.trace(\"Archiving ...\")\n # Tasks to archive files and database dump\n list_of_threads = get_list_of_threads(mode=mode)\n\n tar_names = await asyncio.gather(*list_of_threads)\n\n # Clear names list, removing None elements if exist\n tar_names = [name for name in tar_names if name]\n\n files_to_upload.extend(tar_names)\n logger.trace(\"Ok.\")\n\n logger.trace(\"Uploading ...\")\n\n # Connect to the ftp-server and upload the archived files.\n await upload_to_ftp_server(host=FTP.SERVER.value,\n port=FTP.PORT.value,\n login=FTP.LOGIN.value,\n password=FTP.PASSWORD.value,\n files=files_to_upload)\n\n # Remove archived and dump files on the server site.\n clear_garbage(mode=mode, files=tar_names)\n\n # Check the workflow status. If it's not empty, send an error email.\n if len(status) > 0 and ERROR_NOTIFICATION_BY_EMAIL:\n backup_email()",
"def main(args: Optional[Sequence[str]] = None):\n\n setup_logging()\n args = parse_args(args)\n now = datetime.utcnow()\n\n with doing(\"Parsing remote configuration\"):\n wp_config = parse_wp_config(args.source)\n\n with TemporaryDirectory() as d:\n work_location = parse_location(d, args.compression_mode)\n\n with doing(\"Saving settings\"):\n dump_settings(args, wp_config, now, join(d, \"settings.json\"))\n\n if args.maintenance_mode is True:\n with doing(\"Activate maintenance mode\"):\n activate_maintenance_mode(args.source)\n\n try:\n with doing(\"Copying database\"):\n db = create_from_source(wp_config, args.source, args.db_host)\n db.dump_to_file(join(d, \"dump.sql\"))\n\n with doing(\"Copying files\"):\n copy_files(args.source, work_location.child(\"wordpress\"), args.exclude, args.exclude_tag_all)\n\n finally:\n if args.maintenance_mode is True:\n with doing(\"Deactivate maintenance mode\"):\n deactivate_maintenance_mode(args.source)\n\n with doing(\"Writing archive\"):\n args.backup_dir.ensure_exists_as_dir()\n archive_location = make_dump_file_name(args, wp_config, now)\n\n archive_location.archive_local_dir(d, doing)\n doing.logger.info(\"Wrote archive %s\", archive_location)\n\n return archive_location",
"def test_restore_backup():",
"def acquirePath():\n\n def showBakList():\n '''prints the list of already available backups, paired with\n their original and bakPath paths.\n Returns a boolean: True if there are already backup saved in the\n file indice. False if the file indice is empty.'''\n shelveIndex = shelve.open(masterFile)\n print()\n if len(shelveIndex[indice]) > 0:\n print(\"#)\".ljust(4), \"BACKUP\".ljust(20))\n for item in shelveIndex[indice]:\n print((str(shelveIndex[indice].index(item)+1)+\")\").ljust(4),\n item.ljust(20))\n shelveIndex.close()\n return True\n else:\n print(\"Non sono stati trovati backup indicizzati.\")\n shelveIndex.close()\n\n return False\n\n def askUser0(areBackups):\n ''' asks the user if they want to work on a backup or create a \n new one.\n Return a string that represents the user choice.'''\n print()\n if areBackups: \n print((\n \"Digita il numero corrispondente per accedere ad un progetto\"\n \" backup.\")\n )\n print(('Digita \"delete\" seguito dal numero corrispondente '\n \"per eliminare un progetto backup . \\n--NOTA: questa proced\"\n \"ura non elimina le folder _bak relative al progetto, ma sol\"\n \"o la sua indicizzazione in Lithar.\"))\n print('Digita \"n\" per creare un nuovo progetto backup.')\n print('Digita \"chiudi\" per chiudere Lithar.')\n \n choice = input() #crash in console!!!\n return choice\n\n def createSave():\n '''creates the LitharMaster shelve files.\n Initialize them with a indexList list Variable.'''\n\n shelveIndex = shelve.open(masterFile)\n shelveIndex[indice]=[]\n shelveIndex.close()\n \n def createRecord():\n '''creates a new record (entry) in the savedata file.\n The entry includes the backup name, itand its original\n and backPath path'''\n #ToDo make it so that the shelveIndex[indice] does not contain \n # duplicates when a new project is added with the same name of\n # a previous one. \n name = input(\"Digita il nome che vuoi dare al backup:\\n\")\n original = input(\"Digita il percorso della folder originale:\\n\")\n bakPath = input(\"Digita il percorso dove salvare i/l backup:\\n\")\n \n \n shelveIndex = shelve.open(masterFile)\n \n ListaIndice = shelveIndex[indice]\n if name not in ListaIndice:\n ListaIndice.append(name) \n shelveIndex[indice]=ListaIndice\n shelveIndex[name]=(original,bakPath)\n \n shelveIndex.close()\n print(\"Il nuovo record %s è stato creato.\" %name)\n\n def deleteRecord(index):\n '''removes a record from the shelve save file.\n Deletes the shelveIndex key with the paths and the \n shelveIndex[indice] element\n index = index of the ShelveIndex[indice] correponding to the\n shelveIndex key to delete. 
'''\n shelveIndex = shelve.open(masterFile)\n ListaIndice = shelveIndex[indice]\n del shelveIndex[ListaIndice.pop(index)]\n shelveIndex[indice] = ListaIndice\n\n shelveIndex.close()\n print(\"Il record è stato eliminato.\")\n \n def accessRecord(index):\n '''returns the original and bakPath paths for the selected\n record.\n index = integer of the record index in shelveIndex[indice]'''\n shelveIndex = shelve.open(masterFile)\n ListaIndice = shelveIndex[indice]\n origin, dest = shelveIndex[ListaIndice[index]]\n \n print(\"Vuoi accedere a:\")\n print(\"Progetto: %s\".ljust(20) %ListaIndice[index])\n print(\"Folder attuale: %s\".ljust(20) %origin)\n print(\"Folder contenente i backup: %s\".ljust(20) %dest)\n while True:\n confirm = input(\"\\nConfermi (s/n): \")\n if confirm == \"s\":\n print(100*\"-\" + \"\\n\")\n return (origin,dest)\n elif confirm == \"n\":\n #ToDo trova un modo migliore di uscire.\n #TROVATO A CULO\n sys.exit()#ToDo indaga perché sys.exit non esce dal loop\n #Perché hai messo un exception in fondo!!! Genio!\n else:\n print('Devi risponde \"s\" o \"n\" per sì o no.')\n\n\n shelveIndex.close()\n\n logging.debug(\"cwd: %s\" %os.getcwd())\n \n masterFile = \"LitharMaster\" #name of the save data file\n indice = \"indexList\" #name of the save file core variable\n\n print(masterFile)\n\n while True:\n #ToDo: add all the 3 files for shelve? .dat .dir . bak?\n if os.path.isfile(os.path.join(('.'),masterFile)+\".dat\"):\n break\n else:\n createSave() \n while True:\n choice = (askUser0(showBakList()))\n print(100*\"~\" + \"\\n\")\n if choice == \"n\":\n createRecord()\n elif choice.startswith(\"delete\"):\n deleteRecord(int(choice.lstrip(\"delete\"))-1)\n elif choice == \"chiudi\":\n sys.exit()\n else:\n try:\n choice = int(choice)-1\n return accessRecord(choice)\n except:\n print(\"Hai inserito un valore sbagliato, riprova.\")",
"def run(self):\n self.archive_bash_inits()\n self.create_paths()\n self.copy_files()\n self.make_git_config()\n self.ensure_bash_history()",
"def runmain():\n\n if roboapps.Unchecked():\n roboapps.Exit()\n else:\n SaveView(\"Snapshot-View-0\")",
"def test_backup_with_erlang_crash(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n try:\n backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n resume=self.backupset.resume, purge=self.backupset.purge,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n if self.os_name != \"windows\":\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.cluster_host)\n conn.kill_erlang(self.os_name)\n output = backup_result.result(timeout=200)\n if self.debug_logs:\n print((\"Raw output from backup run: \", output))\n error_mesgs = [\"Error backing up cluster: Not all data was backed up due to\",\n \"No connection could be made because the target machine actively refused it.\"]\n error_found = False\n for error in error_mesgs:\n if self._check_output(error, output):\n error_found = True\n if not error_found:\n raise(\"Expected error message not thrown by Backup 180 seconds after erlang crash\")\n except Exception as ex:\n self.fail(str(ex))\n finally:\n conn.start_couchbase()\n conn.disconnect()\n self.sleep(30)"
] | [
"0.7348714",
"0.7322273",
"0.72442436",
"0.71869266",
"0.7080326",
"0.7026621",
"0.6823129",
"0.6735388",
"0.66656953",
"0.6550088",
"0.6538847",
"0.6439064",
"0.63912153",
"0.6356379",
"0.63234514",
"0.6314879",
"0.6291406",
"0.6277249",
"0.62609226",
"0.6228204",
"0.6194076",
"0.6185276",
"0.6149395",
"0.6115328",
"0.6109159",
"0.61031145",
"0.60973656",
"0.6094223",
"0.6037529",
"0.6022175"
] | 0.7343254 | 1 |
Returns a function which takes Paths into the user data and returns csums. | def reverser(num_segs=3):
  r = re.compile("((\/([0-9]|[a-f])+){%d})$" % (num_segs+1))
  def checksum_from_link(link):
    """Takes a path into the userdata, returns the matching csum."""
    m = r.search(safetype(link))
    if (m):
      csum_slash = m.group()[1:]
      csum = _remove_sep_(csum_slash)
      return csum
    else:
      raise ValueError("link %s checksum didn't parse" %(link))
  return checksum_from_link | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate(d):\r\n\r\n # Set correct slashes for the OS\r\n if sys.platform == 'windows':\r\n slash = '\\\\'\r\n elif sys.platform == 'linux':\r\n slash = '/'\r\n else:\r\n print('#Error. Unknown platform.')\r\n return\r\n\r\n print('Files in the current directory and their md5-hashes:\\n')\r\n i = 0\r\n assert i == 0, '#Error. Variable i != 0.'\r\n\r\n for i in range(len(d[2])): # Go through the list of files\r\n full_path = d[0]+slash+d[2][i]\r\n print(full_path) # Get the list of files with full paths\r\n print(md5(full_path))\r\n size(full_path)",
"def path_checksum(paths):\n\n if not hasattr(paths, '__iter__'):\n raise TypeError('sequence or iterable expected not %r!' % type(paths))\n\n def _update_checksum(checksum, dirname, filenames):\n for filename in sorted(filenames):\n path = path_join(dirname, filename)\n if isfile(path):\n fh = open(path, 'rb')\n while 1:\n buf = fh.read(4096)\n if not buf : break\n checksum.update(buf)\n fh.close()\n\n chksum = sha1()\n\n for path in sorted([normpath(f) for f in paths]):\n if path_exists(path):\n if isdir(path):\n walk(path, _update_checksum, chksum)\n elif isfile(path):\n _update_checksum(chksum, dirname(path), basename(path))\n\n return chksum.hexdigest()",
"def data_checksum(self, node):\n cmd = f\"find {RedpandaService.DATA_DIR} -type f -exec md5sum '{{}}' \\; -exec stat -c %s '{{}}' \\;\"\n lines = node.account.ssh_output(cmd)\n tokens = lines.split()\n return {\n tokens[ix + 1].decode(): (tokens[ix].decode(), int(tokens[ix + 2]))\n for ix in range(0, len(tokens), 3)\n }",
"def fsum(fpath):\n import hashlib\n import codecs\n with codecs.open(fpath, \"r\", \"utf-8\") as filep:\n buff = filep.read()\n cksum = hashlib.md5(buff.encode(\"utf-8\"))\n return cksum.hexdigest()",
"def checkSumHelper(arg, dirname, fnames):\n val = 0\n files = [name for name in fnames if os.path.splitext(name)[1] in EXTENSIONS]\n for file in files:\n absFile = os.path.join(dirname,file)\n try:\n stats = os.stat(absFile)\n except OSError,e:\n # This is to skip over temporary files or files\n # nosy doesn't have permission to access\n # print \"Nosy: skipping file %s with error %s\"%(absFile,e)\n continue\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n arg.append(val)\n return",
"def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()",
"def FileRemoteChecksum(self, paths: list):\n try:\n g = (connectme_pb2.FilePath(path=p) for p in paths)\n return {c.path: c.sum for c in self.filemanager.Checksum(g)}\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e",
"def generate_sum(file_path):\n #file = open(file_path, 'rb')\n #header = file.read()\n header = open(file_path, 'rb').read()\n suma_md5 = md5(header).hexdigest()\n return suma_md5",
"def checkSumWalk(top=\".\", func=checkSumHelper):\n values = []\n os.path.walk( top, checkSumHelper, values )\n return sum(values)",
"def checksum(self):\n checksums = {\n \"slug\": hashlib.sha256(\n self.slug.encode(\"utf-8\")\n ).hexdigest(),\n \"files\": {},\n }\n\n def file_hash(filepath):\n running_hash = hashlib.sha256()\n with open(filepath, \"rb\") as IN:\n while True:\n # Read file in as little chunks.\n buf = IN.read(4096)\n if not buf:\n break\n running_hash.update(buf)\n return running_hash.hexdigest()\n\n # iterate over the direcory and calucalte the hash\n for root, dirs, files in os.walk(self.thawed_dir):\n for file_path in sorted(files):\n full_path = str(Path(root) / file_path)\n # Calculate a relative path to the freezable object\n rel_path = full_path.replace(str(self.thawed_dir) + \"/\", \"\")\n # calculate and store the checksums\n phash = file_hash(full_path)\n filesize = os.path.getsize(full_path)\n checksums[\"files\"][rel_path] = {\n \"checksum\": phash,\n \"size\": filesize,\n }\n # calculate the total\n total = hashlib.sha256(checksums[\"slug\"].encode(\"utf-8\"))\n # Iterate over filenames AND hashes and update checksum\n for filename, data in checksums[\"files\"].items():\n total.update(filename.encode(\"utf-8\"))\n total.update(data[\"checksum\"].encode(\"utf-8\"))\n checksums[\"total\"] = total.hexdigest()\n return checksums",
"def calculate_checksum(lines):\n twos = 0\n threes = 0\n for box in lines:\n counts = count_twos_threes(box)\n if counts[0]:\n twos += 1\n if counts[1]:\n threes += 1\n return twos * threes",
"def get_value(path):\r\n return sum([d[sq] for sq in path])",
"def svn_fs_file_checksum(*args):\r\n return _fs.svn_fs_file_checksum(*args)",
"def calculate_md5sum_of_a_file(context, file_name, file_path):\n command = \"md5sum \" + file_path + \"/\" + file_name + \" | awk {'print $1'}\"\n return context.cme_session.send_ssh_command(command=command)",
"def FileSendRemoteChecksum(self, source_paths: list):\n try:\n paths = [p for pat in source_paths for p in self.expandPath(pat)]\n g = self.fileChunkGenerator(paths, False)\n return {c.path: c.sum for c in self.filemanager.SendChecksum(g)}\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e",
"def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)",
"def checksum(self):\n def stat_string(path):\n stat = os.stat(path)\n return '%s,%s' % (str(stat.st_size), str(stat.st_mtime))\n\n return dict((path, stat_string(path))\n for path in self.crawl()\n if os.path.exists(path))",
"def fill_checksums(mgr, nodes, *, path=None):\n from .core.structured_cell import StructuredCell\n first_exc = None\n for p in nodes:\n node, old_checksum = None, None\n try:\n pp = path + p if path is not None else p\n node = nodes[p]\n if node[\"type\"] in (\"link\", \"context\"):\n continue\n untranslated = node.get(\"UNTRANSLATED\")\n if not untranslated:\n assert \"TEMP\" not in node, (node[\"path\"], str(node[\"TEMP\"])[:80])\n continue\n old_checksum = node.pop(\"checksum\", None)\n if node[\"type\"] == \"transformer\":\n fill_checksum(mgr, node, \"input_auth\")\n node2 = node.copy()\n node2.pop(\"hash_pattern\", None)\n fill_checksum(mgr, node2, \"code\")\n if \"checksum\" in node2:\n node[\"checksum\"] = node2[\"checksum\"]\n fill_checksum(mgr, node2, \"result\")\n if node[\"compiled\"]:\n fill_checksum(mgr, node2, \"_main_module\")\n if \"checksum\" in node2 and \"checksum\" not in node:\n node[\"checksum\"] = node2[\"checksum\"]\n elif node[\"type\"] == \"macro\":\n fill_checksum(mgr, node, \"param_auth\")\n fill_checksum(mgr, node, \"code\")\n elif node[\"type\"] in (\"cell\", \"foldercell\"):\n if node[\"type\"] == \"foldercell\" or node[\"celltype\"] == \"structured\":\n temp_path = \"auth\"\n else:\n temp_path = \"value\"\n fill_checksum(mgr, node, temp_path, composite=False)\n elif node[\"type\"] in (\"deepcell\", \"deepfoldercell\"):\n fill_checksum(mgr, node, \"origin\", composite=False)\n fill_checksum(mgr, node, \"keyorder\", composite=False)\n fill_checksum(mgr, node, \"blacklist\", composite=False)\n fill_checksum(mgr, node, \"whitelist\", composite=False)\n elif node[\"type\"] == \"module\":\n fill_checksum(mgr, node, None, composite=False)\n else:\n raise TypeError(p, node[\"type\"])\n node.pop(\"TEMP\", None)\n if \"checksum\" not in node:\n if old_checksum is not None:\n node[\"checksum\"] = old_checksum\n else:\n if old_checksum is not None:\n if isinstance(node[\"checksum\"], dict):\n if isinstance(old_checksum, dict):\n for k in old_checksum:\n if k not in node[\"checksum\"]:\n node[\"checksum\"][k] = old_checksum[k]\n except Exception as exc:\n if first_exc is None:\n first_exc = exc\n else:\n import traceback\n traceback.print_exc()\n if node is not None and old_checksum is not None:\n node[\"checksum\"] = old_checksum\n if first_exc is not None:\n raise first_exc from None",
"def _checksum_compute(content, seed=0):\n csum = seed\n chunks = _chunkify(content, 4)\n for chunk in chunks:\n if len(chunk) == 4:\n ul = chunk[0]\n ul |= chunk[1] << 8\n ul |= chunk[2] << 16\n ul |= chunk[3] << 24\n else:\n # WTF: I can only assume this is a typo from the original\n # author of the cabinet file specification\n if len(chunk) == 3:\n ul = (chunk[0] << 16) | (chunk[1] << 8) | chunk[2]\n elif len(chunk) == 2:\n ul = (chunk[0] << 8) | chunk[1]\n elif len(chunk) == 1:\n ul = chunk[0]\n csum ^= ul\n return csum",
"def get_folder_total(path):\n files = os.listdir(path)\n pythonfiles = ['%s/%s' % (path, filename) for filename in files if filename[-3:] == '.py']\n total = { 'net': 0, 'total': 0, 'nonblank': 0, 'num_inputs':0 }\n for filename in pythonfiles:\n with open(filename, 'r') as thisfile:\n blob = thisfile.read()\n # print filename\n thisloc = loc(blob)\n for k, v in thisloc.items():\n total[k] += v\n return total",
"def calc_total_dist(path: Path) -> float:\n\t\n\treturn sum(\n\t\tcalc_dist(*cs)\n\t\tfor cs in iter_n(path, 2)\n\t)",
"def ukey(self, path):\n out = self._call(\"GETFILECHECKSUM\", path=path, redirect=False)\n if \"Location\" in out.headers:\n location = self._apply_proxy(out.headers[\"Location\"])\n out2 = self.session.get(location)\n out2.raise_for_status()\n return out2.json()[\"FileChecksum\"]\n else:\n out.raise_for_status()\n return out.json()[\"FileChecksum\"]",
"def _get_checksum(self, arg):",
"def compute(self):\n self.checksum = self.get_files_hashes_in_path()\n self.real_checksum = self.checksum\n # This appends the filename when checksum was made for a single file.\n # We need to get this when testing the consistency on the moment of\n # restore.\n if self.count == 1:\n self.checksum = self.real_checksum + os.path.basename(self.path)\n return self.checksum",
"def get_checksum(self, u_file: 'UserFile') -> str:\n ...",
"def md5_sum_file(path):\n with open(path, 'rb') as f:\n m = hashlib.md5()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()",
"def GetPathCost(self, bucketOfActions):",
"def get_amount_by_file(path):\n if os.stat(path).st_size == 0:\n raise Exception\n with open(path, 'r') as file:\n items = file.read().split(',')\n total = reduce(lambda x, y: int(x) + int(y), items)\n return total",
"def csum_to_path(self, csum):\n #TODO remove callers so we can make internal.\n return Path(self._csum_to_name(csum), self.root)",
"def calculate_fid_given_paths(paths, batch_size, cuda, dims):\n for p in paths:\n if not os.path.exists(p):\n raise RuntimeError('Invalid path: %s' % p)\n\n block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]\n\n model = InceptionV3([block_idx])\n if cuda:\n model.cuda()\n\n m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,\n dims, cuda)\n m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,\n dims, cuda)\n fid_value = calculate_frechet_distance(m1, s1, m2, s2)\n\n return fid_value"
] | [
"0.5616519",
"0.5510596",
"0.54699963",
"0.5451842",
"0.54407364",
"0.53760487",
"0.5368446",
"0.53618073",
"0.5279357",
"0.52358234",
"0.52341944",
"0.5196225",
"0.5176148",
"0.51656383",
"0.51610726",
"0.5154971",
"0.5148992",
"0.5138188",
"0.5128646",
"0.51273227",
"0.51170766",
"0.51007247",
"0.50892603",
"0.50860876",
"0.5058754",
"0.50465935",
"0.50437975",
"0.49996188",
"0.49871203",
"0.49649066"
] | 0.59194446 | 0 |
Return absolute Path to a blob given a csum | def csum_to_path(self, csum):
#TODO remove callers so we can make internal.
return Path(self._csum_to_name(csum), self.root) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def link_to_blob(self, path, csum):\n new_link = self.csum_to_path(csum)\n ensure_symlink(path, new_link)\n ensure_readonly(path)",
"def _csum_to_name(self, csum):\n #TODO someday when csums are parameterized, we inject the has params here.\n return _checksum_to_path(csum)",
"def _blob_file(self, blob_name):\r\n return f\"{self._blob_folder()}/{blob_name}\"",
"def _get_blob_path(self, prefix: str, oid: str) -> str:\n if not self.path_prefix:\n storage_prefix = ''\n elif self.path_prefix[0] == '/':\n storage_prefix = self.path_prefix[1:]\n else:\n storage_prefix = self.path_prefix\n return posixpath.join(storage_prefix, prefix, oid)",
"def delete_blob(self, csum):\n blob_path = self.csum_to_path(csum)\n blob_path.unlink(clean=self.root)",
"def get_content_path(content):",
"def _send_blob(self, blob, blob_path_prefix):\n if len(blob) > self._max_blob_size:\n logger.warning(\n \"Blob too large; skipping. Size %d exceeds limit of %d bytes.\",\n len(blob),\n self._max_blob_size,\n )\n return None\n\n blob_id = uuid.uuid4()\n blob_path = (\n \"{}/{}\".format(blob_path_prefix, blob_id) if blob_path_prefix else blob_id\n )\n self._bucket.blob(blob_path).upload_from_string(blob)\n return blob_id",
"def getSha1Path(sha1):\n dir1=sha1[:2]\n dir2=sha1[2:4]\n dir3=sha1[4:6]\n filename=sha1[6:40]\n return(dir1+'/'+dir2+'/'+dir3,filename)",
"def get_filename(checksum):\n return '%s.svg' % checksum",
"def construct_sas_url(blob, uri):\n newuri = copy.copy(uri)\n newuri.pathname = '{}/{}'.format(uri.path, quote(blob.name.encode('utf-8')))\n return newuri.geturl()",
"def get_file(self, file_path, container_name, blob_name, **kwargs):\n return self.connection.get_blob_to_path(container_name, blob_name,\n file_path, **kwargs)",
"def read_blob(blob):\r\n if blob.hexsha != Diff.NULL_HEX_SHA:\r\n return blob.data_stream.read()\r\n else:\r\n with open(blob.path) as fp:\r\n return fp.read()",
"def verify_blob_checksum(self, blob):\n path = self.csum_to_path(blob)\n csum = path.checksum()\n return csum != blob",
"def get_path_to(self, content):\n exported = self.getExported()\n content_path = content.getPhysicalPath()\n if is_inside_path(exported.rootPath, content_path):\n return \"/\".join(canonical_tuple_path(\n [exported.root.getId()] + relative_tuple_path(\n exported.rootPath, content_path)))\n return \"root:\" + \"/\".join(canonical_tuple_path(\n relative_tuple_path(exported.basePath, content_path)))",
"def _get_target_hash(self, target_filepath, hash_function='sha256'):\n\n # Calculate the hash of the filepath to determine which bin to find the \n # target. The client currently assumes the repository uses\n # 'hash_function' to generate hashes.\n\n digest_object = tuf.hash.digest(hash_function)\n\n try:\n digest_object.update(target_filepath)\n except UnicodeEncodeError:\n # Sometimes, there are Unicode characters in target paths. We assume a\n # UTF-8 encoding and try to hash that.\n digest_object = tuf.hash.digest(hash_function)\n encoded_target_filepath = target_filepath.encode('utf-8')\n digest_object.update(encoded_target_filepath)\n\n target_filepath_hash = digest_object.hexdigest() \n\n return target_filepath_hash",
"def get_blob(uuid, path=''):\n check_bundles_have_read_permission(local.model, request.user, [uuid])\n bundle = local.model.get_bundle(uuid)\n\n target_info = local.download_manager.get_target_info(uuid, path, 0)\n if target_info is None:\n abort(httplib.NOT_FOUND, 'Not found.')\n\n # Figure out the file name.\n if not path and bundle.metadata.name:\n filename = bundle.metadata.name\n else:\n filename = target_info['name']\n\n if target_info['type'] == 'directory':\n # Always tar and gzip directories.\n filename = filename + '.tar.gz'\n fileobj = local.download_manager.stream_tarred_gzipped_directory(uuid, path)\n elif target_info['type'] == 'file':\n if not zip_util.path_is_archive(filename) and request_accepts_gzip_encoding():\n # Let's gzip to save bandwidth. The browser will transparently decode\n # the file.\n filename = filename + '.gz'\n fileobj = local.download_manager.stream_file(uuid, path, gzipped=True)\n else:\n fileobj = local.download_manager.stream_file(uuid, path, gzipped=False)\n else:\n # Symlinks.\n abort(httplib.FORBIDDEN, 'Cannot download files of this type.')\n \n # Set headers.\n mimetype, _ = mimetypes.guess_type(filename, strict=False)\n response.set_header('Content-Type', mimetype or 'text/plain')\n if zip_util.get_archive_ext(filename) == '.gz' and request_accepts_gzip_encoding():\n filename = zip_util.strip_archive_ext(filename)\n response.set_header('Content-Encoding', 'gzip')\n else:\n response.set_header('Content-Encoding', 'identity')\n response.set_header('Content-Disposition', 'filename=\"%s\"' % filename)\n\n return fileobj",
"def get_hash(path: Path) -> str:\n m = hashlib.sha256()\n m.update(path.read_bytes())\n return m.hexdigest()",
"def _file_storage_path(self, sha1, filename):\n # pylint: disable=no-member\n path = (\n '{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'\n '{student_id}/{sha1}{ext}'.format(\n\t\tstudent_id = self.xmodule_runtime.anonymous_student_id,\n loc=self.location,\n sha1=sha1,\n ext=os.path.splitext(filename)[1]\n )\n )\n return path",
"def file_path(base_path, subvolume, file_name):\n return '{}/{}_{}_{}/{}.hdf5'.format(base_path, *subvolume, file_name)",
"def write_tmp_blob(dir, name, sha):\n cmd = ['git', 'cat-file', '-p', sha ]\n abs_path = os.path.join(dir, os.path.basename(name))\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output = popen.communicate()[0]\n f = file(abs_path, 'w')\n f.write(output)\n f.close()\n return abs_path",
"def path_image(image):\n return bpy.path.abspath(image.filepath, library=image.library).replace(\"\\\\\", \"/\")\n # .replace(\"\\\\\",\"/\") to get only forward slashes as it's what POV prefers,\n # even on windows",
"def _get_checksum(self, arg):",
"def gcPath(basePath, snapNum, chunkNum=0):\n gcPath = basePath + '/groups_%03d/' % snapNum\n filePath1 = gcPath + 'groups_%03d.%d.hdf5' % (snapNum, chunkNum)\n filePath2 = gcPath + 'fof_subhalo_tab_%03d.%d.hdf5' % (snapNum, chunkNum)\n\n if isfile(filePath1):\n return filePath1\n return filePath2",
"def getPath(obj):",
"def get_blob(self, blob_name):\n return self.bucket.get_blob(blob_name)",
"def _blob_folder(self):\r\n\r\n # extend with tenant_id and/or subscription_id if multi-tenant/subscription support required\r\n return f\"{self.cloud_folder}/{self.account_name}/{self.container_name}\"",
"def file_name(self):\n _, blob_name = self._get_container_and_blob()\n\n return blob_name",
"def _solution_storage_path(self, sha1, filename):\n # pylint: disable=no-member\n path = (\n '{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'\n 'static/solution/{sha1}{ext}'.format(\n sha1 = sha1,\n loc=self.location,\n ext=os.path.splitext(filename)[1]\n )\n )\n return path",
"def offsetPath(basePath, snapNum):\n offsetPath = basePath + '/../postprocessing/offsets/offsets_%03d.hdf5' % snapNum\n\n return offsetPath",
"def download_blob(bucket_name, source_blob_name):\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n return blob.download_as_string().decode()"
] | [
"0.6760842",
"0.6556648",
"0.64958185",
"0.6435036",
"0.62205434",
"0.58019423",
"0.5726288",
"0.55842215",
"0.5583444",
"0.5474924",
"0.5454985",
"0.5429763",
"0.5375666",
"0.5375143",
"0.5340256",
"0.5335034",
"0.5317139",
"0.53109914",
"0.5301503",
"0.5301253",
"0.5236432",
"0.5226131",
"0.5225542",
"0.5212728",
"0.517924",
"0.51553017",
"0.51469535",
"0.514381",
"0.51433706",
"0.5126046"
] | 0.7445334 | 0 |
Takes a csum, and removes it from the blobstore | def delete_blob(self, csum):
blob_path = self.csum_to_path(csum)
blob_path.unlink(clean=self.root) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove():",
"def remove(self, data):\n data_hash = hashlib.sha256(data).digest()\n self.denominator = (self.denominator * data_to_num3072(data_hash)) % self.MODULUS",
"def __do_binary_delete(item):\n\n file_path = DTF_BINARIES_DIR + item.install_name\n\n if utils.delete_file(file_path) != 0:\n log.e(TAG, \"Error removing binary file! Continuing.\")\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM binaries '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n conn.commit()\n\n return 0",
"def cache_remove_hashed(item: str) -> None:\n\tcache_remove(md5(item))",
"def remove_file_from_cache(self, md5_hash):\n self.used_space -= len(self.storage[md5_hash])\n self.storage.pop(md5_hash)\n self.remove_from_usage_queue(md5_hash)",
"def test_remove(self):\n cons_hash = ConsistentHash(2)\n cons_hash.add('192.168.1.1') \n self.assertEquals(len(cons_hash), 2) \n cons_hash.remove('192.168.1.1') \n self.assertEquals(len(cons_hash), 0) \n \n self.assertTrue(cons_hash._is_consistent())",
"def remove_total_size(apps, schema_editor):\n Data = apps.get_model(\"flow\", \"Data\")\n for data in Data.objects.all():\n for field_schema, fields in iterate_fields(\n data.output, data.process.output_schema\n ):\n name = field_schema[\"name\"]\n value = fields[name]\n if \"type\" in field_schema:\n if field_schema[\"type\"].startswith(\"basic:file:\"):\n del value[\"total_size\"]\n elif field_schema[\"type\"].startswith(\"list:basic:file:\"):\n for obj in value:\n del obj[\"total_size\"]\n elif field_schema[\"type\"].startswith(\"basic:dir:\"):\n del value[\"total_size\"]\n elif field_schema[\"type\"].startswith(\"list:basic:dir:\"):\n for obj in value:\n del obj[\"total_size\"]\n data.save()",
"def remove(self, block):\n try:\n self.blocks[block.height]\n except:\n raise ValueError(\"cant remove block: \" + str(block))\n\n removed = False\n for b in self.blocks[block.height]:\n # only delete the one with equal hash\n if b.hash() == block.hash():\n self.blocks[block.height].remove(b)\n removed = True\n if self.blocks[block.height] == []:\n self.blocks.pop(block.height)\n\n if not removed:\n raise ValueError(\"cant remove block: \" + str(block))",
"def test_bit_remove_byte_offset_with_byte_too_large(self):\n ops = [bitwise_operations.bit_remove(self.test_bin_zeroes, 3, 3, None)]\n\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def unlink(address):",
"def remove_memo(mid):\n\trecord = {\n\t\t\"_id\": ObjectId(mid)\n\t}\n\tcollection.remove(record)\n\treturn",
"def remove(self, name):\n id_ = self.name_to_id(name)\n # Top nybbles of table entries are id_ + 1 (to avoid all-zero entries)\n id_in_table = (self.table >> self.maxtimebits) == id_ + 1\n hashes_removed = 0\n for hash_ in np.nonzero(np.max(id_in_table, axis=1))[0]:\n vals = self.table[hash_, :self.counts[hash_]]\n vals = [v for v, x in zip(vals, id_in_table[hash_])\n if not x]\n self.table[hash_] = np.hstack([vals,\n np.zeros(self.depth - len(vals))])\n # This will forget how many extra hashes we had dropped until now.\n self.counts[hash_] = len(vals)\n hashes_removed += np.sum(id_in_table[hash_])\n self.names[id_] = None\n self.hashesperid[id_] = 0\n self.dirty = True\n print(\"Removed\", name, \"(\", hashes_removed, \"hashes).\")",
"def removedb():\n\n try:\n os.remove(rebasedb)\n except OSError:\n pass",
"def remove_genesis(proof):\n\n old_size = len(proof)\n print(\"Removing genesis block from proof ...\")\n proof.pop(-1)\n print(\"OK\")\n print(\"old size:\", old_size, \"-> new size:\", len(proof))",
"def _remove_from_weakref(self, tx: BaseTransaction) -> None:\n if self._tx_weakref_disabled:\n return\n assert tx.hash is not None\n self._tx_weakref.pop(tx.hash, None)",
"def delete_io( hash ):\n res = 0\n record_used('cache', hash)\n for packet in get_filenames_for_hash(CACHE_DIRECTORY, hash):\n try:\n os.remove(packet)\n res = res + 1\n except:\n if not os.environ.get('CALIENDO_TEST_SUITE', None):\n logger.warning( \"Failed to remove file: \" + packet )\n return res",
"def DeleteSignedBinary(binary_urn: rdfvalue.RDFURN):\n try:\n data_store.REL_DB.ReadSignedBinaryReferences(\n SignedBinaryIDFromURN(binary_urn))\n except db.UnknownSignedBinaryError:\n raise SignedBinaryNotFoundError(binary_urn)\n data_store.REL_DB.DeleteSignedBinaryReferences(\n SignedBinaryIDFromURN(binary_urn))",
"def delete(self, key):\n validate_is_bytes(key)\n\n self.root_hash = self._set(self.root_hash, encode_to_bin(key), b\"\")",
"def delete_file(self, hash):\n self.tree.delete(hash)\n query = \"delete from files where hash='%s'\"%hash\n self.connection.execute(query)\n self.connection.commit()",
"def firmware_pack_remove(handle, org_name, name, org_parent=\"org-root\"):\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" %org_name)\n else:\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if not mo:\n log.info(\"Firmware host pack <%s> not found.Nothing to remove\" % name)\n else:\n handle.remove_mo(mo)\n handle.commit()",
"def delete(self, record):\n temp = self.hashing(record.get_key())\n if self.__buckets[temp].contains(record):\n self.__buckets[temp].delete(record)\n self.__num_records -= 1",
"def remove(self, data_id, idx):\n temp = self.database[data_id]\n del temp[idx]\n self.database[data_id] = temp",
"def dangerously_delete(self, bento_name, bento_version):",
"def test_bit_remove_randnumbytes_randoffset(self):\n offset = random.randint(0, 4)\n num_bytes = random.randint(1, (5 - offset))\n ops = [bitwise_operations.bit_remove(self.test_bin_zeroes, offset, num_bytes, None)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[self.test_bin_zeroes] == bytearray([0] * (5 - num_bytes))\n # should have removed num_bytes 0s",
"def removeDataAt(self, address: ghidra.program.model.address.Address) -> None:\n ...",
"def test_bit_remove_byte_size_too_large(self):\n ops = [bitwise_operations.bit_remove(self.test_bin_zeroes, 0, 6, None)]\n\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def delSit(self, key):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n return self.delVal(self.sits, key)",
"def remove_sign(self):\n if self.is_signed():\n file_size = os.stat(self._file_name).st_size\n self._document.truncate(file_size - self._append_size)\n print(\"Sign removed from the document!\")\n else:\n print(\"The document is not signed!\")",
"def s3_delete_data(self):\n\n self.k.delete()",
"def delete_data(self, data_sig):\n data_i = sqlite3.connect('data::memory:', check_same_thread=False)\n data_cursor = data_i.cursor()\n data_cursor.execute('DELETE FROM localdata where data_sig==(:data_sig)', {\"data_sig\":data_sig})\n item = data_cursor.fetchall()\n data_i.commit()\n data_i.close()\n return item"
] | [
"0.5757154",
"0.5642187",
"0.5564729",
"0.5475264",
"0.5464008",
"0.5457136",
"0.53968495",
"0.5358681",
"0.5354388",
"0.5342333",
"0.53273803",
"0.5321266",
"0.5296075",
"0.52734",
"0.5217147",
"0.5210733",
"0.5208237",
"0.5182357",
"0.5178936",
"0.5171412",
"0.5165453",
"0.5155233",
"0.5144636",
"0.5137805",
"0.513395",
"0.51231635",
"0.51202476",
"0.5112849",
"0.51075035",
"0.5099964"
] | 0.78742325 | 0 |
Forces path into a symlink to csum | def link_to_blob(self, path, csum):
new_link = self.csum_to_path(csum)
ensure_symlink(path, new_link)
ensure_readonly(path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def relink(f):\n if os.path.islink(f):\n linkto = os.path.join(NEW_LINK_BASE, os.path.basename(os.readlink(f)))\n #print 'Relinking %s-> %s from \\n %s' % (f, linkto, os.readlink(f))\n #print 'removing %s' % f\n os.remove(f)\n os.symlink(linkto, f)",
"def symlink(self, req, link, parent, name):\r\n self.reply_err(req, EROFS)",
"def update_link(self):\n try:\n relpath = os.path.relpath(self.path, os.path.dirname(self.link_path))\n os.symlink(relpath, self.link_path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.unlink(self.link_path)\n os.symlink(self.path, self.link_path)",
"def symlink_force(source, link_name):\n try:\n os.symlink(source, link_name)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(link_name)\n os.symlink(source, link_name)",
"def _sync_symlink(self, binary_name, link_to):\n\n # The symlink we are creating:\n link_path = os.path.join(self.bin_dir, binary_name)\n\n # The expected file we should be linking to:\n link_dest = os.path.join(self.bin_dir, link_to)\n\n if not os.path.exists(link_path) or \\\n not os.path.islink(link_path) or \\\n os.path.realpath(link_path) != os.path.realpath(link_dest):\n if os.path.exists(link_path):\n os.remove(link_path)\n os.symlink(link_to, os.path.join(self.bin_dir, binary_name))\n self.output.append(\"Symlinked %s to %s.\" % (link_path, link_dest))\n self.changed = True",
"def symlink_hash(path):\n hasher = sha1()\n data = path_to_bytes(os.readlink(path))\n hasher.update(('blob %u\\0' % len(data)).encode('ascii'))\n hasher.update(data)\n return hasher",
"def fix_link(hook, target_link):\n if os.path.exists(hook):\n os.unlink(hook)\n os.symlink(target_link, hook)",
"def ln_overwrite(src, dest):\n if exists(dest, use_sudo=True):\n sudo(\"rm %s && ln -s %s %s\" % (dest, src, dest))\n else:\n sudo(\"ln -s %s %s\" % (src, dest))",
"def _symlink(conf, devname, label, remove=False):\n return\n\n linkpath = conf.get('symlink')\n if linkpath:\n linkpath = expanduser(linkpath)\n if lexists(linkpath):\n os.unlink(linkpath)\n if not remove:\n # TODO: handle path errors\n os.symlink(get_mount_target(devname, label), linkpath)",
"def force_symlink(src, dst):\n try:\n os.unlink(dst)\n os.symlink(src, dst)\n except OSError:\n os.symlink(src, dst)",
"def overwrite_symlinks ( self ):\n return self.value & self.OV_SYM",
"def force_symlink(target, name):\n makedirs(os.path.dirname(name))\n try:\n os.symlink(target, name)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(name)\n os.symlink(target, name)",
"def checksum_from_link(link):\n m = r.search(safetype(link))\n if (m):\n csum_slash = m.group()[1:]\n csum = _remove_sep_(csum_slash)\n return csum\n else:\n raise ValueError(\"link %s checksum didn't parse\" %(link))",
"def test_create_symlink_file(self):\n pass",
"def _symlink(source, link_name):\n flags = 0\n\n if source is not None and os.path.isdir(source):\n flags = 1\n\n CreateSymbolicLinkW(link_name, source, flags)",
"def make_symlink(dbconfig, targ):\n if \"latest\" in dbconfig and not dbconfig[\"latest\"]:\n return\n link = re.sub(r'[0-9]+', 'latest', targ)\n try:\n os.symlink(targ, link)\n info(\"create link \" + link + \" --> \" + targ)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(link)\n os.symlink(targ, link)\n info(\"move link \" + link + \" --> \" + targ)",
"def createLink(self):\n \n if( self.useLink ):\n trymakedir( self.parent.installPath + \"/\" + self.alias )\n\n os.chdir( self.parent.installPath + \"/\" + self.alias )\n \n # check for already existing symlinks or dirs \n if( os.path.islink( self.version )):\n os.unlink( self.version )\n elif( os.path.isdir( self.version )):\n self.abort( \"could not create link to [ \" + self.linkPath + \" ]\\nin [ \" \\\n + os.path.basename( self.installPath ) + \" ]!!!\" )\n\n os.symlink( self.linkPath , self.version )\n print \"+ Linking \" + self.parent.installPath + \"/\" + self.alias + \"/\" + self.version \\\n + \" -> \" + self.linkPath",
"def symlink_force(target: str, link_name: str):\n\n # os.replace() may fail if files are on different filesystems\n link_dir = os.path.dirname(link_name)\n\n while True:\n temp_link_name = tempfile.mktemp(dir=link_dir)\n try:\n os.symlink(target, temp_link_name)\n break\n except FileExistsError:\n pass\n try:\n os.replace(temp_link_name, link_name)\n except OSError: # e.g. permission denied\n os.remove(temp_link_name)\n raise",
"def new_realpath(name):\n if name.startswith('link-to-ham'):\n return name[len('link-to-'):]\n else:\n return name",
"def overwrite_dead_symlinks ( self ):\n return self.value & self.OV_SYM_DEAD",
"def import_via_link(self, path, csum):\n blob = self.csum_to_path(csum)\n duplicate = blob.exists()\n if not duplicate:\n ensure_link(blob, path)\n ensure_readonly(blob)\n return duplicate",
"def _safe_setup_link(link_filename, real_filename):\r\n real_filename = os.path.relpath(real_filename, os.path.dirname(link_filename))\r\n\r\n if os.path.exists(link_filename):\r\n try:\r\n os.unlink(link_filename)\r\n except OSError:\r\n pass\r\n try:\r\n os.symlink(real_filename, link_filename)\r\n except OSError as e:\r\n # Typically permission denied.\r\n pass",
"def force_link(src, dst):\n try:\n os.unlink(dst)\n os.link(src, dst)\n except OSError:\n os.link(src, dst)",
"def symlink(target, path):\n unlink(path)\n path = os.path.realpath(path)\n target = os.path.relpath(os.path.realpath(target), os.path.dirname(path))\n logging.info('Symlinking %s -> %s', path, target)\n os.symlink(target, path)",
"def reverser(num_segs=3):\n r = re.compile(\"((\\/([0-9]|[a-f])+){%d})$\" % (num_segs+1))\n def checksum_from_link(link):\n \"\"\"Takes a path into the userdata, returns the matching csum.\"\"\"\n m = r.search(safetype(link))\n if (m):\n csum_slash = m.group()[1:]\n csum = _remove_sep_(csum_slash)\n return csum\n else:\n raise ValueError(\"link %s checksum didn't parse\" %(link))\n return checksum_from_link",
"def symlink(path, v=False):\r\n if not os.path.exists(path):\r\n err(path + ' : no such file or directory')\r\n elif not os.path.isdir(path):\r\n err(path + ' : not a directory')\r\n else:\r\n theme_name = os.path.basename(os.path.normpath(path))\r\n theme_path = os.path.join(_THEMES_PATH, theme_name)\r\n if os.path.exists(theme_path):\r\n err(path + ' : already exists')\r\n else:\r\n if v:\r\n print(\"Linking `{p}' to `{t}' ...\".format(p=path, t=theme_path))\r\n try:\r\n os.symlink(path, theme_path)\r\n except Exception as e:\r\n err(\"Cannot link `{p}' to `{t}':\\n{e}\".format(p=path, t=theme_path, e=str(e)))",
"def ln(src, dst):\n os.symlink(src, dst)",
"def force_symlink(target_path, link_location):\n\n pardir = os.path.dirname(link_location)\n if not os.path.exists(pardir):\n os.makedirs(pardir)\n\n if os.path.lexists(link_location):\n assert os.path.islink(link_location), \\\n \"The path {} exists but is not a symlink\".format(link_location)\n if os.readlink(link_location) != target_path:\n os.remove(link_location)\n os.symlink(target_path, link_location)\n else:\n os.symlink(target_path, link_location)",
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)",
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)"
] | [
"0.6718929",
"0.66228133",
"0.6465445",
"0.6442848",
"0.6344745",
"0.6339009",
"0.6291033",
"0.625382",
"0.62475646",
"0.62025553",
"0.6126322",
"0.61046165",
"0.6099585",
"0.6029164",
"0.5990421",
"0.5971646",
"0.59707147",
"0.5963814",
"0.5950644",
"0.5917308",
"0.58955467",
"0.5869705",
"0.5848351",
"0.5827815",
"0.5810428",
"0.58022594",
"0.578599",
"0.5780806",
"0.5779617",
"0.5779617"
] | 0.72160524 | 0 |
Returns True when the blob's checksum matches. Returns False when there is a checksum corruption. | def verify_blob_checksum(self, blob):
path = self.csum_to_path(blob)
csum = path.checksum()
return csum != blob | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_checksum(self):\n return self.calculate_checksum() == self.checksum()",
"def verify_checksum(self):\n return self.generate_header_checksum(omit_checksum=False) == 0",
"def _verify_checksum(data, checksum):\n sha256_hash = hashlib.sha256(data).hexdigest().encode()\n return to_bin(sha256_hash)[0 : len(data) * 8 // 32] == checksum",
"def validate_checksum(blob: bytes, offset: int, length: int):\n\n checksum = ord(blob[offset + length - 1:offset + length])\n data_sum = sum(\n struct.unpack('%dB' % (length - 1), blob[offset:offset + length - 1])\n )\n if 0xff & (data_sum + checksum) != 0:\n raise ValueError('The data do not match the checksum')",
"def _validate_checksum(self, msg: bytes) -> bool:\n return self._checksum(msg) == msg[8]",
"def valid_checksum(self, msg: dict) -> bool:\n packed_seg = struct.pack(HEADER_FORMAT + DATA_FORMAT, msg['seq_nr'], msg['ack_nr'], msg['flag'].value,\n msg['win'], msg['dlen'], 0, msg['data'])\n cksum = self.calc_checksum(packed_seg)\n return cksum == msg['cksum']",
"def check_crc(chunk, crc):\n\n crc = bytes(crc)\n crc_this = bytes(ensure_crc(crc16.crc16xmodem(bytes(chunk))).encode('utf-8'))\n if crc_this == crc:\n return True\n else:\n return False",
"def checkChecksum(self):\n if not self.checkPacketLength():\n return False\n return CCSDS.DU.DataUnit.checkChecksum(self)",
"def check(self, stream):\n return np.all(self._crc(stream.copy()) == 0)",
"def check(self) -> bool:\n return self.check_sum() == self.__md5_sum",
"def compare(self, checksum):\n real_checksum = checksum\n if len(checksum) > self.hasher_size:\n real_checksum = checksum[0:self.hasher_size]\n afile = checksum[self.hasher_size:len(checksum)]\n self.path = os.path.join(self.path, afile)\n self.compute()\n return self.real_checksum == real_checksum",
"def verify_checksum(message, previous_csum=0):\n if message.message_type in CHECKSUM_MSG_TYPES:\n csum = compute_checksum(\n message.checksum[0],\n message.args,\n previous_csum,\n )\n\n if csum == message.checksum[1]:\n return True\n else:\n return False\n else:\n return True",
"def _bytes_match(fd: BinaryIO, expected: bytes) -> bool:\n try:\n offset = fd.tell()\n data = fd.read(len(expected))\n fd.seek(offset)\n return data == expected\n except IOError:\n return False",
"def checkChecksum(key):\n\t#decode to base256\n\tcheckKey = enc.b58decode(key)\n\tchecksum = checkKey[-4:]\n\thash = hashlib.sha256(hashlib.sha256(checkKey[:-4]).digest()).digest()[:4]\n\tif hash == checksum:\n\t\treturn True\n\telse:\n\t\treturn False",
"def has_checksum_file(self):\n return self.checksum_file_path.is_file()",
"def _check_md5(self):\n\n self.log.info('-' * 80)\n self.log.info('Check md5 sum')\n\n self.log.info(self._ref_value)\n self.log.info(self._output_file)\n\n code, out = cmd_exec(['md5sum', self._output_file], shell=False, log=self.log)\n if code:\n self.log.error(out)\n return False\n self.log.info(out)\n\n md5sum, _ = out.split(' ')\n\n self.log.info(f'reference md5: {self._ref_value}')\n self.log.info(f'actual md5: {md5sum}')\n\n if self._ref_value != md5sum:\n return False\n\n return True",
"def check_pack_checksums():\n conn = sqlite3.connect(DBNAME)\n c = conn.cursor()\n for row in c.execute(\"SELECT lower(hex(sum)) FROM packs\"):\n checksum = row[0]\n res = s3.get_object(Bucket=BUCKET, Key=f\"{checksum}.pack\")\n body = res[\"Body\"]\n h = blake3.blake3()\n for chunk in iter(lambda: body.read(4096), b\"\"):\n h.update(chunk)\n\n c = h.hexdigest()\n if c != checksum:\n raise ValueError(\"pack {checksum}: checksum {c} does not match\")",
"def check_crc(function_specific_data, crc):\n crc_cal = calculate_crc(function_specific_data)\n \n if crc == crc_cal:\n return True\n else:\n return False",
"def _is_hash_valid(self):\n downloaded_hash = sha1(self._downloaded_bytes).digest()\n return downloaded_hash == self.hash",
"def bech32_verify_checksum(hrp, data):\n return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1",
"def verify_sum(file_path, md5_sum):\n file_md5_sum = generate_sum(file_path)\n return (file_md5_sum == md5_sum)",
"def crcCheck(serialMessage):\n checkResult = False\n\n #CRC from serial message\n crc = int.from_bytes(serialMessage[14:16], byteorder='little', signed=False)\n #calculated CRC\n crcCalc = libscrc.modbus(serialMessage[0:14])\n\n if crc == crcCalc:\n checkResult = True\n\n return checkResult",
"def check_hmac_signature(self, message):\n data = message[:-20]\n checksum = message[-20:]\n hmac_data = hmac.new(bytes(self.settings['hmac_key'].encode('utf-8')), bytes(data), hashlib.sha1)\n\n return True if hmac_data.digest() == checksum else False",
"def check_md5(filename, stored_md5):\n computed_md5 = _get_file_md5(filename)\n if stored_md5 != computed_md5:\n print (\"MD5 checksum of filename\", filename, \"failed. Expected MD5 was\", stored_md5,\n \"but computed MD5 was\", computed_md5, '\\n',\n \"Please check if the data has been downloaded correctly or if the upstream data has changed.\")",
"def test_wrong_checksum(self):\n self.assertNotEqual(utils.checksum('fooo'), b'A')",
"def hash_comparison(self):\n for result in self.cards:\n if result.hash_status:\n return True\n return False",
"def check_magic(self, target: str):\n\t\twith open(target, \"rb+\") as archive:\n\t\t\tmagic = archive.read(4)\n\t\t\tif magic == struct.pack(\"I\", self.magic):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False",
"def verify_blob_permissions(self, blob):\n path = self.csum_to_path(blob)\n return is_readonly(path)",
"def need_checksum(self):\n if self.skip_checksum:\n log.warning(\"Skip checksum because --skip-checksum is specified\")\n return False\n # There's no point running a checksum compare for selective dump\n if self.where:\n log.warning(\"Skip checksum because --where is given\")\n return False\n # If the collation of primary key column has been changed, then\n # it's high possible that the checksum will mis-match, because\n # the returning sequence after order by primary key may be vary\n # for different collations\n for pri_column in self._pk_for_filter:\n old_column_tmp = [\n col for col in self._old_table.column_list if col.name == pri_column\n ]\n if old_column_tmp:\n old_column = old_column_tmp[0]\n new_column_tmp = [\n col for col in self._new_table.column_list if col.name == pri_column\n ]\n if new_column_tmp:\n new_column = new_column_tmp[0]\n if old_column and new_column:\n if not is_equal(old_column.collate, new_column.collate):\n log.warning(\n \"Collation of primary key column {} has been \"\n \"changed. Skip checksum \".format(old_column.name)\n )\n return False\n # There's no way we can run checksum by chunk if the primary key cannot\n # be covered by any index of the new schema\n if not self.validate_post_alter_pk():\n if self.skip_pk_coverage_check:\n log.warning(\n \"Skipping checksuming because there's no unique index \"\n \"in new table schema can perfectly cover old primary key \"\n \"combination for search\".format(old_column.name)\n )\n return False\n else:\n # Though we have enough coverage for primary key doesn't\n # necessarily mean we can use it for checksum, it has to be an\n # unique index as well. Skip checksum if there's no such index\n if not self.find_coverage_index():\n log.warning(\n \"Skipping checksuming because there's no unique index \"\n \"in new table schema can perfectly cover old primary key \"\n \"combination for search\".format(old_column.name)\n )\n return False\n return True",
"def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True"
] | [
"0.7736474",
"0.7485926",
"0.74634707",
"0.7280268",
"0.71696",
"0.71337336",
"0.71063906",
"0.7051834",
"0.69469905",
"0.6904954",
"0.6893968",
"0.68395376",
"0.6808126",
"0.6773289",
"0.67445666",
"0.673051",
"0.6638136",
"0.6498566",
"0.6496091",
"0.63991594",
"0.6348812",
"0.62603706",
"0.6240219",
"0.6238587",
"0.62296706",
"0.621206",
"0.6205156",
"0.6194259",
"0.6190626",
"0.61808467"
] | 0.8861687 | 0 |
Returns True when the blob's permissions is read only. Returns False when the blob is mutable. | def verify_blob_permissions(self, blob):
path = self.csum_to_path(blob)
return is_readonly(path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_only(self):\n return bool(self.__read_only)",
"def is_read_only(self):\n\t\treturn bool(call_sdk_function('PrlShare_IsReadOnly', self.handle))",
"def is_read_only(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_read_only\")",
"def get_can_read(self):\n\t\tif not self.can_read:\n\t\t\tself.build_permissions()\n\t\treturn self.can_read",
"def read_only(self) -> Optional[bool]:\n return self._read_only",
"def is_read_only(self):\n return self.__aceQLHttpApi.is_read_only()",
"def storage_can_read(self):\n return True",
"def public_read_access(self) -> typing.Optional[bool]:\n return self._values.get('public_read_access')",
"def isReadOnly(self) -> bool:\n ...",
"def isReadOnly(self) -> bool:\n ...",
"def is_read_only(self):\n return (self.get_name().startswith(\"b\")\n or self.get_name() == \"jump_cond\" # meta-instruction\n or self.get_name() == \"j\"\n or self.get_name() == \"ld\"\n or self.get_name() == \"lw\"\n or self.get_name() == \"lb\")",
"def _has_read_perm(self, perm: WorkspacePermission) -> bool:\n read_perms = [\n WorkspacePermission.ADMINISTRATOR,\n WorkspacePermission.READ_WRITE,\n WorkspacePermission.READ,\n ]\n return perm in read_perms",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")",
"def read_only(self) -> bool:\n return self._widget._mgui_get_read_only()",
"def canread(self):\n return False",
"def IsReadOnly(self) -> bool:",
"def is_writable(self, object, content_type):\n return False",
"def has_edit_permissions(ps_or_token, selected_dataset_id):\n try:\n role = pennsieve_get_current_user_permissions(selected_dataset_id, ps_or_token)[\"role\"]\n except Exception as e:\n abort(500, \"Could not get permissions for this dataset.\")\n\n return role in [\"owner\", \"manager\"]",
"def _is_xblock_read_only(xblock):\r\n # We allow direct editing of xblocks in DIRECT_ONLY_CATEGORIES (for example, static pages).\r\n if xblock.category in DIRECT_ONLY_CATEGORIES:\r\n return False\r\n component_publish_state = compute_publish_state(xblock)\r\n return component_publish_state == PublishState.public",
"def isReadOnly(self):\n try:\n return self._editor.isReadOnly()\n except AttributeError:\n return not self._editor.isEditable()",
"def check_permission(perm_mode, flags=stat.S_IWOTH):\n return bool(perm_mode & flags)",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True",
"def canwrite(self):\n return False",
"def can_read_blob(func, data, user):\n if user.is_superuser:\n return func(data, user)\n\n if data._blob is not None:\n _check_can_read(data._blob, user)\n\n return func(data, user)",
"def edit_allowed(self):\n account = Account.current_user_account\n if account is None:\n return False\n return self.user_can_edit(account.user)"
] | [
"0.74087244",
"0.7357655",
"0.7207462",
"0.7194548",
"0.718061",
"0.7174269",
"0.7069815",
"0.70423627",
"0.70013654",
"0.70013654",
"0.6821539",
"0.6797577",
"0.67828053",
"0.67828053",
"0.67828053",
"0.67828053",
"0.6748725",
"0.65980434",
"0.65751696",
"0.65058196",
"0.65027124",
"0.6495943",
"0.64822286",
"0.6470459",
"0.64678365",
"0.64678365",
"0.64613426",
"0.6449268",
"0.64394903",
"0.6409206"
] | 0.7997186 | 0 |
Iterator across all blobs | def blobs(self):
def blob_iterator():
with s3conn(self.access_id, self.secret) as s3:
key_iter = s3.list_bucket(self.bucket, prefix=self.prefix+"/")
for key in key_iter:
blob = key[len(self.prefix)+1:]
yield blob
return blob_iterator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def blob_generator(self):\n for blob in self.data:\n yield blob",
"def blob_stats(self):\n def blob_iterator():\n with s3conn(self.access_id, self.secret) as s3:\n key_iter = s3.list_bucket2(self.bucket, prefix=self.prefix+\"/\")\n for head in key_iter:\n blob = head[LIST_BUCKET_KEY][len(self.prefix)+1:]\n head['blob'] = blob\n yield head\n return blob_iterator",
"def blobs(self):\n blobs = pipeline(\n ftype_selector([FILE]),\n fmap(first),\n fmap(self.reverser),\n )(self.root.entries())\n return blobs",
"def iter_any(self) -> AsyncStreamIterator[bytes]:\n ...",
"def _iter_images(self):\n for image in self._images:\n yield image",
"def _iter_images(self):\n raise NotImplementedError",
"def __iter__(self):\r\n try:\r\n dup_fp = self._fp.dup()\r\n except self._fp.Error:\r\n log.error('Failed to dup %r' % self._fp)\r\n return\r\n\r\n try:\r\n while True:\r\n blob = RecordIO.Reader.do_read(dup_fp, self._codec)\r\n if blob:\r\n yield blob\r\n else:\r\n break\r\n finally:\r\n dup_fp.close()",
"def iterate(self):",
"def blob_generator(bucket_name, pattern):\n cloud_bucket = get_gcsbucket(bucket_name)\n for blob in cloud_bucket.objects():\n if blob.key.endswith(pattern):\n yield blob.uri",
"def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:\n ...",
"def fileobjects_iter(imagefile=None,xmlfile=None,fiwalk=\"fiwalk\",flags=0):\n def local_iter(fi):\n yield fi\n fiwalk_using_sax(imagefile=imagefile,xmlfile=xmlfile,fiwalk=fiwalk,flags=flags,\n callback=local_iter)",
"def iterator(self):\n yield",
"def iterdescriptors(self):",
"def __iter__(self):\n\n return iter(self.files)",
"def for_each_chunk(blob: Blob, chunk_size: int=default_chunk_size, async_queue: Optional[AsyncQueue]=None):\n reader = Reader(blob, chunk_size=chunk_size)\n if async_queue is not None:\n for chunk_number in reader._unfetched_chunks:\n async_queue.put(reader._fetch_chunk, chunk_number)\n for chunk in async_queue.consume():\n yield chunk\n else:\n for chunk_number in reader._unfetched_chunks:\n yield reader._fetch_chunk(chunk_number)",
"def _iter_images(self):\n for image in self._images:\n yield np.array(image.convert('RGB'))",
"def bytes_iteration(self) -> global___Statement.Iteration.BytesIteration:",
"def iterate_bucket_objects(self, bucket):\n client = self.credentials.session.client('s3')\n page_iterator = client.list_objects_v2(Bucket=bucket)\n if 'Contents' not in page_iterator:\n return []\n for item in page_iterator['Contents']:\n yield item",
"def __iter__(self):\n return self.contents.__iter__()",
"def __iter__(self):\n return self.contents.__iter__()",
"def getBlobs( self ):\n return self.__blobs;",
"def get_images(eol_id):\n page = 1\n while True:\n details_url = f\"https://eol.org/api/pages/1.0/{eol_id}.json\"\n payload = {\"id\": eol_id, \n \"images_per_page\": 75,\n \"images_page\": page,\n }\n r = requests.get(details_url, params=payload)\n\n response = json.loads(r.text)\n content = response[\"taxonConcept\"]\n if not \"dataObjects\" in content:\n return\n\n for item in content[\"dataObjects\"]:\n yield item[\"mediaURL\"]\n page += 1",
"def __iter__(self):\n for item in self._reader:\n yield item",
"def iterate(self):\n raise NotImplementedError()",
"def __iter__():",
"def __iter__():",
"def __iter__():",
"def __iter__():",
"def test_list_blobs(*args, **kwargs):\n bucket_or_name = args[0]\n prefix = kwargs['prefix']\n candidate_path = f'{bucket_or_name}/{prefix}'\n config_paths = []\n\n for c in config_hierarchy:\n if c.startswith(candidate_path):\n fn = '/'.join(c.split('/')[1:])\n b = Blob(bucket='dummy', name=fn)\n config_paths.append(b)\n\n return iter(config_paths)",
"def __iter__(self) -> Iterator[Any]:\n return iter(self.contents)"
] | [
"0.7579303",
"0.6972601",
"0.6484718",
"0.64345145",
"0.6410156",
"0.62850124",
"0.6241016",
"0.61964935",
"0.6173275",
"0.61723167",
"0.61494774",
"0.6100578",
"0.60827994",
"0.6061901",
"0.6057329",
"0.60366875",
"0.60343045",
"0.6032287",
"0.5957977",
"0.5957977",
"0.59544235",
"0.59458286",
"0.59206074",
"0.58913887",
"0.5859999",
"0.5859999",
"0.5859999",
"0.5859999",
"0.5855018",
"0.58406585"
] | 0.7229534 | 1 |
Iterator across all blobs, retaining the listing information | def blob_stats(self):
def blob_iterator():
with s3conn(self.access_id, self.secret) as s3:
key_iter = s3.list_bucket2(self.bucket, prefix=self.prefix+"/")
for head in key_iter:
blob = head[LIST_BUCKET_KEY][len(self.prefix)+1:]
head['blob'] = blob
yield head
return blob_iterator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def blobs(self):\n def blob_iterator():\n with s3conn(self.access_id, self.secret) as s3:\n key_iter = s3.list_bucket(self.bucket, prefix=self.prefix+\"/\")\n for key in key_iter:\n blob = key[len(self.prefix)+1:]\n yield blob\n return blob_iterator",
"def blob_generator(self):\n for blob in self.data:\n yield blob",
"def blobs(self):\n blobs = pipeline(\n ftype_selector([FILE]),\n fmap(first),\n fmap(self.reverser),\n )(self.root.entries())\n return blobs",
"def list_blobs(self, prefix=''):\n return [b.name for b in self.bucket.list_blobs(prefix=prefix)]",
"def test_list_blobs(*args, **kwargs):\n bucket_or_name = args[0]\n prefix = kwargs['prefix']\n candidate_path = f'{bucket_or_name}/{prefix}'\n config_paths = []\n\n for c in config_hierarchy:\n if c.startswith(candidate_path):\n fn = '/'.join(c.split('/')[1:])\n b = Blob(bucket='dummy', name=fn)\n config_paths.append(b)\n\n return iter(config_paths)",
"def get_images(eol_id):\n page = 1\n while True:\n details_url = f\"https://eol.org/api/pages/1.0/{eol_id}.json\"\n payload = {\"id\": eol_id, \n \"images_per_page\": 75,\n \"images_page\": page,\n }\n r = requests.get(details_url, params=payload)\n\n response = json.loads(r.text)\n content = response[\"taxonConcept\"]\n if not \"dataObjects\" in content:\n return\n\n for item in content[\"dataObjects\"]:\n yield item[\"mediaURL\"]\n page += 1",
"def iterate_bucket_objects(self, bucket):\n client = self.credentials.session.client('s3')\n page_iterator = client.list_objects_v2(Bucket=bucket)\n if 'Contents' not in page_iterator:\n return []\n for item in page_iterator['Contents']:\n yield item",
"def list_blobs(bucket_name):\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n print(storage_client.current_batch)\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n # print(len([1 for blob in blobs]))\n for blob in blobs:\n print(blob.name)",
"def _recurse(self) -> Iterator[str]:\n\n client: s3.Client = boto3.client('s3')\n\n decoded_url = urlparse(self.url)\n bucket_name = decoded_url.netloc\n\n paginator = client.get_paginator('list_objects_v2')\n\n page_iterator: PageIterator = paginator.paginate(\n Bucket=bucket_name,\n Prefix=decoded_url.path.lstrip('/'),\n )\n\n for page in page_iterator:\n records = page.get('Contents', [])\n\n for record in records:\n key = record['Key']\n yield f's3://{bucket_name}/{key}'",
"def list_files(self, container_name, dir_path):\n blobs = RetryHandler.retry(lambda: self.blob_client.list_blobs(container_name, prefix=dir_path))\n for b in blobs:\n yield b.name",
"def blobs(self):\n if not self._blobs:\n workspace = self.attributes.workspace\n # Instantiates a google client, & get all blobs in bucket\n storage_client = storage.Client(project=self._user_project)\n bucket = storage_client.bucket(workspace['bucketName'], user_project=self._user_project)\n # get subset of data\n _blobs = {}\n try:\n for b in bucket.list_blobs(fields='items(size, etag, crc32c, name, timeCreated),nextPageToken'):\n name = f\"gs://{workspace['bucketName']}/{b.name}\"\n # cache.put(name, {'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n _blobs[name] = AttrDict({'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n self._blobs = _blobs\n except Exception as e:\n print(f\"{self.id} {workspace['bucketName']} {e}\")\n self._blobs = _blobs\n return self._blobs",
"def fileobjects_iter(imagefile=None,xmlfile=None,fiwalk=\"fiwalk\",flags=0):\n def local_iter(fi):\n yield fi\n fiwalk_using_sax(imagefile=imagefile,xmlfile=xmlfile,fiwalk=fiwalk,flags=flags,\n callback=local_iter)",
"def iterate(self):",
"def getBlobs( self ):\n return self.__blobs;",
"def blob_generator(bucket_name, pattern):\n cloud_bucket = get_gcsbucket(bucket_name)\n for blob in cloud_bucket.objects():\n if blob.key.endswith(pattern):\n yield blob.uri",
"def list(self, prefix=\"\"):\n try:\n list_rep = self.client.listdir(self.bucket + \"/\" + prefix)\n for i in list_rep:\n # Remove preceding bucket name and potential leading slash from returned key value\n i = i.replace(self.bucket, \"\").replace('tar', 'wsp.sz')\n if i[0] == '/': i = i[1:]\n yield i\n except pyhdfs.HdfsFileNotFoundException:\n pass",
"def get_list_of_blobs(bucket_name, prefix=None, delimiter=None):\r\n\r\n # initialize client\r\n storage_client = storage.Client()\r\n\r\n # get list blobs\r\n blobs = storage_client.list_blobs(bucket_name, prefix=prefix, delimiter=delimiter)\r\n\r\n for blob in blobs:\r\n print(blob.name)\r\n\r\n if delimiter:\r\n print(\"Prefixes:\")\r\n for prefix in blobs.prefixes:\r\n print(prefix)\r\n\r\n return None",
"def list(self, glob_pattern=\"\"):\r\n # strip relative path so we don't step outside our emulated storage area\r\n glob_pattern = force_local_path(glob_pattern)\r\n\r\n # analyze glob_pattern to determine how to return blob names\r\n\r\n # if glob_pattern is a folder\r\n if not glob_pattern:\r\n # default to all blobs at the root level\r\n glob_pattern = \"*\"\r\n elif is_folder(f\"{self._blob_folder()}/{glob_pattern}\"):\r\n # if glob_pattern is a folder, return all blobs within folder\r\n glob_pattern = f\"{force_trailing_slash(glob_pattern)}*\"\r\n else:\r\n # use glob_pattern as-is\r\n pass\r\n\r\n # retrieve sorted blob names\r\n target_path = f\"{self._blob_folder()}/{glob_pattern}\"\r\n\r\n # build list of blob names with local parent path stripped from names\r\n blob_names = list()\r\n for blob_name in sorted(glob.glob(target_path)):\r\n # format name using Linux path delimiters\r\n blob_name = blob_name.replace(chr(92), \"/\")\r\n blob_name = blob_name.replace(f\"{self._blob_folder()}/\", \"\")\r\n blob_names.append(blob_name)\r\n\r\n blob_count = len(blob_names)\r\n logger.debug(\r\n self._context(f\"list({glob_pattern}) returned {blob_count} blob names\")\r\n )\r\n logger.debug(self._context(f\"list({glob_pattern}) = {blob_names}\"))\r\n return blob_names",
"def chunk_content(self):\n entries = DataObject.objects.filter(uuid=self.uuid)\n for entry in entries:\n if entry.compressed:\n data = BytesIO(entry.blob)\n z = zipfile.ZipFile(data, \"r\")\n content = z.read(\"data\")\n data.close()\n z.close()\n yield content\n else:\n yield entry.blob",
"def _iter_images(self):\n for image in self._images:\n yield image",
"def list_command(rsf_file, output_format):\n\n try:\n if output_format:\n stream = StringIO()\n list_blobs(rsf_file, output_format, stream)\n\n click.echo(stream.read())\n\n else:\n result = list_blobs(rsf_file)\n\n for blob in result:\n click.echo(repr(blob))\n\n except RegistersException as err:\n utils.error(str(err))",
"def list_blobs(rsf_file, output_format=None, stream=None):\n\n cmds = rsf.read(rsf_file)\n register = Register(cmds)\n\n utils.check_readiness(register)\n blobs = register.log.blobs\n\n if output_format == \"csv\":\n schema = register.schema()\n headers = [attr.uid for attr in schema.attributes]\n xsv.serialise(stream, blobs, headers)\n\n stream.seek(0)\n\n return None\n\n if output_format == \"json\":\n utils.serialise_json({repr(k): v for k, v in blobs.items()}, stream)\n\n stream.seek(0)\n\n return None\n\n return blobs.values()",
"def enumerate_files(self, table):\n for i in range(self.nrofrecords()):\n data = self.bank.readrec(i + 1)\n if data and data[0] == table.tableid:\n yield i + 1, data[1:]",
"def list_bucket_objects(bucket):\n for obj in BUCKET_MANAGER.all_objects(bucket).all():\n print(obj)",
"def _iter_images(self):\n raise NotImplementedError",
"def __iter__(self):\r\n try:\r\n dup_fp = self._fp.dup()\r\n except self._fp.Error:\r\n log.error('Failed to dup %r' % self._fp)\r\n return\r\n\r\n try:\r\n while True:\r\n blob = RecordIO.Reader.do_read(dup_fp, self._codec)\r\n if blob:\r\n yield blob\r\n else:\r\n break\r\n finally:\r\n dup_fp.close()",
"def next(self) -> List[object]:\n ...",
"def iterRegularFileContents(self):\n unpack = {}\n for (oldFileId, newFileId), stream in self.files.iteritems():\n if not files.frozenFileHasContents(stream):\n continue\n if files.frozenFileFlags(stream).isEncapsulatedContent():\n continue\n cont = files.frozenFileContentInfo(stream)\n unpack[newFileId] = cont.sha1()\n\n want_tag = '0 ' + ChangedFileTypes.file[4:]\n while True:\n f = self._nextFile()\n if not f:\n break\n name, tag, fobj, csf = f\n if len(name) != 36 or tag != want_tag:\n continue\n fileId = name[16:]\n sha1 = unpack.get(fileId)\n if not sha1:\n continue\n yield sha1, fobj",
"def iter_list(self):\n # XXX better docstring for this one\n\n if self.plaintext:\n fin = io.StringIO(self.plaintext)\n else:\n try:\n filename = os.path.join(self.plaintext_dir, '%s.list' % self.list_name)\n fin = io.open(filename, encoding=self.encoding)\n except IOError:\n filename = os.path.join(self.plaintext_dir, '%s.list.gz' % self.list_name)\n fin_raw = gzip.GzipFile(filename)\n fin = codecs.getreader(self.encoding)(fin_raw)\n\n for rawline in forward_stream(fin, self.re_guard):\n line = rawline.rstrip()\n if not re.match('-+$', line):\n yield line",
"def iterator(self):\n yield"
] | [
"0.68466896",
"0.6780061",
"0.6164148",
"0.6107229",
"0.60251284",
"0.60030335",
"0.5962732",
"0.5953068",
"0.58656806",
"0.58382636",
"0.5753683",
"0.5752837",
"0.5747022",
"0.57239527",
"0.57012093",
"0.56974304",
"0.56971043",
"0.56695175",
"0.56648165",
"0.56566805",
"0.5654154",
"0.5635201",
"0.5630587",
"0.56151897",
"0.55871314",
"0.55805105",
"0.5572288",
"0.55719185",
"0.55715597",
"0.5570182"
] | 0.72505546 | 0 |
reload a module, either larch or python | def _reload(mod,larch=None,**kw):
if isinstance(mod, str):
return larch.import_module(mod, do_reload=True)
for k,v in chain(larch.symtable._sys.modules.iteritems(), sys.modules.iteritems()):
if v == mod:
modname = k
break
try:
return larch.import_module(modname,do_reload=True)
except NameError:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reload_module(module_name):\n try:\n reload(eval(module_name))\n except:\n pass",
"def reloadModule(module):\n\ttry:\n\t\treload # Python 2.7\n\texcept NameError:\n\t\ttry:\n\t\t\tfrom importlib import reload # Python 3.4+\n\t\texcept ImportError:\n\t\t\tfrom imp import reload # Python 3.0 - 3.3\n\n\treload(module)",
"def reload(self,module):\n try:\n code = 'import %s; reload(%s)' % ((module.__name__,)*2)\n except AttributeError:\n code = 'import %s; reload(%s)' % ((module,)*2)\n self.workers.exec_code(code)",
"def reload(self):\n\n\t\tif self.module is None:\n\t\t\t# Do nothing, as the module will be imported on attribute access.\n\t\t\tpass\n\t\telse:\n\t\t\texec \"reload(\" + self.name + \")\"\n\t\t\t# The module object is still identical, only its code has been\n\t\t\t# replaced. Thus no eval(self.name) is necessary.",
"def reload(*mods):\n for mod in mods:\n importlib.reload(importlib.import_module(mod))",
"def reload_from_cwd(module, reloader=...):\n ...",
"def onReload(self,moduleName=\"NeedleFinder\"):\n if profiling : profbox()\n #framework\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)",
"def reload_import(path, hard = True):\r\n\r\n # in case the path is not present in the\r\n # system modules no need to reload\r\n if not path in sys.modules: return\r\n\r\n # in case the hard approach for reloading is\r\n # taken the system modules should be changed\r\n if hard:\r\n # retrieves the module for the given path from\r\n # system module and then removes it from the system\r\n # modules and then deletes it from the virtual\r\n # machine environment\r\n module = sys.modules[path]\r\n del sys.modules[path]\r\n del module\r\n # otherwise the \"soft\" reload provides the normal\r\n # module reload method\r\n else:\r\n # retrieves the module for the given path from\r\n # system module and then forces a reload on the\r\n # module (to flush the contents)\r\n module = sys.modules[path]\r\n legacy.reload(module)",
"def onReload(self, moduleName=\"NeedleFinder\"):\r\n if profiling : profbox()\r\n # framework\r\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)",
"def reload_dependences(module):\n tree = get_reversed_tree()\n reload(module)\n for dependant in tree[module]:\n reload(dependant)",
"def reload(self):\n self.rpc.call(MsfRpcMethod.CoreReloadModules)",
"def onReload(self,moduleName=\"FlexCrop\"):\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)",
"def onReload(self,moduleName=\"MarkupsInViewsSelfTest\"):\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)",
"def reload():\n import cubegame\n importlib.reload(cubegame)\n exec(\"from cubegame import *\")",
"def reload_module_by_name(mod_name, var_name):\n for mod in list(sys.modules.keys()):\n if mod_name in mod:\n del sys.modules[mod]\n if var_name in globals():\n del globals()[var_name] # deletes the variable named <var_name>\n return importlib.__import__(mod_name)",
"def command_reload(interface,command,args):\n command_unload(interface,command,args)\n command_load(interface,command,args)",
"async def _reload(self, ctx, *, module: str=None):\n if module is None or module == \"all\":\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n for extension in startup_extensions:\n self.bot.unload_extension(extension)\n self.bot.load_extension(extension)\n except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')\n else:\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n self.bot.unload_extension(module)\n self.bot.load_extension(module)\n except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')",
"def reload(mod):\n import difflib, imp, logging\n # Set the logger\n logger = logging.getLogger(\"myfuncs.reload\")\n logger.addHandler( logging.NullHandler() )\n logger.setLevel( logging.DEBUG )\n #\n if mod.__file__[-1] in \"oc\":\n mod.__file__ = mod.__file__[:-1]\n # end if\n #\n if \"__track_source__\" in mod.__dict__:\n orig = mod.__track_source__\n else:\n orig = None\n # end if\n #\n # Read the source file in its current state.\n with open(mod.__file__, \"r\") as fid:\n mod.__track_source__ = fid.readlines()\n # end with\n #\n # Check for differences and report any changes.\n logger.debug(mod.__file__)\n if orig is None:\n for it in range(len(mod.__track_source__)):\n logger.debug(\"{:d} {:s}\".format( \\\n it+1, mod.__track_source__[it].rstrip() \\\n ) )\n # end for\n else:\n diffs = difflib.unified_diff( \\\n orig, mod.__track_source__, \\\n fromfile=\"Original\", tofile=\"Updated\" \\\n )\n for line in diffs:\n logger.debug(line.rstrip())\n # end for\n # end if\n return imp.reload(mod)",
"async def reload(ctx, name):\n await unload_extension(name, channel=ctx.channel)\n await load_extension(name, channel=ctx.channel)",
"def transitive_reload(module, visited):\n if not module in visited:\n status(module)\n reload(module)\n visited[module] = None\n for attrobj in module.__dict__.values():\n if type(attrobj) is types.ModuleType:\n transitive_reload(attrobj, visited)",
"def _reloadLc(self, lc):\n # unicon\n dialog = Dialog([\n Statement(pattern=r'Proceed\\[y\\/n\\]\\?.*',\n action='sendline(y)',\n loop_continue=True,\n continue_timer=False),\n Statement(pattern=r'\\(y\\/n\\)\\?.*',\n action='sendline(y)',\n loop_continue=True,\n continue_timer=False)\n ])\n # Execute command to reload LC\n self.device.execute('reload module {}'.format(lc), reply=dialog)\n time.sleep(5)",
"async def reload(self, ctx, *, extension: str):\r\n try:\r\n self.bot.unload_extension(extension)\r\n self.bot.load_extension(extension)\r\n await ctx.send(f\":ok_hand: Reloaded module `{extension}`\")\r\n except Exception as e:\r\n await ctx.send(f\":sob: I-I'm sorry, I couldn't reload the `{extension}` module >w< \"\r\n + f\"```py\\n{traceback.format_exc()}```\")",
"def __load_python_module_dynamically(module_name, put_in_cache=True):\n if module_name in sys.modules:\n module_obj = sys.modules[module_name]\n else:\n exec (\"import {0}\".format(module_name))\n module_obj = eval(module_name)\n\n module_obj = type_inference_proxy_copy.TypeInferenceProxy(module_obj).clone()\n if put_in_cache:\n __put_module_in_sys_cache(module_name, module_obj)\n return module_obj",
"def reload_subs(verbose=True):\n if verbose:\n print('Reloading submodules')\n rrr(verbose=verbose)\n def wrap_fbrrr(mod):\n def fbrrr(*args, **kwargs):\n \"\"\" fallback reload \"\"\"\n if verbose:\n print('No fallback relaod for mod=%r' % (mod,))\n # Breaks ut.Pref (which should be depricated anyway)\n # import imp\n # imp.reload(mod)\n return fbrrr\n def get_rrr(mod):\n if hasattr(mod, 'rrr'):\n return mod.rrr\n else:\n return wrap_fbrrr(mod)\n def get_reload_subs(mod):\n return getattr(mod, 'reload_subs', wrap_fbrrr(mod))\n get_rrr(util_graph)(verbose=verbose)\n rrr(verbose=verbose)\n try:\n # hackish way of propogating up the new reloaded submodule attributes\n reassign_submodule_attributes(verbose=verbose)\n except Exception as ex:\n print(ex)",
"def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])",
"def reload(self):",
"def reload(self):",
"async def tool_reload(self, ctx, *, cog: str):\n\n try:\n self.bot.unload_extension(cog)\n self.bot.load_extension(cog)\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))\n else:\n await ctx.send('**`SUCCESS`**')",
"def update_module(conn, module, chunk_size = 16000):\n rmodule = conn.modules[module.__name__]\n lf = inspect.getsourcefile(module)\n rf = conn.modules.inspect.getsourcefile(rmodule)\n upload_file(conn, lf, rf, chunk_size = chunk_size)\n conn.modules.__builtin__.reload(rmodule)",
"def comando_reload(self):\r\n\tif args.opcao == 'gne':\r\n configs = self.reload_gne_framework(args.file, args.loja, args.serie, args.nnf)\r\n return configs\r\n else:\r\n configs = self.reload_daruma_framework(args.file)\r\n return configs"
] | [
"0.80148953",
"0.7742815",
"0.7427528",
"0.73654693",
"0.7348668",
"0.7205982",
"0.7027763",
"0.6984617",
"0.69592416",
"0.69405115",
"0.6676888",
"0.6640121",
"0.6631857",
"0.65679514",
"0.655727",
"0.6529555",
"0.647623",
"0.64265627",
"0.639218",
"0.6344517",
"0.63303524",
"0.62734234",
"0.61255735",
"0.61002505",
"0.6084173",
"0.6076261",
"0.6076261",
"0.6076175",
"0.60423887",
"0.603605"
] | 0.8660127 | 0 |
show lines of text in the style of more | def show_more(text,filename=None,writer=None,pagelength=30,prefix=''): # pragma: no cover
pager(text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def abbrev_help_more_text(self):\n pass",
"def generate_excerpt():",
"def echo(self):\n new_paragraph = True\n for line in self.msg.splitlines():\n if not line.strip():\n new_paragraph = True\n click.echo()\n elif new_paragraph:\n click.secho(line, fg='yellow', bold=True)\n new_paragraph = False\n else:\n click.secho(line, fg='yellow')\n click.echo('\\n' + self.style_class_and_exc_name())",
"def show_more ( url, url_extern, info='Mehr ...' ) :\n return show_link ( url, info, url_extern )",
"def __str__(self):\n\t\treturn self.text[:50] + \"...\"",
"def summary_line_and_description():",
"def display(self):\n art = \"\\n\".join([\"\".join(row) for row in self.text])\n if self.args.output:\n with open(self.args.output, \"w\") as f:\n f.write(art)\n\n if self.args.verbose:\n print(art)",
"def msg(caller, text=\"\", **kwargs):\n always_more = kwargs.pop(\"always_more\", False)\n EvMore(caller, text, always_more, **kwargs)",
"def printme_text(self, line):\n self.otag.printme_text(line)",
"def msg(\n caller,\n text=\"\",\n always_page=False,\n session=None,\n justify=False,\n justify_kwargs=None,\n exit_on_lastpage=True,\n **kwargs,\n):\n EvMore(\n caller,\n text,\n always_page=always_page,\n session=session,\n justify=justify,\n justify_kwargs=justify_kwargs,\n exit_on_lastpage=exit_on_lastpage,\n **kwargs,\n )",
"def large_text(self):\n pass",
"def pager():\n lines = []\n for x in range(200):\n lines.append('%s. Hello World!' % click.style(str(x), fg='green'))\n click.echo_via_pager('\\n'.join(lines))",
"def small_text(self):\n pass",
"def double_line():\n print (\"=============================================================\")",
"def _shortened_description(self):\n limit = 150\n for ticket in self:\n d = html.fromstring((ticket.description or '').strip()\n or ' ').text_content()\n ticket.shortened_description = (d[:limit] + u'(…)'\n if len(d) > limit else d)",
"def the_display(self):\r\n return f\"\"\"\r\n {self.display[0]}\\n\r\n {self.display[1]}\\n\r\n {self.display[2]}\\n\r\n {self.display[3]}\\n\r\n {self.display[4]}\\n\r\n \"\"\"",
"def more_info(self, more_info: \"str\"):\n self._attrs[\"moreInfo\"] = more_info",
"def msg(text):\n for line in text.splitlines():\n if JS.alignment == \"left\":\n print(demarkup(line))\n elif JS.alignment == \"center\":\n print(demarkup(line).center(get_terminal_size()[0] - 1))\n else:\n print(demarkup(line).rjust(get_terminal_size()[0] - 1))",
"async def more(self, ctx: Context) -> None:\r\n e_title: str = MORE_TITLE\r\n e_message: str = \"\\n\".join(self.yt_result.titles)\r\n embed: Embed = Embed(title=e_title, description=e_message, color=int(\"CC181E\", 16))\r\n\r\n await ctx.send(embed=embed)\r\n await ctx.message.delete()",
"def show(self):\n print highlight(self.current_content, self.lexer(), Formatter())",
"def format_text(self):\n for line, _ in enumerate(self.readlines()[:-1]):\n self.root.colour_line(line + 1)",
"def _one_line_summary_from_text(text, length=78,\n escapes={'\\n':\"\\\\n\", '\\r':\"\\\\r\", '\\t':\"\\\\t\"}):\n if len(text) > length:\n head = text[:length-3]\n else:\n head = text\n escaped = _escaped_text_from_text(head, escapes)\n if len(text) > length:\n summary = escaped[:length-3] + \"...\"\n else:\n summary = escaped\n return summary",
"def less(self, printto, what, nr=365):\n if type(what) == types.ListType: txtlist = what\n else:\n what = what.strip()\n txtlist = splittxt(what, nr)\n size = 0\n if not txtlist: \n logging.debug(\"can't split txt from %s\" % what)\n return [\"\", \"\"]\n res = txtlist[0]\n length = len(txtlist)\n if length > 1:\n logging.info(\"addding %s lines to %s outputcache\" % (len(txtlist), printto))\n outcache.set(u\"%s-%s\" % (self.name, printto), txtlist[1:])\n res += \"<b> - %s more</b>\" % (length - 1) \n return [res, length]",
"def return_excerpt():",
"def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if self.font else self.text",
"def show_theme_message(width):\n print_dotted_line()\n print_bold(\"Attack of The Orcs v0.0.5:\")\n msg = (\n \"\"\"\n Hrabri vitez Talion vraća se iz boja i želi predahnuti u jednom seocetu. \n U seocu se nalazi 5 kućica i želi stupiti u jednu od njih. \n Hrabri vitez ne zna da u ovom području ima neprijatelja.\n Odluči se za jedna od vrata ...\n \"\"\")\n\n print(textwrap.fill(msg, width=width))",
"def display_text(target_text):\n\n print('Text to analyze:')\n print('')\n print('-------TEXT BELOW-------')\n print(target_text)\n print('-------TEXT ENDS-------')\n print('')",
"def show_text(text, args):\n return expyriment.stimuli.TextLine(text,\n text_font=args[\"--text-font\"],\n text_size=args[\"--text-size\"],\n text_colour=args[\"stimuli_color\"],\n background_colour=args[\"bg_color\"])",
"def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))",
"def showText(pos):\n\treturn OnscreenText( \\\n\t\ttext=\" \", \\\n\t\tstyle=1, fg=(0,0,0,1), pos=(-1.3, pos), \\\n\t\talign=TextNode.ALeft, scale = .06, mayChange = True)"
] | [
"0.6258086",
"0.61826456",
"0.6127772",
"0.5877098",
"0.5868962",
"0.5836664",
"0.5801012",
"0.57793504",
"0.5729908",
"0.57035375",
"0.5700666",
"0.5665235",
"0.56474644",
"0.56377524",
"0.5631425",
"0.56087565",
"0.5605236",
"0.5601699",
"0.55921155",
"0.55473125",
"0.55454236",
"0.5539451",
"0.5539339",
"0.55383193",
"0.5498281",
"0.545966",
"0.54389167",
"0.5435856",
"0.5392124",
"0.5373433"
] | 0.6663092 | 0 |
class that maintains all data related to the regulatory on the FContact | def __init__(self, contact = None):
try:
self.__contact = contact
if not self.__contact:
FRegulatoryLogger.ERROR(logger, "The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object")
return None
self.__reg_date_of_birth = None
self.__reg_first_name = None
self.__reg_last_name = None
self.__reg_national_id = None
self.__reg_crm_id = None
self.__crm_id_source = None
self.__reg_exchange_id = None
self.__reg_unique_name = None
self.__client_type = None
self.__is_general_partner = None
if contact:
self.__refresh(contact)
self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
except Exception as e :
FRegulatoryLogger.ERROR(logger, str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RegulatoryInfo(self):\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo",
"def Attributes(self):\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)",
"def __init__(self):\n\n\t\tself.__contacts = None\n\t\tself.__accounts = None\n\t\tself.__deals = None\n\t\tself.__key_modified = dict()",
"def _back_compat_hook_ri_data(self):\n\n # Nothing to do.\n if not (hasattr(cdp, 'frq_labels') and hasattr(cdp, 'noe_r1_table') and hasattr(cdp, 'remap_table')):\n return\n\n # Initialise the new structures.\n cdp.ri_ids = []\n cdp.ri_type = {}\n frq = {} # This will be placed into cdp later as cdp.spectrometer_frq still exists.\n\n # Generate the new structures.\n for i in range(cdp.num_ri):\n # The ID.\n ri_id = \"%s_%s\" % (cdp.ri_labels[i], cdp.frq_labels[cdp.remap_table[i]])\n\n # Not unique.\n if ri_id in cdp.ri_ids:\n # Loop until a unique ID is found.\n for j in range(100):\n # New id.\n new_id = \"%s_%s\" % (ri_id, j)\n\n # Unique.\n if not new_id in cdp.ri_ids:\n ri_id = new_id\n break\n\n # Add the ID.\n cdp.ri_ids.append(ri_id)\n\n # The relaxation data type.\n cdp.ri_type[ri_id] = cdp.ri_labels[i]\n\n # The frequency data.\n frq[ri_id] = cdp.frq[cdp.remap_table[i]]\n\n # Delete the old structures.\n del cdp.frq\n del cdp.frq_labels\n del cdp.noe_r1_table\n del cdp.num_frq\n del cdp.num_ri\n del cdp.remap_table\n del cdp.ri_labels\n\n # Set the frequencies.\n cdp.frq = frq",
"def __init__(self):\n\t\tself.freq_count = [0] * 4 \n\t\t\"\"\" \n\t\ta connection priority value is defined as the \n\t\tno of occurrences of this particular relation between this pair of taxa \n\t\tminus the sum of no of occurrences of other relation types between this couplet\n\t\t\"\"\"\n\t\tself.priority_reln = [0] * 4 \n\t\t\"\"\" \n\t\tthis is the support score for different types of relations between a couplet\n\t\t\"\"\"\n\t\tself.support_score = [0] * 4\n\t\t\"\"\" \n\t\tFor this couplet, it stores the extra gene count with respect to all the gene trees\n\t\t\"\"\"\n\t\tself.XL_sum_gene_trees = []\n\t\t\"\"\" \n\t\tthis variable stores the no of trees supporting the taxa pair \n\t\t\"\"\"\n\t\tself.supporting_trees = 0\n\t\t\"\"\"\n\t\tthis list maintains the consensus relations for this couplet\n\t\twith respect to the input trees\n\t\t\"\"\"\n\t\tself.consensus_reln_list = []\n\t\t\"\"\"\n\t\tthis list contains the union of taxa list underlying the LCA of this couplet\n\t\tfor individual input trees\n\t\t\"\"\"\n\t\tself.LCA_Underlying_Taxa_List = []\n\t\t\"\"\"\n\t\tthis is a variable containing the binned average of the XL values\n\t\tof very high frequency\n\t\tinitially the value is set as -1, to signify that the computation is not done\n\t\tonce the computation (for a couplet) is done, the value is subsequently used and returned\n\t\t\"\"\"\n\t\tself.binned_avg_XL = -1\n\t\t\"\"\"\n\t\tallowed relation list which are included in the support score queue\n\t\t\"\"\"\n\t\tself.Allowed_Reln_List = []",
"def get_feature_value(self,contact_only=True):\n\n sql = pdb2sql(self.pdbfile)\n xyz_info = sql.get('chainID,resSeq,resName',name='CB')\n xyz = sql.get('x,y,z',name='CB')\n\n xyz_dict = {}\n for pos,info in zip(xyz,xyz_info):\n xyz_dict[tuple(info)] = pos\n\n contact_residue = sql.get_contact_residue()\n contact_residue = contact_residue[0] + contact_residue[1]\n sql.close()\n\n pssm_data_xyz = {}\n pssm_data = {}\n\n for res,data in zip(self.res_data,self.pssm_data):\n\n if contact_only and res not in contact_residue:\n continue\n\n if tuple(res) in xyz_dict:\n chain = {'A':0,'B':1}[res[0]]\n key = tuple([chain] + xyz_dict[tuple(res)])\n sasa = self.sasa[tuple(res)]\n\n pssm_data[res] = [data*sasa]\n pssm_data_xyz[key] = [data*sasa]\n else:\n printif([tuple(res), ' not found in the pdbfile'],self.debug)\n\n # if we have no contact atoms\n if len(pssm_data_xyz) == 0:\n pssm_data_xyz[tuple([0,0.,0.,0.])] = [0.0]\n pssm_data_xyz[tuple([1,0.,0.,0.])] = [0.0]\n\n self.feature_data['pssm'] = pssm_data\n self.feature_data_xyz['pssm'] = pssm_data_xyz",
"def contact_information(self) -> ContactInformation:\n return self._contact_information",
"def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df",
"def __init__(self):\n self.organization_id = ''\n self.name = ''\n self.is_default_org = None\n self.account_created_date = ''\n self.time_zone = ''\n self.language_code = ''\n self.date_format = ''\n self.field_separator = ''\n self.fiscal_year_start_month = ''\n self.contact_name = ''\n self.industry_type = ''\n self.industry_size = ''\n self.company_id_label = ''\n self.company_id_value = ''\n self.tax_id_label = ''\n self.tax_id_value = ''\n self.currency_id = ''\n self.currency_code = ''\n self.currency_symbol = ''\n self.currency_format = ''\n self.price_precision = 0\n self.address = Address()\n self.org_address = ''\n self.remit_to_address = ''\n self.phone = ''\n self.fax = ''\n self.website = ''\n self.email = ''\n self.tax_basis = ''\n self.is_org_active = None\n self.name = ''\n self.value = ''\n self.version = ''\n self.plan_type = 0\n self.plane_name = ''\n self.plan_period = ''\n self.tax_group_enabled = None\n self.account_created_date_formatted = \"\"\n self.zi_migration_status = 0\n self.user_role = ''\n self.custom_fields = []\n self.is_new_customer_custom_fields = None\n self.is_portal_enabled = None\n self.portal_name = ''\n self.tax_type = ''",
"def contact_info(self):\n return self._contact_info",
"def contact_info(self):\n return [\n {\n 'contact_info': c.get('contactInfo'),\n 'type': c.get('type'),\n 'primary': c.get('primary'),\n 'verified': c.get('verified'),\n }\n for c in self.entity_payload.get('contactInfo')]",
"def constraintData(self):\n pass",
"def ERCC_Stat(self):\n self.l_ERCC_name = []\n# self.l_RGCs_name = []\n self.l_mRNA_name = []\n self.l_ERCC_FPKM = {}\n# self.l_RGCs_FPKM = {}\n self.l_mRNA_FPKM = {}\n\n self.l_ERCC_UMI = {}\n self.l_mRNA_UMI = {}\n\n# self.l_cirRNA_FPKM={}\n \n self.l_ERCC_HTSname = []\n# self.l_RGCs_HTSname = []\n self.l_mRNA_HTSname = []\n self.l_ERCC_RPKM = {}\n# self.l_RGCs_RPKM = {}\n self.l_mRNA_RPKM = {}\n \n \n self.l_ERCC_MOLs = {}\n# self.l_RGCs_MOLs = {}\n self.l_mRNA_MOLs = {}\n self.l_cirRNA_MOLs={}\n self.l_mRNA_MOLs_HTSname = {}\n \n self.regression = {}\n \n self.__load_FPKM()\n self.__load_MOLs() # ERCC RGC mols\n self.__get_mRNA_MOLs() # get mRNA mols using ERCC_FPKM, ERCC_MOLs and mRNA_FPKM\n# self.__load_Count()\n self.__load_umi()\n \n out_file = \"%s/02.%s.ERCC_Mols.xls\" % (self.dir_StatInfo, self.s_idx)\n f_out_file = open( out_file,\"w\" )\n \n l_info = [\n \"Sample\", \"ERCC_MOLs\", \"mRNA_MOLs\",\n \"RefSeq_mRNA_MOLs\", \"Regression_R\",\n \"Regression_P\", \"RefSeq_mRNA_TPM>0\", \"ERCC_UMI\", \"RefSeq_mRNA_UMI\"\n ] \n print >>f_out_file, \"\\t\".join(l_info)\n \n for samp in self.samInfo_pd_RNA['sample']:\n idx = (self.samInfo_pd_RNA['sample'] == samp)\n brief_name = self.samInfo_pd_RNA[idx]['brief_name'].values[0]\n rename = self.samInfo_pd_RNA[idx]['rename'].values[0]\n\n ERCC_MOLs = sum( self.l_ERCC_MOLs[brief_name])\n# RGC_MOLs = sum( self.l_RGCs_MOLs[brief_name])\n mRNA_MOLs = np.sum(self.l_mRNA_MOLs[brief_name])\n RefSeq_mRNA_MOLs = \\\n np.sum(self.l_mRNA_MOLs[brief_name][self.mRNA_refSeq_index])\n \n RefSeq_mRNA_lFPKM = \\\n np.array(self.l_mRNA_FPKM[brief_name],dtype=float)\n \n RefSeq_mRNA_lFPKM = RefSeq_mRNA_lFPKM[ self.mRNA_refSeq_index ]\n RefSeq_mRNA_Exps = \\\n np.shape(RefSeq_mRNA_lFPKM[RefSeq_mRNA_lFPKM > 0])[0]\n\n regression_R = self.regression[brief_name]['r_value']\n regression_P = self.regression[brief_name]['p_value']\n \n RefSeq_mRNA_lUMI = \\\n np.array(self.l_mRNA_UMI[brief_name],dtype=int)\n\n RefSeq_mRNA_UMI_count = np.sum( RefSeq_mRNA_lUMI )\n\n ERCC_lUMI = \\\n np.array(self.l_ERCC_UMI[brief_name],dtype=int)\n\n ERCC_UMI_count= np.sum( ERCC_lUMI )\n \n l_out = [\n rename,\n ERCC_MOLs, mRNA_MOLs, RefSeq_mRNA_MOLs,\n regression_R, regression_P, RefSeq_mRNA_Exps, ERCC_UMI_count, RefSeq_mRNA_UMI_count\n ]\n l_out = [str(i) for i in l_out]\n print >>f_out_file, \"\\t\".join(l_out)\n f_out_file.close()",
"def get_contact_info(self):\n outputDict = {\"USERNAME\": consts.USERNAME,\n \"IP\": consts.IPADDRESS, \n \"MACHINE\": consts.HOSTNAME, \n \"EMAIL\": '[email protected]', \n \"PHONE\": '203-722-6620'} # ::: TO DO::: dynamically get phone and email info automatically\n return outputDict",
"def Commit(self):\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)\n self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, \"ABORTING TRANSACTION***********\")\n acm.AbortTransaction()",
"def __init__(self, raw_facil, raw_gir, raw_geo, proj):\n address1 = raw_facil.get('address1')\n address2 = raw_facil.get('address2')\n\n lon_lat = None\n if raw_geo:\n lon_lat = proj(\n raw_geo['longitude'],\n raw_geo['latitude'],\n inverse=True\n )\n\n self._init_attributes()\n self.source = 'facil-location'\n self.bldg_id = raw_facil['id']\n self.type = 'building'\n self.tags = []\n self.banner_abbreviation = raw_facil.get('abbreviation')\n self.name = raw_facil.get('name')\n self.campus = self._get_pretty_campus(raw_facil.get('campus'))\n self.address = self._get_address(address1, address2)\n self.city = raw_facil.get('city')\n self.state = raw_facil.get('state')\n self.zip = raw_facil.get('zip')\n self.geo_location = self._create_geo_location(\n lon_lat[0] if lon_lat else None,\n lon_lat[1] if lon_lat else None\n )\n self.geometry = self._create_geometry(\n raw_geo['coordinatesType'] if raw_geo else None,\n raw_geo['coordinates'] if raw_geo else None\n )\n self.gir_count = raw_gir['count'] if raw_gir else 0\n self.gir_limit = bool(raw_gir['limit'].strip()) if raw_gir and raw_gir['limit'] else None\n self.gir_locations = raw_gir['all'].strip() if raw_gir else None\n self.arcgis_abbreviation = (\n (raw_geo.get('abbreviation') if raw_geo else None)\n or (raw_gir.get('abbreviation') if raw_gir else None)\n )\n self.relationships = {'services': {'data': []}}\n self.merge = False\n self.open_hours = None\n self.description = None\n self.descriptionHtml = None\n self.images = None\n self.thumbnails = []\n self.website = None\n self.synonyms = None",
"def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []",
"def _get_cus_info(self):\n label_enc = LabelEncoder()\n customer_info = self._inv.drop_duplicates(['customer_code'], keep='last')\n customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',\n 'sales_cen_name', 'sales_region_name', 'province',\n 'city', 'district', 'customer_type', 'is_usable', 'channel_level']]\n customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])\n customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])\n customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])\n customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])\n customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])\n customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])\n customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])\n customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])\n customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])\n customer_info_encoded = customer_info.drop(\n columns=['customer_name', 'sales_cen_code', 'sales_cen_name',\n 'sales_region_name', 'province', 'city', 'district']\n ).set_index('customer_code')\n customer_info.set_index('customer_code', inplace=True)\n customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))\n return customer_info, customer_info_encoded",
"def __init__(self, contact_id=None, date=None, due_date=None, postponed_accounting=None, _import=None, contact_name=None, contact_reference=None, reference=None, vendor_reference=None, notes=None, total_quantity=None, net_amount=None, tax_amount=None, total_amount=None, currency_id=None, exchange_rate=None, inverse_exchange_rate=None, base_currency_net_amount=None, base_currency_tax_amount=None, base_currency_total_amount=None, status_id=None, tax_address_region_id=None, withholding_tax_rate=None, withholding_tax_amount=None, base_currency_withholding_tax_amount=None, invoice_lines=None, tax_analysis=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._contact_id = None\n self._date = None\n self._due_date = None\n self._postponed_accounting = None\n self.__import = None\n self._contact_name = None\n self._contact_reference = None\n self._reference = None\n self._vendor_reference = None\n self._notes = None\n self._total_quantity = None\n self._net_amount = None\n self._tax_amount = None\n self._total_amount = None\n self._currency_id = None\n self._exchange_rate = None\n self._inverse_exchange_rate = None\n self._base_currency_net_amount = None\n self._base_currency_tax_amount = None\n self._base_currency_total_amount = None\n self._status_id = None\n self._tax_address_region_id = None\n self._withholding_tax_rate = None\n self._withholding_tax_amount = None\n self._base_currency_withholding_tax_amount = None\n self._invoice_lines = None\n self._tax_analysis = None\n self.discriminator = None\n\n self.contact_id = contact_id\n self.date = date\n self.due_date = due_date\n if postponed_accounting is not None:\n self.postponed_accounting = postponed_accounting\n if _import is not None:\n self._import = _import\n if contact_name is not None:\n self.contact_name = contact_name\n if contact_reference is not None:\n self.contact_reference = contact_reference\n if reference is not None:\n self.reference = reference\n if vendor_reference is not None:\n self.vendor_reference = vendor_reference\n if notes is not None:\n self.notes = notes\n if total_quantity is not None:\n self.total_quantity = total_quantity\n if net_amount is not None:\n self.net_amount = net_amount\n if tax_amount is not None:\n self.tax_amount = tax_amount\n if total_amount is not None:\n self.total_amount = total_amount\n if currency_id is not None:\n self.currency_id = currency_id\n if exchange_rate is not None:\n self.exchange_rate = exchange_rate\n if inverse_exchange_rate is not None:\n self.inverse_exchange_rate = inverse_exchange_rate\n if base_currency_net_amount is not None:\n self.base_currency_net_amount = base_currency_net_amount\n if base_currency_tax_amount is not None:\n self.base_currency_tax_amount = base_currency_tax_amount\n if base_currency_total_amount is not None:\n self.base_currency_total_amount = base_currency_total_amount\n if status_id is not None:\n self.status_id = status_id\n if tax_address_region_id is not None:\n self.tax_address_region_id = tax_address_region_id\n if withholding_tax_rate is not None:\n self.withholding_tax_rate = withholding_tax_rate\n if withholding_tax_amount is not None:\n self.withholding_tax_amount = withholding_tax_amount\n if base_currency_withholding_tax_amount is not None:\n self.base_currency_withholding_tax_amount = base_currency_withholding_tax_amount\n self.invoice_lines = invoice_lines\n if tax_analysis 
is not None:\n self.tax_analysis = tax_analysis",
"def _get_information(self):\n pass",
"def __init__(self):\n # self.organism = []\n self.weighting_dict = defaultdict(list)\n # self.codon_obj_dict = {}\n self.codon_dict = {\n 'UUU':'F','UUC':'F',\n 'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L',\n 'AUU':'I','AUC':'I','AUA':'I',\n 'AUG':'M',\n 'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V',\n 'UCU':'S','UCC':'S','UCA':'S','UCG':'S',\n 'CCU':'P','CCC':'P','CCA':'P','CCG':'P',\n 'ACU':'T','ACC':'T','ACA':'T','ACG':'T',\n 'GCU':'A','GCC':'A','GCA':'A','GCG':'A',\n 'UAU':'Y','UAC':'Y',\n 'UAA':'X','UAG':'X',\n 'CAU':'H','CAC':'H',\n 'CAA':'Q','CAG':'Q',\n 'AAU':'N','AAC':'N',\n 'AAA':'K','AAG':'K',\n 'GAU':'D','GAC':'D',\n 'GAA':'E','GAG':'E',\n 'UGU':'C','UGC':'C',\n 'UGA':'X',\n 'UGG':'W',\n 'CGU':'R','CGC':'R','CGA':'R','CGG':'R',\n 'AGU':'S','AGC':'S',\n 'AGA':'R','AGG':'R',\n 'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G'\n }",
"def new_contact(self, context, payload):\n\n data = OntraportContact(\n contact_id= payload[\"data\"].get(\"id\"),\n first_name= payload[\"data\"].get(\"firstname\"),\n last_name= payload[\"data\"].get(\"lastname\"),\n email_address= payload[\"data\"].get(\"email\"),\n date= payload[\"data\"].get(\"date\"),\n office_phone= payload[\"data\"].get(\"office_phone\"),\n company= payload[\"data\"].get(\"company\"),\n title= payload[\"data\"].get(\"title\"),\n country= payload[\"data\"].get(\"country\"),\n zip_code= payload[\"data\"].get(\"zip\"),\n owner= payload[\"data\"].get(\"owner\"),\n unique_id= payload[\"data\"].get(\"unique_id\"),\n profile_image= payload[\"data\"].get(\"profile_image\")\n )\n return data.__dict__",
"def __init__(self, tefflogg=True, abundances=True, cannon=True):\n hdu = pyfits.open('../data/GALAH_DR2_withSH_feb2020.fits', names=True)\n data = hdu[1].data\n if tefflogg:\n data = data[ (data['teff']>5300) * (data['teff']<6500) *\n (data['logg']>3) * (data['logg']<5) ]\n if abundances:\n data = data[ (data['flag_o_fe']==0) * (data['flag_na_fe']==0) * \n (data['flag_mg_fe']==0) * (data['flag_al_fe']==0) * \n (data['flag_si_fe']==0) * (data['flag_k_fe']==0) * \n (data['flag_ca_fe']==0) * (data['flag_sc_fe']==0) * \n (data['flag_ti_fe']==0) * (data['flag_v_fe']==0) * \n (data['flag_cr_fe']==0) * (data['flag_mn_fe']==0) *\n (data['flag_ni_fe']==0) * (data['flag_cu_fe']==0) * \n (data['flag_zn_fe']==0) * (data['flag_y_fe']==0) * \n (data['flag_ba_fe']==0) #* (data['flag_la_fe']==0)\n ] #(data['e_ba_fe]<1)\n if cannon:\n data = data[ (data['flag_cannon']==0) ]\n self.data = data\n return None",
"def __init__(self, contact_detail):\n\t\tself.first_name = contact_detail['First Name'].strip()\n\t\tself.last_name = contact_detail['Last Name'].strip()\n\t\tself.mobile = contact_detail['Mobile Phone'].strip()\n\t\tself.email = contact_detail['E-mail Address'].strip()",
"def __init__(self, **kwargs):\n self.suspension = kwargs[\"suspension\"] # Suspension Status\n self.license = kwargs[\"license\"] # License Number\n self.first_name = kwargs[\"first_name\"] # First Name\n self.last_name = kwargs[\"last_name\"] # Last Name\n self.city = kwargs[\"city\"] # City\n self.state = kwargs[\"state\"] # State\n self.zipcode = kwargs[\"zipcode\"] # Zip Code\n self.gender = kwargs[\"gender\"] # Gender\n self.racing_age = kwargs[\"racing_age\"] # Racing Age\n self.expire_date = kwargs[\"expire_date\"] # License Expiration Date\n self.intl_team = kwargs[\"intl_team\"] # Intl Team\n self.road_cat = kwargs[\"road_cat\"] # Road Category\n self.track_cat = kwargs[\"track_cat\"] # Track Category\n self.xc_cat = kwargs[\"xc_cat\"] # XC Category\n self.dh_cat = kwargs[\"dh_cat\"] # DH Category\n self.ot_cat = kwargs[\"ot_cat\"] # OT Category\n self.mx_cat = kwargs[\"mx_cat\"] # MX Category\n self.cx_cat = kwargs[\"cx_cat\"] # CX Category\n self.birth_date = kwargs[\"birth_date\"] # Birthday\n self.citizenship = kwargs[\"citizenship\"] # Citizenship\n self.road_club_id = kwargs[\"road_club_id\"]\n self.road_team_id = kwargs[\"road_team_id\"] # RD Club/Team ID\n self.track_club_id = kwargs[\"track_club_id\"]\n self.track_team_id = kwargs[\"track_team_id\"] # Track Club/Team ID\n self.mtn_club_id = kwargs[\"mtn_club_id\"]\n self.mtn_team_id = kwargs[\"mtn_team_id\"] # MTN Club/Team ID\n self.cx_club_id = kwargs[\"cx_club_id\"]\n self.cx_team_id = kwargs[\"cx_team_id\"] # CX Club/Team ID\n self.coll_club_id = kwargs[\"coll_club_id\"] # Collegiate Club ID\n self.uci_code = kwargs[\"uci_code\"] # UCI Code\n self.cx_rank = kwargs[\"cx_rank\"] # CX Rank\n self.hs_club_id = kwargs[\"hs_club_id\"]\n self.hs_team_id = kwargs[\"hs_team_id\"] # HS Club/Team ID",
"def save_object(self, data):\n return Contact(**data)",
"def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)",
"def update_data(self, data):\r\n rebuild = False\r\n\r\n # This method needs to substitute some defaultdicts for the normal\r\n # dictionaries that come back from the server.\r\n\r\n # Metacontact information\r\n\r\n #if data['metacontacts']\r\n mc_dict = data.get('metacontacts', {})\r\n if not isinstance(mc_dict, dict):\r\n log.critical('invalid metacontacts dictionary')\r\n mc_dict = {}\r\n\r\n # Contact information like SMS numbers and email addresses.\r\n self.info = defaultdict(dict)\r\n\r\n si = self.info\r\n if 'info' in data:\r\n for (k, v) in data['info'].iteritems():\r\n if isinstance(k, str):\r\n cmpk = k.decode('utf8')\r\n else:\r\n cmpk = k\r\n\r\n if not isinstance(cmpk, unicode):\r\n continue\r\n\r\n if cmpk.startswith('Meta') or any((cmpk.endswith('_' + prot)\r\n for prot in protocols.iterkeys())):\r\n if any(v.values()):\r\n si[k] = v\r\n\r\n for c, v in si.iteritems():\r\n for attr in ('email', 'sms'):\r\n if attr in v:\r\n self.contact_info_changed(c, attr, v[attr])\r\n\r\n self.metacontacts = MetaContactManager(self, mc_dict)\r\n if hasattr(self, 'new_sorter'):\r\n on_thread('sorter').call(self.new_sorter.removeAllContacts)\r\n rebuild = True\r\n\r\n # Manual ordering of groups\r\n try:\r\n self.order = deepcopy(data['order'])\r\n self.order['groups'] = list(oset(self.order['groups']))\r\n contacts = self._filtered_contacts()\r\n self.order['contacts'] = defaultdict(list)\r\n self.order['contacts'].update(contacts)\r\n except Exception:\r\n log.critical('error receiving order')\r\n self._init_order()\r\n\r\n # note: loading tofrom data from the network is deprecated. this data\r\n # now goes out to disk. see save/load_local_data\r\n if 'tofrom' in data and isinstance(data['tofrom'], dict) and \\\r\n 'im' in data['tofrom'] and 'email' in data['tofrom']:\r\n self.dispatch.set_tofrom(deepcopy(data['tofrom']))\r\n\r\n if rebuild:\r\n self.rebuild()\r\n\r\n self.update_order()",
"def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> {contact.email}\\n\\n\")\"\"\"",
"def __init__(self):\n self.swagger_types = {\n 'source_contact': 'AddressableEntityRef',\n 'target_contact': 'AddressableEntityRef',\n 'resulting_contact': 'AddressableEntityRef'\n }\n\n self.attribute_map = {\n 'source_contact': 'sourceContact',\n 'target_contact': 'targetContact',\n 'resulting_contact': 'resultingContact'\n }\n\n self._source_contact = None\n self._target_contact = None\n self._resulting_contact = None"
] | [
"0.7621419",
"0.6440766",
"0.6052107",
"0.58735913",
"0.58644634",
"0.56104106",
"0.55951077",
"0.55670434",
"0.5546189",
"0.5410019",
"0.53739226",
"0.5360233",
"0.53541",
"0.53510576",
"0.52669334",
"0.5263579",
"0.5242533",
"0.52414125",
"0.521908",
"0.5218182",
"0.52145547",
"0.5182502",
"0.51648086",
"0.51618963",
"0.513556",
"0.51352626",
"0.5115933",
"0.5112706",
"0.510814",
"0.50906706"
] | 0.6573449 | 1 |
NationalId of the concerned natural person | def NationalId(self, reg_national_id = VALUE_NOT_SET):
if reg_national_id != VALUE_NOT_SET:
self.__reg_national_id = reg_national_id
try:
self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)
except:
pass
else:
if not self.__reg_national_id:
self.__reg_national_id = None
return self.__reg_national_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_NID():\n return NID",
"def government_id_number(self) -> str:\n return self._government_id_number",
"def get_person_id(person_data):\n person_ref = person_data['Casualty_Reference']\n veh_ref = person_data['Vehicle_Reference']\n acc_id = get_acc_id_from_data(person_data)\n person_id = common.get_gb_person_id(acc_id, int(veh_ref), int(person_ref))\n return person_id",
"def party_id(self):\n pass",
"def idn(self):\n hname = (ct.c_char * 100)()\n self.lib.GetHeadModel(ct.pointer(hname))\n hname = str(hname.value)[2:-1]\n sn = ct.c_uint()\n self.lib.GetCameraSerialNumber(ct.pointer(sn))\n return 'Andor ' + hname + ', serial number ' + str(sn.value)",
"def myID() -> np.int:\r\n return 304976335",
"def myID() -> np.int:\r\n return 304976335",
"def create(self, validated_data):\n national_id = validated_data['national_id']\n nid = str(national_id)\n year = int(nid[1:3])\n if int(nid[0]) == 2:\n year += 1900 # 2 -> add 20th century (1900)\n else:\n year += 2000 # 3 -> add 21th century (2000)\n\n validated_data['date_of_birth'] = date(\n year, int(nid[3:5]), int(nid[5:7]))\n\n validated_data['place_of_birth'] = GOVERNORATES[nid[7:9]]\n\n # digit 13 represents gender even -> femal, odd -> male\n if int(int(nid[12])) % 2 == 0:\n validated_data['gender'] = 'Female'\n else:\n validated_data['gender'] = 'Male'\n\n return NationalId.objects.create(**validated_data)",
"def organizational_id_number(self) -> str:\n return self._organizational_id_number",
"def identifier(self):\n mrn_field = 'Patient Account No'\n if mrn_field in self.data:\n ident = {\"system\": KentPatientAdapter.SITE_SYSTEM, \"value\": self.data[mrn_field]}\n # FHIR keeps lists of identifiers, return as list\n return [ident]",
"def get_identity(self):\n return self.query_serial('*IDN?')",
"def getSerpentId(self):\n return \"{}-nat\".format(self.element.symbol.capitalize())",
"def get_primary_id(self):",
"def annulus_ident(self) -> int:\n return self._ann_ident",
"def get_ident():\n return -1",
"def getID():",
"def identifier(self):\n return str(self._nfcid)",
"def nit_sin_digito_verificacion(self):\n\n return self.identificacion.split('-')[0]",
"def get_actual_id(translated):",
"def id(self) -> str:\n\n return self._inst.query('*IDN?')",
"def instrID(self):\n return self.query('*IDN?')",
"def internal_id(self) -> str:\n return pulumi.get(self, \"internal_id\")",
"def openneuro_id_lookup(rvid):\n onid = id_mapping.loc[id_mapping['SUBJECT_NUMBER'] == rvid, 'open_neuro_id'].values[0]\n return onid",
"def get_identifier(self):",
"def get_patient_nr(segment):\n try:\n national_register = str(segment[19])\n except IndexError:\n nr_list = segment[2:5]\n national_register = [nr for nr in nr_list if str(nr) is not \"\"].pop()[0]\n national_register = str(national_register).split(\"^\")[0]\n return national_register",
"def QueryIdentification(self):\n try:\n return self.instr.query(\"*IDN?\")\n except pyvisa.errors.VisaIOError:\n return np.nan",
"def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'",
"def num_id(self) -> str:\n return pulumi.get(self, \"num_id\")",
"def computed_id(o):\n\n if o.id is not None and o.id.startswith(namespace + \":\"):\n return o.id\n\n return \"{i.namespace}:{i.accession}\".format(i=computed_identifier(o))",
"def next_identity(self) -> PublicationId:\n ..."
] | [
"0.6752156",
"0.64024127",
"0.6256402",
"0.6193446",
"0.61844784",
"0.6149932",
"0.6149932",
"0.61367977",
"0.6133966",
"0.6050021",
"0.6030649",
"0.6021784",
"0.6012439",
"0.59253937",
"0.59218484",
"0.58435404",
"0.58401144",
"0.5793599",
"0.57825446",
"0.57644486",
"0.5758251",
"0.57570976",
"0.574923",
"0.574173",
"0.57220745",
"0.5715181",
"0.5685636",
"0.56685305",
"0.5658295",
"0.5642626"
] | 0.73979646 | 0 |
An optional unique name, if specified there can only be one contact with this name for each party. | def UniqueName(self, unique_name = VALUE_NOT_SET):
if unique_name != VALUE_NOT_SET:
try:
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:
self.__contact.UniqueName(unique_name)
else:
is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)
if is_unique:
try:
self.__contact.AdditionalInfo().UniqueName(unique_name)
except:
pass
else:
msg = "The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name."%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
self.__reg_unique_name = unique_name
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
else:
if not self.__reg_unique_name:
self.__reg_unique_name = None
return self.__reg_unique_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")",
"def party_id(self):\n pass",
"def create_contact(contact, party_type, party):\n\tcontact = contact\t.split(\" \")\n\n\tcontact = frappe.get_doc({\n\t\t\"doctype\":\"Contact\",\n\t\t\"first_name\":contact[0],\n\t\t\"last_name\": len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()",
"def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)",
"def contact_name(self, contact_name):\n\n self._contact_name = contact_name",
"def contact_name(self, contact_name):\n\n self._contact_name = contact_name",
"def _get_unique_name(self, name):\n\n return get_unique_notice_name(name, self.session, self.model_class)",
"def name_is_unique(self, name):\n unique = True\n for client in self.clients:\n unique = unique and (False if name == client.get_name() else True)\n return unique",
"def _get_name(self):\n partner = self\n name = partner.name or ''\n\n if partner.company_name or partner.parent_id:\n if not name and partner.type in ['invoice', 'delivery', 'other']:\n name = dict(self.fields_get(['type'])['type']['selection'])[partner.type]\n #if not partner.is_company:\n # name = \"%s, %s\" % (partner.commercial_company_name or partner.parent_id.name, name)\n if self._context.get('show_address_only'):\n name = partner._display_address(without_company=True)\n if self._context.get('show_address'):\n name = name + \"\\n\" + partner._display_address(without_company=True)\n name = name.replace('\\n\\n', '\\n')\n name = name.replace('\\n\\n', '\\n')\n if self._context.get('address_inline'):\n name = name.replace('\\n', ', ')\n if self._context.get('show_email') and partner.email:\n name = \"%s <%s>\" % (name, partner.email)\n if self._context.get('html_format'):\n name = name.replace('\\n', '<br/>')\n if self._context.get('show_vat') and partner.vat:\n name = \"%s ‒ %s\" % (name, partner.vat)\n return 'HOLA'",
"def add_person(self, name, email, typ, wants_accomodation='N'):\n if typ == \"FELLOW\":\n if not email in self.all_persons.keys():\n new_fellow = Fellow(name, email, wants_accomodation)\n self.fellows[email] = new_fellow\n self.allocate_room(new_fellow)\n return new_fellow\n else:\n return \"Email already used!\"\n elif typ == \"STAFF\":\n if not email in self.all_persons.keys():\n new_staff = Staff(name, email)\n self.staff[email] = new_staff\n self.allocate_room(new_staff)\n return new_staff\n else:\n return \"Email already used!\"\n else:\n return -1",
"def create_contact(contact, party_type, party, email):\n\tcontact = contact.split(' ')\n\n\tcontact = frappe.get_doc({\n\t\t'doctype': 'Contact',\n\t\t'first_name': contact[0],\n\t\t'last_name': len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('email_ids', dict(email_id=email, is_primary=1))\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()",
"def try_create_uniqe_name(self,name=None,plan_id=None):\n if self.valid_name(name):\n for i in range (1,20):\n new_name=name+\"_\"+str(i)\n if self.unique_name(name=new_name,plan_id=plan_id):\n return new_name\n return False\n else:\n return False",
"def primary_contact_name(self, primary_contact_name):\n\n self._primary_contact_name = primary_contact_name",
"def test_validate_party_info_name_is_none(self):\n self.party_test_data[\"name\"] = None\n response = validate_party_info(self.party_test_data)\n self.assertDictEqual(\n response, {\"message\": \"name is required\", \"code\": 400})",
"def _init_company_name(cls, company_name: str = None) -> str:\n if company_name and isinstance(company_name, str):\n return company_name\n if company_name:\n return TypeError(\"company_name kwarg should be an instance of str\")\n return FAKE.format('company')",
"def __assign_name_id(self):\n if not self.name_id:\n self.name_id = str(BaseTicketing.objects.create())",
"def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]",
"def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]",
"def name(self) -> Optional[str]:\n ...",
"def __str__(self):\n if self.name != None and self.name != '':\n return self.name\n else:\n return \"Organization object owned by %s.\"%(self.owner)",
"def add_contact(self):\n contact_mob_num = self._input_mob_num(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be added: \")\n if contact_mob_num == self._user.mob_num:\n print(\"You can't add yourself, IDIOT!!\")\n return self.homepage()\n \n found_contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if found_contact != None:\n print('A user with Mobile number: \"{0}\", and User name: \"{1}\" is found'.format(found_contact.mob_num, found_contact.username))\n user_choice = self._int_input_in_range(\" (1) Add the found user. \\n (0) Back to Home page \\n Your choice: \" \n ,range_ = (0, 1))\n if user_choice:\n add_flag = self._user.add_contact(found_contact)\n if not add_flag:\n print('This user is already one of your contacts')\n return self.homepage()\n print(\"Contact added successfully\")\n else:\n self.homepage()\n else:\n print('This user mobile number has no matches')\n return self.homepage()",
"def getContactByName(self, name):\n for contact in self.contacts:\n if name == contact.name:\n return contact\n\n return None",
"def clean_name(self):\n name = self.cleaned_data['name']\n if self.instance and self.instance.name and name == self.instance.name:\n return name\n if Organization.objects.filter(slug=slugify(name)).exists():\n raise forms.ValidationError(\n _('Organization %(name)s already exists'),\n params={'name': name},\n )\n return name",
"def contact_full_name(self):\n first = self.contact_first_name\n last = self.contact_last_name\n if first and last:\n return f'{first} {last}'\n return first or last",
"def sender(self, addr,name):\n self.s[name] = (addr,self.ssn.sender(addr)) \n return self.s[name]",
"def _cname(self,account_id):\n company = self.pool.get('account.account').browse(self.cr, self.uid, account_id).company_id\n self.caddress = self._cadd(company)\n return company.name",
"def add_person(self, per: str):\n if per not in self._people:\n self._people.append(per)\n else:\n raise IDAlreadyExists",
"def person_name(self, person_name):\n\n self._person_name = person_name",
"def person_name(self, person_name):\n\n self._person_name = person_name",
"def getMember(unique_name):"
] | [
"0.58776766",
"0.5617005",
"0.56108975",
"0.5571864",
"0.556685",
"0.556685",
"0.5513689",
"0.5500145",
"0.5463818",
"0.536946",
"0.5348271",
"0.53382397",
"0.53270036",
"0.5291315",
"0.52906907",
"0.5287689",
"0.52846396",
"0.52846396",
"0.52688885",
"0.52560043",
"0.5240531",
"0.522626",
"0.5222502",
"0.5219421",
"0.52177674",
"0.5210732",
"0.52037126",
"0.520165",
"0.520165",
"0.5200654"
] | 0.70607924 | 0 |
returns the ClientType based on where the CrmId is found on the linked objects | def ClientType(self):
self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
return self.__client_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def client_type(self) -> Optional[str]:\n return pulumi.get(self, \"client_type\")",
"def type(self):\n return _internals.CorrelationId_type(self)",
"def find_type(source, target):\n x = [r for r in source.synset_relations if r.target == target.id]\n if len(x) != 1:\n raise Exception(\n \"Synsets not linked or linked by more than one property\")\n return x[0].rel_type",
"def get_db_client(db_type):\n\tfor client_cls in DatabaseClient.__subclasses__():\n\t\ttry:\n\t\t\tif client_cls.meets_condition(db_type):\n\t\t\t\treturn client_cls()\n\t\texcept KeyError:\n\t\t\tcontinue\n\n\traise UnknownDatabaseType(db_type)",
"def test_get_relation_type(self):\n pass",
"def get_object_type(self, ref):\n ws = Workspace(self.ws_url)\n info = ws.get_object_info3({\"objects\": [{\"ref\": ref}]})\n obj_info = info.get(\"infos\", [[]])[0]\n if len(obj_info) == 0:\n raise RuntimeError(\"An error occurred while fetching type info from the Workspace. \"\n \"No information returned for reference {}\".format(ref))\n return obj_info[2]",
"def test_companies_company_id_connections_connection_id_options_data_type_get(self):\n pass",
"def getTypeID(self) -> int:\n ...",
"def test_get_contact_person_types_key(self):\n pass",
"def types_clients_view(request):\n query = request.dbsession.query(ClientType).all()\n return Utils.serialize_many(query)",
"def ObjectLinetype(object_ids, linetype=None):\n id = rhutil.coerceguid(object_ids, False)\n if id:\n rhino_object = rhutil.coercerhinoobject(id, True, True)\n oldindex = scriptcontext.doc.Linetypes.LinetypeIndexForObject(rhino_object)\n if linetype:\n newindex = scriptcontext.doc.Linetypes.Find(linetype, True)\n rhino_object.Attributes.LinetypeSource = Rhino.DocObjects.ObjectLinetypeSource.LinetypeFromObject\n rhino_object.Attributes.LinetypeIndex = newindex\n rhino_object.CommitChanges()\n scriptcontext.doc.Views.Redraw()\n return scriptcontext.doc.Linetypes[oldindex].Name\n\n newindex = scriptcontext.doc.Linetypes.Find(linetype, True)\n if newindex<0: raise Exception(\"%s does not exist in LineTypes table\"%linetype)\n for id in object_ids:\n rhino_object = rhutil.coercerhinoobject(id, True, True)\n rhino_object.Attributes.LinetypeSource = Rhino.DocObjects.ObjectLinetypeSource.LinetypeFromObject\n rhino_object.Attributes.LinetypeIndex = newindex\n rhino_object.CommitChanges()\n scriptcontext.doc.Views.Redraw()\n return len(object_ids)",
"def paypal_reference_id_type_enum(self) -> ReferenceIdType:\n return _REFERENCE_ID_MAPPINGS.get(self.paypal_reference_id_type)",
"def get_record_type_id(self, obj_type, developer_name):\n soql = \"SELECT Id FROM RecordType WHERE SObjectType='{}' and DeveloperName='{}'\".format(\n obj_type, developer_name\n )\n res = self.cumulusci.sf.query_all(soql)\n return res[\"records\"][0][\"Id\"]",
"def get_typ(self, refobj):\n enum = cmds.getAttr(\"%s.type\" % refobj)\n try:\n return JB_ReftrackNode.types[enum]\n except IndexError:\n raise ValueError(\"The type on the node %s could not be associated with an available type: %s\" %\n (refobj, JB_ReftrackNode.types))",
"def get_user_type(_id):\n _object = get_user_type_rec(_id, Doctor)\n if _object:\n return \"doctor\", _object\n _object = get_user_type_rec(_id, Pharmacist)\n if _object:\n return \"pharmacist\", _object\n _object = get_user_type_rec(_id, Patient)\n if _object:\n return \"patient\", _object\n return None, _object",
"def ObjectType(object_id):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n geom = rhobj.Geometry\n if isinstance(geom, Rhino.Geometry.Brep) and geom.Faces.Count==1:\n return 8 #surface\n return int(geom.ObjectType)",
"def read_object_type(self, object_type_id=''):\n # Return Value\n # ------------\n # object_type: {}\n #\n if not object_type_id:\n raise BadRequest(\"The resource_type_id parameter is missing\")\n return self.clients.resource_registry.read(object_type_id)",
"def specific_class(self):\n\n specific_type = ContentType.objects.get_for_id(self.specific_type_id)\n return specific_type.model_class()",
"def get_drs_object_type(object_info: dict) -> DRSObjectType:\n if \"form\" in object_info:\n if object_info[\"form\"] is None:\n return DRSObjectType.object\n return DRSObjectType(object_info[\"form\"])\n\n if \"contents\" in object_info and len(object_info[\"contents\"]) > 0:\n return DRSObjectType.bundle\n else:\n return DRSObjectType.object",
"def get_type(self):\n\t\treturn call_sdk_function('PrlSrvCfgDev_GetType', self.handle)",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")"
] | [
"0.5487231",
"0.5164598",
"0.51574206",
"0.51353335",
"0.5011951",
"0.49763677",
"0.49734464",
"0.49716905",
"0.49659985",
"0.49616665",
"0.49428558",
"0.49414885",
"0.4923532",
"0.49094817",
"0.48822343",
"0.48643944",
"0.48532623",
"0.48462522",
"0.48260435",
"0.48219457",
"0.48190778",
"0.48190778",
"0.48190778",
"0.48190778",
"0.48190778",
"0.48190778",
"0.48190778",
"0.48190778",
"0.48190778",
"0.48190778"
] | 0.5837794 | 0 |
Committing this instance will automatically commit all the RegulatorySupport-related attributes on the contact | def Commit(self):
try:
acm.BeginTransaction()
self.__contact.Commit()
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:
self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)
self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)
self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)
self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)
self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)
self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)
self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)
self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)
acm.CommitTransaction()
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
FRegulatoryLogger.ERROR(logger, "ABORTING TRANSACTION***********")
acm.AbortTransaction() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def commit(self):\n self.cnx.commit()",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n pass",
"def commit(self):\n return",
"def commitChanges(self):\n \n ## User is prompted that changes are being committed\n print(\"Committing changes to the CRM and Mailings database...\")\n db_connection.executeQuery(\"COMMIT;\")",
"def commit(self):\n self.success = True\n self.close()",
"def commit(self):",
"def commit(self):\n raise NotImplementedError",
"def commit(self) -> None:\n pass",
"def commit(self):\n # PEP 249\n pass",
"def commit(self):\n self.DB.commit()",
"def commit(self):\n #main.signals['exit'].disconnect(self.commit)\n self._dirty = False\n with self._lock:\n self._db.commit()",
"def commit( self ) :\n self.oracle_connection.commit()",
"async def commit(self):\n if await self.is_valid():\n await self.update(committed=True).apply()",
"def commit(self):\n self.db.commit()",
"def commit(self):\n self.session.commit()",
"def commit(self):\n self.getSession().commit()",
"def commit(self) -> None:\n self._connector.commit_transaction()",
"def commit(self):\n self.__connection.commit()",
"def commit(self):\n self.connection.commit()",
"def commit(self):\n try:\n db.session.commit()\n except:\n db.session.rollback()\n raise",
"def commit(self):\n raise multisearch.errors.FeatureNotAvailableError",
"def commit(self) -> None:\n commit_app_config()",
"def save(self):\n self.__db.commit()",
"def commit_changes(self):\n self.connection.commit()",
"def dbcommit(self):\n self.con.commit()",
"def commit(self):\n db.session.add(self)\n db.session.commit()"
] | [
"0.6179308",
"0.6118413",
"0.6118413",
"0.6118413",
"0.6118413",
"0.6118413",
"0.60592747",
"0.6013804",
"0.59319466",
"0.5926993",
"0.5916751",
"0.5899212",
"0.58974123",
"0.5881195",
"0.5875204",
"0.5863884",
"0.58605444",
"0.5854823",
"0.5801967",
"0.5795815",
"0.57171786",
"0.5715148",
"0.56427616",
"0.56227016",
"0.56123513",
"0.56111693",
"0.55959356",
"0.5595035",
"0.55750227",
"0.5570865"
] | 0.79224825 | 0 |
Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS | def Delete(self):
FRegulatoryUtils.Delete(self.__contact, "Contact")
FRegulatoryLogger.DEBUG(logger, "Deleted all AdditionalInfos on Contact related to Regulatory Reporting") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cleanup_aai(cls):\n logger.info(\"####################### Start to clean up AAI settings\")\n aai = Customer.get_by_global_customer_id(\"5GCustomer\")\n aai.delete()",
"def delete(self):\n self._instance.delete()\n self._instance = None\n self._data_defs = []",
"def clearRecord(self): \n if self._isinstalled:\n for f in self._table:\n try:\n del self.__dict__[f.name]\n except KeyError:\n pass\n \n for f in self._extra_sql_columns:\n try:\n del self.__dict__[f]\n except KeyError:\n pass\n \n self._original_values.clear()\n self._modified_values.clear()\n self._mtm_referencelist.clear()\n self._child_referencelist.clear()\n self._hasdata = False\n self._ismodified = False\n self._hasdata = False\n self._isnew = False\n self._objectid = None\n self._isinstalled = False\n self._astxt = \"(null)\"",
"def cleanup(self):\n for attribute in self._all_db_field_names:\n delattr(self, attribute)",
"def clean(self):\n self.clean_rally_conf()\n rally.RallyBase.clean_rally_logs()\n if self.image_alt:\n self.cloud.delete_image(self.image_alt)\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()",
"def _data_reset(self):\n conn = self.get_connection()\n\n elements = {\n **self.domain.registry.aggregates,\n **self.domain.registry.entities,\n **self.domain.registry.views,\n }\n for _, element_record in elements.items():\n provider = current_domain.providers[element_record.cls.meta_.provider]\n repo = self.domain.repository_for(element_record.cls)\n\n model_cls = repo._model\n if provider.conn_info[\n \"DATABASE\"\n ] == Database.ELASTICSEARCH.value and conn.indices.exists(\n model_cls._index._name\n ):\n conn.delete_by_query(\n refresh=True,\n index=model_cls._index._name,\n body={\"query\": {\"match_all\": {}}},\n )",
"def clear_data():\n logger.info(\"Delete Structure instances\")\n Structure.objects.all().delete()\n logger.info(\"Delete StructureType instances\")\n StructureType.objects.all().delete()\n logger.info(\"Delete Industry instances\")\n Industry.objects.all().delete()\n logger.info(\"Delete Price instances\")\n PriceList.objects.all().delete()\n logger.info(\"Delete Stock instances\")\n Stock.objects.all().delete()\n logger.info(\"Delete News instances\")\n News.objects.all().delete()\n logger.info(\"Delete NewsImages instances\")\n NewsImage.objects.all().delete()\n logger.info(\"Delete News Sections instances\")\n NewsCategorySection.objects.all().delete()\n logger.info(\"Delete Analysis instances\")\n AnalysisOpinion.objects.all().delete()\n logger.info(\"Delete Analysis Images instances\")\n AnalysisImage.objects.all().delete()\n logger.info(\"Delete Analysis Sections instances\")\n AnalysisCategorySection.objects.all().delete()",
"def delete(self,\n signal_kwargs=None,\n **write_concern):\n self._config.write_to_log(f\"Deleting {self.patientId} and associated documents...\")\n for references in [self.outcomeEvents,\n self.measurements,\n self.criticalCare]:\n for doc in references:\n doc.delete(signal_kwargs=signal_kwargs, **write_concern)\n super().delete(self=self,\n signal_kwargs=signal_kwargs,\n **write_concern)\n self._config.write_to_log(f\"Deleted patient and asssociated documents.\")",
"def __del__(self):\n\n # Base class destructor is called ?? needed\n sim.Simulation.__del__(self)\n\n if self.verbose:\n print \"Cleaning derived simulation object LAMMPS1\"\n\n del self.pairCoeffDct\n del self.bondCoeffDct",
"def deleteAll(self):\n self.deleteAttributeRange() #Default args = everything",
"def unlink(self):\n if not self:\n return True\n \n # for recomputing fields\n self.modified(self._fields)\n \n self._check_concurrency()\n \n self.check_access_rights('unlink')\n \n # Check if the records are used as default properties.\n refs = ['%s,%s' % (self._name, i) for i in self.ids]\n if self.env['ir.property'].search([('res_id', '=', False), ('value_reference', 'in', refs)]):\n raise UserError(_('Unable to delete this document because it is used as a default property'))\n \n # Delete the records' properties.\n with self.env.norecompute():\n self.env['ir.property'].search([('res_id', 'in', refs)]).unlink()\n self.delete_workflow()\n self.check_access_rule('unlink')\n \n cr = self._cr\n Data = self.env['ir.model.data'].sudo().with_context({})\n Defaults = self.env['ir.default'].sudo()\n Attachment = self.env['ir.attachment']\n \n for sub_ids in cr.split_for_in_conditions(self.ids):\n query = \"DELETE FROM %s WHERE id IN %%s\" % self._table\n cr.execute(query, (sub_ids,))\n \n # Removing the ir_model_data reference if the record being deleted\n # is a record created by xml/csv file, as these are not connected\n # with real database foreign keys, and would be dangling references.\n #\n # Note: the following steps are performed as superuser to avoid\n # access rights restrictions, and with no context to avoid possible\n # side-effects during admin calls.\n data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])\n if data:\n data.unlink()\n \n # For the same reason, remove the defaults having some of the\n # records as value\n Defaults.discard_records(self.browse(sub_ids))\n \n # For the same reason, remove the relevant records in ir_attachment\n # (the search is performed with sql as the search method of\n # ir_attachment is overridden to hide attachments of deleted\n # records)\n query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'\n cr.execute(query, (self._name, sub_ids))\n attachments = Attachment.browse([row[0] for row in cr.fetchall()])\n if attachments:\n attachments.unlink()\n \n # invalidate the *whole* cache, since the orm does not handle all\n # changes made in the database, like cascading delete!\n self.invalidate_cache()\n \n # recompute new-style fields\n if self.env.recompute and self._context.get('recompute', True):\n self.recompute()\n # auditing: deletions are infrequent and leave no trace in the database\n _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)\n return True",
"def __deleteASG2Tracker( self, metaModelName ): \r\n name = self.__sanitizeMetaModelName( metaModelName )\r\n if( self.__trackASG.has_key( name ) ):\r\n del self.__trackASG[ name ]",
"def clean(self):\n self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()",
"def del_sensordata(self):\n\n organisation_id = '5af01e0210bac288dba249ad'\n animal_id = '5b6419ff36b96c52808951b1'\n\n with self.writefile() as file:\n del file[f'data/{organisation_id}/{animal_id}/sensordata']",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n self.data = None",
"def erase(self):\n for b in self.posted_on:\n b.erase(self)\n self._catalog.erase(self)\n if self._logbook is not None:\n self._logbook.close()\n self._logbook = None\n if self.state != states.UNCLAIMED:\n self._claimer.unclaim(self, self.owner)",
"def deleteAttributes(self, keys):\n self.graph.deleteExtendedAttributes(self.entityId, keys)",
"def __delitem__(self, key):\n try:\n del self._axl_data[key]\n except KeyError:\n raise AXLAttributeError(f\"Unknown AXL attribute for API endpoint: {key}\")",
"def unload(self) -> None:\n for attr in self._attrs:\n setattr(self, attr, None)",
"def __exit__(self, exc_type, exc_value, traceback):\n\t\tself.delete_extracted()\n\t\tself.delete()",
"def clear_attrs(self):\n self._attributes.clear()",
"def destroy(self):\n related_efficacy_indicators = objects.EfficacyIndicator.list(\n context=self._context,\n filters={\"action_plan_uuid\": self.uuid})\n\n # Cascade soft_delete of related efficacy indicators\n for related_efficacy_indicator in related_efficacy_indicators:\n related_efficacy_indicator.destroy()\n\n self.dbapi.destroy_action_plan(self.uuid)\n self.obj_reset_changes()",
"def clear_attributes(self):\n self.attrs = etad.AttributeContainer()",
"def delete_identifying_fields(self, view):\n\t\tassert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test\n\t\t\n\t\tself.dsDoc['about']['datasetName']='NULL'\n\t\tself.dsDoc['about']['redacted'] = True\n\t\t\n\t\ttry:\n\t\t\tdel self.dsDoc['about']['description']\n\t\texcept KeyError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdel self.dsDoc['about']['citation']\n\t\texcept KeyError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdel self.dsDoc['about']['source']\n\t\texcept KeyError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdel self.dsDoc['about']['sourceURI']\n\t\texcept KeyError:\n\t\t\tpass\n\t\t\n\t\t# save datasetDoc.json file\n\t\twith open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:\n\t\t\tjson.dump(self.dsDoc, fp, indent=2, sort_keys=False)",
"def cleanup(self):\n self.sagemaker.delete_endpoint(EndpointName=self.endpoint_name)\n self.sagemaker.delete_endpoint_config(EndpointConfigName=self.endpoint_name)",
"def delete(self, attribute):\n self.__delattr__(attribute)"
] | [
"0.6274676",
"0.6151817",
"0.60736465",
"0.58964264",
"0.58367074",
"0.58200717",
"0.58007216",
"0.5781004",
"0.57804185",
"0.5743467",
"0.5707958",
"0.56463885",
"0.56211966",
"0.56100756",
"0.5555345",
"0.5555345",
"0.5555345",
"0.5555345",
"0.55472887",
"0.55391157",
"0.55120826",
"0.5495212",
"0.5491179",
"0.5485837",
"0.5483803",
"0.54799026",
"0.54546124",
"0.5453605",
"0.54476273",
"0.5442013"
] | 0.74394304 | 0 |
returns the attributes on the FContactRegulatoryInfoBase instance | def Attributes(self):
return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RegulatoryInfo(self):\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo",
"def getAttributes(self):\n pass",
"def get_attributes(self):\n return self.attributes",
"def get_attributes(cls):\r\n return []",
"def get_attributes(self):\n _attributes = {\n 'function_id': self.function_id,\n 'hardware_id': self.hardware_id,\n 'mode_id': self.mode_id,\n 'critical_item': self.critical_item,\n 'description': self.description,\n 'design_provisions': self.design_provisions,\n 'detection_method': self.detection_method,\n 'effect_end': self.effect_end,\n 'effect_local': self.effect_local,\n 'effect_next': self.effect_next,\n 'effect_probability': self.effect_probability,\n 'hazard_rate_source': self.hazard_rate_source,\n 'isolation_method': self.isolation_method,\n 'mission': self.mission,\n 'mission_phase': self.mission_phase,\n 'mode_criticality': self.mode_criticality,\n 'mode_hazard_rate': self.mode_hazard_rate,\n 'mode_op_time': self.mode_op_time,\n 'mode_probability': self.mode_probability,\n 'mode_ratio': self.mode_ratio,\n 'operator_actions': self.operator_actions,\n 'other_indications': self.other_indications,\n 'remarks': self.remarks,\n 'rpn_severity': self.rpn_severity,\n 'rpn_severity_new': self.rpn_severity_new,\n 'severity_class': self.severity_class,\n 'single_point': self.single_point,\n 'type_id': self.type_id\n }\n\n return _attributes",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def get_attributes(self):\n\n _attributes = (self.survival_id, self.record_id, self.name,\n self.source_id, self.failure_date, self.left_interval,\n self.right_interval, self.status_id, self.quantity,\n self.tbf, self.mode_type_id, self.nevada_chart,\n self.ship_date, self.number_shipped, self.return_date,\n self.number_returned, self.user_float_1,\n self.user_float_2, self.user_float_3,\n self.user_integer_1, self.user_integer_2,\n self.user_integer_3, self.user_string_1,\n self.user_string_2, self.user_string_3)\n\n return _attributes",
"def getAttributes(self):\n return self.attributes",
"def getAttributes(self):\n return self.attributes",
"def attrib(self) -> Any:\n return self.attributes",
"def get_info(self):\n self.exists = self.check_subscr()\n return self.attrs",
"def get_attributes(cls):\n return cls._attributes",
"def attributes(self):",
"def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs",
"def custom_attributes(self):\n return self._custom_attributes",
"def attributes(self):\n ...",
"def get_attributes(self):\n attrs = list()\n syms = list()\n for item in self.gradual_items:\n gi = item.as_integer()\n attrs.append(gi[0])\n syms.append(gi[1])\n return attrs, syms",
"def attributes(self):\n return self.problem.attributes",
"def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr",
"def attributes(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def contact_info(self):\n return [\n {\n 'contact_info': c.get('contactInfo'),\n 'type': c.get('type'),\n 'primary': c.get('primary'),\n 'verified': c.get('verified'),\n }\n for c in self.entity_payload.get('contactInfo')]",
"def GetAttributes(self):\r\n\r\n return self._attr",
"def attributes(self):\n raise NotImplementedError",
"def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]",
"def device_state_attributes(self):\n attrs = {}\n\n attrs[ATTR_ATTRIBUTION] = ATTRIBUTION\n attrs[\"brand\"] = DEFAULT_BRAND\n attrs[ATTR_CAMERA_TYPE] = self._camera_type\n attrs[\"friendly_name\"] = self._name\n\n return attrs",
"def getattrs(self):\n # ICAT 4.5.0 also lists the meta attributes as attributes in\n # the entity info. Need to remove them here, as they should\n # not be added to InstAttr.\n return self.getfieldnames('ATTRIBUTE') - Entity.MetaAttr",
"def device_state_attributes(self):\n return self.custom_attributes"
] | [
"0.76906836",
"0.7054827",
"0.67832905",
"0.67631334",
"0.6749298",
"0.67152345",
"0.66815585",
"0.6680654",
"0.6680654",
"0.66506875",
"0.66434884",
"0.6618119",
"0.65413237",
"0.6490594",
"0.64605737",
"0.64063853",
"0.6323494",
"0.6309063",
"0.63062006",
"0.62716734",
"0.62682414",
"0.62682414",
"0.62682414",
"0.6259484",
"0.6245705",
"0.62419724",
"0.6211055",
"0.62035877",
"0.6189323",
"0.61764306"
] | 0.85865146 | 0 |
returns the FContactRegulatoryInfoBase instance for the given contact | def RegulatoryInfo(self):
conactRegInfo = FContactRegulatoryInfo(self)
return conactRegInfo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, contact = None):\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger, \"The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object\")\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e :\n FRegulatoryLogger.ERROR(logger, str(e))",
"def contact_information(self) -> ContactInformation:\n return self._contact_information",
"def contact_info(self):\n return self._contact_info",
"def contact(self, contactid):\r\n return contacts.Contact(self, contactid)",
"def Attributes(self):\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)",
"def Contact(self):\n return self.__contact",
"def contact(self):\n return self._contact",
"def contact(self):\n return self._contact",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def get_please_contact(self):\n if self.please_contact:\n return self.please_contact.get_please_contact()\n else:\n return self",
"def get_expected_data_from_contact(contact):\n return {\n 'address_1': contact.address_1,\n 'address_2': contact.address_2,\n 'address_country__name': get_attr_or_none(contact, 'address_country.name'),\n 'address_county': contact.address_county,\n 'address_postcode': contact.address_postcode,\n 'address_same_as_company': contact.address_same_as_company,\n 'address_town': contact.address_town,\n 'archived': contact.archived,\n 'archived_on': format_date_or_datetime(contact.archived_on),\n 'company_id': str(contact.company_id) if contact.company_id is not None else None,\n 'created_by_id': str(contact.created_by_id) if contact.created_by is not None else None,\n 'created_on': format_date_or_datetime(contact.created_on),\n 'email': contact.email,\n 'email_alternative': contact.email_alternative,\n 'id': str(contact.id),\n 'job_title': contact.job_title,\n 'modified_on': format_date_or_datetime(contact.modified_on),\n 'name': contact.name,\n 'notes': contact.notes,\n 'primary': contact.primary,\n 'telephone_alternative': contact.telephone_alternative,\n 'telephone_number': contact.telephone_number,\n }",
"def contact_info(self):\n return [\n {\n 'contact_info': c.get('contactInfo'),\n 'type': c.get('type'),\n 'primary': c.get('primary'),\n 'verified': c.get('verified'),\n }\n for c in self.entity_payload.get('contactInfo')]",
"def create_contact(self):\n if not hasattr(self, '_table'):\n self.get_meta_data()\n\n return Contact(from_table=self._table)",
"def get_contact(self, uuid):\n return Contact.deserialize(self._get_single('contacts', {'uuid': uuid}))",
"def get(self,id) -> Contact:\n data=ContactSet.query.get(id)\n if data:\n contact = Contact(data.id,data.name,data.birthdate,data.contact_type,data.description, data.phone)\n return contact\n return None",
"def get_contact_info(self):\n return f\"Contact {self} at {self.email}\"",
"def get_feature_value(self,contact_only=True):\n\n sql = pdb2sql(self.pdbfile)\n xyz_info = sql.get('chainID,resSeq,resName',name='CB')\n xyz = sql.get('x,y,z',name='CB')\n\n xyz_dict = {}\n for pos,info in zip(xyz,xyz_info):\n xyz_dict[tuple(info)] = pos\n\n contact_residue = sql.get_contact_residue()\n contact_residue = contact_residue[0] + contact_residue[1]\n sql.close()\n\n pssm_data_xyz = {}\n pssm_data = {}\n\n for res,data in zip(self.res_data,self.pssm_data):\n\n if contact_only and res not in contact_residue:\n continue\n\n if tuple(res) in xyz_dict:\n chain = {'A':0,'B':1}[res[0]]\n key = tuple([chain] + xyz_dict[tuple(res)])\n sasa = self.sasa[tuple(res)]\n\n pssm_data[res] = [data*sasa]\n pssm_data_xyz[key] = [data*sasa]\n else:\n printif([tuple(res), ' not found in the pdbfile'],self.debug)\n\n # if we have no contact atoms\n if len(pssm_data_xyz) == 0:\n pssm_data_xyz[tuple([0,0.,0.,0.])] = [0.0]\n pssm_data_xyz[tuple([1,0.,0.,0.])] = [0.0]\n\n self.feature_data['pssm'] = pssm_data\n self.feature_data_xyz['pssm'] = pssm_data_xyz",
"def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df",
"def contact(session, contact_factory):\n contact_factory.get()",
"def contact_details(self):\n return self.data.get(\"contactDetails\")",
"def get_contact(self, object_name, user_key = None):\n\t\treturn self.get_object('contact',object_name, user_key = user_key)",
"def get_contact(self, guid):\n for contact in self._contacts:\n if contact.guid == guid:\n return contact\n return None",
"def Delete(self):\n FRegulatoryUtils.Delete(self.__contact, \"Contact\")\n FRegulatoryLogger.DEBUG(logger, \"Deleted all AdditionalInfos on Contact related to Regulatory Reporting\")",
"def register(self, name, contact):\n return Registration(self.request).add(name, contact)",
"def get_contact_info(self):\n outputDict = {\"USERNAME\": consts.USERNAME,\n \"IP\": consts.IPADDRESS, \n \"MACHINE\": consts.HOSTNAME, \n \"EMAIL\": '[email protected]', \n \"PHONE\": '203-722-6620'} # ::: TO DO::: dynamically get phone and email info automatically\n return outputDict",
"async def getContactInfo(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ConfigurationValidator.getContactInfo()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getContactInfo\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getContactInfo\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/configuration/v1.0/information\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)",
"def _getcontact(id):\n contact = {}\n idwrapper = {}\n \n try:\n contact[\"name\"] = r.get(\"uid:\" + id + \":name\")\n contact[\"address\"] = r.get(\"uid:\" + id + \":address\")\n contact[\"phone\"] = r.get(\"uid:\" + id + \":phone\")\n contact[\"email\"] = r.get(\"uid:\" + id + \":email\")\n idwrapper[id] = contact\n\n return idwrapper\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise",
"def get_contact_info(self, html_content: str) -> object:\n if not html_content:\n raise Exception(\"HTML content not found\")\n\n soup = BeautifulSoup(html_content, 'html.parser')\n\n self.contact = {}\n cards = soup.select(self.tags.get(\"contact.panels\"))\n\n # read cards panels for cotnact info\n for card in cards:\n form = card.parent.select_one(\"form\")\n\n # if is form of user information\n if form:\n rows = form.select(self.tags.get(\"contact.form.row\"))\n for row in rows:\n label = row.select_one(self.tags.get(\"contact.form.row.label\")).get_text(strip=True)\n value = row.select_one(self.tags.get(\"contact.form.row.value\")).get_text(strip=True)\n\n if label == \"User ID\":\n self.contact[\"account\"] = value\n\n elif label == \"Name\":\n self.contact[\"full_name\"] = value\n\n elif label == \"Email\":\n self.contact[\"email\"] = value\n\n else:\n lis = card.parent.select(\"li\")\n for li in lis:\n label = li.select_one(\"label\").get_text(strip=True)\n if label == \"Address\":\n street1 = get_value(li.select_one(self.tags.get(\"contact.address.street1\"))).strip()\n street2 = get_value(li.select_one(self.tags.get(\"contact.address.street2\"))).strip()\n state = get_value(li.select_one(self.tags.get(\"contact.address.state\"))).strip()\n postalcode = get_value(li.select_one(self.tags.get(\"contact.address.zip\"))).strip()\n\n self.contact[\"address_line1\"] = street1\n self.contact[\"address_line2\"] = street2\n self.contact[\"address_state\"] = letters_only(state.strip())\n self.contact[\"address_postal_code\"] = postalcode\n\n elif label in [\"Phone\", \"Time Zone\"]:\n\n key = \"phone_number\" if label == \"Phone\" else \"timezone\"\n self.contact[key] = li.select_one(self.tags.get(\"contact.phone\")).get_text(strip=True).strip()\n\n return self.contact"
] | [
"0.71412814",
"0.62537146",
"0.58082145",
"0.5799355",
"0.55888337",
"0.5545059",
"0.55302864",
"0.55302864",
"0.54784495",
"0.54784495",
"0.54784495",
"0.53763604",
"0.532584",
"0.5272704",
"0.52715975",
"0.52623534",
"0.5120345",
"0.51012933",
"0.50584227",
"0.50581264",
"0.5004834",
"0.49553406",
"0.49397984",
"0.4935386",
"0.49328518",
"0.49163365",
"0.4887759",
"0.48522106",
"0.48166615",
"0.47912824"
] | 0.76837945 | 0 |
Function to print a header with satellite info for the satellite number | def print_satellite_header(st):
# Retrieve TLE data
print "Satellite Number/Launch Year/Launch Number of Year: %s/20%s/%s" % \
(st.get_satellite_number(), st.get_launch_year(), \
st.get_launch_year_number())
year = 2000 + int(st.get_epoch_year())
fracyear = timedelta(float(st.get_epoch_day()))
time = datetime(year, 1, 1) + fracyear - timedelta(1)
print "Epoch Date Time/Rev At Epoch: %s/%s" % \
(time, st.get_rev_at_epoch())
print "Inclination/Eccentricity/Average Revs Per Day: %s/0.%s/%s" % \
(st.get_inclination(), st.get_eccentricity(), st.get_mean_motion())
print "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")",
"def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"",
"def print_the_header():\n print('-------------------')\n print(' Weather APP')\n print('-------------------')\n print()",
"def write_header(self, *, version=3.01, file_type='O: Observation', satellite_type='M: Mixed GNSS',\n run_by='GPSLiDAR', organization='CCAR', observer='Adam Dodge', agency='CCAR', receiver_num='1',\n receiver_type='GENERIC_P1', receiver_vers='1.0.0', antenna_number=1, antenna_type='RTK2-F9P',\n delta_pos=[0,0,0]):\n markerstr = 'GPS LiDAR System at ' + self.longname\n if not os.path.isfile(self.fname):\n tstr = self.t.strftime('%Y%m%d %H%M%S')\n # TODO: Fix header (not working in readers)\n r = 6371000 + self.alt\n x = r * np.cos(self.lat * np.pi/180) * np.cos(self.lon * np.pi/180)\n y = r * np.cos(self.lat * np.pi/180) * np.sin(self.lon * np.pi/180)\n z = r * np.sin(self.lat * np.pi/180)\n header = f'{version:>9.2f}{\" \":<11s}{file_type:<20s}{satellite_type:<20s}{\"RINEX VERSION / TYPE\":<20s}\\n' + \\\n f'{run_by:<20s}{organization:<20s}{tstr:<16s}UTC {\"PGM / RUN BY / DATE\":<20s}\\n' + \\\n f'{markerstr:<60}{\"MARKER NAME\":<20s}\\n' + \\\n f'{self.station:<60}{\"MARKER NUMBER\":<20s}\\n' + \\\n f'{\"GEODETIC\":<20s}{\" \":40s}{\"MARKER TYPE\":<20s}\\n' + \\\n f'{observer:<20}{agency:<40}{\"OBSERVER / AGENCY\":<20s}\\n' + \\\n f'{receiver_num:<20}{receiver_type:<20}{receiver_vers:<20}{\"REC # / TYPE / VERS\":<20s}\\n' + \\\n f'{antenna_number:<20}{antenna_type:<40s}{\"ANT # / TYPE\":<20s}\\n' + \\\n f'{x:14.4f}{y:>14.4f}{z:>14.4f}{\" \":18s}{\"APPROX POSITION XYZ\":<20s}\\n' + \\\n f'{delta_pos[0]:14.4f}{delta_pos[1]:>14.4f}{delta_pos[2]:>14.4f}{\" \":18s}{\"ANTENNA: DELTA H/E/N\":<20s}\\n' + \\\n f'G {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'R {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'E {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'S {8:<3d} C1 L1 D1 S1 C5 L5 D5 S5 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'{\"DBHZ\":<60s}{\"SIGNAL STRENGTH UNIT\":<20s}\\n' + \\\n f'{self.t.year:>6d}{self.t.month:>6d}{self.t.day:>6d}{self.t.hour:>6d}{self.t.minute:>6d}' + \\\n f'{self.t.second:>13.7f} UTC{\" \":<9s}{\"TIME OF FIRST OBS\":<20s}\\n' + \\\n f' 0{\" \":54s}{\"RCV CLOCK OFFS APPL\":<20s}\\n' + \\\n f'G{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'R{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'E{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'S{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'{self.leapS:>6d}{\" \":>54s}{\"LEAP SECONDS\":<20s}\\n' + \\\n f'{\" \":>60s}{\"END OF HEADER\":<20s}\\n'\n\n try:\n with open(self.fname, 'w') as f:\n f.write(header)\n except FileNotFoundError:\n print('Data directory is bad. Try again.')\n sys.exit(0)",
"def header(name, value):\n print '%s: %s\\n' % (name, value)",
"def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]",
"def _print_header():\n print()\n print(\n \" ┌─────────────────────── Measurements in BPM ─────────────────────┐\"\n )\n print(\n \"ID Date Activity Distance Elevation Start Duration 5s 30s 60s 5m 10m 20m 30m 60m 90m 120m\"\n )\n _print_separator()",
"def headerstring(self):\n sss = 'IVO LEGEND:\\n'\n sss += ' Created from 152 or 155\\n'\n sss += ' Pct number\\n'\n sss += ' Found in 152 (Y/N)\\n'\n sss += ' Found in 155 (Y/N)\\n'\n sss += ' Ivo serial number\\n'\n sss += ' PEB used for opening\\n'\n sss += ' Opening date/time\\n'\n sss += ' Date/time of first vote\\n'\n sss += ' PEB used for closing\\n'\n sss += ' Closing date/time\\n'\n sss += ' Date/time of last vote\\n'\n sss += ' Number of vote events 152\\n'\n sss += ' Number of vote events 155\\n'\n sss += ' Number of vote events 155 by precinct\\n'\n sss += ' Number of late vote events 152\\n'\n sss += ' Pct numbers\\n'\n sss += ' Ballot styles\\n'\n sss += ' Memory collection times\\n'\n return sss",
"def debug_info_header(header):\n print(colored(\"Header:\", 'cyan'), colored(\"Valid FDT magic value found\", \"green\", attrs=['bold']))\n print(colored(\"Header\", 'cyan'), \"-> Total Size of file: \",\n colored('{0:>8d} {0:>#8x}'.format(header.totalsize), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Struct Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_struct), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_struct), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to String Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_strings), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_strings), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Memory Reser: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_mem_rsvmap), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Version of DTB: \",\n colored('{0:>8d} {0:>#8x}'.format(header.version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Previous Version of DTB:\",\n colored('{0:>8d} {0:>#8x}'.format(header.last_comp_version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Boot CPU Number: \",\n colored('{0:>8d} {0:>#8x}'.format(header.boot_cpuid_phys), 'yellow'))\n print()",
"def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F150W', grism='GR150R'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0658, 0], [0, 0.0654]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F090W':0.50, 'F115W':0.47, 'F140M':0.23, 'F150W':0.48, 'F158M':0.25, 'F200W':0.44}\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRISS'\n h['READN'] = 6 , 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'GR150R':\n h['GRISM'] = 'GR150R', 'Spectral trace along X'\n else:\n h['GRISM'] = 'GR150C', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs",
"def _header_string( self, title='title' ): \n return_str = ''\n return_str += '{}\\n\\n'.format( title )\n return_str += '{} atoms\\n'.format( len(self.atoms) )\n if len(self.bonds) != 0:\n return_str += '{} bonds\\n\\n'.format( len(self.bonds) )\n return_str += '{} atom types\\n'.format( len(self.atom_types ) )\n if len(self.bond_types) != 0:\n return_str += '{} bond types\\n\\n'.format( len(self.bond_types ) )\n return_str += '\\n'\n return return_str",
"def format_report_header(self):",
"def print_header():\n print('------------------------------------')\n print(' Lesson04')\n print(' Kata Fourteen Assignment')\n print('------------------------------------\\n')",
"def show_header():\n return {};",
"def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)",
"def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)",
"def display_headers(model_file, model_data):\n # netCDF header\n print('\\n\\nnetCDF header information:\\n\\n', flush=True)\n\n # dimension information.\n nc_dims = [dim for dim in model_data.dimensions] # list of netCDF dimensions\n print ('\\tdimensions:', flush=True)\n for dim in nc_dims:\n print('\\t\\t{} {}'.format(model_data.dimensions[dim].name, model_data.dimensions[dim].size), flush=True)\n\n # variable information.\n nc_vars = [var for var in model_data.variables] # list of nc variables\n\n print('\\n\\tvariables:', flush=True)\n for var in nc_vars:\n if var not in nc_dims:\n print('\\t\\t{}:'.format(var), flush=True)\n for attr, value in vars(model_data.variables[var]).items():\n print('\\t\\t\\t{} = {}'.format(attr, value), flush=True)\n\n # global attributes\n print('\\n\\tglobal attributes:', flush=True)\n for attr, value in vars(model_data).items():\n if isinstance(value, str):\n value = value.replace('\\n', ' ')\n print('\\t\\t\\t{} = {}'.format(attr, value), flush=True)\n\n # GeoCSV header\n print('\\n\\nGeoCSV header information:\\n\\n{}\\n\\n'.format(get_model_header(model_file, model_data)), flush=True)",
"def print_header(name, texfile):\n texfile.write('\\n')\n texfile.write('%--------------------\\n')\n texfile.write('%---' + name.upper() + ('-' * (17 - len(name))) + '\\n')\n texfile.write('%--------------------\\n')",
"def info(self):\n tline = \"\"\n for (ii, projection) in enumerate(self._ProjectionList):\n tiltAngle = projection._tiltAngle\n transX = -projection._alignmentTransX\n transY = -projection._alignmentTransY\n rot = -(projection._alignmentRotation + 90.)\n mag = projection._alignmentMagnification\n tline = tline + (\"%3d: \" % ii)\n tline = tline + (\"%15s; \" % projection._filename)\n tline = tline + (\"tiltAngle=%9.3f; \" % tiltAngle)\n tline = tline + (\"transX=%9.3f; \" % transX)\n tline = tline + (\"transY=%9.3f; \" % transY)\n tline = tline + (\"rot=%9.3f; \" % rot)\n tline = tline + (\"mag=%9.3f\\n\" % mag)\n print(tline)",
"def header(self):\n\n data = {}\n data['latitude'] = self.latitude()\n data['latitude_unc'] = self.latitude_unc()\n data['longitude'] = self.longitude()\n data['longitude_unc'] = self.longitude_unc()\n data['uid'] = self.uid()\n data['n_levels'] = self.n_levels()\n data['year'] = self.year()\n data['month'] = self.month()\n data['day'] = self.day()\n data['time'] = self.time()\n data['cruise'] = self.cruise()\n data['probe_type'] = self.probe_type()\n \n header = pd.Series(data)\n\n return header",
"def print_headings(self):\n hdg_list = sorted(self.data.keys())\n sys.stdout.write('Offset: %.1f; ' % self.offset)\n sys.stdout.write('Magnetic Declination: %.2f\\n' % np.rad2deg(self.mag_var))\n # get maximum length of row headers for lining up everything\n max_len = max(map(lambda x: len(x[0]), PRINT_ROW_INFO))\n while hdg_list:\n # this part ensures printing only 6 columns at a time to prevent\n # text from wrapping when printed to a terminal\n if len(hdg_list) > 6:\n last = 6\n else:\n last = len(hdg_list)\n hdgs = hdg_list[0:last]\n # pop the headings used in HDGS out of HDG_LIST\n hdg_list[0:last] = []\n\n # Printing handled\n for row_header, fmt, dat_key in PRINT_ROW_INFO:\n # print row header\n lead_space = ' ' * (max_len - len(row_header))\n sys.stdout.write(lead_space + row_header)\n # print row data\n #pdb.set_trace()\n for hdg in hdgs:\n sys.stdout.write(' '+fmt % self.data[hdg][dat_key])\n sys.stdout.write('\\n')\n # print sample data gathered\n lead_space = ' ' * (max_len - 5)\n sys.stdout.write(lead_space + 'Data:')\n for ii in range(self.n_samples):\n if ii > 0:\n sys.stdout.write(' ' * max_len)\n for hdg in hdgs:\n comp_dat = self.data[hdg]['compass_sample_rad'][ii]\n sys.stdout.write(' %6.2f' % comp_dat)\n sys.stdout.write('\\n')\n sys.stdout.write('\\n') # add a line between sections",
"def print_header(now):\n global config\n date_time = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')\n\n print('*************************************')\n print(f'HTTP LOGS STATISTICS - {date_time}')",
"def print_header(banner_name):\n print()\n print()\n print(\"----------------------------------------------------\")\n print(\" {0}\".format(banner_name))\n print(\"-----------------------------------------------------\")\n print()",
"def Show_Headers( self ):\r\n self.system.Change_Seq( \"Header\" )",
"def print_header(filename):\n\n date_list = filename[0:10].split('_')\n # Hint: CWB Metadata cannot contain dashes -\n name = 'id=\"{}\"'.format(filename[0:-4].replace('-', '_'))\n date = 'date=\"{}\"'.format('_'.join(date_list))\n year = 'year=\"{}\"'.format(date_list[0])\n month = 'month=\"{}\"'.format(date_list[1])\n day = 'day=\"{}\"'.format(date_list[2])\n\n header = '<text {} {} {} {} {}>'.format(name, date, year, month, day)\n\n print(header)",
"def section_header(text):\n\n print \"---- %s ----\" % text",
"def print_inview_header(minimum_elevation_angle, now, gs):\n print \"Inviews (above %s degrees) on %s-%s-%s\" % \\\n (minimum_elevation_angle, now.year, now.month, now.day)\n print \"At %s: Lat/Lon/El: %s/%s/%s\" % \\\n (gs.get_name(), gs.get_latitude(), gs.get_longitude(),\n gs.get_elevation_in_meters())\n print \"where local time is UTC%+s hours\" % \\\n (gs.get_utcoffset_hours_ondate(now.year, now.month, now.day))\n print \" Rise (UTC) Set ( Duration ) Rise (UTC%+s) Set\" % \\\n (gs.get_utcoffset_hours_ondate(now.year, now.month, now.day))",
"def PrintEnergyHeader(self):\n self.efile.write('# ')\n self.efile.write('%10.3e %9.3e %9.3e %9.3e %9.3e\\n' % (\n self.conv_delta_e, self.conv_grad_max, self.conv_grad_rms,\n self.conv_disp_max, self.conv_disp_rms))\n self.efile.write('# iter energy delta_e grad_max')\n self.efile.write(' grad_rms disp_max disp_rms\\n')",
"def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F444W', grism='DFSR'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRCam'\n h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'DFSR':\n h['GRISM'] = 'DFSR', 'Spectral trace along X'\n else:\n h['GRISM'] = 'DFSC', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs",
"def print_info(self):\n \n print \"\"\"version: %d\\t header_len: %d\\t tos: %s\\t total_len: %d\n id: %s\\t flags_reservedbit: %d\\t flags_dont_fragment: %d\\t flags_more_fragment: %d\n fragment_offset: %d\\t TTL: %d\\t protocol: %s\\t\n header_checksum: %s\\t\n src: %s\\t dst: %s\n opt_paddings: %s\"\"\" % (\n self.version, self.header_len, self.type_of_service, self.total_len, self.id, self.flags_reservedbit, \n self.flags_dont_fragment, self.flags_more_fragment, \n self.fragment_offset, self.TTL, self.protocol, self.header_checksum, self.src, self.dst, repr(self.opt_paddings))"
] | [
"0.73586893",
"0.71547425",
"0.6722391",
"0.64073485",
"0.6397794",
"0.6378043",
"0.63605297",
"0.6274469",
"0.6251121",
"0.6184243",
"0.616818",
"0.6158332",
"0.61397535",
"0.61184627",
"0.6110125",
"0.6110125",
"0.6106982",
"0.60947645",
"0.60926664",
"0.608937",
"0.60871035",
"0.6086324",
"0.6069051",
"0.6055609",
"0.6049807",
"0.604443",
"0.6038157",
"0.60109997",
"0.6003284",
"0.5956298"
] | 0.8453364 | 0 |
Function to print the inviews | def print_inviews(gs, inviews):
#print "Number of inviews from %s to %s: %d" % \
# (today_start.isoformat(), today_end.isoformat(),len(inviews))
for i in range(0, len(inviews)):
#print "%s to %s" % (inviews[i][0].isoformat(), inviews[i][1].isoformat())
print_inview(inviews[i][0], inviews[i][1], gs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_details(self):\n self.view.print_details()",
"def print(self):\r\n self.print_avec_separateur()",
"def print_out():\n pass",
"def _print_inwards(middleware_name):\n if _VERBOSE_MODE:\n print('{}--->'.format(middleware_name))",
"def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass",
"def show_data():",
"def print_azeltables(inviews, ic):\n for i in range(0, len(inviews)):\n print \" \"\n print \"Az/El for inview %s to %s\" % (inviews[i][0], inviews[i][1])\n azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)\n for j in range(0, len(azels)):\n print \"At %s, azimuth=%8.2f, elevation=%8.2f\" % \\\n (azels[j][0], azels[j][1], azels[j][2])",
"def view(self):",
"def __window_print(self):\n pass",
"def _print_custom(self):\n pass",
"def print_list(self):\r\n pass",
"def show(self):",
"def show(self) -> None:",
"def print_inview_header(minimum_elevation_angle, now, gs):\n print \"Inviews (above %s degrees) on %s-%s-%s\" % \\\n (minimum_elevation_angle, now.year, now.month, now.day)\n print \"At %s: Lat/Lon/El: %s/%s/%s\" % \\\n (gs.get_name(), gs.get_latitude(), gs.get_longitude(),\n gs.get_elevation_in_meters())\n print \"where local time is UTC%+s hours\" % \\\n (gs.get_utcoffset_hours_ondate(now.year, now.month, now.day))\n print \" Rise (UTC) Set ( Duration ) Rise (UTC%+s) Set\" % \\\n (gs.get_utcoffset_hours_ondate(now.year, now.month, now.day))",
"def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating",
"def show(self):\n pass",
"def show(self):\n\n print(self._walk(self, depth=1))",
"def print(self):\n # Your implementation here",
"def show(self):\n print(\"APKs in Session: {}\".format(len(self.analyzed_apk)))\n for d, a in self.analyzed_apk.items():\n print(\"\\t{}: {}\".format(d, a))\n print(\"DEXs in Session: {}\".format(len(self.analyzed_dex)))\n for d, dex in self.analyzed_dex.items():\n print(\"\\t{}: {}\".format(d, dex))\n print(\"Analysis in Session: {}\".format(len(self.analyzed_vms)))\n for d, a in self.analyzed_vms.items():\n print(\"\\t{}: {}\".format(d, a))",
"def show(self):\n\n pass",
"def _printable(self):\n pass",
"def visualizar(self):\n print(self.stack)",
"def intf_VIEWSHOW(E):\n out= \"View Properties\\n\"\n out+= \"---------------\\n\"\n out+= \"svgoutfile=%s\\n\" % OUT.outfile\n out+= \"camera=%s {camset}\\n\" % (','.join([str(x) for x in OUT.camera]))\n out+= \"target=%s {tarset}\\n\" % (','.join([str(x) for x in OUT.target]))\n out+= \"opacity=%s {hlr,hide}\\n\" % str(OUT.opacity)\n out+= \"facelines=%s {facelines}\\n\" % str(OUT.facelines)\n out+= \"vlinewidth=%0.2f {vlw,viewlinewidth}\\n\" % OUT.vlinewidth\n out+= \"vrefreshms=%d {refreshms,viewrefreshms}\\n\" % OUT.vrefreshms\n out+= \"vbox=(%d,%d) {viewbox[xy]}\\n\" % (OUT.vboxX,OUT.vboxY)\n out+= \"vtran=(%d,%d) {vtran[xy],viewtran[xy]}\\n\" % (OUT.vtranX,OUT.vtranY)\n out+= \"vscale=(%d,%d) {vscale[xy],viewscale[xy]}\\n\" % (OUT.vscaleX,OUT.vscaleY)\n print(out)",
"def pretty_view(self):\n return self.pretty_response()",
"def display_results_for_views(result):\n i = 0\n for r in result:\n print('\\t'+str(result[i][0]) + ' ---> '+str(result[i][1])+' views')\n i = i + 1",
"def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"",
"def show_trailer(self):",
"def print_inview(rise, set, gs):\n riselocal = rise + gs.get_utcoffset_ondate(rise.year, rise.month, rise.day)\n setlocal = set + gs.get_utcoffset_ondate(set.year, set.month, set.day)\n delta = set - rise\n print \"%2d:%02d:%02d to %2d:%02d:%02d (%3d seconds) %2d:%02d:%02d to %2d:%02d:%02d\" % \\\n (rise.hour, rise.minute, rise.second, set.hour, set.minute, set.second, delta.seconds,\n riselocal.hour, riselocal.minute, riselocal.second, setlocal.hour, setlocal.minute, setlocal.second)\n return",
"def _print_outwards(middleware_name):\n if _VERBOSE_MODE:\n print('<---{}'.format(middleware_name))",
"def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))"
] | [
"0.67796624",
"0.6479328",
"0.6461996",
"0.6318401",
"0.63149685",
"0.61923224",
"0.61706716",
"0.61218095",
"0.6109504",
"0.6102467",
"0.6101652",
"0.6087358",
"0.60798454",
"0.6077613",
"0.60708314",
"0.6038918",
"0.60107464",
"0.6009129",
"0.5995637",
"0.59919393",
"0.5979286",
"0.59493715",
"0.59441304",
"0.59275883",
"0.59145045",
"0.59116703",
"0.5897358",
"0.5878942",
"0.58736473",
"0.58606696"
] | 0.75969446 | 0 |
Function to print a table of time, azimuth, elevation for each inview | def print_azeltables(inviews, ic):
for i in range(0, len(inviews)):
print " "
print "Az/El for inview %s to %s" % (inviews[i][0], inviews[i][1])
azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)
for j in range(0, len(azels)):
print "At %s, azimuth=%8.2f, elevation=%8.2f" % \
(azels[j][0], azels[j][1], azels[j][2]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_inview_header(minimum_elevation_angle, now, gs):\n print \"Inviews (above %s degrees) on %s-%s-%s\" % \\\n (minimum_elevation_angle, now.year, now.month, now.day)\n print \"At %s: Lat/Lon/El: %s/%s/%s\" % \\\n (gs.get_name(), gs.get_latitude(), gs.get_longitude(),\n gs.get_elevation_in_meters())\n print \"where local time is UTC%+s hours\" % \\\n (gs.get_utcoffset_hours_ondate(now.year, now.month, now.day))\n print \" Rise (UTC) Set ( Duration ) Rise (UTC%+s) Set\" % \\\n (gs.get_utcoffset_hours_ondate(now.year, now.month, now.day))",
"def info(self):\n tline = \"\"\n for (ii, projection) in enumerate(self._ProjectionList):\n tiltAngle = projection._tiltAngle\n transX = -projection._alignmentTransX\n transY = -projection._alignmentTransY\n rot = -(projection._alignmentRotation + 90.)\n mag = projection._alignmentMagnification\n tline = tline + (\"%3d: \" % ii)\n tline = tline + (\"%15s; \" % projection._filename)\n tline = tline + (\"tiltAngle=%9.3f; \" % tiltAngle)\n tline = tline + (\"transX=%9.3f; \" % transX)\n tline = tline + (\"transY=%9.3f; \" % transY)\n tline = tline + (\"rot=%9.3f; \" % rot)\n tline = tline + (\"mag=%9.3f\\n\" % mag)\n print(tline)",
"def print_inview(rise, set, gs):\n riselocal = rise + gs.get_utcoffset_ondate(rise.year, rise.month, rise.day)\n setlocal = set + gs.get_utcoffset_ondate(set.year, set.month, set.day)\n delta = set - rise\n print \"%2d:%02d:%02d to %2d:%02d:%02d (%3d seconds) %2d:%02d:%02d to %2d:%02d:%02d\" % \\\n (rise.hour, rise.minute, rise.second, set.hour, set.minute, set.second, delta.seconds,\n riselocal.hour, riselocal.minute, riselocal.second, setlocal.hour, setlocal.minute, setlocal.second)\n return",
"def print_inviews(gs, inviews):\n #print \"Number of inviews from %s to %s: %d\" % \\\n # (today_start.isoformat(), today_end.isoformat(),len(inviews))\n\n for i in range(0, len(inviews)):\n #print \"%s to %s\" % (inviews[i][0].isoformat(), inviews[i][1].isoformat())\n print_inview(inviews[i][0], inviews[i][1], gs)",
"def showp():\n def show1(i):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.offsetAz' % (i+1) ,qmax_)\n e=SAC.queryDouble('carma.Ovro%d.Drive.Point.offsetEl' % (i+1) ,qmax_)\n return (a,e)\n print ' ant dAz dEl'\n for i in range(6):\n (a,e) = show1(i)\n print ' 00%d %7.3f %7.3f' % (i+1,a,e)",
"def display(self):\n print (\"+\" + \"-\"*self.size + \"+\")\n for i in range(self.size):\n terrain_strs = [Terrain.display_string(self.array[j, i]) for j in range(self.size)]\n print(\"|\" + \"\".join(terrain_strs) + \"|\")\n print (\"+\" + \"-\"*self.size + \"+\")",
"def print_readings(data):\n output = [str(data['timestamp'])]\n output.append(getvalue(data, 't_in', '%0.2f'))\n output.append(getvalue(data, 'h_in', '%d'))\n for i in range(1, 6):\n output.append(getvalue(data, 't_%d' % i, '%0.2f'))\n output.append(getvalue(data, 'h_%d' % i, '%d'))\n output.append(getvalue(data, 'slp', '%0.1f'))\n output.append(getvalue(data, 'uv', '%0.1f'))\n output.append(getvalue(data, 'forecast', '%d'))\n output.append(getvalue(data, 'storm', '%d'))\n output.append(getvalue(data, 'winddir', '%d'))\n output.append(getvalue(data, 'windspeed', '%0.1f'))\n output.append(getvalue(data, 'windgust', '%0.1f'))\n output.append(getvalue(data, 'windchill', '%0.1f'))\n output.append(getvalue(data, 'rain', '%d'))\n print ':'.join(output)",
"def intf_VIEWSHOW(E):\n out= \"View Properties\\n\"\n out+= \"---------------\\n\"\n out+= \"svgoutfile=%s\\n\" % OUT.outfile\n out+= \"camera=%s {camset}\\n\" % (','.join([str(x) for x in OUT.camera]))\n out+= \"target=%s {tarset}\\n\" % (','.join([str(x) for x in OUT.target]))\n out+= \"opacity=%s {hlr,hide}\\n\" % str(OUT.opacity)\n out+= \"facelines=%s {facelines}\\n\" % str(OUT.facelines)\n out+= \"vlinewidth=%0.2f {vlw,viewlinewidth}\\n\" % OUT.vlinewidth\n out+= \"vrefreshms=%d {refreshms,viewrefreshms}\\n\" % OUT.vrefreshms\n out+= \"vbox=(%d,%d) {viewbox[xy]}\\n\" % (OUT.vboxX,OUT.vboxY)\n out+= \"vtran=(%d,%d) {vtran[xy],viewtran[xy]}\\n\" % (OUT.vtranX,OUT.vtranY)\n out+= \"vscale=(%d,%d) {vscale[xy],viewscale[xy]}\\n\" % (OUT.vscaleX,OUT.vscaleY)\n print(out)",
"def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)",
"def elevation(self):\n\n\t\twidth = self.no_name_level[0]\n\t\theight = self.no_name_level[1]\n\t\ttile = self.no_name_level[2]\n\t\tx = self.no_name_level[3]\n\t\ty = self.no_name_level[4]\n\t\t\n\t\ttiles = []\n\t\tfor i in tile:\n\t\t\ti = i[:-1]\n\t\t\ttiles.append(i)\t\n\t\ttiles_arranged = [tiles[i:i + width] for i in range(0, len(tile), width)]\n\t\n\t\tplanet_co = []\n\t\t\n\t\tfor i in tiles_arranged:\n\t\t\t\n\t\t\tplanet = []\n\t\t\tfor n in i:\n\t\t\t\tn = n.split(',')\n\t\t\t\tif len(n) != 3:\n\t\t\t\t\ta = ['-']\n\t\t\t\t\tn += a\n\t\t\t\t\t\n\t\t\t\t\tplanet.append(n)\n\t\t\t\telse:\n\t\t\t\t\tplanet.append(n)\n\t\t\t\t\t\n\t\t\tplanet_co.append(planet)\n\t\t\t\n\t\n\t\tplanet_map = Planet(planet_co, width, height)\n\t\tcoordinates = Planet(planet_co, width, height)\n\t\tcoordinates = Planet.coordinates(coordinates)\n\t\tplanet_map = Planet.coordinates_dict(planet_map)#this is my map in dictionary format(coordinates : tile)\n\t\t\n\t\tfor y1 in coordinates:\n\t\t\tif coordinates.index(y1) == y:\n\t\t\t\ty_value = coordinates.index(y1)\n\t\t\t\tfor x1 in y1:\n\t\t\t\t\tif x1 == [x, y]:\n\t\t\t\t\t\tx_value = y1.index(x1)\n\t\trover_d = coordinates[y_value][x_value]\n\t\n\t\tx1 = x_value + 1\n\t\tx2 = x_value + 2\n\t\ty1 = y_value + 1\n\t\ty2 = y_value + 2\n\t\n\t\tif x1 == len(coordinates[1]):\n\t\t\tx1 == 0\n\t\tif y1 == len(coordinates):\n\t\t\ty1 == 0\n\t\n\t\tif x2 > len(coordinates[1]):\n\t\t\tx2 = 1\n\t\tif y2 > len(coordinates[1]):\n\t\t\ty2 == 1\n\t\n\t\tfront2 = coordinates[y2][x_value]\n\t\tfront1 = coordinates[y1][x_value]\n\t\tback1 = coordinates[y_value-1][x_value]\n\t\tback2 = coordinates[y_value-2][x_value]\n\t\tright1 = coordinates[y_value][x1]\n\t\tright2 = coordinates[y_value][x2]\n\t\tleft1 = coordinates[y_value][x_value-1]\n\t\tleft2 = coordinates[y_value][x_value-2]\n\t\n\t\n\t\tfront1_right1 = coordinates[y1][x1]\n\t\tfront1_right2 = coordinates[y1][x2]\n\t\tfront2_right1 = coordinates[y2][x1]\n\t\tfront2_right2 = coordinates[y2][x2]\n\t\tfront1_left1 = coordinates[y1][x_value-1]\n\t\tfront1_left2 = coordinates[y1][x_value-2]\n\t\tfront2_left1 = coordinates[y2][x_value-1]\n\t\tfront2_left2 = coordinates[y2][x_value-2]\n\t\n\t\tback1_right1 = coordinates[y_value-1][x1]\n\t\tback1_right2 = coordinates[y_value-1][x2]\n\t\tback2_right1 = coordinates[y_value-2][x1]\n\t\tback2_right2 = coordinates[y_value-2][x2]\n\t\tback1_left1 = coordinates[y_value-1][x_value-1]\n\t\tback1_left2 = coordinates[y_value-1][x_value-2]\n\t\tback2_left1 = coordinates[y_value-2][x_value-1]\n\t\tback2_left2 = coordinates[y_value-2][x_value-2]\n\t\t\n\t\tco_f2r2 = planet_map[str(front2_right2)]\n\t\tco_f2r1 = planet_map[str(front2_right1)]\n\t\tco_f2 = planet_map[str(front2)]\n\t\tco_f2l1 = planet_map[str(front2_left1)]\n\t\tco_f2l2 = planet_map[str(front2_left2)]\n\t\tco_f1r2 = planet_map[str(front1_right2)]\n\t\tco_f1r1 = planet_map[str(front1_right1)]\n\t\tco_f1 = planet_map[str(front1)]\n\t\tco_f1l1 = planet_map[str(front1_left1)]\n\t\tco_f1l2 = planet_map[str(front1_left2)]\n\t\tco_r2 = planet_map[str(right2)]\n\t\tco_r1 = planet_map[str(right1)]\n\t\tco_rover = planet_map[str([x, y])]\n\t\tco_l1 = planet_map[str(left1)]\n\t\tco_l2 = planet_map[str(left2)]\n\t\tco_b1r2 = planet_map[str(back1_right2)]\n\t\tco_b1r1 = planet_map[str(back1_right1)]\n\t\tco_b1 = planet_map[str(back1)]\n\t\tco_b1l1 = planet_map[str(back1_left1)]\n\t\tco_b1l2 = planet_map[str(back1_left2)]\n\t\tco_b2r2 = planet_map[str(back2_right2)]\n\t\tco_b2r1 = planet_map[str(back2_right1)]\n\t\tco_b2 = 
planet_map[str(back2)]\n\t\tco_b2l1 = planet_map[str(back2_left1)]\n\t\tco_b2l2 = planet_map[str(back2_left2)]\n\t\n\t\tfirst_lineco = [co_f2l2, co_f2l1, co_f2, co_f2r1, co_f2r2]\n\t\tsecond_lineco = [co_f1l2, co_f1l1, co_f1, co_f1r1, co_f1r2]\n\t\tthird_lineco = [co_l2, co_l1, co_rover, co_r1, co_r2]\n\t\tfourth_lineco = [co_b1l2, co_b1l1, co_b1, co_b1r1, co_b1r2]\n\t\tfifth_lineco = [co_b2l2, co_b2l1, co_b2, co_b2r1, co_b2r2]\n\n\t\tfirst_line = ['|']\n\t\tsec_line = ['|']\n\t\tthird_line = ['|']\n\t\tfourth_line = ['|']\n\t\tfifth_line = ['|']\n\t\tfor i in first_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfirst_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfirst_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfirst_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfirst_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfirst_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"\\|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfirst_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfirst_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"+|\")\n\n\n\n\t\tfor i in second_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tsec_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tsec_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tsec_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tsec_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tsec_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tsec_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > 
int(i[1]):\n\t\t\t\t\t\tsec_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"+|\")\n\t\n\t\tfor i in third_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tthird_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tthird_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tthird_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tthird_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tthird_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tthird_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tthird_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"+|\")\n\t\n\t\tfor i in fourth_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfourth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfourth_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfourth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfourth_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfourth_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfourth_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfourth_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > 
int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"+|\")\n\t\n\t\tfor i in fifth_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfifth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfifth_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfifth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfifth_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfifth_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfifth_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfifth_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"+|\")\n\t\tthird_line2 = []\n\t\n\t\tfor n, i in enumerate(third_line):\n\t\t\tif n == 3:\n\t\t\t\ta = \"H|\"\n\t\t\t\t \n\t\t\t\tthird_line2.append(a)\n\t\t\telse:\n\t\t\t\tthird_line2.append(i)\n\t\tnumber1_line = \"\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format(\"\".join(fifth_line), \"\".join(fourth_line), \"\".join(third_line2),\"\".join(sec_line) , \"\".join(first_line))\n\t\t\n\t\treturn number1_line\n\n\n\n\n\t\tpass",
"def __str__(self):\n out_str = \"\\n\".join(`\"%.5f, %.5f, %.1f, %s, %s\" % (point[0], point[1], point[2], point[3], point[4])` for point in self.__traectory_list)\n return \"\\'x, y, altitude, capture time, capture date'\\n\"+out_str",
"def overview(data):\n\n printer.table(['Name', 'El', 'Invariom name', 'Model compound'], head=True)\n for atom in data.iter_atoms(True):\n printer.table([atom.name, atom.element, atom.invariom_name, atom.invariom.molecule.name])\n printer.table(done=True)",
"def print_cell_information(obj_ase_cell):\n # print the lattice vectors\n print('a1=',obj_ase_cell.cell[0,:])\n print('a2=',obj_ase_cell.cell[1,:])\n print('a3=',obj_ase_cell.cell[2,:])\n for i,a in enumerate(obj_ase_cell):\n print(i,a.symbol,a.position)",
"def visualize(z_in, azimuth=25., elevation=30.,\n thresholds=[0.95, .9, .75, .5, .25, .125], opacities=[1, .9, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],\n# thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],\n fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},\n filename=None, do_axis=True, do_grids=False, draw_projections=True,\n colorbar=False, f_N=2., f_tN=2., figsize=figsize, dpi=300, figpath=figpath, **kwargs):\n z = z_in.copy()\n N_X, N_Y, N_frame = z.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n\n # Normalize the amplitude.\n z /= z.max()\n\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n #from vispy.util.transforms import perspective, translate, rotate\n from vispy.color import Color\n transparent = Color(color='black', alpha=0.)\n import colorsys\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n vol_data = np.rollaxis(np.rollaxis(z, 1), 2)\n# volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)\n center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))\n# volume.transform = center\n# volume.cmap = 'blues'\n\n if draw_projections:\n from vispy.color import Colormap\n cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])\n opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}\n\n energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)#[:, ::-1]\n fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (0, 0, 1))\n tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))\n fourier_xy.transform = tr_xy\n\n energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)[::-1, ::-1]\n fourier_xt = scene.visuals.Image(energy_xt, **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (1, 0, 0))\n tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))\n fourier_xt.transform = tr_xt\n\n energy_yt = np.max(z, axis=0)[:, ::-1]\n fourier_yt = scene.visuals.Image(energy_yt, **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))\n fourier_yt.transform = tr_yt\n\n # Generate iso-surfaces at different energy levels\n surfaces = []\n for i_, (threshold, opacity) in enumerate(zip(thresholds, opacities)):\n surfaces.append(scene.visuals.Isosurface(z, level=threshold,\n# color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),\n color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),\n shading='smooth', parent=view.scene)\n )\n surfaces[-1].transform = center\n\n # Draw a sphere at the origin\n axis = scene.visuals.XYZAxis(parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n\n axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)\n axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)\n axisZ = 
scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)\n\n if do_axis:\n t = {}\n for text in ['f_x', 'f_y', 'f_t']:\n t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6\n t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')\n cam.fov = 48\n cam.scale_factor = N_X * 1.8\n if do_axis: margin = 1.35\n else: margin = 1\n cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))\n view.camera = cam\n\n render_im = canvas.render()\n app.quit()\n if not(filename is None):\n import vispy.io as io\n io.write_png(filename, render_im)\n else:\n return render_im",
"def print_headings(self):\n hdg_list = sorted(self.data.keys())\n sys.stdout.write('Offset: %.1f; ' % self.offset)\n sys.stdout.write('Magnetic Declination: %.2f\\n' % np.rad2deg(self.mag_var))\n # get maximum length of row headers for lining up everything\n max_len = max(map(lambda x: len(x[0]), PRINT_ROW_INFO))\n while hdg_list:\n # this part ensures printing only 6 columns at a time to prevent\n # text from wrapping when printed to a terminal\n if len(hdg_list) > 6:\n last = 6\n else:\n last = len(hdg_list)\n hdgs = hdg_list[0:last]\n # pop the headings used in HDGS out of HDG_LIST\n hdg_list[0:last] = []\n\n # Printing handled\n for row_header, fmt, dat_key in PRINT_ROW_INFO:\n # print row header\n lead_space = ' ' * (max_len - len(row_header))\n sys.stdout.write(lead_space + row_header)\n # print row data\n #pdb.set_trace()\n for hdg in hdgs:\n sys.stdout.write(' '+fmt % self.data[hdg][dat_key])\n sys.stdout.write('\\n')\n # print sample data gathered\n lead_space = ' ' * (max_len - 5)\n sys.stdout.write(lead_space + 'Data:')\n for ii in range(self.n_samples):\n if ii > 0:\n sys.stdout.write(' ' * max_len)\n for hdg in hdgs:\n comp_dat = self.data[hdg]['compass_sample_rad'][ii]\n sys.stdout.write(' %6.2f' % comp_dat)\n sys.stdout.write('\\n')\n sys.stdout.write('\\n') # add a line between sections",
"def print_vdloc(*args):\n return _ida_hexrays.print_vdloc(*args)",
"def viewer(self):\n output = []\n for pin_name in self.pin_defs:\n details = self.pin_defs[pin_name]\n value = self.pin(pin_name).value\n details['curr_value'] = value\n output.append(details)\n output = sorted(output, key=lambda x: x['pin_num'], reverse=False)\n\n print(tabulate(output, headers=\"keys\"))\n return",
"def plot_network_azi(stadict):\n for key in stadict.keys():\n data=np.array(stadict[key])\n text=\"Mean %.2f - Std %.2f\\nMedian %.2f\" % (np.mean(data[:,1]),np.std(data[:,1]),np.median(data[:,1]))\n plt.figure()\n plt.subplot(211)\n plt.plot_date(data[:,0],data[:,1])\n plt.figtext(.6,.8,text)\n plt.ylabel('Offset (degrees)')\n plt.subplot(212)\n plt.plot_date(data[:,0],data[:,2])\n plt.ylabel('Linearity') \n plt.savefig(\"Azimuth_%s.png\" % (key))\n plt.close()",
"def print(self):\n tiles = list(map(list, zip(*self.tiles))) # transposed\n print('tiles = [')\n for row in tiles:\n print('\\t' + repr(row))\n print(']')\n print('props = [')\n for prop in self.props:\n print('\\t' + repr(prop))\n print(']')",
"def display_taxis(taxis):\n for i, taxi in enumerate(taxis):\n print(\"{} - {}\".format(i, taxi))",
"def log_state(self):\n\n log('-' * 50)\n log('.level=%d' % self.level)\n log('.view_llon=%.3f, .view_rlon=%.3f'\n % (self.view_llon, self.view_rlon))\n log('.view_tlat=%.3f, .view_blat=%.3f'\n % (self.view_tlat, self.view_blat))\n log('.ppd_x=%.2f, .ppd_y=%.2f' % (self.ppd_x, self.ppd_y))\n log('.view_offset_x=%d, .view_offset_y=%d'\n % (self.view_offset_x, self.view_offset_y))\n log('.view_width=%d, .view_height=%d'\n % (self.view_width, self.view_height))\n log('-' * 50)\n log('')",
"def display_taxis(taxis):\n for i, taxi in enumerate(taxis):\n print(f\"{i} - {taxi}\")",
"def display_map(map):\n for row in map:\n line = \"\"\n for point in row:\n line += point.display_point()\n print(line)",
"def info(self):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%HH-%MM-%SS\")\n print(f\"Exploration info ({now})\")\n print(f\"HDF name: {self.HDF_FILE}\")\n print(f\"Trajectory name: {self.trajectoryName}\")\n if self.model is not None:\n print(f\"Model: {self.model.name}\")\n if hasattr(self, \"nRuns\"):\n print(f\"Number of runs {self.nRuns}\")\n print(f\"Explored parameters: {self.exploreParameters.keys()}\")\n if hasattr(self, \"_t_end_exploration\") and hasattr(self, \"_t_start_exploration\"):\n print(f\"Duration of exploration: {self._t_end_exploration-self._t_start_exploration}\")",
"def plot_orientation(tpf):\n\tmean_tpf = np.mean(tpf.flux,axis=0)\n\tnx,ny = np.shape(mean_tpf)\n\tx0,y0 = tpf.column+int(0.2*nx)+0.5,tpf.row+int(0.2*ny)+0.5\n\t# East\n\ttmp = tpf.get_coordinates()\n\tra00, dec00 = tmp[0][0][0][0], tmp[1][0][0][0]\n\tra10,dec10 = tmp[0][0][0][-1], tmp[1][0][0][-1]\n # Each degree of RA is not a full degree on the sky if not\n # at equator; need cos(dec) factor to compensate\n\tcosdec = np.cos(np.deg2rad(0.5*(dec10+dec00)))\n # Reverse the order of RA arguments here relative to dec\n # args to account for handedness of RA/Dec vs. x/y coords:\n\ttheta = np.arctan((dec10-dec00)/(cosdec*(ra00-ra10)))\n\tif (ra10-ra00) < 0.0: theta += np.pi\n\t#theta = -22.*np.pi/180.\n # If angle is small, arrows can be a bit closer to corner:\n\tif (abs(np.rad2deg(theta)) < 30):\n\t\tx0 -= 0.08*nx\n\t\ty0 -= 0.08*ny\n\tx1, y1 = 1.*np.cos(theta), 1.*np.sin(theta)\n\tplt.arrow(x0,y0,x1,y1,head_width=0.2,color='white')\n\tplt.text(x0+1.6*x1,y0+1.6*y1,'E',color='white',ha='center',va='center')\n\t# North\n\ttheta = theta +90.*np.pi/180.\n\tx1, y1 = 1.*np.cos(theta), 1.*np.sin(theta)\n\tplt.arrow(x0,y0,x1,y1,head_width=0.2,color='white')\n\tplt.text(x0+1.6*x1,y0+1.6*y1,'N',color='white',ha='center',va='center')",
"def dumpResults(x,y,lon,lat):\n for i in range(0,len(x)):\n print(x[i],y[i],\"lonlat\",lon[i],lat[i])\n return",
"def _print_tisserand_lists(self, Trajectory=[]):\n\t\n\timport numpy as np\n\t\n\tn = len(Trajectory);\n\trpl = [];\n\tral = [];\n\tpl = [];\n\tvinfl = [];\n\tfor i in range(n):\n\t\tral.append(Trajectory[i][6]);\n\t\trpl.append(Trajectory[i][5]);\n\t\tpl.append(Trajectory[i][7]);\n\t\tvinfl.append(Trajectory[i][8]);\n\t\n\tprint 'list_ra_python = [',\n\tn = len(ral);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % ral[i],\n\tprint '%f];' % ral[n-1];\n\t\n\tprint 'list_rp_python = [',\n\tn = len(rpl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % rpl[i],\n\tprint '%f];' % rpl[n-1];\n\t\n\tprint 'list_period_python = [',\n\tn = len(pl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % pl[i],\n\tprint '%f];' % pl[n-1];\n\t\n\tprint 'list_vinf_python = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % np.linalg.norm(vinfl[i]),\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % np.linalg.norm(vinfl[n-1]);\n\t\n\tprint 'list_vinf_python_x = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][0],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][0];\n\t\n\tprint 'list_vinf_python_y = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][1],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][1];\n\t\n\tprint 'list_vinf_python_z = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][2],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][2];",
"def info(self):\r\n print(f\"filename: {self.filename}\")\r\n print(f\"comments: \\n{self.comment_1}{self.comment_2}\")\r\n print(f\"origin: {self.origin[0]}, {self.origin[1]}, {self.origin[2]}\")\r\n print(f\"atoms count: {self.n_atoms}\")\r\n print(f\"voxels count: {self.n_x}, {self.n_y}, {self.n_z}\")\r\n print(f\"voxel x-axis: {self.x[0]}, {self.x[1]}, {self.x[2]}\")\r\n print(f\"voxel y-axis: {self.y[0]}, {self.y[1]}, {self.y[2]}\")\r\n print(f\"voxel z-axis: {self.z[0]}, {self.z[1]}, {self.z[2]}\")",
"def summary(self):\n self.tiles.refreshnames()\n self.glues.refreshnames()\n # self.check_consistent()\n info = {\n \"ntiles\": len(self.tiles),\n \"nrt\": len([x for x in self.tiles if not x.is_fake]),\n \"nft\": len([x for x in self.tiles if x.is_fake]),\n \"nends\": len(self.glues),\n \"ntends\": len(self.tiles.glues_from_tiles()),\n \"tns\": \" \".join(x.name for x in self.tiles if x.name),\n \"ens\": \" \".join(x.name for x in self.glues if x.name)\n # if (\"info\" in self.keys() and \"name\" in self[\"info\"].keys())\n # else \"\",\n }\n tun = sum(1 for x in self.tiles if x.name is None)\n if tun > 0:\n info[\"tns\"] += \" ({} unnamed)\".format(tun)\n eun = sum(1 for x in self.glues if x.name is None)\n if eun > 0:\n info[\"ens\"] += \" ({} unnamed)\".format(eun)\n if info[\"nft\"] > 0:\n info[\"nft\"] = \" (+ {} fake)\".format(info[\"nft\"])\n else:\n info[\"nft\"] = \"\"\n return \"TileSet: {nrt} tiles{nft}, {nends} ends, {ntends} ends in tiles.\\nTiles: {tns}\\nEnds: {ens}\".format(\n **info\n )",
"def show_affine(self):\n for row in self.affine:\n print(row)"
] | [
"0.7208172",
"0.67271405",
"0.6130861",
"0.61259687",
"0.59162146",
"0.5775518",
"0.57627594",
"0.5703854",
"0.56089044",
"0.5606912",
"0.56049377",
"0.5569098",
"0.5553486",
"0.5533526",
"0.54851115",
"0.5451577",
"0.54403365",
"0.5424925",
"0.54075736",
"0.5402264",
"0.5393215",
"0.5385143",
"0.5380945",
"0.53743136",
"0.53728205",
"0.5371149",
"0.53589064",
"0.5358125",
"0.53435487",
"0.52951413"
] | 0.7502286 | 0 |
Use as a decorator to print info about the function and its result. Follows deferred results. | def showResult(f):
def substitute(self, *args, **kw):
def msg(result, callInfo):
resultInfo = str(result)
if len(callInfo) + len(resultInfo) > 70:
callInfo += "\n"
print("\n{} -> {}".format(callInfo, resultInfo))
return result
SR_STUFF[0] += 1
callInfo = "{:03d}: {}".format(
SR_STUFF[0],
SR_STUFF[1].setCall(
instance=self, args=args, kw=kw).aboutCall())
result = f(self, *args, **kw)
if isinstance(result, defer.Deferred):
return result.addBoth(msg, callInfo)
return msg(result, callInfo)
SR_STUFF[1] = Info(whichThread=SR_STUFF[2]).setCall(f)
substitute.func_name = f.func_name
return substitute | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_result(func):\n def new_func(*args, **kwargs):\n result = func(*args, **kwargs)\n print(result)\n return result\n return new_func",
"def print_log(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n n = func.__name__\n print('{} has started with arguments:\\n{}\\n{}'.format(\n n, args, kwargs))\n res = func(*args, **kwargs)\n print('{} has finished and returned: {}'.format(\n n, res))\n return res\n\n return wrapper",
"def dump_args_and_ret(func):\n fname = func.__name__\n\n def echo_func(*args, **kwargs):\n print(f\"{fname} args={args} kwargs={kwargs}\")\n ret = func(*args, *kwargs)\n print(f\"{fname} args={args} kwargs={kwargs} ret={ret}\")\n return ret\n return echo_func",
"def debug(func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n if args and not kwargs:\n print(\"~ input of {}: args: {}\".format(func.__name__, args))\n elif not args and kwargs:\n print(\"~ input of {}: kwargs: {}\".format(func.__name__, kwargs))\n elif args and kwargs:\n print(\"~ input of {}: args: {}, kwargs: {}\".format(func.__name__, args, kwargs))\n else:\n print(\"~ input of {}: NO_ARGS\".format(func.__name__))\n output = func(*args, **kwargs) # stores the result of the function\n print(\"~ output of {}:\".format(func.__name__), output)\n return output\n\n return decorated",
"def _debug_wrap(func):\n\n def wrapper(*args, **kwargs):\n _debug_print(f\"{datetime.datetime.now()} - About to run: {func.__name__}\")\n ret_val = func(*args, **kwargs)\n _debug_print(f\"{datetime.datetime.now()} - Completed run: {func.__name__}\")\n return ret_val\n\n return wrapper",
"def logged(func):\n def wrapper(*args, **kwargs):\n print(’you called {.__name__}({}{}{})’.format(\n func,\n str(list(args))[1:-1], \n ’, ’ if kwargs else ’’,\n ’, ’.join(’{}={}’.format(*pair) for pair in kwargs.items()),\n ))\n val = func(*args, **kwargs)\n print(’it returned’, val)\n return val",
"def debug(func):\n\[email protected](func)\n\tdef wrapper_debug(*args, **kwargs):\n\t\targs_repr = [repr(a) for a in args] \n\t\tkwargs_repr = [f\"{k}={v}\" for k, v in kwargs.items()] \n\t\tsignature = \", \".join(args_repr + kwargs_repr) \n\n\t\tprint(f\"Calling {func.__name__} ({signature})\")\n\n\t\tvalue = func(*args, **kwargs)\n\t\tprint(f\"{func.__name__!r} returned {value!r}\") \n\t\t\n\t\treturn value\n\n\treturn wrapper_debug",
"def print_timing(func):\n def wrapper(*arg):\n t1 = time.time()\n res = func(*arg)\n t2 = time.time()\n print '%s took %0.3f ms' % (func.func_name, (t2-t1)*1000.0)\n return res\n return wrapper",
"def logged(fn):\n def wrapped(*args, **kwargs):\n retval = fn(*args, **kwargs)\n log_tpl = \"%s called with arguments: %s %s \\n\\tReturning %s\"\n print(log_tpl % (fn.__name__,\n args,\n kwargs,\n retval))\n return retval\n return wrapped",
"def debug(func):\n if VERBOSE > 0:\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n\n print(f\"Calling {func.__name__}({signature})\\n\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\\n\")\n\n return value\n\n return wrapper_debug\n else:\n return func",
"def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n return wrapper_debug",
"def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n\n return wrapper_debug",
"def info(func):\n\n def decorated(*args, **kwargs):\n r\"\"\"Decorated method.\"\"\"\n runLog.info(func(*args, **kwargs))\n\n return decorated",
"def debug_print(function):\n def debug(thing):\n print(function(thing))\n return thing\n return debug",
"def shown(func):\n name = f\"{func.__name__}( )\"\n @wraps(func)\n def wrapped_func(*args, **kwargs):\n res = func(*args, **kwargs)\n res = show(**{name: res})\n return res\n return wrapped_func",
"def print_execution_time(function):\n def wrapper(*args, **kw):\n start_time = time.clock()\n result = function(*args, **kw)\n formatted_time_took = datetime.timedelta(seconds=(time.clock() - start_time))\n print('Function {} took: {}'.format(\n function.__name__, formatted_time_took))\n return result\n\n return wrapper",
"def print_time(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n # Current timestep\n t1 = time.time()\n f_out = f(*args, **kwargs)\n t2 = time.time()\n text = f\"{f.__name__} output (exec time: {(t2 - t1) * 1000 :.6f} ms)\"\n print(text)\n return f_out\n return wrapper",
"def logging(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n res = func(*args, **kwargs)\n print(func.__name__, args, kwargs)\n return res\n return wrapper",
"def runtime_print(f):\n def decorated_fun(*args, **kwargs):\n t0 = datetime.now()\n ret = f(*args, **kwargs)\n t1 = datetime.now()\n print(f'Runtime: {t1 - t0}')\n return ret\n\n return decorated_fun",
"def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args] # 1\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()] # 2\n signature = \", \".join(args_repr + kwargs_repr) # 3\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"Returning {func.__name__!r}: {value!r}\") # 4\n return value\n\n return wrapper_debug",
"def _info(self, func):\n self.logger.info(\"llamando a %s\" % func)",
"def __call__(self, func):\n\n # set logger if it was not set earlier\n if not self.logger:\n logging.basicConfig()\n self.logger = logging.getLogger(func.__module__)\n\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n st = datetime.datetime.now()\n f_result = func(*args, **kwds)\n et = datetime.datetime.now()\n self.logger.debug(\"%s duration: %s\" % (func.__name__, et - st))\n return f_result\n\n return wrapper",
"def func_info(func=None, **options):\n if func is None:\n def partial_infer(func):\n return func_info(func, **options)\n return partial_infer\n def inner(*args, **kwargs):\n global COUNT\n convert_generators = options.get(\"listify\", False)\n print(f\"Calling ({COUNT})\", func.__qualname__)\n COUNT += 1\n if args:\n print(\"Args:\", args)\n if kwargs:\n print(\"kwargs:\", kwargs)\n try:\n result = func(*args, **kwargs)\n if isinstance(result, abc.Generator) and convert_generators:\n result = list(result)\n except Exception as exc:\n COUNT -= 1\n print(f\"exception {type(exc).__name__} ({COUNT})\")\n raise\n else:\n COUNT -= 1\n print(f\"result ({COUNT})\", result)\n print(\"-------\")\n print()\n if convert_generators:\n result = iter(result)\n return result\n return inner",
"def clocked( fun, output = sys.stderr ):\n @functools.wraps( fun )\n def call( *args, **kword ):\n \"\"\" \n Call the function\n \"\"\"\n # create and output message\n msg = fun.func_name\n start = time.time()\n result = fun( *args, **kword )\n end = time.time() \n msg += \" (%.4f s)\" % ( end - start)\n print >> output, msg \n return result\n return call",
"def info_decorator(func):\n\n def wrapper(*args, **kwargs):\n\n return func(*args, **kwargs)\n\n return wrapper",
"def log_dec(fn: \"Function\"):\n @wraps(fn)\n def inner(*args, **kwargs):\n run_dt = datetime.now()\n result = fn(*args, **kwargs)\n end_dt = datetime.now()\n print(f\"{run_dt}: called {fn.__name__}\")\n print(f\"Execution time: {end_dt - run_dt}\")\n print(f\"Function description:\\n{fn.__doc__}\")\n print(f\"Function returned something: {True if result else False}\")\n return result\n return inner",
"def decor(func):\n def wrap():\n print(\"@@@ STATISTICS REPORT START @@@\\n\")\n func()\n print(\"@@@ STATISTICS REPORT FINISH @@@\\n\")\n return wrap",
"def print_time(func):\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n done = time.time()\n print(f\"start_time: {start}, end_time: {done}, duration: {done - start}\")\n return result\n\n return wrapper",
"def log_stdout(self, function):\n return function()",
"def show_details(name, f, is_partial=False):\n print '%s:' % name\n print '\\tobject:', f\n if not is_partial:\n print '\\t__name__:', f.__name__\n print '\\t__doc__', repr(f.__doc__)\n if is_partial:\n print '\\tfunc:', f.func\n print '\\targs:', f.args\n print '\\tkeywords:', f.keywords\n return"
] | [
"0.7564186",
"0.69486636",
"0.68396944",
"0.67687505",
"0.6661167",
"0.6646779",
"0.6636007",
"0.6607297",
"0.65720874",
"0.6512646",
"0.6510811",
"0.65025723",
"0.64824915",
"0.6458015",
"0.6400681",
"0.639704",
"0.63808566",
"0.6372295",
"0.6261466",
"0.62549406",
"0.6220342",
"0.6200202",
"0.61783284",
"0.6176556",
"0.61253035",
"0.61010265",
"0.60966957",
"0.60936147",
"0.60898226",
"0.6088879"
] | 0.78961205 | 0 |
Returns the fully qualified name of the supplied string if it can be imported and then reflected back into the FQN, or C{None} if not. | def strToFQN(self, x):
try:
obj = reflect.namedObject(x)
fqn = reflect.fullyQualifiedName(obj)
except:
return
return fqn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_import(import_str, default=None):\r\n try:\r\n return import_module(import_str)\r\n except ImportError:\r\n return default",
"def get_qualified_class_name(o):\n module = o.__class__.__module__\n module = '' if module in IGNORABLE_MODULES else module\n module = module + '.' if module else module\n return module + o.__class__.__qualname__",
"def _tp_relfq_name(tp, tp_name=None, assumed_globals=None, update_assumed_globals=None,\n implicit_globals=None):\n # _type: (type, Optional[Union[Set[Union[type, types.ModuleType]], Mapping[Union[type, types.ModuleType], str]]], Optional[bool]) -> str\n if tp_name is None:\n tp_name = util.get_class_qualname(tp)\n if implicit_globals is None:\n implicit_globals = _implicit_globals\n else:\n implicit_globals = implicit_globals.copy()\n implicit_globals.update(_implicit_globals)\n if assumed_globals is None:\n if update_assumed_globals is None:\n return tp_name\n md = sys.modules[tp.__module__]\n if md in implicit_globals:\n return tp_name\n name = tp.__module__+'.'+tp_name\n pck = None\n if not (md.__package__ is None or md.__package__ == ''\n or name.startswith(md.__package__)):\n pck = md.__package__\n return name if pck is None else pck+'.'+name\n if tp in assumed_globals:\n try:\n return assumed_globals[tp]\n except:\n return tp_name\n elif hasattr(tp, '__origin__') and tp.__origin__ in assumed_globals:\n try:\n return assumed_globals[tp.__origin__]\n except:\n return tp_name\n # For some reason Callable does not have __origin__, so we special-case\n # it here. Todo: Find a cleaner solution.\n elif is_Callable(tp) and typing.Callable in assumed_globals:\n try:\n return assumed_globals[typing.Callable]\n except:\n return tp_name\n elif update_assumed_globals == True:\n if not assumed_globals is None:\n if hasattr(tp, '__origin__') and not tp.__origin__ is None:\n toadd = tp.__origin__\n elif is_Callable(tp):\n toadd = typing.Callable\n else:\n toadd = tp\n if not sys.modules[toadd.__module__] in implicit_globals:\n assumed_globals.add(toadd)\n return tp_name\n else:\n md = sys.modules[tp.__module__]\n if md in implicit_globals:\n return tp_name\n md_name = tp.__module__\n if md in assumed_globals:\n try:\n md_name = assumed_globals[md]\n except:\n pass\n else:\n if not (md.__package__ is None or md.__package__ == ''\n or md_name.startswith(md.__package__)):\n md_name = md.__package__+'.'+tp.__module__\n return md_name+'.'+tp_name",
"def _import_string(import_name):\n if \".\" in import_name:\n module, obj = import_name.rsplit(\".\", 1)\n else:\n return importlib.import_module(import_name)\n return getattr(importlib.import_module(module), obj)",
"def get_class_import_name(name):\n name = _strip_class_name(name)\n return name",
"def _fullname(obj):\n if obj is None:\n return None\n return _modname(obj, True)",
"def resolve_name(name):\n parts = name.split('.')\n cursor = len(parts)\n module_name, rest = parts[:cursor], parts[cursor:]\n\n while cursor > 0:\n try:\n ret = __import__('.'.join(module_name))\n break\n except ImportError:\n if cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n rest = parts[cursor:]\n ret = ''\n\n for part in parts[1:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise ImportError\n\n return ret",
"def import_function(s):\n a = s.split('.')\n j = lambda x: '.'.join(x)\n return getattr(import_module(j(a[:-1])), a[-1])",
"def resolve_name(name):\n parts = name.split('.')\n used = parts.pop(0)\n found = __import__(used)\n for part in parts:\n used += '.' + part\n try:\n found = getattr(found, part)\n except AttributeError:\n __import__(used)\n found = getattr(found, part)\n return found",
"def get_class(string):\n logger = logman.getLogger(__name__)\n if '/' not in string:\n logger.error(\"The string is not properly formatted. Use '/' to separate module path from classname. String is: {}\".format(string))\n return\n module_name, class_name = string.split('/')\n try:\n logger.debug('Retrieving class {} from module {}'.format(class_name, module_name))\n temp_class = getattr(importlib.import_module(module_name), class_name)\n except ModuleNotFoundError:\n logger.error(\"Module not found: {}\".format(module_name))\n raise\n except AttributeError:\n logger.error(\"Class not found: {}\".format(class_name))\n raise\n except:\n logger.error(\"Unexpected error while loading {}\".format(string))\n raise\n\n return temp_class",
"def resolve_import(self, item):\n name = item.name\n # The last part in `from a.b.c import d` might be a symbol rather than a\n # module, so we try a.b.c and a.b.c.d as names.\n short_name = None\n if item.is_from and not item.is_star:\n if '.' in name.lstrip('.'):\n # The name is something like `a.b.c`, so strip off `.c`.\n rindex = name.rfind('.')\n else:\n # The name is something like `..c`, so strip off just `c`.\n rindex = name.rfind('.') + 1\n short_name = name[:rindex]\n\n if import_finder.is_builtin(name):\n filename = name + '.so'\n return Builtin(filename, name)\n\n filename, level = convert_to_path(name)\n if level:\n # This is a relative import; we need to resolve the filename\n # relative to the importing file path.\n filename = os.path.normpath(\n os.path.join(self.current_directory, filename))\n\n if not short_name:\n try_filename = True\n try_short_filename = False\n elif item.source:\n # If the import has a source path, we can use it to eliminate\n # filenames that don't match.\n source_filename, _ = os.path.splitext(item.source)\n dirname, basename = os.path.split(source_filename)\n if basename == \"__init__\":\n source_filename = dirname\n try_filename = source_filename.endswith(filename)\n try_short_filename = not try_filename\n else:\n try_filename = try_short_filename = True\n\n files = []\n if try_filename:\n files.append((name, filename))\n if try_short_filename:\n short_filename = os.path.dirname(filename)\n files.append((short_name, short_filename))\n\n for module_name, path in files:\n for fs in self.fs_path:\n f = self._find_file(fs, path)\n if not f or f == self.current_module.path:\n # We cannot import a file from itself.\n continue\n if item.is_relative():\n package_name = self.current_module.package_name\n if package_name is None:\n # Relative import in non-package\n raise ImportException(name)\n module_name = get_absolute_name(package_name, module_name)\n if isinstance(self.current_module, System):\n return System(f, module_name)\n return Local(f, module_name, fs)\n\n # If the module isn't found in the explicit pythonpath, see if python\n # itself resolved it.\n if item.source:\n prefix, ext = os.path.splitext(item.source)\n mod_name = name\n # We need to check for importing a symbol here too.\n if short_name:\n mod = prefix.replace(os.path.sep, '.')\n mod = utils.strip_suffix(mod, '.__init__')\n if not mod.endswith(name) and mod.endswith(short_name):\n mod_name = short_name\n\n if ext == '.pyc':\n pyfile = prefix + '.py'\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n elif not ext:\n pyfile = os.path.join(prefix, \"__init__.py\")\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n return System(item.source, mod_name)\n\n raise ImportException(name)",
"def _modname(cls, full=False):\n module = getattr(cls, '__module__', None)\n if module is None or module == str.__class__.__module__:\n return cls.__name__\n if full and module == \"__main__\":\n import inspect\n the_module = inspect.getmodule(cls)\n spec = getattr(the_module, '__spec__', None)\n if spec is None:\n if the_module.__name__ == '__main__':\n module = '.'.join([the_module.__package__,\n os.path.basename(the_module.__file__.split('.')[0])])\n else:\n module = getattr(the_module, '__package__', None)\n else:\n module = spec.name if spec else module\n return module\n return module + '.' + cls.__class__.__name__",
"def qname(type_):\n # type: (type) -> str\n\n return \"{0.__module__}.{0.__qualname__}\".format(type_)",
"def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)",
"def test_get_module_qualname_from_path_rel_typical(self):\n\n name = b_utils.get_module_qualname_from_path(\n os.path.join(\n self.reltempdir, \"good\", \"a\", \"b\", \"c\", \"test_typical.py\"\n )\n )\n self.assertEqual(\"good.a.b.c.test_typical\", name)",
"def fullname(cls):\n module = cls.__module__\n if module is None or module == str.__class__.__module__:\n return cls.__class__.__name__\n return module + '.' + cls.__class__.__name__",
"def import_name(fullname):\n namespace, name = fullname.rsplit('.', 1)\n obj = __import__(namespace, None, None, [name])\n return getattr(obj, name)",
"def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)",
"def find_module_by_fq_name(model, fq_mod_name):\n for module in model.modules():\n if hasattr(module, 'distiller_name') and fq_mod_name == module.distiller_name:\n return module\n return None",
"def import_string(import_name):\n # force the import name to automatically convert to strings\n # __import__ is not able to handle unicode strings in the fromlist\n # if the module is a package\n import_name = str(import_name).replace(':', '.')\n\n try:\n __import__(import_name)\n except ImportError:\n if '.' not in import_name:\n raise\n else:\n return sys.modules[import_name]\n\n module_name, obj_name = import_name.rsplit('.', 1)\n try:\n module = __import__(module_name, None, None, [obj_name])\n except ImportError:\n # support importing modules not yet set up by the parent module\n # (or package for that matter)\n module = import_string(module_name)\n\n try:\n return getattr(module, obj_name)\n except AttributeError as e:\n raise ImportError(e)",
"def full_object_name(obj):\n\n try:\n module = obj.__module__\n if module is None or module == str.__class__.__module__:\n return obj.__name__ # Avoid reporting __builtin__\n else:\n return module + '.' + obj.__name__\n except Exception:\n return None",
"def test_get_module_qualname_from_path_abs_typical(self):\n\n name = b_utils.get_module_qualname_from_path(\n os.path.join(\n self.tempdir, \"good\", \"a\", \"b\", \"c\", \"test_typical.py\"\n )\n )\n self.assertEqual(\"good.a.b.c.test_typical\", name)",
"def find_name():\n name_file = read_file('__init__.py')\n name_match = re.search(r'^__package_name__ = [\"\\']([^\"\\']*)[\"\\']',\n name_file, re.M)\n if name_match:\n return name_match.group(1)\n raise RuntimeError('Unable to find name string.')",
"def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)\n except (ImportError, ValueError, AttributeError), exc:\n logging.debug('Inner Exception: %s', exc)\n raise",
"def _qualname(obj):\n return obj.__module__ + '.' + obj.__qualname__",
"def getMangledName(self, name, module=None):\n if module is os.path:\n return \"os.path\"\n if isinstance(name, str) and (name.startswith(self.start) or name == self.package):\n return self.prefix + name\n return name",
"def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])\n\n except ValueError:\n msg = \"%s doesn't look like a module path\" % dotted_path\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])",
"def to_full_name(typ: type) -> str:\n return f\"{typ.__module__}.{typ.__qualname__}\"",
"def qualified_name(self) -> Optional[str]:\n return None if self.name is None else get_qname(self.target_namespace, self.name)",
"def normalise_ref(ref):\n if ref.startswith((\"builtins.\", \"__main__\")):\n return ref\n try:\n mod_name, name = ref.rsplit(\".\", maxsplit=1)\n mod = __import__(mod_name)\n for sub in mod_name.split(\".\")[1:]:\n mod = getattr(mod, sub)\n obj = getattr(mod, name)\n if isinstance(obj, ModuleType):\n return ref\n if getattr(obj, \"__name__\", None) is None:\n return ref\n\n return obj.__module__ + \".\" + obj.__name__\n except Exception:\n pass\n return ref"
] | [
"0.6144014",
"0.6059056",
"0.60289675",
"0.6027369",
"0.5973197",
"0.5959286",
"0.5954686",
"0.58813894",
"0.5866241",
"0.5862896",
"0.58329266",
"0.58159465",
"0.58044475",
"0.5801065",
"0.57967067",
"0.5795597",
"0.5794225",
"0.57900107",
"0.5770697",
"0.57595575",
"0.5758328",
"0.57474506",
"0.57453644",
"0.57398385",
"0.57203704",
"0.5714288",
"0.57111263",
"0.5692618",
"0.56882465",
"0.5658809"
] | 0.610944 | 1 |
Returns the fully qualified name of the supplied object if it can be reflected into an FQN and back again, or C{None} if not. | def objToFQN(self, x):
try:
fqn = reflect.fullyQualifiedName(x)
reflect.namedObject(fqn)
except:
return
return fqn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def full_object_name(obj):\n\n try:\n module = obj.__module__\n if module is None or module == str.__class__.__module__:\n return obj.__name__ # Avoid reporting __builtin__\n else:\n return module + '.' + obj.__name__\n except Exception:\n return None",
"def _fullname(obj):\n if obj is None:\n return None\n return _modname(obj, True)",
"def _qualname(obj):\n return obj.__qualname__",
"def get_qualified_class_name(o):\n module = o.__class__.__module__\n module = '' if module in IGNORABLE_MODULES else module\n module = module + '.' if module else module\n return module + o.__class__.__qualname__",
"def fullobjname(obj):\n return obj.__module__ + '.' + typename(obj)",
"def derive_qualname(obj):\n classes = []\n stack = frame = None\n is_eligible = False\n try:\n stack = inspect.stack()\n for frame in stack:\n if frame.function == '__call__':\n is_eligible = True\n elif frame.function == '<module>':\n break\n elif is_eligible:\n if is_class_name(frame.function):\n classes.append(frame.function)\n finally:\n del frame\n del stack\n\n outer = '.'.join(reversed(classes))\n qualname = obj.__class__.__qualname__\n\n if classes and not qualname.startswith(f'{outer}.'):\n qualname = '.'.join((outer, qualname))\n\n return qualname",
"def strToFQN(self, x):\n try:\n obj = reflect.namedObject(x)\n fqn = reflect.fullyQualifiedName(obj)\n except:\n return\n return fqn",
"def _qualname(obj):\n return obj.__module__ + '.' + obj.__qualname__",
"def name(o):\n if hasattr(o, \"__name__\"):\n rv = o.__name__\n modname = getattr(o, \"__module__\", None)\n # This work-around because Python does it itself,\n # see typeobject.c, type_repr.\n # Note that Python only checks for __builtin__.\n if modname not in (None, \"\", \"__builtin__\", \"builtins\"):\n rv = o.__module__ + \".\" + rv\n else:\n for o in getattr(o, \"__mro__\", o.__class__.__mro__):\n rv = name(o)\n # If there is no name for the this baseclass, this ensures we check\n # the next rather than say the object has no name (i.e., return\n # None)\n if rv is not None:\n break\n return rv",
"def fullname(o):\n\n builtins_module = str.__class__.__module__\n if o.__class__.__name__ == 'function':\n module = o.__module__\n name = o.__name__\n else:\n module = o.__class__.__module__\n name = o.__class__.__name__\n\n # Avoid reporting __builtin__\n if module is None or module == builtins_module:\n return name\n else:\n return f'{module}.{name}'",
"def get_base_name(obj):\n return obj.__qualname__.split('.')[0]",
"def get_type_name_value(obj):\n return None if obj is None else obj.GetTypeName()",
"def typename(obj):\n return obj.__name__ if hasattr(obj, '__name__') else type(obj).__qualname__",
"def is_qualified(obj):\n return obj.is_qualified()",
"def get_pretty_name(obj):\n if not hasattr(obj, \"__qualname__\") and not hasattr(obj, \"__name__\"):\n obj = getattr(obj, \"__class__\", obj)\n if hasattr(obj, \"__qualname__\"):\n return obj.__qualname__\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)",
"def full_class_name(obj):\n\n # Source: https://stackoverflow.com/questions/2020014/get-fully-qualified-class-name-of-an-object-in-python\n return full_object_name(obj.__class__)",
"def get_object_name(obj):\n\n namespace = dict(globals(), **locals()) \n return [name for name in namespace if namespace[name] is obj][0]",
"def resolve_name(obj, _):\n return obj.name.decode()",
"def get_class_base_name(name):\n if name is not None:\n return get_class_name(name)\n else:\n return 'object'",
"def get_obj_path_name(uobject: unrealsdk.UObject) -> str:\n if uobject:\n return uobject.PathName(uobject)\n else:\n return \"None\"",
"def get_class_name_value(obj):\n if obj is None:\n return None\n\n t = obj.GetType()\n \"\"\":type: lldb.SBType\"\"\"\n if t is None:\n return None\n\n if t.IsPointerType():\n t = t.GetPointeeType()\n if t.IsReferenceType():\n t = t.GetDereferencedType()\n\n return None if t is None else t.GetName()",
"def _get_object_name(self) :\n\t\ttry :\n\t\t\tif self.name is not None :\n\t\t\t\treturn str(self.name)\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise e",
"def _get_object_name(self) :\n\t\ttry :\n\t\t\tif self.name is not None :\n\t\t\t\treturn str(self.name)\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise e",
"def _get_object_name(self) :\n try :\n if self.name is not None :\n return str(self.name)\n return None\n except Exception as e :\n raise e",
"def qualified_name(self) -> Optional[str]:\n return None if self.name is None else get_qname(self.target_namespace, self.name)",
"def get_class_name(obj) -> str:\n return obj.__class__.__name__",
"def get_qual_name(o, lower=False):\n if not isinstance(o, type):\n o = o.__class__\n if lower:\n return o.__qualname__.lower()\n else:\n return o.__qualname__",
"def classname(class_object):\n return class_object.__class__.__name__",
"def _tp_relfq_name(tp, tp_name=None, assumed_globals=None, update_assumed_globals=None,\n implicit_globals=None):\n # _type: (type, Optional[Union[Set[Union[type, types.ModuleType]], Mapping[Union[type, types.ModuleType], str]]], Optional[bool]) -> str\n if tp_name is None:\n tp_name = util.get_class_qualname(tp)\n if implicit_globals is None:\n implicit_globals = _implicit_globals\n else:\n implicit_globals = implicit_globals.copy()\n implicit_globals.update(_implicit_globals)\n if assumed_globals is None:\n if update_assumed_globals is None:\n return tp_name\n md = sys.modules[tp.__module__]\n if md in implicit_globals:\n return tp_name\n name = tp.__module__+'.'+tp_name\n pck = None\n if not (md.__package__ is None or md.__package__ == ''\n or name.startswith(md.__package__)):\n pck = md.__package__\n return name if pck is None else pck+'.'+name\n if tp in assumed_globals:\n try:\n return assumed_globals[tp]\n except:\n return tp_name\n elif hasattr(tp, '__origin__') and tp.__origin__ in assumed_globals:\n try:\n return assumed_globals[tp.__origin__]\n except:\n return tp_name\n # For some reason Callable does not have __origin__, so we special-case\n # it here. Todo: Find a cleaner solution.\n elif is_Callable(tp) and typing.Callable in assumed_globals:\n try:\n return assumed_globals[typing.Callable]\n except:\n return tp_name\n elif update_assumed_globals == True:\n if not assumed_globals is None:\n if hasattr(tp, '__origin__') and not tp.__origin__ is None:\n toadd = tp.__origin__\n elif is_Callable(tp):\n toadd = typing.Callable\n else:\n toadd = tp\n if not sys.modules[toadd.__module__] in implicit_globals:\n assumed_globals.add(toadd)\n return tp_name\n else:\n md = sys.modules[tp.__module__]\n if md in implicit_globals:\n return tp_name\n md_name = tp.__module__\n if md in assumed_globals:\n try:\n md_name = assumed_globals[md]\n except:\n pass\n else:\n if not (md.__package__ is None or md.__package__ == ''\n or md_name.startswith(md.__package__)):\n md_name = md.__package__+'.'+tp.__module__\n return md_name+'.'+tp_name",
"def _declaring_class(obj):\n name = _qualname(obj)\n return name[:name.rfind('.')]"
] | [
"0.720825",
"0.68506885",
"0.68197757",
"0.6692258",
"0.669201",
"0.66850084",
"0.6678043",
"0.6637933",
"0.655427",
"0.6363912",
"0.6304597",
"0.62705034",
"0.6256707",
"0.6187803",
"0.6154",
"0.6152605",
"0.612782",
"0.6108689",
"0.603036",
"0.60212445",
"0.60132855",
"0.5985028",
"0.5985028",
"0.5919722",
"0.5886732",
"0.58699465",
"0.58191556",
"0.57944226",
"0.57928425",
"0.5791953"
] | 0.6943579 | 1 |
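
The snippets above all revolve around deriving a module-qualified name for a class or instance. As a minimal point of reference — a standalone sketch, not code taken from any of the entries above — the usual recipe combines __module__ and __qualname__ and skips the builtins module:

    def fully_qualified_name(obj):
        # Accept either a class or an instance; work with the class.
        cls = obj if isinstance(obj, type) else type(obj)
        module = cls.__module__
        # Builtins (and a missing module) are reported without a prefix.
        if module in (None, "", "builtins", "__builtin__"):
            return cls.__qualname__
        return f"{module}.{cls.__qualname__}"

    # fully_qualified_name(3.5)                        -> 'float'
    # fully_qualified_name(collections.OrderedDict())  -> 'collections.OrderedDict'
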
Attempts to convert the supplied object to a pickle and, failing that, to a fully qualified name. | def processObject(self, x):
pickled = self.objToPickle(x)
if pickled:
return pickled
return self.objToFQN(x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_pickle_dumps(obj):\n try:\n return cloudpickle.dumps(obj)\n except Exception:\n pass\n\n try:\n return pickle.dumps(obj)\n except Exception:\n raise",
"def pickle_object(obj, ofname: \"Path|str\"):\n ofname = Path(ofname)\n maybe_make_output_dir(ofname)\n with ofname.open(\"wb\") as f:\n pickle.dump(obj, f)",
"def serialize(obj):\n serial = repr(obj)\n try:\n if eval(serial) == obj:\n return serial\n except:\n pass\n try:\n serial = pickle.dumps(obj)\n return 'pickle.loads(%s)' % repr(serial)\n except:\n raise Exception #unable to serialize",
"def _load_obj(name):\n with open('/bigdisk/pickles/' + name, 'r') as f:\n return pickle.load(f)",
"def obj2pickle(obj, file):\n # Ensure parent directory and necesary file structure exists\n pardir = os.path.dirname(file)\n if pardir.strip() != \"\": # ensure pardir is not an empty string\n if not os.path.exists(pardir):\n os.makedirs(pardir)\n\n with open(file, mode=\"wb\") as fileObj:\n pickle.dump(obj, fileObj, protocol=2) # compatible with py2.7 and 3.x",
"def from_pickle(filename):\n\t\tif isinstance(filename, strcomp):\n\t\t\tif os.path.exists(filename):\n\t\t\t\ttry:\n\t\t\t\t\tfile = open(filename, \"rb\")\n\t\t\t\t\tobj = pickle.load(file)\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn obj\n\t\t\t\t\t# return pickle.load(open(filename, \"rb\"))\n\t\t\t\texcept:\n\t\t\t\t\traise IOError(\"Could not unpickle file: %s\" % (filename))\n\t\t\telse:\n\t\t\t\traise FileNotFoundError(\"File does not exist: %s\" % (filename))\n\t\telse:\n\t\t\traise TypeError(\"Must be of type str. Got: %s\" % (type(filename)))",
"def savePickle(object, name):\n epoch = time.time()\n filename = name + str(epoch) + \".pkl\" # Save name\n fullPath = path.join(PICKLE_DIR, filename) # Save path\n\n # Get permissions and save the file\n with open(fullPath, \"w\") as outfile:\n pickle.dump(object, outfile)",
"def load_obj(saved_name):\n with open( saved_name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"def _save_obj(obj, name):\n with open('/bigdisk/pickles/' + name, 'w') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def load_obj(name):\r\n with open('../pickle/' + name + '.pkl', 'rb') as fout:\r\n return pickle.load(fout)\r\n # end with\r",
"def do_pickle(self, arg):\n try:\n from pickling import Pickling\n Pickling(arg, input(\"Please enter the name of file: \")).pickle_it()\n except TypeError as e:\n print(e)\n except():\n print(\"Error!!\")",
"def pickle_loader(fileobj):\n if isinstance(fileobj, bytes):\n data = pickle.loads(fileobj, encoding=\"latin1\")\n elif isinstance(fileobj, six.string_types):\n with open(fileobj, 'rb') as f:\n data = pickle.load(f, encoding=\"latin1\")\n elif hasattr(fileobj, 'read'):\n data = pickle.load(fileobj, encoding=\"latin1\")\n else:\n raise ValueError('fileobj is not a filename or a file object')\n return data",
"def pickle(obj):\n return pickletools.optimize(cPickle.dumps(obj))",
"def do_unpickle(self, arg):\n try:\n from pickling import Pickling\n Pickling('output.pickle', arg).unpickle_it()\n print('The pickled file has been un-pickled')\n except FileNotFoundError as e:\n print(e)\n except():\n print(\"Error!!\")",
"def _resolve_load(self, basename):\n relpath = os.path.join(self.dst, basename) + \".pickle\"\n try:\n with open(relpath, \"rb\") as fp:\n return pickle.load(fp)\n except:\n pass\n raise IOError(\"can not load '{}'\".format(relpath))",
"def loadObj(name):\n\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)",
"def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"def serialize(file_location, obj):\n if obj is None:\n raise ValueError('ERROR: Can not serialize when object to serialize is None')\n with open(file_location, 'wb') as file:\n pickle.dump(obj, file=file)",
"def loadFromFile(file_name=\"saved_object.pickle\"):\n\n try:\n with open(file_name, \"rb\") as inputToLoad:\n loaded_object = pickle.load(inputToLoad)\n return loaded_object\n except IOError:\n raise InvalidFilesPath\n except ImportError as e:\n raise InvalidFile(\n \"Structure of project has been changed since saving this object: %s\" % str(e))\n except TypeError:\n return pickle.load(file_name)",
"def _pickle_sub_obj(\n self,\n sub_obj,\n path\n ):\n with path.open(mode='wb') as f:\n pickle.dump(sub_obj, f, 2)\n return path",
"def _resolve_save(self, basename, what):\n relpath = os.path.join(self.dst, basename) + \".pickle\"\n print \"saving '{}'\".format(relpath)\n try:\n with open(relpath, \"wb\") as fp:\n pickle.dump(what, fp, protocol=2)\n except Exception, e:\n raise IOError(\"can not save '{}':\\n{}\".format(relpath, e))",
"def load_object(self, name: str):\r\n with open_(self._path_for_pickle(name), \"rb\") as f:\r\n return dill.load(f)",
"def save_obj(obj, name):\n \n with open(name + '.pkl', 'wb') as objec:\n pickle.dump(obj, objec)",
"def save_pickle(obj, filename, use_dill=False, protocol=4, create_folder=True):\n if create_folder:\n _create_folder_if_not_exist(filename)\n\n # Save\n with open(filename, 'wb') as file:\n if not use_dill:\n pickle.dump(obj, file, protocol=protocol)\n else:\n dill.dump(obj, file)",
"def _restore_sub_obj(\n self,\n attr_name: pathlib.Path\n ):\n return pickle.load(attr_name.open(mode=\"rb\"))",
"def save(obj, obj_name):\n try:\n _save(obj, os.path.join(KALE_DATA_DIRECTORY, obj_name))\n except KaleMarshalException as e:\n log.error(e)\n log.debug(\"Original Traceback\", exc_info=e.__traceback__)\n utils.graceful_exit(1)",
"def save_obj(obj, saved_name ):\n with open( saved_name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def save_object(self, name: str, obj: object):\r\n with open_(self._path_for_pickle(name), \"wb\") as f:\r\n dill.dump(obj, f)"
] | [
"0.7063126",
"0.64876986",
"0.60792595",
"0.5974439",
"0.58921015",
"0.58882225",
"0.58367944",
"0.57654095",
"0.5750935",
"0.57494664",
"0.5721882",
"0.56769556",
"0.5674027",
"0.5642555",
"0.56334174",
"0.5625167",
"0.5624294",
"0.560091",
"0.560091",
"0.5558587",
"0.5555589",
"0.5549082",
"0.54964733",
"0.5490503",
"0.54823285",
"0.54777265",
"0.54761714",
"0.5470377",
"0.5469468",
"0.54675925"
] | 0.6659083 | 1 |
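
A self-contained sketch of the pattern documented in the record above: try to pickle the object and fall back to a dotted-name string when that fails. The helper methods the original relies on (objToPickle, objToFQN) are not shown in this record, so the fallback below is an assumption about their intent, not their actual code:

    import pickle

    def pickle_or_fqn(obj):
        try:
            return pickle.dumps(obj)
        except Exception:
            # Unpicklable: fall back to a fully qualified name string.
            module = getattr(obj, "__module__", type(obj).__module__)
            name = getattr(obj, "__qualname__", type(obj).__qualname__)
            return f"{module}.{name}"
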
Sets my current f-args-kw tuple, returning a reference to myself to allow easy method chaining. The function I{f} must be an actual callable object if you want to use L{nn}. Otherwise it can also be a string depicting a callable. You can specify I{args} with a second argument (as a list or tuple), and I{kw} with a third argument (as a C{dict}). If you are only specifying a single arg, you can just provide it as your second argument to this method call without wrapping it in a list or tuple. I try to be flexible. If you've set a function name and want to add a sequence of args or a dict of keywords, you can do it by supplying the I{args} or I{kw} keywords. You can also set a class instance at that time with the I{instance} keyword. | def setCall(self, *metaArgs, **kw):
if metaArgs:
equiv = True
if self.lastMetaArgs is None:
equiv = False
elif len(metaArgs) != len(self.lastMetaArgs):
equiv = False
else:
for k, arg in enumerate(metaArgs):
try:
thisEquiv = (arg == self.lastMetaArgs[k])
except:
thisEquiv = False
if not thisEquiv:
equiv = False
break
if equiv and not hasattr(self, 'pastInfo'):
# We called this already with the same metaArgs and
# without any pastInfo to reckon with, so there's
# nothing to do.
return self
# Starting over with a new f
callDict = {'f': metaArgs[0], 'fs': self._funcText(metaArgs[0])}
args = metaArgs[1] if len(metaArgs) > 1 else []
if not isinstance(args, (tuple, list)):
args = [args]
callDict['args'] = args
callDict['kw'] = metaArgs[2] if len(metaArgs) > 2 else {}
callDict['instance'] = None
if self.whichThread:
callDict['thread'] = threading.current_thread().name
self.callDict = callDict
elif hasattr(self, 'callDict'):
# Adding to an existing f
for name in ('args', 'kw', 'instance'):
if name in kw:
self.callDict[name] = kw[name]
else:
raise ValueError(
"You must supply at least a new function/string "+\
"or keywords adding args, kw to a previously set one")
if hasattr(self, 'currentID'):
del self.currentID
# Runs the property getter
self.ID
if metaArgs:
# Save metaArgs to ignore repeated calls with the same metaArgs
self.lastMetaArgs = metaArgs
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, func, *args, **kwargs):\n self._func = func\n self._args = args\n self._kwargs = kwargs\n self._fully_bound = None",
"def set_func_args(self, *args, **kwargs):\n self._func_args = args \n self._func_kw_args = kwargs",
"def call(f):\n def g(*args, **kwds):\n return (f, args, kwds)\n return g",
"def chainable(fn: Callable):\n # @validate_arguments\n @functools.wraps(fn)\n def setter_wrapper(self, *args: Any, **kwargs: Any) -> Any:\n fn(self, *args, **kwargs)\n return self\n\n return setter_wrapper",
"def __call__(self, f: Callable[..., int]) -> BaseNLPLabelingFunction:\n if self._lf_cls is None:\n raise NotImplementedError(\"_lf_cls must be defined\")\n name = self.name or f.__name__\n return self._lf_cls(\n name=name,\n f=f,\n resources=self.resources,\n pre=self.pre,\n text_field=self.text_field,\n doc_field=self.doc_field,\n language=self.language,\n disable=self.disable,\n memoize=self.memoize,\n memoize_key=self.memoize_key,\n gpu=self.gpu,\n )",
"def newf(self,*args,**kargs): \n return self._set_coords(coords_func(self.coords,*args,**kargs))",
"def SetParseFn(fn, *arguments):\n def _Decorator(func):\n parse_fns = GetParseFns(func)\n if not arguments:\n parse_fns['default'] = fn\n else:\n for argument in arguments:\n parse_fns['named'][argument] = fn\n _SetMetadata(func, FIRE_PARSE_FNS, parse_fns)\n return func\n\n return _Decorator",
"def f_set(self, *args, **kwargs):\n if args and self.v_name is None:\n raise AttributeError(\n \"Cannot set positional value because I do not have a name!\"\n )\n for idx, arg in enumerate(args):\n valstr = self.f_translate_key(idx)\n self.f_set_single(valstr, arg)\n\n for key, arg in kwargs.items():\n self.f_set_single(key, arg)",
"def __init__(self, function, *args):\n self.function = function\n self.args = args",
"def register_args(f):\n\n @wraps(f)\n def inner(*_args, **_kwargs):\n self = _args[0]\n args, kwargs = args_kwargs(f)\n args = dict(zip(args[1::], _args[1::]))\n kwargs.update(_kwargs)\n self.args = args\n self.kwargs = kwargs\n return f(self, *args.values(), **kwargs)\n\n return inner",
"def __call__(self, function: FuncSpeechArg):\n self._add_attr(function)\n return function",
"def __call__(self, fn=None, *args, **kwargs):\n if callable(fn):\n self.fn = fn\n\n return self",
"def f_onearg_and_default(self, arg1, default = 1) :\n pass",
"def __call__(self, f: Callable[..., int]) -> LabelingFunction:\n name = self.name or f.__name__\n return LabelingFunction(name=name, resources=self.resources, f=f, pre=self.pre, cont_scorer=self.cont_scorer, label=self.label)",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):",
"def __setattr__(*args):"
] | [
"0.5703724",
"0.56538165",
"0.55401057",
"0.5524479",
"0.54307365",
"0.5427452",
"0.54265815",
"0.5302647",
"0.52991205",
"0.5291197",
"0.5282004",
"0.5254453",
"0.5248723",
"0.523779",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767",
"0.52307767"
] | 0.6287769 | 0 |
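
The setCall method above records a callable plus its args/kw and returns self so calls can be chained. Below is a much smaller stand-in showing just that storing-and-chaining idea; the class and method names are hypothetical, and the original's caching, threading, and ID bookkeeping are deliberately left out:

    class CallSpec:
        def set_call(self, f, args=None, kw=None):
            # Accept a bare single argument as well as a list/tuple.
            if args is None:
                args = []
            elif not isinstance(args, (tuple, list)):
                args = [args]
            self.call = {"f": f, "args": list(args), "kw": dict(kw or {})}
            return self  # return self to allow method chaining

        def run(self):
            c = self.call
            return c["f"](*c["args"], **c["kw"])

    # CallSpec().set_call(divmod, (7, 3)).run() -> (2, 1)
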
Returns a unique ID for my current callable. | def ID(self):
if hasattr(self, 'currentID'):
return self.currentID
if hasattr(self, 'callDict'):
thisID = hashIt(self.callDict)
if hasattr(self, 'pastInfo'):
self.pastInfo[thisID] = {'callDict': self.callDict}
else:
thisID = None
self.currentID = thisID
return thisID | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_id(self): # real signature unknown; restored from __doc__\n return \"\"",
"def _get_unique_id(self):\n now = datetime.now()\n\n u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))\n return \"instance\" + str(u_id)",
"def generate_id(cls):\n cls._index += 1\n return 'fp_%s' % cls._index",
"def unique_id() -> str:",
"def unique_id(self):\n return f\"c{self._controller.controller_index + 1}_m\"",
"def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id",
"def get_id(self) -> str:\n return self._register_id",
"def unique_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"unique_identifier\")",
"def unique_id(self) -> str:\n return f\"{self.entry_id}_{self.module_id}_{self.data_id}\"",
"def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def full_id(self):\n name = self.strategy.__class__.__name__\n return f'{name}/{self.id()}'",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id"
] | [
"0.7378038",
"0.70565146",
"0.7052797",
"0.69692785",
"0.6922952",
"0.685859",
"0.68508834",
"0.6849531",
"0.6843613",
"0.68147796",
"0.68109673",
"0.68109673",
"0.68109673",
"0.68109673",
"0.6809885",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235",
"0.67909235"
] | 0.7128812 | 1 |
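
The ID property above hashes the stored call dict (via a hashIt helper that is not shown) into a stable identifier. The sketch below illustrates one way such an ID could be derived with the standard library; the repr-based serialization is only good enough for an illustration, not a canonical implementation:

    import hashlib

    def call_id(call_dict):
        # Sort the items so dict ordering cannot change the digest.
        payload = repr(sorted(call_dict.items())).encode("utf-8")
        return hashlib.sha1(payload).hexdigest()[:12]

    # call_id({"f": "sum", "args": [1, 2], "kw": {}}) -> a 12-character hex string
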
Use this whenever info won't be needed anymore for the specified call ID, to avoid memory leaks. | def forgetID(self, ID):
if ID in getattr(self, 'pastInfo', {}):
del self.pastInfo[ID] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. Please, choose another')",
"def _slot_timer_info_later(self, _sender, _data):\r\n self.request_info()\r\n self._info_timer = None",
"def getInfo(self, ID, name, nowForget=False):\n def getCallDict():\n if hasattr(self, 'callDict'):\n result = self.callDict\n if nowForget:\n del self.callDict\n else:\n result = None\n return result\n \n if hasattr(self, 'pastInfo'):\n if ID is None and name == 'callDict':\n return getCallDict()\n if ID in self.pastInfo:\n x = self.pastInfo[ID]\n if nowForget:\n del self.pastInfo[ID]\n return x.get(name, None)\n return None\n if name == 'callDict':\n return getCallDict()\n return None",
"def place_call_offhold(self) -> None:",
"def ID(self):\n if hasattr(self, 'currentID'):\n return self.currentID\n if hasattr(self, 'callDict'):\n thisID = hashIt(self.callDict)\n if hasattr(self, 'pastInfo'):\n self.pastInfo[thisID] = {'callDict': self.callDict}\n else:\n thisID = None\n self.currentID = thisID\n return thisID",
"def on_incoming_call(self, call):\n\n try:\n current_time = time.time()\n remote_uri = hash_remote_uri(self.cfg, call.info().remote_uri)\n\n if not self.cfg['VoipIO']['reject_calls']:\n if self.voipio.black_list[get_user_from_uri(remote_uri)] < current_time:\n # answer the call\n self.voipio.call = call\n self.voipio.on_incoming_call(remote_uri)\n\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Incoming call from %s\" % remote_uri)\n\n call_cb = CallCallback(self.cfg, call, self.voipio)\n call.set_callback(call_cb)\n\n call.answer()\n else:\n # rejected the call since the caller is blacklisted\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Rejected call from blacklisted remote URI %s \" % remote_uri)\n wait_hours = (self.voipio.black_list[get_user_from_uri(remote_uri)] - current_time) / (60 * 60)\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Must wait for %d hours\" % wait_hours)\n # respond by \"Busy here\"\n call.answer(486)\n\n self.voipio.on_rejected_call_from_blacklisted_uri(remote_uri)\n else:\n # reject the call since all calls must be rejected\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Rejected call from %s\" % remote_uri)\n\n # respond by \"Busy here\"\n call.answer(486)\n # respond by \"Decline\"\n #call.answer(603)\n\n self.voipio.on_rejected_call(remote_uri)\n except:\n self.voipio.close_event.set()\n self.cfg['Logging']['system_logger'].exception('Uncaught exception in the AccountCallback class.')\n raise",
"def drain_call_queue(self):\n pass",
"def place_call_onhold(self) -> None:",
"def free(self, valueid):\n raise NotImplementedError(\"abstract\")",
"def aboutCall(self, ID=None, nowForget=False):\n if ID:\n pastInfo = self.getInfo(ID, 'aboutCall', nowForget)\n if pastInfo:\n return pastInfo\n callDict = self.getInfo(ID, 'callDict')\n if not callDict:\n return \"\"\n func, args, kw = [callDict[x] for x in ('f', 'args', 'kw')]\n instance = callDict.get('instance', None)\n text = repr(instance) + \".\" if instance else \"\"\n text += self._funcText(func) + \"(\"\n if args:\n text += \", \".join([str(x) for x in args])\n for name, value in kw.items():\n text += \", {}={}\".format(name, value)\n text += \")\"\n if 'thread' in callDict:\n text += \" <Thread: {}>\".format(callDict['thread'])\n return self.saveInfo('aboutCall', text, ID)",
"def prep_info_callset(df, info):\n df = df.copy()\n df = df[df.ID.isin(info.ID_original.tolist())]\n return df",
"def bill_call(self, call: Call) -> None:\n self.bill.add_billed_minutes(ceil(call.duration / 60.0))",
"def remove_item(self, call_number):\n if self.item_exists(call_number):\n del self.item_list[call_number]\n print(f\"The item({call_number}) is removed.\")\n else:\n print(\"No matching call number found.\")",
"def allocate_id(self, allocated_id):\n self._free_ids.remove(allocated_id)",
"def context(self, *metaArgs, **kw):\n if not hasattr(self, 'pastInfo'):\n raise Exception(\n \"Can't use a context manager without saving call info\")\n ID = self.setCall(*metaArgs, **kw).ID\n yield InfoHolder(self, ID)\n self.forgetID(ID)",
"def create_usage(self, def_id):\n raise NotImplementedError()",
"def reset_calls(self) -> None:\n self.logger.info(\"Reset calls\")\n\n self._has_bob = False\n self._has_single = False",
"def cleanIdleConnections(self):\n\n for cid in self.calls.keys():\n if self.calls[cid]['state'] == 'IDLE':\n del self.calls[cid]\n elif self.calls[cid]['state'] == 'IDLE-CALL-STOPPED':\n core.dissector_dict.dissd.unloadDissectorsInstances(['rtp'])\n del self.calls[cid]",
"def api_call_counter(self, api_call_counter):\n\n self._api_call_counter = api_call_counter",
"def _invalidate(self, used_id):\n valid_ids = self._load_transids(lock=True)\n try:\n valid_ids.remove(used_id)\n except ValueError:\n return\n self._save_transids(valid_ids)",
"def free(self):\n pass",
"def free(self):\n pass",
"def ReleaseCall(self, call=None, byDefault=0, extensions=None):\n #byDefault = 0, extensions = None - sequence different from other methods for compatibility\n pt, call = self.getPartyFromCall((PartyState.Dialing, PartyState.Established, PartyState.Ringing), call)\n otherPartyRinging = False\n for otherPt in call.PartyList: #checking if there is a ringing party\n if otherPt.State == PartyState.Ringing:\n otherPartyRinging = True\n break\n if otherPartyRinging and not byDefault and not self.tserver.requestTimout:\n time.sleep(0.2)\n DebugPrint(\"Debug: otherPartyRinging timeout 0.2\")\n cl = DN.ReleaseCall(self, call, byDefault, extensions)\n if call and call.pendingQueue:\n call.pendingQueue.processRouteDestReleased(cl, self)\n return cl",
"async def _pre_call(self, _request_id: int, request: fastapi.Request, *args, **kwargs) -> None:\n return",
"def _invalidate_local_get_event_cache(self, event_id: str) -> None:\n\n self._get_event_cache.invalidate_local((event_id,))\n self._event_ref.pop(event_id, None)\n self._current_event_fetches.pop(event_id, None)",
"def update_calls(self, calls):\n for call in calls:\n call.site = self\n self.call_for_sample = {call.sample: call for call in calls}",
"def _ensureCached(self, id):\n if id not in self._objects:\n self._fetchObjectsByID([id])\n misc.cdblogv(misc.kLogMsg, 0,\n \"WsObjectCache: object with id '%s' unexpectedly not cached.\" % id)",
"def Close(self, cause = None, mainCall = None):\n #if self.PartyList:\n # print self\n ptList = copy.copy(self.PartyList); self.PartyList = []\n for pt in ptList:\n pt.Close()\n if self.created:\n addPrm = {}\n if cause:\n if not mainCall:\n mainCall = self.MainCall\n if mainCall:\n addPrm[\"RefCallUUID\"] = mainCall.CallUUID\n mainCall.ownISLinkList = All([mainCall.ownISLinkList, self.ownISLinkList])\n #print self.ownISLinkListToString()\n for isLink in self.ownISLinkList.keys():\n #print isLink\n cl = self.ownISLinkList[isLink]\n #print cl.ownISLinkListToString()\n\n if cause and self.MainCall:\n cl.ownISLinkList[isLink] = self.MainCall\n else:\n if cl.ownISLinkList.has_key(isLink): #it does not if key == NewISLinkID\n del cl.ownISLinkList[isLink]\n\n if not self.ghost:\n if self.tserver.CallReleaseTrackingFeature():\n if self.ControlParty: addPrm[\"CtrlParty\"] = self.ControlParty\n self.tserver.CallMonitoring.eventCallDeleted(self, addPrm = addPrm)\n\n #self.ConnID = None\n self.PreviousConnID = None\n self.Scope = None\n self.Conference = None\n self.ConsultType = None\n self.PropagatedType = None\n self.LocalType = None\n self.ForwardingDN = None\n self.LastForwardingDN = None\n self.CalledPartyAddress = None\n self.MediaType = None\n self.SessionID = None\n self.GlobalCallID = None\n self.ControlParty = None\n\n # Remove consult call connection\n if self.MainCall:\n self.MainCall.ConsultList.remove(self)\n self.MainCall = None\n\n for cl in self.ConsultList:\n cl.MainCall = None\n self.ConsultList = []\n\n for cl in self.connectedCalls:\n if self in cl.connectedCalls:\n cl.connectedCalls.remove(self)\n self.connectedCalls = []\n\n self.ownISLinkList = {}\n\n self.tserver.remove_call(self)\n if sys.copyright.lower().find(\"python\") > -1:\n gc.collect()",
"def get(self, request, id):\n queryset = get_object_or_404(CallRegister, pk=id)\n call_allocations = CallAllocation.objects.filter(call=queryset).order_by('-timestamp')\n if call_allocations:\n call_visits = CallVisit.objects.filter(call_id=call_allocations[0].call.pk).order_by('-timestamp')\n else:\n call_visits = None\n engineers = Engineer.objects.all()\n context = {\n 'object' : queryset,\n 'call_allocations' : call_allocations,\n 'engineers' : engineers,\n 'call_visits' : call_visits\n }\n return render(request, self.template_name, context)",
"def performance(self, id):"
] | [
"0.5423897",
"0.53597885",
"0.5170545",
"0.51276785",
"0.5111587",
"0.5107926",
"0.50829285",
"0.5059674",
"0.50555295",
"0.49963364",
"0.49642846",
"0.495139",
"0.4910475",
"0.48489508",
"0.47891104",
"0.4779412",
"0.47645634",
"0.47535005",
"0.47411475",
"0.4731908",
"0.46933368",
"0.46933368",
"0.46861959",
"0.46851596",
"0.46843916",
"0.46827027",
"0.46825475",
"0.46754792",
"0.46745196",
"0.46681932"
] | 0.59721094 | 0 |
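
A compact sketch of the forget-by-ID pattern documented above: cached per-call info is held in a plain dict and explicitly discarded when it is no longer needed, so long-running code does not accumulate stale entries. The class name and methods here are illustrative, not the original API:

    class InfoCache:
        def __init__(self):
            self.past_info = {}

        def save(self, call_id, info):
            self.past_info[call_id] = info

        def forget(self, call_id):
            # pop() with a default mirrors the guarded delete above:
            # forgetting an unknown ID is a no-op, not an error.
            self.past_info.pop(call_id, None)
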
Context manager for setting and getting call info. Call this context manager method with info about a particular call (same format as L{setCall} uses) and it yields an L{InfoHolder} object keyed to that call. It lets you get info about the call inside the context, without worrying about the ID or calling L{forgetID}, even after I have been used for other calls outside the context. | def context(self, *metaArgs, **kw):
if not hasattr(self, 'pastInfo'):
raise Exception(
"Can't use a context manager without saving call info")
ID = self.setCall(*metaArgs, **kw).ID
yield InfoHolder(self, ID)
self.forgetID(ID) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aboutCall(self, ID=None, nowForget=False):\n if ID:\n pastInfo = self.getInfo(ID, 'aboutCall', nowForget)\n if pastInfo:\n return pastInfo\n callDict = self.getInfo(ID, 'callDict')\n if not callDict:\n return \"\"\n func, args, kw = [callDict[x] for x in ('f', 'args', 'kw')]\n instance = callDict.get('instance', None)\n text = repr(instance) + \".\" if instance else \"\"\n text += self._funcText(func) + \"(\"\n if args:\n text += \", \".join([str(x) for x in args])\n for name, value in kw.items():\n text += \", {}={}\".format(name, value)\n text += \")\"\n if 'thread' in callDict:\n text += \" <Thread: {}>\".format(callDict['thread'])\n return self.saveInfo('aboutCall', text, ID)",
"def get_thread_call_context(create=False):\n rv = getattr(_local, 'context', None)\n if rv is not None:\n return rv\n if not create:\n return\n return set_thread_call_context(contextlib.new_call_context())",
"def getInfo(self, ID, name, nowForget=False):\n def getCallDict():\n if hasattr(self, 'callDict'):\n result = self.callDict\n if nowForget:\n del self.callDict\n else:\n result = None\n return result\n \n if hasattr(self, 'pastInfo'):\n if ID is None and name == 'callDict':\n return getCallDict()\n if ID in self.pastInfo:\n x = self.pastInfo[ID]\n if nowForget:\n del self.pastInfo[ID]\n return x.get(name, None)\n return None\n if name == 'callDict':\n return getCallDict()\n return None",
"def set_thread_call_context(ctx):\n _local.context = ctx\n return ctx",
"def request_info(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/info\", {}, \"info\")\r\n else:\r\n self.send_signed_call(\"private/info\", {}, \"info\")",
"def get_info(self, info):\r\n pass",
"def _get_rpc_call_object(self):\n callobj = dict()\n callobj[\"jsonrpc\"] = \"2.0\"\n callobj[\"method\"] = self.command\n callobj[\"id\"] = self.cmd_id\n callobj[\"params\"] = self.arguments\n return callobj",
"def get_context_data(self):\n calls = Call.objects.all()\n return {\"calls\": reversed(calls)}",
"def set_call_context(task, ctx):\n setattr(task, AsyncioContextProvider._CONTEXT_ATTR, ctx)",
"def calls(self):\r\n return calls.Calls(self)",
"def call(self, callee: \"SIPPhoneTemplate\") -> None:",
"def _initiate(self, call):\n if not self.gsm_call:\n raise Exception(\"No connectivity\")\n number = str(call.number)\n logger.info(\"initiate call to %s\", number)\n call_id = yield WaitDBus(self.gsm_call.Initiate, number, \"voice\")\n call_id = int(call_id)\n logger.info(\"call id : %d\", call_id)\n self.lines[call_id] = call\n # TODO: mabe not good idea to store this in the call itself,\n # beside, it makes pylint upset.\n call.__id = call_id",
"def transform_call(call):\n return {\n 'type': 'call',\n 'chain': [str(fn.name) for fn in call.names()],\n 'arguments': [str(arg) for arg in call.arguments()],\n 'body': transform_block(call.body())\n }",
"def test_set_info_callback(self):\n (server, client) = socket_pair()\n\n clientSSL = Connection(Context(SSLv23_METHOD), client)\n clientSSL.set_connect_state()\n\n called = []\n\n def info(conn, where, ret):\n called.append((conn, where, ret))\n\n context = Context(SSLv23_METHOD)\n context.set_info_callback(info)\n context.use_certificate(load_certificate(FILETYPE_PEM, root_cert_pem))\n context.use_privatekey(load_privatekey(FILETYPE_PEM, root_key_pem))\n\n serverSSL = Connection(context, server)\n serverSSL.set_accept_state()\n\n handshake(clientSSL, serverSSL)\n\n # The callback must always be called with a Connection instance as the\n # first argument. It would probably be better to split this into\n # separate tests for client and server side info callbacks so we could\n # assert it is called with the right Connection instance. It would\n # also be good to assert *something* about `where` and `ret`.\n notConnections = [\n conn\n for (conn, where, ret) in called\n if not isinstance(conn, Connection)\n ]\n assert (\n [] == notConnections\n ), \"Some info callback arguments were not Connection instances.\"",
"def context(self) -> _C_out:\n return self._context",
"def call_method(self, id=None, qc_simplified='0'):\n\t\tview_call_query = \"select * from view_call\"\n\t\tview_qc_query = \"select * from view_qc\"\n\t\tif id:\n\t\t\tview_call_query += ' where call_method_id=%s'%id\n\t\t\tview_qc_query += ' where call_method_id=%s'%id\n\t\tview_call_query += ' order by nativename, stockparent'\n\t\trows = model.db.metadata.bind.execute(view_call_query)\n\t\t\n\t\tc.call_info_ls = []\n\t\tqc_simplified = request.params.get('qc_simplified', qc_simplified)\n\t\tc.qc_simplified = qc_simplified\n\t\ti = 0\n\t\tfor row in rows:\n\t\t\tpassingObject = PassingData()\n\t\t\tfor key in row.keys():\n\t\t\t\tsetattr(passingObject, key, getattr(row, key, None))\n\t\t\t#2008-10-09 find QC for calls\n\t\t\tif id:\n\t\t\t\tqc_rows = model.db.metadata.bind.execute('%s and call_info_id=%s'%(view_qc_query, row.call_info_id))\n\t\t\telse:\n\t\t\t\tqc_rows = model.db.metadata.bind.execute('%s where call_info_id=%s'%(view_qc_query, row.call_info_id))\n\t\t\tfor qc_row in qc_rows:\n\t\t\t\tif not hasattr(passingObject, 'call_NA_rate'):\n\t\t\t\t\tsetattr(passingObject, 'call_NA_rate', qc_row.call_NA_rate)\n\t\t\t\tif not hasattr(passingObject, 'array_created'):\n\t\t\t\t\tsetattr(passingObject, 'array_created', qc_row.array_created)\n\t\t\t\t\n\t\t\t\tif qc_simplified=='1' or qc_simplified==1:\n\t\t\t\t\tqc_data = '%.4f'%qc_row.mismatch_rate\n\t\t\t\telse:\n\t\t\t\t\tqc_data = '%.4f(%s/%s)'%(qc_row.mismatch_rate, qc_row.no_of_mismatches, qc_row.no_of_non_NA_pairs)\n\t\t\t\t\n\t\t\t\tsetattr(passingObject, qc_row.QC_method_name, qc_data)\n\t\t\t\t\n\t\t\ti += 1\n\t\t\tsetattr(passingObject, 'no', i)\n\t\t\tc.call_info_ls.append(passingObject)\n\t\treturn render('/call_info.html')",
"def ID(self):\n if hasattr(self, 'currentID'):\n return self.currentID\n if hasattr(self, 'callDict'):\n thisID = hashIt(self.callDict)\n if hasattr(self, 'pastInfo'):\n self.pastInfo[thisID] = {'callDict': self.callDict}\n else:\n thisID = None\n self.currentID = thisID\n return thisID",
"def __call__(self, **kwargs):\n return Context(self, kwargs)",
"def call(self, sid):\r\n return calls.Call(self, sid)",
"def get_info(self):\n pass",
"def get_info(self):\n pass",
"def process_info(self, info):\n return info",
"def add_call(self, session, call_data: Dict) -> Calls:\n planned_at = call_data[\"new_call_datetime\"]\n linkedin = call_data[\"new_call_link\"]\n leadgen_id = call_data[\"chat_id\"]\n\n new_call = Calls(\n planned_at = planned_at,\n linkedin = linkedin,\n leadgen_id = leadgen_id\n )\n session.add(new_call)\n session.commit()\n return new_call",
"def get_caller_context(depth=None, **kwarg):\r\n if TIK_ERROR_MSG.api_source_info is not None:\r\n return TIK_ERROR_MSG.api_source_info\r\n if depth is None:\r\n raise RuntimeError(\"There are two reasons for the error:\\n\"\r\n \"If it is called by the user, please register source\"\r\n \" info before entering decorators;\\n\"\r\n \"If it is an internal call, please specify \"\r\n \"the stack depth;\")\r\n additional_stack = kwarg.get('stack_depth', 0)\r\n depth += additional_stack\r\n if ERROR_MSG_LEVEL.err_msg_level == 0:\r\n caller = stack(depth)\r\n else:\r\n caller = current_frame(depth)\r\n return caller",
"def calls(self, arg=1):\r\n call_counter()\r\n return arg, call_counter.call_count",
"def notify(self, info, context=None):\n\n info[\"project\"] = self.project\n info[\"service\"] = self.service\n self.client.info(context or self.context,\n \"profiler.%s\" % info[\"service\"],\n info)",
"def getInfo():",
"def visit_Call(self, node: ast.Call) -> None:\n self._check_buggy_super_context(node)\n self.generic_visit(node)",
"def info(self, *args, **kwargs):",
"def get_context(self):\n return {\"request\": self.request, \"format\": self.format_kwarg, \"view\": self}"
] | [
"0.6116589",
"0.5598187",
"0.55867136",
"0.5528214",
"0.53109026",
"0.5237054",
"0.51175916",
"0.51127905",
"0.50732195",
"0.5064161",
"0.5057853",
"0.50216436",
"0.50200754",
"0.5004932",
"0.50042874",
"0.49915943",
"0.4979725",
"0.49719357",
"0.49228385",
"0.4916614",
"0.4916614",
"0.4902194",
"0.48957312",
"0.48786002",
"0.4838181",
"0.48375392",
"0.4797969",
"0.47972238",
"0.47941297",
"0.47784224"
] | 0.7960227 | 0 |
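
The context manager above registers call info on entry and forgets it on exit, so callers never have to track IDs themselves. A generic sketch of that enter/exit discipline using contextlib is shown below; 'cache' stands for any object with save()/forget() methods (for instance the InfoCache sketch shown earlier) and is not the original InfoHolder API:

    from contextlib import contextmanager

    @contextmanager
    def call_info(cache, call_id, info):
        cache.save(call_id, info)
        try:
            yield info  # the caller works with the info inside the block
        finally:
            cache.forget(call_id)  # always cleaned up, even on error
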
Provides info about a call. If the supplied name is 'callDict', returns the f-args-kw-instance dict for my current callable. The value of I{ID} is ignored in that case. Otherwise, returns the named information attribute for the previous call identified with the supplied ID. | def getInfo(self, ID, name, nowForget=False):
def getCallDict():
if hasattr(self, 'callDict'):
result = self.callDict
if nowForget:
del self.callDict
else:
result = None
return result
if hasattr(self, 'pastInfo'):
if ID is None and name == 'callDict':
return getCallDict()
if ID in self.pastInfo:
x = self.pastInfo[ID]
if nowForget:
del self.pastInfo[ID]
return x.get(name, None)
return None
if name == 'callDict':
return getCallDict()
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aboutCall(self, ID=None, nowForget=False):\n if ID:\n pastInfo = self.getInfo(ID, 'aboutCall', nowForget)\n if pastInfo:\n return pastInfo\n callDict = self.getInfo(ID, 'callDict')\n if not callDict:\n return \"\"\n func, args, kw = [callDict[x] for x in ('f', 'args', 'kw')]\n instance = callDict.get('instance', None)\n text = repr(instance) + \".\" if instance else \"\"\n text += self._funcText(func) + \"(\"\n if args:\n text += \", \".join([str(x) for x in args])\n for name, value in kw.items():\n text += \", {}={}\".format(name, value)\n text += \")\"\n if 'thread' in callDict:\n text += \" <Thread: {}>\".format(callDict['thread'])\n return self.saveInfo('aboutCall', text, ID)",
"def get_info(self, name):\n return self.info[name]",
"def __getattr__(self, name):\n try:\n return self[self.sig.argpos(name)]\n except:\n pass\n return BasicCall.__getattr__(self, name)",
"def get_call(call_node):\n if not isinstance(call_node, ast.Call):\n # print(\"this node is \" + str(type(call_node)) + \" node, not call node\")\n return None\n\n elif isinstance(call_node.func, ast.Name):\n return call_node.func.id\n\n elif isinstance(call_node.func, ast.Attribute):\n if isinstance(call_node.func.value, ast.Name):\n return call_node.func.value.id + '.' + call_node.func.attr\n else:\n get_call(call_node.func.value)\n\n elif isinstance(call_node.func, ast.Call):\n get_call(call_node.func)",
"def json_to_call(name: str) -> Optional[CallDef]:\n if name not in json:\n logger.warning(f\"No field '{name}' in the row generator JSON\")\n return None\n\n call: Dict[int, str] = {}\n for key, value in json[name].items():\n try:\n index = int(key)\n except ValueError as e:\n raise_error(name, f\"Call index '{key}' is not a valid integer\", e)\n call[index] = value\n return CallDef(call)",
"def ID(self):\n if hasattr(self, 'currentID'):\n return self.currentID\n if hasattr(self, 'callDict'):\n thisID = hashIt(self.callDict)\n if hasattr(self, 'pastInfo'):\n self.pastInfo[thisID] = {'callDict': self.callDict}\n else:\n thisID = None\n self.currentID = thisID\n return thisID",
"def data(self) -> dict[str, Any]:\n return self.coordinator.data[\"call_deflections\"].get(self.deflection_id, {})",
"def get_call_string(self) -> Optional[str]: # noqa\n call_repr = get_call_string(self.func_name, self.args, self.kwargs, max_length=75)\n return call_repr",
"def info(self, name):\n if isinstance(self._state[name], dict):\n attrs = self._state[name]['attrs']\n return '{} {}({}), {} records: {}'.format(\n attrs['type_str'], name, ','.join(attrs['domain']),\n attrs['records'], attrs['description'])\n else:\n return repr(self[name])",
"def info(self, name=None, **kwargs):\n\n result = None\n if name is None:\n Console.error(\"Instance name is required to start.\")\n return\n\n display_kind = kwargs.pop('kind', \"vm\")\n\n try:\n result = self.__info(name, displayType=display_kind, **kwargs)\n except Exception as se:\n print(se)\n if type(se) == HttpError:\n Console.error(\n f'Unable to get instance {name} info. Reason: {se._get_reason()}')\n else:\n Console.error(f'Unable to get info of instance {name}.')\n\n return result",
"def test_documentation_popxl_call_with_info(self):\n filename = \"call_with_info.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)",
"def add_call(self, session, call_data: Dict) -> Calls:\n planned_at = call_data[\"new_call_datetime\"]\n linkedin = call_data[\"new_call_link\"]\n leadgen_id = call_data[\"chat_id\"]\n\n new_call = Calls(\n planned_at = planned_at,\n linkedin = linkedin,\n leadgen_id = leadgen_id\n )\n session.add(new_call)\n session.commit()\n return new_call",
"def get_info(self): \n return {\n \"ident\": self.ident,\n \"interval\": self._interval,\n \"exception\": self._exception,\n \"execute\": self._execute,\n \"args\": self._args,\n \"kwargs\": self._kwargs}",
"def call_name(self):\n return str(self.executable.name)",
"def _handle_info_response(self, resp, info, prev_info):\r\n if info.line_num != prev_info.line_num:\r\n return\r\n\r\n if resp['calltip']:\r\n info.editor.show_calltip('Arguments', resp['calltip'],\r\n signature=True,\r\n at_position=prev_info.position)\r\n\r\n if resp['name']:\r\n self.send_to_inspector.emit(\r\n resp['name'], resp['argspec'],\r\n resp['note'], resp['docstring'],\r\n not prev_info.auto)",
"def info(self, *args, **kwargs):",
"def transform_call(call):\n return {\n 'type': 'call',\n 'chain': [str(fn.name) for fn in call.names()],\n 'arguments': [str(arg) for arg in call.arguments()],\n 'body': transform_block(call.body())\n }",
"def __getitem__(self, name):\n ikEl = self.infoKinds.get(name, None)\n if ikEl:\n return ikEl.toDict(self)\n return None",
"def calling_stack_info(print_res=True, code_context=1):\n\n start_frame = inspect.currentframe().f_back\n\n fil = generate_frame_list_info(start_frame, code_context=code_context)\n\n if print_res:\n # noinspection PyUnresolvedReferences\n print(fil.tb_txt)\n return fil",
"def get_info(self, info):\r\n pass",
"def info(self, id):",
"def __repr__(self):\n return '<Twilio.Preview.TrustedComms.BrandedCallInstance>'",
"def info(self):\n return self.current_run.info",
"def _get_rpc_call_object(self):\n callobj = dict()\n callobj[\"jsonrpc\"] = \"2.0\"\n callobj[\"method\"] = self.command\n callobj[\"id\"] = self.cmd_id\n callobj[\"params\"] = self.arguments\n return callobj",
"def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s",
"def call_method(self, id=None, qc_simplified='0'):\n\t\tview_call_query = \"select * from view_call\"\n\t\tview_qc_query = \"select * from view_qc\"\n\t\tif id:\n\t\t\tview_call_query += ' where call_method_id=%s'%id\n\t\t\tview_qc_query += ' where call_method_id=%s'%id\n\t\tview_call_query += ' order by nativename, stockparent'\n\t\trows = model.db.metadata.bind.execute(view_call_query)\n\t\t\n\t\tc.call_info_ls = []\n\t\tqc_simplified = request.params.get('qc_simplified', qc_simplified)\n\t\tc.qc_simplified = qc_simplified\n\t\ti = 0\n\t\tfor row in rows:\n\t\t\tpassingObject = PassingData()\n\t\t\tfor key in row.keys():\n\t\t\t\tsetattr(passingObject, key, getattr(row, key, None))\n\t\t\t#2008-10-09 find QC for calls\n\t\t\tif id:\n\t\t\t\tqc_rows = model.db.metadata.bind.execute('%s and call_info_id=%s'%(view_qc_query, row.call_info_id))\n\t\t\telse:\n\t\t\t\tqc_rows = model.db.metadata.bind.execute('%s where call_info_id=%s'%(view_qc_query, row.call_info_id))\n\t\t\tfor qc_row in qc_rows:\n\t\t\t\tif not hasattr(passingObject, 'call_NA_rate'):\n\t\t\t\t\tsetattr(passingObject, 'call_NA_rate', qc_row.call_NA_rate)\n\t\t\t\tif not hasattr(passingObject, 'array_created'):\n\t\t\t\t\tsetattr(passingObject, 'array_created', qc_row.array_created)\n\t\t\t\t\n\t\t\t\tif qc_simplified=='1' or qc_simplified==1:\n\t\t\t\t\tqc_data = '%.4f'%qc_row.mismatch_rate\n\t\t\t\telse:\n\t\t\t\t\tqc_data = '%.4f(%s/%s)'%(qc_row.mismatch_rate, qc_row.no_of_mismatches, qc_row.no_of_non_NA_pairs)\n\t\t\t\t\n\t\t\t\tsetattr(passingObject, qc_row.QC_method_name, qc_data)\n\t\t\t\t\n\t\t\ti += 1\n\t\t\tsetattr(passingObject, 'no', i)\n\t\t\tc.call_info_ls.append(passingObject)\n\t\treturn render('/call_info.html')",
"def format_call(func, args, kwargs, object_name=\"Memory\"):\r\n path, signature = format_signature(func, *args, **kwargs)\r\n msg = '%s\\n[%s] Calling %s...\\n%s' % (80 * '_', object_name,\r\n path, signature)\r\n return msg\r\n # XXX: Not using logging framework\r\n #self.debug(msg)\r",
"def test_get_call_name1(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n name = b_utils.get_call_name(tree, {})\n self.assertEqual(\"a.b.c.d\", name)",
"def get_info(self) -> Optional[Dict[str, Any]]:",
"def info(self) -> Optional[Dict[str, Any]]:\n return self._state.get(\"info\", None)"
] | [
"0.6510412",
"0.5486511",
"0.5346446",
"0.52030444",
"0.5146749",
"0.5081739",
"0.5034171",
"0.50240844",
"0.50165373",
"0.4871073",
"0.48639536",
"0.48548323",
"0.4849726",
"0.48485854",
"0.48369116",
"0.48026302",
"0.47847903",
"0.4784088",
"0.4783681",
"0.47752234",
"0.47677132",
"0.4761972",
"0.47544688",
"0.4723143",
"0.47183025",
"0.47156766",
"0.46838716",
"0.46731037",
"0.46712568",
"0.46643218"
] | 0.6836767 | 0 |
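
Stripped of the current-call special case, the lookup in getInfo above reduces to "fetch one named field recorded under a call ID, optionally discarding the entry afterwards". A standalone sketch of that core, with past_info as a plain dict of dicts rather than the original object's attribute:

    def get_info(past_info, call_id, name, forget=False):
        entry = past_info.get(call_id)
        if entry is None:
            return None
        value = entry.get(name)
        if forget:
            # Drop the whole entry once its info has been consumed.
            del past_info[call_id]
        return value
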
Returns an informative string describing my current function call or a previous one identified by ID. | def aboutCall(self, ID=None, nowForget=False):
if ID:
pastInfo = self.getInfo(ID, 'aboutCall', nowForget)
if pastInfo:
return pastInfo
callDict = self.getInfo(ID, 'callDict')
if not callDict:
return ""
func, args, kw = [callDict[x] for x in ('f', 'args', 'kw')]
instance = callDict.get('instance', None)
text = repr(instance) + "." if instance else ""
text += self._funcText(func) + "("
if args:
text += ", ".join([str(x) for x in args])
for name, value in kw.items():
text += ", {}={}".format(name, value)
text += ")"
if 'thread' in callDict:
text += " <Thread: {}>".format(callDict['thread'])
return self.saveInfo('aboutCall', text, ID) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)",
"def get_call_string(self) -> Optional[str]: # noqa\n call_repr = get_call_string(self.func_name, self.args, self.kwargs, max_length=75)\n return call_repr",
"def __repr__(self):\n\t\treturn self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def __repr__(self):\r\n return self.func.__doc__",
"def getCallTip(self, command='', *args, **kwds):\n return ('', '', '')",
"def _get_debug_text(self, text):\n\n func = inspect.currentframe().f_back.f_back.f_code\n return \"{}: Function {} in {}:{}\".format(text, func.co_name, os.path.basename(func.co_filename), func.co_firstlineno)",
"def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")",
"def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")",
"def function_name(func):\n return log(level=\"info\", message=_function_name(func))",
"def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(WangLandau.key, self.delta0, self.c_upd, self.n_upd)\n if self.smooth:\n strme = \"{} {}\".format(strme, self.smooth)\n\n return strme",
"def name(self):\n\t\treturn self._func_name",
"def display_name(self) -> str:\n return f\"{self.func.__module__}.{self.func.__qualname__}\"",
"def __str__(self) -> str:\r\n\t\treturn \"{state}\".format(state=self.NextState.__func__.__qualname__)",
"def get_trace_string(self):\n return (\"%s -> %s(0x%s) addr:0x%s\" %\n (self.instr_str, self.rd, self.rd_val, self.addr))"
] | [
"0.6560909",
"0.6306112",
"0.6107266",
"0.6059423",
"0.6059423",
"0.6059423",
"0.6059423",
"0.6059423",
"0.6059423",
"0.6059423",
"0.6059423",
"0.6041999",
"0.6041999",
"0.6041999",
"0.6029351",
"0.6029351",
"0.6029351",
"0.6029351",
"0.6029351",
"0.60204625",
"0.6013816",
"0.5996452",
"0.5904364",
"0.5904364",
"0.5856524",
"0.5805704",
"0.57078415",
"0.5698588",
"0.5693123",
"0.56844246"
] | 0.71258074 | 0 |
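
The aboutCall method above renders a stored call as readable 'instance.func(arg, key=value)' text. The helper below shows the same formatting idea in isolation, without the caching, saveInfo, or thread annotations of the original:

    def format_call(f, args=(), kw=None, instance=None):
        name = getattr(f, "__name__", str(f))
        parts = [str(a) for a in args]
        parts += [f"{k}={v}" for k, v in (kw or {}).items()]
        prefix = f"{instance!r}." if instance is not None else ""
        return f"{prefix}{name}({', '.join(parts)})"

    # format_call(max, (3, 7), {"default": 0}) -> 'max(3, 7, default=0)'
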
Returns an informative string describing an exception raised from my function call or a previous one identified by ID, or one you supply (as an instance, not a class). | def aboutException(self, ID=None, exception=None, nowForget=False):
if ID:
pastInfo = self.getInfo(ID, 'aboutException', nowForget)
if pastInfo:
return pastInfo
if exception:
lineList = ["Exception '{}'".format(repr(exception))]
else:
stuff = sys.exc_info()
lineList = ["Exception '{}'".format(stuff[1])]
callInfo = self.aboutCall()
if callInfo:
lineList.append(
" doing call '{}':".format(callInfo))
self._divider(lineList)
if not exception:
lineList.append("".join(traceback.format_tb(stuff[2])))
del stuff
text = self._formatList(lineList)
return self.saveInfo('aboutException', text, ID) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ErrorString(self): # real signature unknown; restored from __doc__\n pass",
"def __str__(self):\n error = '{0} ({1}): {2}'.format(self.__class__.__name__, self.code,\n self.args)\n return error",
"def __str__(self):\n return \"Error: %s\"%self.__message",
"def exception(self) -> str:\n return pulumi.get(self, \"exception\")",
"def __str__(self):\n return \"ERROR: \" + self.error_message",
"def error(self, e):\n return \"{}: {} ({})\".format(e.__class__.__name__, e.__doc__, e.message)",
"def __str__(self) -> str:\r\n return f'{self.error_code} ---> {self.message}'",
"def exception(self):\n exc_type, exc_value, exc_tb = sys.exc_info()\n cui.message(traceback.format_exception_only(exc_type, exc_value)[-1],\n log_message=traceback.format_exc())",
"def error_general_details(traceback_str: str) -> str:\n return f\"Here is some more info on the error I encountered:\\n```{traceback_str}```\"",
"def error_msg(self) -> str:\n # subclasses can override as needed\n return self.__doc__",
"def __str__(self) -> str:\n message = (\n f\"ERROR: Registration Exception.\\n\"\n f\" - Internal error code: {str(self.code)}\\n\"\n f\" - Internal error message: {str(self.message)}\"\n )\n return message",
"def exception_description(err):\n result = ''\n if isinstance(err, str):\n result = err\n elif isinstance(err, Exception):\n result = \"Exception class: %s.%s\\n\" % (err.__class__.__module__, \\\n err.__class__.__name__)\n if len(err.args) > 0:\n result += \"Args:\\n\"\n arg_num = 0\n for arg in err.args:\n if not isinstance(arg, str):\n arg = str(arg)\n\n arg = arg.replace('\\n', '\\n\\t' + ' '*(len(str(arg_num)) + 3))\n\n result += \"\\t%s : %s\\n\" % (arg_num, arg)\n arg_num += 1\n else:\n result = str(err)\n return result",
"def __str__(self) -> str:\n message = (\n f\"ERROR: Backend Exception.\\n\"\n f\" - Internal error code: {str(self.code)}\\n\"\n f\" - Internal error message: {str(self.message)}\"\n )\n return message",
"def repr_failure(self, excinfo):\n if excinfo.errisinstance(MypyError):\n return excinfo.value.args[0]\n return super().repr_failure(excinfo)",
"def create_exception(self, msg: str):",
"def make_exception_message(exc):\n if str(exc):\n return '%s: %s\\n' % (exc.__class__.__name__, exc)\n else:\n return '%s\\n' % (exc.__class__.__name__)",
"def exception_message(self) -> str:\n return pulumi.get(self, \"exception_message\")",
"def exception_msg(exception):\n if hasattr(exception, 'message'):\n return str(exception.message)\n return str(exception)",
"def test_friendly_exception_formatting_exc_with_str_overload():\n ex = InsufficientSignatures(1, 3)\n\n formatted_exception = friendlyEx(ex)\n\n assert formatted_exception == '{}'.format(ex.reason)",
"def __str__(self):\n return Error.__str__(self)",
"def exception_message(self):\n # type: () -> string_types\n return self._exception_message",
"def get_error_name(self):\n if hasattr(self, \"ui_name\"):\n # If this instance is part of the UI (has attribute \"ui_name\"), use\n # that to form a \"pretty\" error message.\n return self.parent.get_error_name()+\".\"+self.ui_name\n else:\n # If not, revert to the regular way.\n if self.name == \"\":\n return self.parent.get_error_name() + \".\" + self.cls + \"-\" \\\n + str(self.id)\n else:\n return self.parent.get_error_name() + \".\" + self.cls + \"_\" \\\n + self.name",
"def __str__(self) -> str:\n return \"[{}] {}\".format(self.error_string, self.message)",
"def exc_log_str(exception) -> str:\n return \"{}: {!s}\".format(type(exception).__name__, exception)",
"def pretty_exception(err: Exception, message: str = \"\"):\n return f\"{message} ({err.__module__}.{err.__class__.__name__}: {err!s})\"",
"def getReason():",
"def __str__(self, *args, **kwargs):\r\n return '{0.store}[{0.collection}] already has {0.element_id}'.format(\r\n self, Exception.__str__(self, *args, **kwargs)\r\n )",
"def error_msg(self) -> str:\n return self.__error_msg",
"def error_instance_id(self) -> str:\n return self._error_instance_id",
"def exception_alias():\n try:\n #result=1/0\n raise Exception\n except ZeroDivisionError, e:\n print(\"ZeroDivisionError\")\n print(e.message if e.message != \"\" else 'no message')\n except Exception, e:\n print(\"Exception\")\n print(type(e.message)) # <type 'str'>\n print(e.message if e.message != \"\" else 'no message')"
] | [
"0.64060336",
"0.63906485",
"0.62958705",
"0.62437165",
"0.6237594",
"0.62264633",
"0.61808884",
"0.61449856",
"0.6136974",
"0.613228",
"0.6131623",
"0.61072975",
"0.60843146",
"0.60669607",
"0.6061389",
"0.60569376",
"0.6030983",
"0.60280555",
"0.6019237",
"0.6018687",
"0.600181",
"0.5984265",
"0.5969417",
"0.59356827",
"0.5908799",
"0.59042305",
"0.5886118",
"0.585592",
"0.58477926",
"0.58439183"
] | 0.6514721 | 0 |
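
The exception report built by aboutException above combines the exception value, the call description, and a formatted traceback. Here is a simplified stdlib-only sketch of that report, meant to be called from inside an except block; it omits the past-info caching of the original:

    import sys
    import traceback

    def describe_exception(call_text=""):
        exc_type, exc_value, exc_tb = sys.exc_info()
        lines = [f"Exception '{exc_value!r}'"]
        if call_text:
            lines.append(f" doing call '{call_text}':")
        lines.append("-" * 40)
        lines.append("".join(traceback.format_tb(exc_tb)))
        return "\n".join(lines)
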
This method is invoked on the first turn. The first player can put only one stone on the board. This method should return a 2-tuple; the default is (10, 10) | def firstMove(self):
return (10, 10) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def firstMove(board):\r\n x = board.size / 2\r\n return (x, x)",
"def take_turn(self, board, other_player):\n\n # Always pick the middle box on the first round\n position = 4 if self.turn_count == 0 else None\n\n if self.turn_count == 1:\n # On the second turn, after the human player has picked\n # their first spot so we can determine our strategy\n assert other_player.board_value in board.tttboard\n player2_position = board.tttboard.index(other_player.board_value)\n self.strategy = AIPlayer.STRATEGIES[player2_position]\n\n if position is None:\n position = self.look_for_win(board)\n\n if position is None:\n position = self.look_for_win(board, other_player)\n\n if position is None:\n position = self.pick_open_position(board)\n\n self.turn_count += 1\n return position",
"def secondMove(board):\r\n # Get position of first tile\r\n (y1, x1) = board.black[0]\r\n\r\n if y1 <= board.size / 2:\r\n y2 = 1\r\n else:\r\n y2 = -1\r\n\r\n if x1 <= board.size / 2:\r\n x2 = 1\r\n else:\r\n x2 = -1\r\n return (y1 + y2, x1 + x2)",
"def getFirstMove(self):\n while True:\n try:\n move = tuple(int(str.strip()) for str in raw_input('Choose your first move: ').split(','))\n break\n except ValueError:\n print(\"Input is not a integer.\")\n\n while move not in [(1, 1), (self.grid.width/2, self.grid.height/2), \\\n (self.grid.width/2+1, self.grid.height/2+1), (self.grid.width, self.grid.height)]:\n print 'First move is not valid.'\n move = tuple(int(str.strip()) for str in raw_input('Choose your first move: ').split(','))\n return move",
"def prepare_next_turn(grid):\n\tempties = get_empty_cells(grid)\n\ty,x = random.choice(empties)\n\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn any_possible_moves(grid)",
"def player(board):\n #X ALWAYS gets first move, alternates with each additional move\n curr_moves = actions(board)\n if (board == initial_state()):\n return X\n if(len(curr_moves) % 2 == 0):\n return O\n else:\n return X",
"def next_move(ttt):\r\n # get board in 2D array form\r\n b = ttt.get_board()\r\n \r\n # if there's a winning move, take it\r\n (cfw, win_move) = check_for_win_lose(b)\r\n if cfw is not None:\r\n if win_move:\r\n print 'COMPUTER WINS!'\r\n return cfw, win_move\r\n # otherwise, pres on with the next best move\r\n\r\n # get \"points\" on board. this tells us not only the move\r\n # but also who went first\r\n board_count = sum(sum(b,[]))\r\n \r\n # IF COMPUTER HAS FIRST TURN\r\n # if 1st move\r\n if board_count == 0:\r\n return (2,2), False # take the center\r\n # this is not best strategy for winning, but\r\n # it the human messes up, the computer can win.\r\n # taking a corner first makes it a little easier\r\n # for the computer to win becase the human only\r\n # has one correct move to make: to take the center\r\n \r\n # if 3rd move, and not a winning one\r\n if board_count == 3:\r\n if b[0][1]==2 or b[1][0]==2 or b[0][0]==2:\r\n return (3,3), False\r\n elif b[0][2]==2:\r\n return (3,1), False\r\n elif b[2][0]==2:\r\n return (1,3), False\r\n else:#elif b[1][2]==2 or b[2][1]==2 or b[2][2]==2:\r\n return (1,1), False\r\n\r\n # if 5th move, and not a winning or losing one\r\n if board_count == 6:\r\n b5 = numpy.array([[0,2,1],[0,1,0],[2,0,0]])\r\n if (b == b5).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (3,1), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (1,3), False\r\n\r\n b5 = numpy.array([[0,0,1],[0,1,2],[2,0,0]])\r\n if (b == b5).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (1,3), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (3,1), False\r\n\r\n # at this point, all possible boards should have been covered\r\n\r\n # if 7th move, and a winning or losing one\r\n if board_count == 9:\r\n # find the row or col with 2 open slots and mark it\r\n for ri in range(3):\r\n r = b[ri]\r\n if sum([1 if i==0 else 0 for i in r]) == 2:\r\n if r[0] == 0:\r\n return (ri+1,1), False\r\n else:\r\n return (ri+1,2), False\r\n for ci in range(3):\r\n c = get_col(b, ci)\r\n if sum([1 if i==0 else 0 for i in c]) == 2:\r\n if c[0] == 0:\r\n return (1,ci+1), False\r\n else:\r\n return (2,ci+1), False\r\n\r\n \r\n # IF HUMAN HAS FIRST TURN\r\n # if 2nd move\r\n if board_count == 2:\r\n if b[1][1] == 0:\r\n # if the center is open, computer has\r\n # to take it in order to not lose\r\n return (2,2), False\r\n else:\r\n # otherwise take a corner\r\n return (1,1), False\r\n\r\n # if 4th move\r\n if board_count == 5:\r\n # if we took a corner on move 2 and they\r\n # are using computer's offensive strategy\r\n # when it is first player\r\n b4 = [[1,0,0],[0,2,0],[0,0,2]]\r\n if b==b4:\r\n return (3,1), False\r\n # if we took center on move 2\r\n else:\r\n b4 = numpy.array([[2,0,0],[0,1,0],[0,0,2]])\r\n if (b == b4).all() or (b == numpy.rot90(b4,1)).all():\r\n return (1,2), False\r\n\r\n # overall ELSE -- just find a square\r\n for ri in range(3):\r\n for ci in range(3):\r\n if b[ri][ci] == 0:\r\n return (ri+1,ci+1), False",
"def _snap(self):\n return (\n # same symbols/players tokens in the same positions\n tuple(\n (x, tuple(sorted(ts))) for x, ts in self.board.items() if ts\n ),\n # with the same number of throws remaining for each player\n self.throws[\"upper\"],\n self.throws[\"lower\"],\n )",
"def player(board):\n total = 0\n for i in range(len(board)):\n for j in range(len(board)):\n total = total + utility_map[board[i][j]]\n\n # If they cancel out then equal number so X's turn\n if total == 0:\n return X\n else:\n return O",
"def mm_move(board, player):\r\n if board.check_win() == provided.PLAYERX:\r\n return SCORES[provided.PLAYERX],(-1,-1)\r\n elif board.check_win() == provided.PLAYERO:\r\n return SCORES[provided.PLAYERO],(-1,-1)\r\n elif board.check_win() == provided.DRAW:\r\n return SCORES[provided.DRAW],(-1,-1)\r\n else:\r\n empty_tuple_list = board.get_empty_squares()\r\n score_pos_tuple_list = []\r\n best_score = None\r\n best_pos = None\r\n for idx1 in range(len(empty_tuple_list)):\r\n empty_tuple = empty_tuple_list[idx1]\r\n board_clone = board.clone()\r\n board_clone.move(empty_tuple[0],empty_tuple[1],player)\r\n score_pos_tuple = mm_move(board_clone,provided.switch_player(player))\r\n score_pos_tuple_list.append(score_pos_tuple)\r\n\r\n #decide best score and pos fast!!!\r\n if score_pos_tuple[0]*SCORES[player] == 1:\r\n return (score_pos_tuple[0],empty_tuple)\r\n\r\n #decide best score and pos\r\n for idx2 in range(len(score_pos_tuple_list)):\r\n if idx2 == 0:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n else:\r\n if score_pos_tuple_list[idx2][0]*SCORES[player] > best_score*SCORES[player]:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n\r\n return (best_score,best_pos)",
"def winner(board):\n for turn in [X,O]:\n for i in range(3):\n if board[i] == [turn, turn, turn]:\n return turn\n if board[0][i] == turn and board[1][i] == turn and board[2][i] == turn:\n return turn\n if board[0][0] == turn and board[1][1] == turn and board[2][2] == turn:\n return turn\n if board[0][2] == turn and board[1][1] == turn and board[2][0] == turn:\n return turn\n return None",
"def static_player(self, board):\n valid_moves = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n rows, columns = np.where(valid_moves == 1)\n max_value = -10000000\n location = (-1, -1)\n for i in range(len(rows)):\n move_value = 0\n temp_board = np.copy(board)\n temp_board = self.game.flip_opponent_stones((rows[i], columns[i]), temp_board, self.board_size,\n self.computer_num, self.opponent_num)\n stone_rows, stone_columns = np.where(temp_board == self.computer_num)\n for j in range(len(stone_rows)):\n move_value += self.static_weight[stone_rows[j]][stone_columns[j]]\n if move_value > max_value:\n max_value = move_value\n location = (rows[i], columns[i])\n return location",
"def win_tuple(deck_dict):\n # negate turns so that max() behaves; points good, turns bad.\n num_normal_turns = sum(not ( (POSSESSION in t and t[POSSESSION]) or \\\n (OUTPOST in t and t[OUTPOST]))\n for t in deck_dict[TURNS])\n return (deck_dict[POINTS], -num_normal_turns)",
"def result(self, state, action):\n\n # blank is the index of the blank square\n blank = self.find_blank_square(state)\n new_state = list(state)\n\n delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}\n neighbor = blank + delta[action]\n new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank]\n\n return tuple(new_state)",
"def getGameState(self):\n ### Student code goes here\n row1 = [-1, -1, -1]\n row2 = [-1, -1, -1]\n row3 = [-1, -1, -1]\n for i in self.kb.kb_ask(parse_input(\"fact: (pos ?t ?px ?py\")):\n if str(i.bindings_dict['?t'])=='empty':\n t = -1\n else:\n t = int(i.bindings_dict['?t'][4])\n xpx = int(i.bindings_dict['?px'][3])\n xpy = int(i.bindings_dict['?py'][3])\n if xpy == 1:\n row1[xpx-1] = t\n elif xpy == 2:\n row2[xpx-1] = t\n elif xpy == 3:\n row3[xpx-1] = t\n return tuple((tuple(row1),tuple(row2),tuple(row3)))",
"def test_single_game_returns_tuple(self):\n sim = ss.Simulation()\n assert type(sim.single_game()) == tuple, 'single_game should return ' \\\n 'tuple'",
"def won(self):\n for y in range(self.size):\n winning = []\n for x in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n for x in range(self.size):\n winning = []\n for y in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = self.size-1-y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n return None",
"def __get_random_player_position(self) -> Tuple[int, int]:\n no_player_position = True\n while no_player_position:\n for row in range(0, self.__labyrinth.labyrinth_height):\n for col in range(0, self.__labyrinth.labyrinth_width):\n if self.__labyrinth[row][col] == Labyrinth.FLOOR and no_player_position:\n self.__row_position = row\n self.__col_position = col\n\n if len(self.__path_to_end()) > self.__labyrinth.labyrinth_width and \\\n len(self.__path_to_end()) > self.__labyrinth.labyrinth_height:\n self.__labyrinth[row][col] = Labyrinth.START\n no_player_position = False\n\n return self.__row_position, self.__col_position",
"def prepare_next_turn(grid):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return any_possible_moves(grid)",
"def player(board):\n turn = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] != EMPTY:\n turn+=1\n if turn % 2 != 0:\n return O\n else:\n return X",
"def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col",
"def getGameState(self):\n ### Student code goes here\n row1 = ()\n row2 = ()\n row3 = ()\n for currRow in range(1,4):\n for currCol in range(1,4):\n tileFound = False\n for fact in self.kb.facts:\n if fact.statement.predicate == \"located\":\n tile = fact.statement.terms[0].term.element\n column = fact.statement.terms[1].term.element\n row = fact.statement.terms[2].term.element\n\n tileNumber = int(tile[-1])\n columnNumber = int(column[-1])\n rowNumber = int(row[-1])\n\n if rowNumber == currRow and columnNumber == currCol:\n tileFound = True\n if rowNumber == 1:\n row1 += tuple([tileNumber])\n elif rowNumber == 2:\n row2 += tuple([tileNumber])\n elif rowNumber == 3:\n row3 += tuple([tileNumber])\n \n break\n\n if not tileFound:\n if currRow == 1:\n row1 += tuple([-1])\n elif currRow == 2:\n row2 += tuple([-1])\n elif currRow == 3:\n row3 += tuple([-1])\n\n\n return (row1, row2, row3)",
"def get_move(self, board, possible_moves, player_1_or_2):\n\n # Given a Tic-Tac-Toe 3x3 board position where 1 => current player's square,\n # -1 => opponent's square, 0 => blank square,\n # this will return the current player's best move [as the x and y indexes into \n # the board array.]\n # The second input parameter, player_1_or_2, is 1 or -1 to indicate which player's\n # move it is. \n \n print('RL ~ Current player 1 or 2 (= -1):', player_1_or_2)\n \n print('RL ~ Current board: ')\n print(board)\n \n print('RL ~ possible_moves:', possible_moves)\n\n next_move = () \n\n # This will be the best move i.e. the move with the current\n # value of highest winning probability except when it is making exploratory\n # (as opposed to greedy) moves.\n\n next_move = self.board_position_states.get_next_move(board, possible_moves, self.current_player)\n\n next_move_location_tuple = possible_moves[next_move]\n board[next_move_location_tuple] = self.current_player\n\n self.list_board_positions_moved_to.append(board.copy()) # This board that we are\n # appending here could be changed by the next line of code, for example.\n # Hence we need to make a copy\n\n board[next_move_location_tuple] = 0 # undo the move in case it affects the calling method.\n\n return next_move",
"def getSecondMove(self, firstMove):\n while True:\n try:\n move = tuple(int(str.strip()) for str in raw_input('Choose your second move: ').split(','))\n break\n except ValueError:\n print(\"Input is not a integer.\")\n\n while len(move) != 2 or abs(move[0]-firstMove[0]) + abs(move[1]-firstMove[1]) != 1:\n print 'Second move is not valid.'\n move = tuple(int(str.strip()) for str in raw_input('Choose your second move: ').split(','))\n return move",
"def player(board):\n if board == initial_state():\n return X\n\n total_x = 0\n total_o = 0\n\n for i in board:\n total_x += i.count(X)\n total_o += i.count(O)\n\n if (total_x + total_o) % 2 == 1:\n return O\n else:\n return X",
"def get_move(self, board):\n while True:\n col = random.randint(0, board.width)\n row = board.try_move(col)\n\n if row >= 0:\n break\n\n return row, col",
"def player(board):\n\n # Game is over\n if terminal(board):\n return None\n\n # Count number of occurences of X and O\n x_count = 0\n o_count = 0\n for row in board:\n for box in row:\n if box == X:\n x_count = x_count + 1\n elif box == O:\n o_count = o_count + 1\n # When move count is tied, X is next\n if x_count <= o_count:\n return X\n # When X has moved once more than O, next move is O\n else:\n return O",
"def getMove(self):\n while True:\n try:\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(init) != 2) or (init[0] not in range(1, self.grid.width+1)) or (init[1] not in range(1, self.grid.height+1)):\n print 'Initial position is not valid.'\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n\n while True:\n try:\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(dest) != 2) or (dest[0] not in range(1, self.grid.width+1)) or (dest[1] not in range(1, self.grid.height+1)):\n print 'Destination position is not valid.'\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n\n return (init, dest)",
"def get_board(self) -> Tuple[Tuple[chr]]:\n # If we return the list, then the caller could modify the board,\n # so we want to convert it to a tuple so it's immutable\n return tuple(tuple(row) for row in self._board)",
"def randomMove(board):\r\n go = True\r\n while go:\r\n y = random.randint(0, board.size - 1)\r\n x = random.randint(0, board.size - 1)\r\n go = not board.validMove((y, x))\r\n return (y, x)"
] | [
"0.75705034",
"0.65747726",
"0.65377414",
"0.6350882",
"0.6316932",
"0.6271466",
"0.61944956",
"0.61604035",
"0.61389923",
"0.6138155",
"0.61295015",
"0.61253476",
"0.6120447",
"0.6111359",
"0.61091137",
"0.610629",
"0.61005545",
"0.6098801",
"0.60883456",
"0.60808027",
"0.60804933",
"0.6063353",
"0.60568225",
"0.60404664",
"0.6034011",
"0.60281944",
"0.6013078",
"0.60124385",
"0.60041803",
"0.59972304"
] | 0.75002813 | 1 |
Initialize neural net and check output layer shape. | def test_init() -> None:
neural_net = NeuralNetwork()
    assert neural_net.model.get_layer('output_layer').output_shape == (None, 4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_init_net_simple(self):\n net = ecn.NeuralNet(2, (2,), 1)\n self.assertEqual(2, len(net.weights.keys()))\n self.assertEqual((2, 3), np.shape(net.weights['h0']))\n self.assertEqual((1, 3), np.shape(net.weights['y']))\n print('Finished testing simple neural net init\\n')",
"def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network",
"def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd",
"def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer",
"def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.input_layer = FullyConnectedLayer(n_input, hidden_layer_size)\n self.relu = ReLULayer()\n self.output_layer = FullyConnectedLayer(hidden_layer_size, n_output)\n self.W_in = None\n self.W_out = None\n self.B_in = None\n self.B_out = None\n # TODO Create necessary layers",
"def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]",
"def __init__(self, inputLayerSize, outputLayerSize, \\\n hiddenLayerSize):\n #Network hyperparameters - neurons per layer - **not altered by training**\n self.inputLayerSize = inputLayerSize\n self.outputLayerSize = outputLayerSize\n self.hiddenLayerSize = hiddenLayerSize\n self.num_params = inputLayerSize * hiddenLayerSize + \\\n hiddenLayerSize * outputLayerSize + hiddenLayerSize \\\n + outputLayerSize\n #--Weights--\n #w_ih - weights of synapses linking input -> hidden\n self.w_ih = np.random.randn( self.inputLayerSize, \\\n self.hiddenLayerSize)\n #w_ho - weights of synapses linking hidden -> output\n self.w_ho = np.random.randn( self.hiddenLayerSize, \\\n self.outputLayerSize)\n \n #--Biases--\n #b_h - biases of hidden layer\n self.b_h = np.random.randn( self.hiddenLayerSize )\n #b_o - biases of output layer\n self.b_o = np.random.randn( self.outputLayerSize )",
"def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return",
"def __init__(self, attribute_size, output_size, n_hidden_layers=2, n_hidden_neurons=30):\n self.n_hidden_layers = n_hidden_layers\n self.n_hidden_neurons = n_hidden_neurons\n self.attribute_size = attribute_size\n self.output_size = output_size\n\n X = T.fmatrix()\n Y = T.fmatrix()\n\n self.w_h = nnet.init_weights((self.attribute_size, self.n_hidden_neurons))\n self.w_h2 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n self.w_o = nnet.init_weights((self.n_hidden_neurons, self.output_size))\n\n if self.n_hidden_layers == 2:\n\n noise_py_x = nnet.model_reg(X, self.w_h, self.w_h2, self.w_o, 0, 0)\n py_x = nnet.model_reg(X, self.w_h, self.w_h2, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)\n\n elif self.n_hidden_layers == 3:\n\n self.w_h3 = nnet.init_weights((self.n_hidden_neurons, self.n_hidden_neurons))\n\n noise_py_x = nnet.model_reg3(X, self.w_h, self.w_h2, self.w_h3, self.w_o, 0, 0)\n py_x = nnet.model_reg3(X, self.w_h, self.w_h2, self.w_h3, self.w_o, 0, 0)\n\n cost = nnet.rmse(noise_py_x, Y)\n params = [self.w_h, self.w_h2, self.w_h3, self.w_o]\n updates = nnet.RMSprop(cost, params, lr=0.001)\n\n self.train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\n self.predict_ = theano.function(inputs=[X], outputs=py_x, allow_input_downcast=True)",
"def __init__(self, net, batch):\n self.net = net\n self.train_batch_is(batch)\n self.image_height = len(batch.image_array[0][0])\n self.image_width = len(batch.image_array[0][0][0])\n self.net.reset_forward()",
"def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.fulllayer1 = FullyConnectedLayer(n_input, hidden_layer_size)\n self.reglayer1 = ReLULayer()\n self.fulllayer2 = FullyConnectedLayer(hidden_layer_size, n_output)",
"def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)",
"def test_init_net_complex(self):\n net = ecn.NeuralNet(10, (5,3), 2)\n self.assertEqual(3, len(net.weights.keys()))\n self.assertEqual((5, 11), np.shape(net.weights['h0']))\n self.assertEqual((3, 6), np.shape(net.weights['h1']))\n self.assertEqual((2, 4), np.shape(net.weights['y']))\n print('Finished testing complex neural net init\\n')",
"def __init__(self):\r\n # A dummy layer does nothing\r\n self.weights = np.zeros(shape=(input.shape[1], 10))\r\n bias = np.zeros(shape=(10,))\r\n pass",
"def init_learner(self,**kwargs):\r\n \r\n if self.learn_type == 'nn':\r\n #initialize neural network\r\n shape = kwargs[\"shape\"]\r\n #initialize input layer\r\n model = Sequential() \r\n #add hidden layers\r\n for i in range(len(shape)):\r\n if i == 0:\r\n nb_input = self.size\r\n else:\r\n nb_input = shape[i -1]\r\n nb_output = shape[i]\r\n model.add(Dense(nb_input,nb_output,init=\"he_normal\",\r\n activation = \"tanh\"))\r\n model.add(Dropout(.5))\r\n model.add(Dense(shape[-1],1,init = \"he_normal\",\r\n activation = \"linear\"))\r\n model.compile(loss = 'mean_squared_error',optimizer = 'rmsprop')\r\n self.learner = model\r\n \r\n elif self.learn_type == 'linear':\r\n #initialize parameter\r\n self.learner = Linear(self.size,**kwargs)",
"def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. / n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)",
"def buildNet(inputShape, numUniqueClasses):\n layers = InputLayer((None,) + inputShape[1:4])\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1), stride= (5,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 1, \n filter_size = (3,1), stride= (3,1))\n layers = NonlinearityLayer(layers, nonlinearity = nonlinearity)\n layers = DropoutLayer(layers,p=.3) \n layers = batch_norm(NNHelpers.LocallyConnected2DLayer(layers,1,(5,1),\n W=He('relu'),\n nonlinearity=nonlinearity)) \n layers = DenseLayer(layers,num_units=numUniqueClasses,\n nonlinearity=linear) \n layers = NonlinearityLayer(layers, nonlinearity=softmax) \n return layers",
"def __init__(\n self,\n hidden_layer_neurons: list,\n output_layer_neurons: list\n ):\n # Check that the dimensions of weights of the hidden and output\n # layers match up in the correct way.\n\n for neuron in output_layer_neurons:\n last_hidden_layer_neurons = hidden_layer_neurons[-1]\n if len(last_hidden_layer_neurons) != neuron.get_first_weights_dimension():\n print(len(last_hidden_layer_neurons), neuron.get_first_weights_dimension())\n raise ValueError(\n \"The final hidden layer has {0} neurons but the output layer neurons require {1} inputs. These \"\n \"values should be equal.\".format(\n len(last_hidden_layer_neurons),\n neuron.get_first_weights_dimension()\n )\n )\n\n self._hidden_layer_neurons = hidden_layer_neurons\n self._output_layer_neurons = output_layer_neurons",
"def __init__(self, input_dim: int, output_dim: int):\n\n super().__init__()\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n self.batchNorm1 = layers.BatchNormalization()\n self.dense1 = layers.Dense(\n 64, input_shape=(input_dim+output_dim,),\n kernel_initializer=random_uniform(-np.sqrt(1/input_dim), np.sqrt(1/input_dim))\n )\n self.relu1 = layers.Activation('relu')\n self.dense2 = layers.Dense(32, kernel_initializer=random_uniform(-np.sqrt(1/64), np.sqrt(1/64)))\n self.relu2 = layers.Activation('relu')\n self.dense3 = layers.Dense(output_dim, kernel_initializer=random_uniform(-np.sqrt(1/32), np.sqrt(1/32)))",
"def __init__(self, inputSize, hiddenSize, outputSize, epochs = 100, debug = False):\n self.inputSize = inputSize\n self.hiddenSize = hiddenSize\n self.outputSize = outputSize\n self.epochs = epochs\n self.debug = debug\n\n #weights\n self.W1 = np.random.randn(self.inputSize, self.hiddenSize) \n self.W2 = np.random.randn(self.hiddenSize, self.outputSize)",
"def __init__(self, num_inputs=3, hidden_layers=[3, 3], num_outputs=2):\n\n self.num_inputs = num_inputs\n self.hidden_layers = hidden_layers\n self.num_outputs = num_outputs\n\n # create a generic representation of the layers\n layers = [num_inputs] + hidden_layers + [num_outputs]\n\n # create random connection weights for the layers\n weights = []\n for i in range(len(layers) - 1):\n w = np.random.rand(layers[i], layers[i + 1])\n weights.append(w)\n self.weights = weights\n\n activations = []\n\n for i in range(len(layers)):\n a = np.zeros(layers[i])\n activations.append(a)\n self.activations = activations\n\n derivatives = []\n\n for i in range(len(layers) - 1):\n d = np.zeros(layers[i])\n derivatives.append(d)\n self.derivatives = derivatives",
"def __init__(self, layer_neuron):\n\n self.num_layers = len(layer_neuron)\n self.layer_neuron = layer_neuron\n #a list of numpy ndarrays\n self.weights = []\n self.input_len = 0\n self.target_vals = []\n self.current_guess = 0\n\n self.layer_inputs = [[]]*(len(layer_neuron))\n self.layer_outputs = [[]]*(len(layer_neuron))\n #deltas: don't include input layer but still put it in for spacing ie self.deltas[0] should always be empty\n self.deltas = [[]]*((len(layer_neuron)))\n\n #make the weight matrices, each matrix nXm matrix with m = # nodes in the ith layer (incl bias) and n = # nodes in the (i+1)th layer\n #each row represents the set of weights from all m neurons in the ith layer to a single neuron in the (i+1)th layer\n #conversely, each column is all the output weights from a node in the ith layer, to each n nodes in the (i+1)th layer\n #the right-most column represents output weights from the bias node\n for i in range(len(self.layer_neuron)-1 ):\n np.random.seed(0)\n self.weights.append(np.random.normal( scale = 0.2, size = (self.layer_neuron[i+1], self.layer_neuron[i] + 1)))",
"def lenet_network(name, input_shape, output_count):\n network = NeuralNetwork(name, input_shape, output_count)\n\n # normal distribution parameters for random weights\n mean = 0.0\n stddev = 0.1\n\n # General convolution shapes and parameters common to all convolutional layers\n conv_stride_shape = (1, 1)\n conv_pad_shape = (0, 0)\n conv_pad_type = 'VALID'\n\n pool_stride_shape = (2, 2)\n pool_shape = (2, 2)\n pool_pad_type = 'VALID'\n\n activation = 'relu'\n\n # Kernel depths and sizes for each convolution layer\n depths = [6, 16]\n kernel_shapes = [(5, 5, depths[0]), (5, 5, depths[1])]\n conv_layer_count = len(depths)\n\n # Create convolutional layers\n conv = None\n for i in range(conv_layer_count):\n name = 'l{:d}'.format(i)\n if i > 0:\n input_shape = conv.output_shape\n conv = ConvolutionalLayer(name, input_shape, kernel_shapes[i], conv_stride_shape, \\\n conv_pad_shape, conv_pad_type, activation)\n conv.add_pooling('max', pool_shape, pool_stride_shape, pool_pad_type)\n network.add_layer(conv, mean, stddev)\n\n # Linear layer dimensions\n linear_input_sizes = [400, 120, 84]\n linear_output_sizes = [120, 84, 10]\n linear_activations = ['relu', 'relu', None]\n\n # Create linear layers\n for i, input_size in enumerate(linear_input_sizes):\n layer_index = i + conv_layer_count\n name = 'l{:d}'.format(layer_index)\n linear = LinearLayer(name, input_size, linear_output_sizes[i], linear_activations[i])\n linear.init_weights_and_biases(mean, stddev)\n network.add_layer(linear, mean, stddev)\n\n network.define_network()\n\n learning_rate = 0.001\n network.define_operations(learning_rate, 'adam')\n\n return network",
"def __init__(self, numpy_rng, input, n_in, hidden_layers_sizes, n_out):\n # instance variables\n self.numpy_rng = numpy_rng\n self.input = input\n self.n_in = n_in\n self.hidden_layers_sizes = hidden_layers_sizes\n self.n_layers = len(hidden_layers_sizes)\n self.n_out = n_out\n\n self.hidden_layers = []\n self.params = []\n\n self.initialize_variables()\n\n\n ################\n ## Prediction ##\n ################\n self.y_pred = self.logistic_regression_layer.y_pred",
"def initialize(self, input_size, n_classes):\n\n self.n_classes = n_classes\n self.input_size = input_size\n\n n_hidden_layers = len(self.sizes)\n #############################################################################\n # Allocate space for the hidden and output layers, as well as the gradients #\n #############################################################################\n self.hs = []\n self.grad_hs = []\n for h in range(n_hidden_layers):\n self.hs += [np.zeros((self.sizes[h],))] # hidden layer\n self.grad_hs += [np.zeros((self.sizes[h],))] # ... and gradient\n self.hs += [np.zeros((self.n_classes,))] # output layer\n self.grad_hs += [np.zeros((self.n_classes,))] # ... and gradient\n\n ##################################################################\n # Allocate space for the neural network parameters and gradients #\n ##################################################################\n self.weights = [np.zeros((self.input_size, self.sizes[0]))] # input.csv to 1st hidden layer weights\n self.grad_weights = [np.zeros((self.input_size, self.sizes[0]))] # ... and gradient\n\n self.biases = [np.zeros((self.sizes[0]))] # 1st hidden layer biases\n self.grad_biases = [np.zeros((self.sizes[0]))] # ... and gradient\n\n for h in range(1, n_hidden_layers):\n self.weights += [np.zeros((self.sizes[h - 1], self.sizes[h]))] # h-1 to h hidden layer weights\n self.grad_weights += [np.zeros((self.sizes[h - 1], self.sizes[h]))] # ... and gradient\n\n self.biases += [np.zeros((self.sizes[h]))] # hth hidden layer biases\n self.grad_biases += [np.zeros((self.sizes[h]))] # ... and gradient\n\n self.weights += [np.zeros((self.sizes[-1], self.n_classes))] # last hidden to output layer weights\n self.grad_weights += [np.zeros((self.sizes[-1], self.n_classes))] # ... and gradient\n\n self.biases += [np.zeros((self.n_classes))] # output layer biases\n self.grad_biases += [np.zeros((self.n_classes))] # ... and gradient\n\n #########################\n # Initialize parameters #\n #########################\n\n self.rng = np.random.mtrand.RandomState(self.seed) # create random number generator\n # biases are initialized to zero\n # ... and weights according to the slides\n for m in range(len(self.weights)):\n b = (6 ** 0.5) / ((self.weights[m].shape[0] + self.weights[m].shape[1]) ** 0.5)\n for ind, val in np.ndenumerate(self.weights[m]):\n self.weights[m][ind] = self.rng.uniform(-b, b, 1)\n\n\n self.n_updates = 0 # To keep track of the number of updates, to decrease the learning rate",
"def __init__(self, input_size, hidden_sizes, output_size=1,\n batchnorm_bool=False,\n dropout_bool=False):\n super(NeuralNet, self).__init__()\n self.input_size = input_size\n sizes = [input_size] + hidden_sizes + [output_size]\n self.layers = nn.ModuleList(\n [nn.Linear(in_f, out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.bns = nn.ModuleList(\n [nn.BatchNorm1d(out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.dps = nn.ModuleList(\n [nn.Dropout(p=0.5) for _ in range(len(self.layers))])\n self.relus = nn.ModuleList(\n [nn.ReLU() for _ in range(len(self.layers))])\n self.sigmoid = nn.Sigmoid()\n\n self.batchnorm_bool = batchnorm_bool\n self.dropout_bool = dropout_bool",
"def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]",
"def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())",
"def __init__(self, nInputs, nOutputs, hiddenLayersDims, outputActivationFunctions = None, outputActivationDerivatives = None, hiddenActivationFunctions = None,\\\n\t\t\t\t hiddenActivationDerivatives = None): \n\n\t\tself._nInputs = nInputs\n\t\tself._nOutputs = nOutputs\n\n\t\tself._nHiddenLayers, self._nUnitsPerLayer = hiddenLayersDims\n\n\t\tself._outputActivationFunctions = outputActivationFunctions\n\t\tself._outputActivationDerivatives = outputActivationDerivatives\n\n\t\tself._hiddenActivationFunctions = hiddenActivationFunctions\n\t\tself._hiddenActivationDerivatives = hiddenActivationDerivatives\n\n\t\tself.initialiseActivationFunctions()\n\n\t\tself.initialiseNetwork()\n\n\t\tself._nBranches = len(self.collectAllBranches())",
"def __init__(self, input_shape, n_out, ini_type=\"plain\"):\n\n self.m = input_shape[1] # number of examples in training data\n # `params` store weights and bias in a python dictionary\n self.params = self.initialize_parameters(input_shape[0], n_out, ini_type) # initialize weights and bias\n self.Z = np.zeros((self.params['W'].shape[0], input_shape[1])) # create space for resultant Z output"
] | [
"0.7603709",
"0.7023699",
"0.6976295",
"0.69317013",
"0.69186133",
"0.6909801",
"0.6901226",
"0.6877522",
"0.6854779",
"0.6819741",
"0.6771726",
"0.67149144",
"0.66965085",
"0.66904634",
"0.6672968",
"0.66600806",
"0.6655046",
"0.6619668",
"0.66193616",
"0.6618829",
"0.65973353",
"0.65812653",
"0.657847",
"0.65701324",
"0.65648687",
"0.65524364",
"0.6528101",
"0.6527364",
"0.6527244",
"0.6526204"
] | 0.8320543 | 0 |
update the list of d_bison_obs | def update_d_bison_obs(bison_i):
for i in range(obs_n):
d_bison_obs[i][bison_i] = calculate_d(bison_x[bison_i], bison_y[bison_i], obstacles[i][0], obstacles[i][1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, bsd):\n raise NotImplementedError()",
"def update_bison_obs(x_tan, y_tan, i, k):\n d_tan_bison = calculate_d(elude_bison[k][i][0], elude_bison[k][i][1], x_tan, y_tan)\n turn_v = math.sqrt(math.fabs(bison_v ** 2 - 10 * bison_v * (1 - math.cos(bison_angle[i]) ** 2) / math.cos(bison_angle[i])))\n bison_x[i] = bison_x[i] + ((x_tan - elude_bison[k][i][0]) / d_tan_bison) * delta_t * turn_v\n bison_y[i] = bison_y[i] + ((y_tan - elude_bison[k][i][1]) / d_tan_bison) * delta_t * turn_v\n d[i] = calculate_d(bison_x[i], bison_y[i], wolf_x[0], wolf_y[0]) # 更新狼和羊群的距离\n update_d_bison_obs(i)\n if d_bison_obs[k][i] >= 2.5 * obstacles[k][2]:\n elude_flag[k][i] = True\n bison_flag[i] = False",
"def update(self, obs, actions, rewards, new_obs):\n pass",
"def updateList(self):\n for state in list_:\n state.update(True)",
"def update(self, obs):\n #######################################\n # Step 1 - prediction for birth targets\n born = [deepcopy(comp) for comp in self.birthgmm]\n # The original paper would do a spawning iteration as part of Step 1.\n spawned = [] # not implemented\n\n #######################################\n # Step 2 - prediction for existing targets\n updated = [GmphdComponent(self.survival * comp.weight, dot(self.f, comp.loc),\n self.q + dot(dot(self.f, comp.cov), self.f.T), comp.id)\n for comp in self.gmm]\n\n predicted = born + spawned + updated\n\n #######################################\n # Step 3 - construction of PHD update components\n # These two are the mean and covariance of the expected observation\n nu = [dot(self.h, comp.loc) for comp in predicted]\n s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]\n # Not sure about any physical interpretation of these two...\n k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))\n for index, comp in enumerate(predicted)]\n pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)\n for index, comp in enumerate(predicted)]\n\n #######################################\n # Step 4 - update using observations\n # The 'predicted' components are kept, with a decay\n newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov, comp.id)\n for comp in predicted]\n\n # then more components are added caused by each obsn's interaction with existing component\n for anobs in obs:\n anobs = array(anobs)\n newgmmpartial = []\n for j, comp in enumerate(predicted):\n newgmmpartial.append(GmphdComponent(\n self.detection * comp.weight * dmvnorm(nu[j], s[j], anobs),\n comp.loc + dot(k[j], anobs - nu[j]), pkk[j]))\n\n # The Kappa thing (clutter and reweight)\n weightsum = simplesum(newcomp.weight for newcomp in newgmmpartial)\n reweighter = 1.0 / (self.clutter + weightsum)\n for newcomp in newgmmpartial:\n newcomp.weight *= reweighter\n\n newgmm.extend(newgmmpartial)\n\n self.gmm = newgmm",
"def update(self, obs, q):\n for o in obs:\n prob = np.exp(-70)\n if self.landmarks:\n # find the data association with ML\n prob, landmark_idx, ass_obs, ass_jacobian, ass_adjcov = self.find_data_association(o)\n if prob < self.TOL:\n # create new landmark\n self.create_landmark(o)\n else:\n # update corresponding EKF\n self.update_landmark(np.transpose(np.array([o])), landmark_idx, ass_obs, ass_jacobian, ass_adjcov)\n else:\n # no initial landmarks\n self.create_landmark(o)\n self.weight *= prob\n \n q.put([self]) ###",
"def updateObservation(self, obs):\n self.settingsDb.updateObservation(self.observationsTableName(), obs)",
"def _update_elements(self, dps=4):\n\n pass",
"def _update_elements(self, dps=4):\n\n pass",
"def update_observation(db_observ, norm_observ):\n try:\n db_observ.serie = norm_observ.serie\n db_observ.period_begin = norm_observ.period_begin\n db_observ.period_end = norm_observ.period_end\n db_observ.pattern = norm_observ.pattern\n db_observ.sector = norm_observ.sector\n db_observ.indexx = norm_observ.indexx\n db_observ.observation = norm_observ.observation\n db_observ.perc_change = round(norm_observ.perc_change, 2) if norm_observ.perc_change is not None else None\n db_observ.abs_change = norm_observ.abs_change\n db_observ.relevance = norm_observ.relevance1\n db_observ.meta_data = norm_observ.meta_data\n # update in the db\n db_observ.save()\n except Exception as e:\n print(f\"{e}\\n{norm_observ.observation}\\n\")",
"def md_update_wrapper(bbo):\n\n changed = md.update(bbo)\n if changed:\n updated_symbols.add(bbo.symbol)",
"def update_mp(self, obs, pool):\n #######################################\n # Step 1 - prediction for birth targets\n born = [deepcopy(comp) for comp in self.birthgmm]\n # The original paper would do a spawning iteration as part of Step 1.\n spawned = [] # not implemented\n\n #######################################\n # Step 2 - prediction for existing targets\n updated = [GmphdComponent(self.survival * comp.weight, dot(self.f, comp.loc),\n self.q + dot(dot(self.f, comp.cov), self.f.T), comp.id)\n for comp in self.gmm]\n\n predicted = born + spawned + updated\n\n #######################################\n # Step 3 - construction of PHD update components\n # These two are the mean and covariance of the expected observation\n nu = [dot(self.h, comp.loc) for comp in predicted]\n s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]\n # Not sure about any physical interpretation of these two...\n k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))\n for index, comp in enumerate(predicted)]\n pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)\n for index, comp in enumerate(predicted)]\n\n #######################################\n # Step 4 - update using observations\n # The 'predicted' components are kept, with a decay\n newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov, comp.id)\n for comp in predicted]\n\n # then more components are added caused by each obsn's interaction with existing component\n result = pool.map_async(partial(self.update_obs_mp, predicted=predicted, nu=nu, s=s, pkk=pkk, k=k), obs)\n result = result.get()\n for newgmmpartial in result:\n newgmm.extend(newgmmpartial)\n\n self.gmm = newgmm",
"def md_update_wrapper(bbo):\n changed = md.update(bbo)\n if changed:\n updated_symbols.add(bbo.symbol)",
"def update():",
"def update():",
"def set_obs(self, num_obs):\n curr_obs = self._nobs\n if num_obs < curr_obs:\n raise ValueError(\"num_obs must be >= \" + str(curr_obs))\n if num_obs == curr_obs:\n return\n isstrvar = self._isstrvar\n empty_row = ['' if isstrvar(i) else MISSING for i in range(self._nvar)]\n self._varvals += [copy.copy(empty_row) \n for _ in range(num_obs - curr_obs)]\n self._nobs = num_obs\n self._changed = True\n # Need to clear srtlist. If there are string variables, there \n # might now be empty strings after non-empty string. If there \n # are numerical variables with extended missing, there will now \n # be \".\" missing after extended missing. Issue pointed out at\n # http://www.stata.com/statalist/archive/2013-08/msg00576.html\n self._srtlist = [None]*self._nvar",
"def update(self):\n #self.model.states[Polymerase].molecules\n\n DNA_obj = self.model.states[DNA].get_molecules(\"DNA\")[0]\n\n for i in range(1): #500\n DNA_obj.bind_polymerase()\n \n for i in range(50): #50\n DNA_obj.move_polymerase()\n #print(DNA_obj.poly_transcript)\n \n\n\n\n #print(self.test.poly_status)\n #print(DNA_obj.poly_pos)",
"def update_loan_status(updated_number):\n del book_select.selected_book[-1]\n book_select.selected_book.append(str(updated_number))\n updated=':'.join(book_select.selected_book)\n del database_book_list[list_pos]\n database_book_list.insert(list_pos,updated)\n for index in range(0,max_book_id):\n strip_list=database_book_list[index]\n stripped_str=strip_list.strip()\n del database_book_list[index]\n database_book_list.insert(index,stripped_str)\n with open(\"database.txt\",\"r+\") as data:\n data.seek(0)\n data.truncate(0)\n for index in range(0,max_book_id):\n data.write(str(database_book_list[index]+'\\n'))\n data.close()",
"def update_obstacles(self, new_obs):\n self.obstacles = new_obs",
"def dv_update(self, dv_list):\n for line in dv_list:\n line_sep = line.split(',')\n other_name = line_sep[0] + ':' + line_sep[1]\n other_cost = float(line_sep[2])\n self.distance_vector[other_name] = other_cost",
"def test_list_inplace_update(self):\r\n vm = List.value_manager(None, None, [1,2,3])\r\n assert not vm.changed\r\n vm.value.append(4)\r\n assert vm.changed",
"def updateParList(self, parList):\n counter = 0\n for modelName in self._modelList:\n model = self.__modelDict[modelName]\n modelParDict = model.parFitDict\n for parName in modelParDict.keys():\n if modelParDict[parName][\"vary\"]:\n modelParDict[parName][\"value\"] = parList[counter]\n counter += 1\n else:\n pass",
"def update( ):\r\n pass",
"def note_update(self, upd_note_handle_list):\n for handle in upd_note_handle_list :\n if handle in self.data:\n self.rebuild()\n break",
"def update_lists():\n global donor_totals_list\n global donor_donations_list\n global donor_names_list\n global last_donation_list\n donor_totals_list = get_all_donor_totals()\n donor_donations_list = get_list_of_donations()\n donor_names_list = get_list_of_donors()\n last_donation_list = get_max_donation_date_list()",
"def update(self, delta_time):\n for b in self.star_list:\n b.update()",
"def updateBench(self, *benches):\n for bench in benches:\n self.benches[bench.name] = bench",
"def update(self, dt):\n for obj in self.objects:\n obj.update(dt)",
"def update_patients(self, list):\n\n self.llista.delete(0, tk.END)\n for i in range(len(list)):\n self.llista.insert(tk.END, list[i])\n self.llista.bind('<Double-1>', self.select_patient)",
"def updateLastObs(self):\n result = self.robot['SCRIPTOBS_STATUS'].read()\n with open('/u/rjhanson/master/lastObs.txt','w') as f:\n f.write(\"%s\\n\" % self.ucam('OBSNUM').read())\n apflog(\"Recording last ObsNum as %d\" % int(self.ucam[\"OBSNUM\"].read()))\n if result == 'Exited/Failure':\n # Last observation failed, so no need to update files\n return\n elif result == 'Exited/Success': \n try:\n f = open(\"/u/rjhanson/master/apf_sched.txt\",'r')\n except IOError:\n pass\n else:\n for line in f:\n if line.strip() != '':\n with open('/u/rjhanson/master/hit_list','a') as o:\n o.write(line + '\\n')\n f.close()"
] | [
"0.6155102",
"0.5999859",
"0.59011286",
"0.5888638",
"0.58285916",
"0.5765711",
"0.5587305",
"0.5576261",
"0.5576261",
"0.5572444",
"0.55177706",
"0.54821646",
"0.54729325",
"0.54384035",
"0.54384035",
"0.54246837",
"0.54121697",
"0.54096115",
"0.53964806",
"0.53757375",
"0.5358313",
"0.5357176",
"0.53428155",
"0.53377706",
"0.5327778",
"0.5312623",
"0.53089017",
"0.5295817",
"0.5294335",
"0.5272586"
] | 0.69077736 | 0 |
if the distance between bison_i and obs_i is less than 2.5, it should elude this obstacle | def elude_obstacles(bison_i, k):
# Ax+By+C=0
global predict_wolf
r = obstacles[k][2]
xk = obstacles[k][0]
yk = obstacles[k][1]
a = (bison_y[bison_i] - wolf_y[0]) / (bison_x[bison_i] - wolf_x[0])
b = -1
c = bison_y[bison_i] - (a * bison_x[bison_i])
d_obs_dir = math.fabs(a * xk + b * yk + c) / math.sqrt(a ** 2 + b ** 2)
    if d_obs_dir < r:  # the bison's direction of motion would hit the obstacle
bison_flag[bison_i] = True
a2 = -1 / (xk - bison_x[bison_i])
b2 = yk - bison_y[bison_i]
n1 = (-a2 ** 2 * b2 * r ** 2 + r * math.sqrt(math.fabs(a2 ** 2 * b2 ** 2 + 1 - a2 ** 2 * r ** 2))) / (
a2 ** 2 * b2 ** 2 + 1)
n2 = (-a2 ** 2 * b2 * r ** 2 - r * math.sqrt(math.fabs(a2 ** 2 * b2 ** 2 + 1 - a2 ** 2 * r ** 2))) / (
a2 ** 2 * b2 ** 2 + 1)
m1 = a2 * r ** 2 + a2 * b2 * n1
m2 = a2 * r ** 2 + a2 * b2 * n2
x1 = m1 + xk
y1 = n1 + yk
x2 = m2 + xk
y2 = n2 + yk
bison_motion_list = compare_angle(x1, y1, x2, y2, bison_i)
x = bison_motion_list[0]
y = bison_motion_list[1]
bison_angle[bison_i] = bison_motion_list[2]
elude_tan[k][bison_i] = (x, y)
elude_bison[k][bison_i] = (bison_x[bison_i], bison_y[bison_i])
        update_bison_obs(x, y, bison_i, k)  # update the distance between the bison and the obstacle
target = find_min_distance()
if bison_i == target:
predict_wolf[0] = (wolf_x[0], wolf_y[0])
update_wolf_predict(x, y, bison_i, k)
elude_flag[k][bison_i] = False
else:
bison_flag[bison_i] = False
elude_flag[k][bison_i] = True
update_bison_normal(bison_i) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_terminal(self,bump,DLightBump, AnalogBump, IR):\r\n terminal = False\r\n # signal returned from distance to obstacle /terminal 50 mm,5cm\r\n # by measurement, small obstacle (height = height of light bumper) in 2cm: signal 120 ~300\r\n # within 1cm >400\r\n # if big obstacle: (like a wall) at 2cm: 1300~1600\r\n # d_obs = 140\r\n d_obs = 500.0\r\n threshold = d_obs/self.max_strength\r\n obstacles = []\r\n Infra_Omi, Infra_L, Infra_R =IR\r\n\r\n L, FL, CL, CR, FR, R = AnalogBump\r\n prob_obs =np.array([L, FL, CL, CR, FR, R]).astype(float)\r\n strength = prob_obs/self.max_strength # maximum signal strength light bumper can receive\r\n for i in range(len(strength)):\r\n strength[i] = 1 if strength[i] >=threshold else 0\r\n\r\n cnt = strength.sum()\r\n if Infra_Omi!=0 or Infra_L!=0 or Infra_R!= 0:\r\n terminal =True\r\n x = int(self.Motion.x +d_obs)\r\n y = int(self.Motion.y)\r\n s = (x, y)\r\n obstacles.append(s)\r\n\r\n if bump != 0 or cnt >=1:\r\n terminal=True\r\n # stop immediately\r\n self.Roomba.Move(0,0)\r\n #-------------determine position of obstacles-------------\r\n l_bump = 1 if bump&2 !=0 else 0\r\n r_bump = 1 if bump& 1 !=0 else 0\r\n # Assume Left , right bumpers are at -45 degree, 45 degree\r\n # Then find the average degree of object:0, -45, 45 degree\r\n b_avg_angle = 45*(r_bump -l_bump)\r\n prob_obs /= (prob_obs.sum()+1.0)\r\n # average angles of obstacle detected by light bumper\r\n # [-90, -60,-30,30,60,90] are heading angles of 6 analog light bumper\r\n lb_avg_agl = np.dot(prob_obs,[-90, -60,-30,30,60,90])\r\n\r\n # if there are 2 obstacles\r\n if np.abs(lb_avg_agl - b_avg_angle)>=60 or (np.sign(lb_avg_agl) !=np.sign(b_avg_angle)):\r\n th = self.Motion.theta + lb_avg_agl\r\n x = self.Motion.x + d_obs * math.cos(th)\r\n y = self.Motion.y + d_obs * math.sin(th)\r\n x = int(x)\r\n y = int(y)\r\n s= (x,y)\r\n if obstacles.count(s) == 0:\r\n obstacles.append(s)\r\n\r\n th = self.Motion.theta + b_avg_angle\r\n x = self.Motion.x + d_obs * math.cos(th)\r\n y = self.Motion.y + d_obs * math.sin(th)\r\n x = int(x)\r\n y = int(y)\r\n s = (x,y)\r\n if obstacles.count(s) ==0:\r\n obstacles.append(s)\r\n\r\n else:\r\n # if there is 1 obstacle\r\n alg = (b_avg_angle+lb_avg_agl)/2.0\r\n th= self.Motion.theta+ alg\r\n x = self.Motion.x + d_obs * math.cos(th)\r\n y = self.Motion.y + d_obs * math.sin(th)\r\n x = int(x)\r\n y = int(y)\r\n s = (x,y)\r\n if obstacles.count(s) == 0:\r\n obstacles.append(s)\r\n\r\n # check if the obstacle is one of other agents\r\n for k in self.global_trans.keys():\r\n # Format: self.global_trans={id: (degree, [a,st,s_t+1])}\r\n states = self.global_trans[k][1]\r\n st = self.get_gridState(states[1])\r\n st1 = self.get_gridState(states[2])\r\n # if obstacles are other agents, remove them\r\n for o in obstacles:\r\n grid_o = self.get_gridState((o[0],o[1],th))\r\n if (grid_o[0],grid_o[1]) == (st[0],st[1]) or (grid_o[0],grid_o[1]) == (st1[0],st1[1]):\r\n obstacles.remove(o)\r\n return terminal, obstacles",
"def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)",
"def update_bison_obs(x_tan, y_tan, i, k):\n d_tan_bison = calculate_d(elude_bison[k][i][0], elude_bison[k][i][1], x_tan, y_tan)\n turn_v = math.sqrt(math.fabs(bison_v ** 2 - 10 * bison_v * (1 - math.cos(bison_angle[i]) ** 2) / math.cos(bison_angle[i])))\n bison_x[i] = bison_x[i] + ((x_tan - elude_bison[k][i][0]) / d_tan_bison) * delta_t * turn_v\n bison_y[i] = bison_y[i] + ((y_tan - elude_bison[k][i][1]) / d_tan_bison) * delta_t * turn_v\n d[i] = calculate_d(bison_x[i], bison_y[i], wolf_x[0], wolf_y[0]) # 更新狼和羊群的距离\n update_d_bison_obs(i)\n if d_bison_obs[k][i] >= 2.5 * obstacles[k][2]:\n elude_flag[k][i] = True\n bison_flag[i] = False",
"def update_d_bison_obs(bison_i):\n for i in range(obs_n):\n d_bison_obs[i][bison_i] = calculate_d(bison_x[bison_i], bison_y[bison_i], obstacles[i][0], obstacles[i][1])",
"def through_obstacle(line, obstacles):\r\n noofpoints = 20\r\n for i in range(noofpoints):\r\n if inside_obstacle((line[0]+(i*(line[2]-line[0])/noofpoints), line[1]+(i*(line[3]-line[1])/noofpoints)), obstacles) == 1:\r\n return 1\r\n return 0",
"def compute_refl(self, step, borders,obstacle):\n\n r = self.radius\n v = self.velocity\n x = self.position\n projx = step*abs(np.dot(v,np.array([1.,0.])))\n projy = step*abs(np.dot(v,np.array([0.,1.])))\n\n a = pygame.Rect(0,0,borders[3][0],borders[3][0])\n b = pygame.Rect(0,borders[0][1]+borders[0][3],borders[3][0],borders[3][1]+borders[3][3])\n c = pygame.Rect(borders[2][0]+borders[2][2],0,borders[3][0],borders[3][0])\n d = pygame.Rect(borders[3][0]+borders[3][2],borders[1][1]+borders[1][3],borders[3][0],borders[3][0])\n\n if(a.collidepoint(*self.position) or b.collidepoint(*self.position) or c.collidepoint(*self.position) or d.collidepoint(*self.position)):\n self.vafter *= 0\n self.delete = True\n\n\n\n\n else:\n if (abs(x[0])-r -borders[0][0]-borders[0][2] < projx ) or (abs(borders[1][0]- x[0])-r < projx):\n self.vafter[0] *= -1\n\n if abs(x[1])-r -(borders[2][1]+borders[2][3]) < projy or abs(borders[3][1]-x[1])-r < projy:\n self.vafter[1] *= -1.\n\n if obstacle != None:\n obs = pygame.Rect(*obstacle)\n if obs.collidepoint(x[0] + r,x[1]):\n self.vafter[0] = -20\n if obs.collidepoint(x[0] - r,x[1]):\n self.vafter[0] = 20\n if obs.collidepoint(x[0],x[1]- r):\n self.vafter[1] = 20\n if obs.collidepoint(x[0], x[1]+ r):\n self.vafter[1] = -20",
"def through_obstacle(line, obstacles):\r\n noofpoints = 100\r\n for i in range(noofpoints):\r\n if inside_obstacle((line[0]+(i*(line[2]-line[0])/noofpoints), line[1]+(i*(line[3]-line[1])/noofpoints)), obstacles) == 1:\r\n return 1\r\n return 0",
"def obstacles_callback(self, data):\n obs_pos = [(obs.ObsPosition.x, obs.ObsPosition.y, obs.ObsPosition.z)\n for obs in data.obs]\n obs_yaw = np.array([obs.ObsTheta for obs in data.obs])\n if len(obs_pos)==0:\n self.obs_risk = 0.0\n self.min_obs_dist = self.detect_obstacle_range + 100.0\n else:\n disp_vec = np.array(obs_pos) - self.car_pos # displacement\n dist_obs = np.linalg.norm(disp_vec, axis=1) # obstacle distance\n # ego heading unit vector\n ego_hdg = (np.cos(self.car_euler[2]), np.sin(self.car_euler[2]), 0)\n # cosine of ego heading and obs displacment\n obs_cosine = np.dot(disp_vec, ego_hdg)/dist_obs\n # angle of obs displacement w.r.t ego heading\n obs_angle = np.arccos(obs_cosine)\n # raised cosine, 1.0 within a narrow angle ahead, quickly rolloff\n # to 0.0 as angle increases \n obs_rcos = self.raised_cosine(obs_angle, np.pi/24, np.pi/48)\n # distance risk is Laplacian normalized by detection rangei\n risk_dist = np.exp(-0.1*(dist_obs-self.detect_obstacle_range))\n # relative angle between headings of ego car and obs car\n # shifted by pi\n rel_angle = self.car_euler[2] - obs_yaw + np.pi\n rel_angle = (rel_angle + np.pi) % (2*np.pi) - np.pi\n collide_rcos = self.raised_cosine(rel_angle, np.pi/24, np.pi/48)\n # total directional obs risk is distance risk multiplied by\n # raised-cosied directional weight.\n self.obs_risk = np.sum(\n risk_dist * (obs_rcos+0.1) * (collide_rcos+0.1)\n )\n if np.isnan(self.obs_risk):\n self.obs_risk = 0.0\n # idx = np.argsort(dist_obs)[::]\n # minimum obs distance\n self.min_obs_dist = min(dist_obs)\n near_obs = True if self.min_obs_dist<self.detect_obstacle_range else False\n self.pub_obs_risk.publish(self.obs_risk)\n self.pub_nearest_obs.publish(near_obs)",
"def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True",
"def intercept_e(self):\n for asteroid in range(len(self.asteroid_id_e) - 1, -1, -1):\n if self.distance(self.Main_Ship, self.asteroid_id_e[asteroid]) < (self.spaceship_radius + self.asteroid_r_e[asteroid]):\n self.del_asteroid_e(asteroid)\n self.lives -= 1",
"def checkObstaclesAhead(ldr_compl,tireAngle, maxLen=0.3,threshold=2):\n #within the car-width and the maxLen\n # at 45 degrees shift real for 0.05m\n madeUpHeuristic = tireAngle*0.07/45 #shifts real-axis dependent on tire angle\n madeUpHeuristic2= abs(tireAngle*0.14/45) #at 45degrees append CAR_WIDTH with 0.15m\n obstacleIdx = (ldr_compl.imag<maxLen)*(abs(ldr_compl.real+madeUpHeuristic)<((CAR_WIDTH/100+madeUpHeuristic2)/2))\n if is_debugging:\n plt.plot(ldr_compl.real,ldr_compl.imag,'.')\n plt.show()\n print(sum(obstacleIdx))\n return sum(obstacleIdx)>threshold",
"def _find_obstacle(self, obstacle_type='*traffic_light*'): \r\n obst = list()\r\n \r\n _actors = self._world.get_actors()\r\n _obstacles = _actors.filter(obstacle_type)\r\n\r\n\r\n for _obstacle in _obstacles:\r\n trigger = _obstacle.trigger_volume\r\n\r\n _obstacle.get_transform().transform(trigger.location)\r\n \r\n distance_to_car = trigger.location.distance(self._vehicle.get_location())\r\n\r\n a = np.sqrt(\r\n trigger.extent.x ** 2 +\r\n trigger.extent.y ** 2 +\r\n trigger.extent.z ** 2)\r\n b = np.sqrt(\r\n self._vehicle.bounding_box.extent.x ** 2 +\r\n self._vehicle.bounding_box.extent.y ** 2 +\r\n self._vehicle.bounding_box.extent.z ** 2)\r\n\r\n s = a + b + 10\r\n \r\n if distance_to_car <= s:\r\n # the actor is affected by this obstacle.\r\n obst.append(_obstacle)\r\n\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(_obstacle.get_transform().location, carla.Vector3D(0.5,0.5,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,255,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,10)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(trigger,\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n\r\n return obst",
"def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False",
"def avoid_obstacles(self):\n _a = v2d(0, 0)\n _count = 0\n\n # Process all obstacles\n for obs in self.target._obstacles:\n # Vector from target to me\n diff = self._posn - obs._posn\n dist = abs(diff) # Distance\n if 0 < dist < self._sensing_range: # Is it in range?\n # Get force exherted by obstacle\n _f = self.obstacle_force(obs)\n if _f.magnitude() > 1: # Is the force significant?\n _a += _f\n _count += 1\n \n if _count > 0:\n _a /= _count\n _a *= self._speed_cap\n #limit(_a, self._max_f)\n \n return _a",
"def khorne_slide(obs, berzerker_x, berzerker_y):\n def environment_fits(obs, berzerker_x, berzerker_y):\n \"\"\" environment fits constraints \"\"\"\n # if prey has the ball\n if obs[\"ball_owned_team\"] == 1:\n prey_x = obs[\"right_team\"][obs[\"ball_owned_player\"]][0]\n prey_y = obs[\"right_team\"][obs[\"ball_owned_player\"]][1]\n # by x position, amount of berzerker's team players between prey and goal of berzerker's team\n players_amount = 0\n for i in range(1, len(obs[\"left_team\"])):\n if obs[\"left_team\"][i][0] < prey_x:\n players_amount += 1\n prey_x_direction = obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][0]\n future_prey_x = prey_x + obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][0]\n future_prey_y = prey_y + obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][1]\n future_berzerker_x = berzerker_x + obs[\"left_team_direction\"][obs[\"active\"]][0]\n future_berzerker_y = berzerker_y + obs[\"left_team_direction\"][obs[\"active\"]][1]\n distance_to_prey = get_distance(berzerker_x, berzerker_y, prey_x, prey_y)\n future_distance_to_prey = get_distance(future_berzerker_x, future_berzerker_y, future_prey_x, future_prey_y)\n # if berzerker is not close to his own penalty zone\n # and prey is beyond x position of too many players of berzerker's team\n # and berzerker is close enough to prey\n # and berzerker is running in direction of prey\n if ((berzerker_x > -0.65 or abs(berzerker_y) > 0.3) and\n players_amount <= 7 and\n future_distance_to_prey < 0.015 and\n distance_to_prey > future_distance_to_prey):\n return True\n return False\n \n def get_action(obs, berzerker_x, berzerker_y):\n \"\"\" get action of this memory pattern \"\"\"\n return Action.Slide\n \n return {\"environment_fits\": environment_fits, \"get_action\": get_action}",
"def obstacle(psi,f_rhs,tol,f_dist,h0,pts,tri,*args,**kwargs):\n announce = kwargs.get('announce',False)\n if announce:\n print (\" obstacle: asking poisson() for linear system and unconstrained soln ...\")\n # use poisson to get unconstrained stiffness, load\n uhpoisson, inside, AA, bb = poisson(f_rhs,f_dist,h0,pts,tri,announce=True,getsys=True)\n omega = 1.75 # found by trial and error\n maxiter = 500\n Npts = np.shape(pts)[0] # = number of nodes\n geps = 0.001 * h0\n ii = (f_dist(pts, *args) < -geps) # boolean array for interior nodes\n N = ii.sum() # = number of interior nodes\n UU = np.triu(AA,1)\n LL = np.tril(AA,-1)\n dd = np.diag(AA).copy()\n if any(dd == 0.0):\n print ('ERROR: stiffness matrix has zero on diagonal')\n return None\n # first guess is max(uhpoisson,psi)\n ps = np.maximum(psi(pts[ii]),np.zeros(N)) # FIXME: does not work well if f < 0?\n uold = np.maximum(uhpoisson[ii],ps)\n unew = uold.copy()\n omcomp = 1.0 - omega\n ierr = np.array([])\n # iterate: constrained point over-relaxation\n for l in range(maxiter+1):\n Ux = np.dot(UU,uold)\n for j in range(N): # iterate over interior vertices\n # Gauss-Seidel idea:\n if j == 0:\n utmp = (bb[j] - Ux[j]) / dd[j]\n else:\n utmp = (bb[j] - np.dot(LL[j,:j],unew[:j]) - Ux[j]) / dd[j]\n # over-relax and project up to psi if needed\n unew[j] = np.maximum(omcomp * uold[j] + omega * utmp, ps[j])\n er = max(abs(unew-uold))\n ierr = np.append(ierr,er)\n uold = unew.copy()\n if er < tol:\n break\n if l == maxiter:\n print ('WARNING: max number of iterations reached')\n # construct solution by filling interior values and boundary values\n uh = uhpoisson.copy()\n uh[ii] = unew\n return uh, ii, ierr",
"def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4",
"def condition(o):\n\t\t\tv = o.pos() - self.pos()\n\t\t\treturn v.norm2() < dist2 and abs(angle_diff(v.angle(),self.angle())) < math.radians(45)",
"def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if (self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ",
"def process_obstacle(color, cx, cy, box, x, y, obj_length, obj_height, obj_depth,\n\t\t\t\t\t equi_diameter, obstacle_list, obstacle_lifetime, obstacle_id, visualize, send_data):\n\tcoords = list(depth_to_point_cloud_pos(cx, cy, obj_depth)) # convert obstacle depth to XYZ coordinate\n\n\t#theta = CameraPosition['azimuth'] * math.pi / 180 # get robot pitch angle in radians\n\t#coords[0] = CameraPosition['x'] - coords[0] * math.cos(theta) # convert relative obstacle position to global\n\t#coords[2] = CameraPosition['y'] + coords[2] * math.sin(theta)\n\tmm_diameter = equi_diameter * (1.0 / CameraParams['fx']) * obj_depth # convert pixel diameter to mm\n\n\tif 100 < mm_diameter < 400:\n\t\tnew_obstacle = True\n\t\tcurrent_obstacle = None\n\t\tfor obstacle in obstacle_list:\n\t\t\tx_match = abs(obstacle.x - coords[0]) < 0.3\n\t\t\ty_match = abs(obstacle.y - coords[2]) < 0.3\n\t\t\tz_match = abs(obstacle.z - coords[1]) < 0.5\n\t\t\tdiameter_match = abs(obstacle.diameter - mm_diameter) / 1000. < 0.5\n\t\t\tif x_match and y_match:\n\t\t\t\tobstacle.x = coords[0]\n\t\t\t\tobstacle.y = coords[2]\n\t\t\t\tobstacle.z = coords[1]\n\t\t\t\tobstacle.diameter = mm_diameter / 1000.\n\t\t\t\tnew_obstacle = False\n\t\t\t\tobstacle.lifetime = obstacle_lifetime\n\t\t\t\tif send_data:\n\t\t\t\t\tsend_obstacle_data(obstacle)\n\t\t\t\tcurrent_obstacle = Obstacle(obstacle.id,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.x,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.y,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.z,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.diameter,\n\t\t\t\t\t\t\t\t\t\t\tobstacle_lifetime)\n\t\t\t\tif obstacle.lifetime == 0:\n\t\t\t\t\tobstacle_list.remove(obstacle)\n\t\t\t\tbreak\n\t\tif new_obstacle:\n\t\t\tcurrent_obstacle = Obstacle(obstacle_id,\n\t\t\t\t\t\t\t\t\t\tcoords[0],\n\t\t\t\t\t\t\t\t\t\tcoords[2],\n\t\t\t\t\t\t\t\t\t\tcoords[1],\n\t\t\t\t\t\t\t\t\t\tmm_diameter / 1000.,\n\t\t\t\t\t\t\t\t\t\tobstacle_lifetime)\n\t\t\tobstacle_id += 1\n\t\t\tif send_data:\n\t\t\t\tsend_obstacle_data(current_obstacle)\n\t\t\tobstacle_list.append(current_obstacle)\n\n\t\tif visualize:\n\t\t\t# begin visualization\n\t\t\tcv2.drawContours(color, [box], 0, (0, 0, 255), 1)\n\t\t\tcv2.rectangle(color, (x, y), (x + obj_length, y + obj_height), (0, 255, 0), 2)\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\tcv2.putText(color, 'id = %d' % current_obstacle.id, (cx, cy + 15), font, 0.4, (255, 0, 255),\n\t\t\t\t\t\t1, cv2.LINE_AA)\n\t\t\tcv2.putText(color, \"x = %.2f\" % coords[0], (cx, cy + 30), font, 0.4, (0, 0, 255), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\t\t\tcv2.putText(color, \"y = %.2f\" % coords[2], (cx, cy + 45), font, 0.4, (0, 255, 0), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\t\t\tcv2.putText(color, \"z = %.2f\" % (obj_depth / 1000), (cx, cy + 60), font, 0.4, (255, 0, 127),\n\t\t\t\t\t\t1, cv2.LINE_AA)\n\t\t\tcv2.putText(color, \"diameter = %.2f\" % (mm_diameter / 1000), (cx, cy + 75), font, 0.4,\n\t\t\t\t\t\t(255, 127, 0), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\treturn obstacle_id",
"def _check_sonar_obstacles(self):\n # TODO: what's a good number?\n BLOCKED_THRESHOLD = 0.7\n\n rate = rospy.Rate(10) # 10 hz\n count = 10\n left = 0\n center = 0\n right = 0\n\n for i in range(count):\n obstacle = self.swarmie.get_obstacle_condition()\n\n if obstacle & Obstacle.SONAR_LEFT == Obstacle.SONAR_LEFT:\n left += 1\n if (obstacle & Obstacle.SONAR_CENTER ==\n Obstacle.SONAR_CENTER):\n center += 1\n if obstacle & Obstacle.SONAR_RIGHT == Obstacle.SONAR_RIGHT:\n right += 1\n\n rate.sleep()\n\n left_blocked = left / count > BLOCKED_THRESHOLD\n center_blocked = center / count > BLOCKED_THRESHOLD\n right_blocked = right / count > BLOCKED_THRESHOLD\n\n return left_blocked, center_blocked, right_blocked",
"def observe(self, observation, gameState):\n noisyDistance = observation\n pacmanPosition = gameState.getPacmanPosition()\n\n \"*** YOUR CODE HERE ***\"\n\n # Replace this code with a correct observation update\n # Be sure to handle the \"jail\" edge case where the ghost is eaten\n # and noisyDistance is None\n allPossible = util.Counter()\n \n for p in self.legalPositions:\n if noisyDistance is None:\n allPossible[p] = 0.0\n else:\n trueDistance = util.manhattanDistance(p, pacmanPosition)\n emissionProb = gameState.getDistanceProb(trueDistance, noisyDistance) \n if emissionProb > 0: \n allPossible[p] = emissionProb * self.beliefs[p]\n\n \"*** END YOUR CODE HERE ***\"\n\n allPossible.normalize()\n self.beliefs = allPossible",
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')",
"def checkObstacles(dist_compl, centerBoxCoordinate, box_width, box_height, threshold=2):\n # move the coordinate system to the center + box_height/2\n #plt.plot(dist_compl.real,dist_compl.imag,'g.')\n shift_dist_compl= dist_compl-( centerBoxCoordinate-np.array([box_height/2+0j]) )\n #plt.plot(dist_compl.real,dist_compl.imag,'r.')\n # now look in the box in front of you\n obstacleIdx = (shift_dist_compl.real<box_height)*(abs(shift_dist_compl.imag)<((box_width)))\n #plt.show()\n return sum(obstacleIdx)>threshold",
"def process_observation(self, observation):\n #print(\"start_process_obs\")\n processed_observation = np.zeros((NB_AGENTS, OBSERVATION_SIZE))\n\n goliath_type = getattr(env, 'Terran_Goliath')\n battlecruiser_type = getattr(env, 'Terran_Battlecruiser')\n '''\n goliath and battlecruiser type:\n hp_max: 125\n armor: 1\n cooldown_max: 22\n acceleration: 1\n top_speed: 4.57\n damage_amount: 12\n damage_factor: 1\n weapon_range: 192\n sight_range: 256\n seek_range: 160\n\n hp_max: 500\n energy_max: 200\n armor: 3\n cooldown_max: 30\n acceleration: 27\n top_speed: 2.5\n damage_amount: 25\n damage_factor: 1\n weapon_range: 192\n sight_range: 352\n '''\n #print(\"goliath and battlecruiser type:\")\n #print(goliath_type)\n #print(battlecruiser_type)\n\n for i, agent in enumerate(observation.my_unit):\n if agent.hp <= 0:\n continue\n my_x = agent.pos_x\n my_y = agent.pos_y\n my_type_str = agent.unit_type\n my_type = goliath_type if my_type_str == 'Terran_Goliath' else print(\"error in the my_type\")\n t1 = [agent.hp + agent.shield, agent.cooldown, math.atan2(agent.velocity_y, agent.velocity_x),\n math.sqrt((agent.velocity_x) ** 2 + (agent.velocity_y) ** 2), agent.angle,\n 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame]\n t2 = [self.last_action[i] / (env.action_space[1] - 1)]\n t3 = [i.nearest_obstacle_dist for i in agent.pos_info]\n t4 = []\n t5 = []\n t4_max = []\n t5_max = []\n for idx, enemy in enumerate(observation.en_unit):\n en_type_str = enemy.unit_type\n if en_type_str == 'Terran_Battlecruiser':\n en_type = battlecruiser_type\n else:\n continue \n if enemy.hp <= 0:\n t4.extend([0,0,0,0,0,0,0,0,0,0])\n else:\n t4.extend([math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x), math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2),\n math.atan2(enemy.velocity_y, enemy.velocity_x), math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2),\n enemy.cooldown, enemy.hp + enemy.shield, enemy.angle, 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame])\n t4_max.extend([math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1])\n for idx, ally in enumerate(observation.my_unit):\n if i == idx:\n continue\n if ally.hp <= 0:\n t5.extend([0,0,0,0,0])\n else:\n t5.extend([math.atan2(ally.pos_y - my_y, ally.pos_x - my_x), math.sqrt((ally.pos_x - my_x) ** 2 + (ally.pos_y - my_y) ** 2),\n math.atan2(ally.velocity_y, ally.velocity_x), math.sqrt((ally.velocity_x) ** 2 + (ally.velocity_y) ** 2), ally.hp + ally.shield])\n ally_type = goliath_type\n t5_max.extend([math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max])\n if my_type_str == 'Terran_Goliath':\n t1_max = [my_type.hp_max + my_type.shield_max, 1, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n else:\n t1_max = [my_type.hp_max + my_type.shield_max, my_type.cooldown_max, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n #t4_max = [math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1]\n #t5_max = [math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max]\n\n #t5_max = [32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + 
type.shield_max, math.pi]\n\n t1 = np.divide(t1, t1_max) # runtime warning\n t2 = np.array(t2) / 320\n t3 = np.array(t3) / 320\n t4 = np.divide(t4, t4_max)\n t5 = np.divide(t5, t5_max)\n\n processed_observation[i] = np.concatenate([t1, t2, t3, t4, t5])\n\n self.last_my_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.my_unit]) > 0))\n self.last_enemy_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.en_unit]) > 0))\n self.last_enemy_unit_hp.append(sum([u.hp + u.shield for u in observation.en_unit]))\n self.accumulated_observation.append(processed_observation)\n\n\n return processed_observation",
"def observe(self, observation, gameState, myPosition, idx):\n noisyDistance = observation\n noZero = False\n for p in self.legalPositions:\n if self.beliefs[idx][p] <= 0:\n self.beliefs[idx].pop(p, None)\n continue\n trueDistance = util.manhattanDistance(p, myPosition)\n prob = gameState.getDistanceProb(trueDistance, noisyDistance)\n if prob > 0:\n self.beliefs[idx][p] *= prob\n noZero = True\n else:\n self.beliefs[idx].pop(p, None)\n if not noZero:\n self.initializeBeliefsUniformly(gameState, idx)\n self.beliefs[idx].normalize()",
"def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()",
"def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)"
] | [
"0.62927634",
"0.6241226",
"0.620058",
"0.6018024",
"0.59719473",
"0.59486395",
"0.5917246",
"0.59119254",
"0.5910905",
"0.58528554",
"0.5772193",
"0.5666695",
"0.56637704",
"0.5658386",
"0.56462765",
"0.5594453",
"0.55852175",
"0.5563984",
"0.5561192",
"0.55316013",
"0.5528085",
"0.5515602",
"0.549967",
"0.549967",
"0.5488382",
"0.5450852",
"0.5449293",
"0.5448109",
"0.5438832",
"0.5420643"
] | 0.7006195 | 0 |
find the index of the minimum distance in the list d | def find_min_distance():
return np.argmin(d) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __idx_of_minimum(cls, lst: list) -> int:\n\t\treturn lst.index(min(lst))",
"def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in self if not e is arg])",
"def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node",
"def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex",
"def minDist(l, a, b):\n pre = 0\n rt = float('INF')\n for i in range(len(l)):\n if l[i] == a or l[i] == b:\n pre = i\n break\n\n for i in range(pre+1, len(l)):\n if l[i] == a or l[i] == b:\n if l[i] != l[pre] and i - pre < rt:\n rt = i - pre\n pre = i\n return rt",
"def find_min(list):\n return find_value_at(list, -1)",
"def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return min_index",
"def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j",
"def nn(x, S, dist):\n\n # note that there might be more than on minimal item. min will return the\n # first one ecountered\n return min(S, key=lambda y: dist(x, y[:-1]))",
"def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode",
"def smallest_elem_index(input_list):\n if len(input_list) == 0:\n raise Exception(\"List must contain at least 1 element\")\n \n min_index = 0 \n for i in range(1, len(input_list)):\n if input_list[i] < input_list[min_index]:\n min_index = i\n return min_index",
"def findMin(list, t_value):\n currMin = sys.maxsize\n result = 0\n for index in list:\n if t_value[(index[0], index[1], tuple(index[2].items()))] < currMin:\n currMin = t_value[(index[0], index[1], tuple(index[2].items()))]\n result = index\n return result",
"def smallest_distance(self, clusters):\n i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)\n return clusters[i, j], i, j",
"def minimal_distance(me):\n smallest_d = 101 # given length of edge <= 100\n ismallest = -1 # index of the edge in the list, me\n for i, e in enumerate(me):\n if e[0] < smallest_d:\n smallest_d = e[0]\n ismallest = i\n\n d = me[ismallest][0]\n v1 = me[ismallest][1]\n v2 = me[ismallest][2]\n me.pop(ismallest)\n\n smallest_d = 101\n for i, e in enumerate(me):\n if (e[1] == v1 or e[2] == v1 or e[1] == v2 or e[2] == v2) and e[0] < smallest_d:\n smallest_d = e[0]\n\n d += smallest_d\n return d",
"def min_indice(L):\n min_l = min(L)\n return [min_l,np.where(L==min_l)[0][0]]",
"def getnearest(iterable, value):\n return min(enumerate(iterable), key=lambda i: abs(i[1] - value))",
"def extract_min(H, ds):\n minDist = approxInf\n u = None # min vertex unknown\n i = 0\n for v in H:\n if ds[v] <= minDist:\n minDist = ds[v]\n u = v # note that u is unused (instead returned by pop)\n imin = i\n i += 1\n return(H.pop(imin)) # return [u, d]",
"def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])",
"def _calc_min_distance(self, walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance",
"def geo_idx(dd, dd_array):\n geo_idx = (np.abs(dd_array - dd)).argmin()\n return geo_idx",
"def min(weightData , dataSetVector ):\r\n # weightData: pass the whole weightData array.\r\n # dataSetVector: pass the a data vector to compare with weightdata array, to find its closest match\r\n winnerIndex = 0 #flag for initalizing the winner index\r\n minValue = EcuDist(dataSetVector,weightData[0]) # initalize the minValue\r\n # iterate through all weighdata rows to find the closest match, depending on ecu. distance,\r\n #and then return the index of the closest match(winner)\r\n for i in range(weightData.shape[0]):\r\n if(EcuDist(dataSetVector,weightData[i]) < minValue):\r\n minValue = EcuDist(dataSetVector,weightData[i])\r\n winnerIndex = i\r\n return winnerIndex",
"def secondSmallest(d_diff_pts):\n tmp_inds = np.arange(len(d_diff_pts))\n tmp_inds_min0 = np.argmin(d_diff_pts)\n tmp_inds = np.delete(tmp_inds, tmp_inds_min0)\n tmp_d_diff_pts =np.delete(d_diff_pts, tmp_inds_min0)\n secondSmallest_value = min(tmp_d_diff_pts)\n secondSmallest_ind = np.argmin(np.abs(d_diff_pts - secondSmallest_value))\n return secondSmallest_value, secondSmallest_ind",
"def i_min(Pd):\n return int(pentagonal_index(2 * Pd))",
"def get_index_of_smallest(L, i):\n #The index of the smallest item so far.\n index_of_smallest = i\n\n for j in range(i+1,len(L)):\n if L[index_of_smallest] > L[j]:\n index_of_smallest = j\n return index_of_smallest",
"def get_nearest_node_index(node_list, random_node):\n\n dist_list = [\n (node.x - random_node.x) ** 2 + (node.y - random_node.y) ** 2\n for node in node_list\n ]\n minind = dist_list.index(min(dist_list))\n\n return minind",
"def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))",
"def get_index_of_smallest(L, i):\n index_of_smallest = i\n for j in range(i+1, len(L)):\n if L[j] < L[index_of_smallest]:\n index_of_smallest = j\n return index_of_smallest",
"def find_vertex_at_nearest_distance(DISTANCES, D):\n v = int(0) # All vertex IDs are integers\n iv = int(0) # Index of the vertex v in DISTANCES\n DISTANCES = np.asarray(DISTANCES)\n min_val = (np.abs(DISTANCES - D)).min()\n vertices = np.where(DISTANCES == min_val + D)\n iv = int(np.random.random() * (len(vertices[0]) - 1))\n v = vertices[0][iv]\n return v",
"def find_min(self, A, w):\n import numpy as np\n\n vcost = self.INFINITY\n vto = vfrom = -1\n for v in w:\n # Get array offset of minimum of this vertex\n i = np.argmin(A[v,:])\n if A[v,i] < vcost:\n vcost = A[v,i]\n vto = i\n vfrom = v\n return (vfrom, vto, vcost)",
"def _find_min_diff(self, rejected):\n\t\t# TODO: optimize search for a minimum\n\t\tindexes = [i for i in range(self._size) if i not in rejected]\n\t\tvector = copy(self._diffEi)\n\t\tfor i in sorted(rejected,reverse=True):\n\t\t\tdel vector[i]\n\t\treturn min(zip(indexes,vector), key=itemgetter(1))[0]"
] | [
"0.7192563",
"0.71359694",
"0.70440257",
"0.7035856",
"0.70053023",
"0.69904137",
"0.6925225",
"0.6874505",
"0.6810732",
"0.67803216",
"0.6676719",
"0.66622704",
"0.66444784",
"0.6632789",
"0.6621095",
"0.66131043",
"0.6592418",
"0.6562787",
"0.6508623",
"0.649189",
"0.64786786",
"0.64664793",
"0.64525247",
"0.6440356",
"0.643428",
"0.643078",
"0.6421628",
"0.642094",
"0.64146876",
"0.64133674"
] | 0.8462347 | 0 |
runs a community detection algorithm on a graph and returns a coloring of the nodes based on the found communities | def node_community_colors(graph, communities):
colors = nx_helpers.generate_colors(len(communities))
def which_color(node):
"""finds which community node is in and returns
its corresponding color
"""
for i, com in enumerate(communities):
if node in com:
return colors[i]
return nx_helpers.rgb_to_hex((0, 0, 0))
node_colors = [which_color(node) for node in graph.nodes()]
return node_colors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def community_detection(net_G):\r\n if list(nx.isolates(net_G)) == []:\r\n part = community.best_partition(net_G)\r\n #values = [part.get(node) for node in net_G.nodes()]\r\n #nx.draw_spring(net_G, cmap = plt.get_cmap('jet'), node_color = values, node_size=30, with_labels=False)\r\n #plt.show()\r\n else:\r\n net_G = net_G.copy()\r\n net_G.remove_nodes_from(list(nx.isolates(net_G)))\r\n part = community.best_partition(net_G)\r\n list_nodes = []\r\n for com in set(part.values()):\r\n list_nodes.append([nodes for nodes in part.keys() if part[nodes] == com])\r\n num_of_communities = len(list_nodes)\r\n partition_performance = nx.algorithms.community.quality.performance(net_G, list_nodes)\r\n net_communities = [[\"Numbers of communities:\", num_of_communities], \\\r\n [\"Partition performance:\", partition_performance]]\r\n return net_communities",
"def _community_detection(self, kg: KG) -> None:\n nx_graph = nx.Graph()\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n nx_graph.add_node(str(vertex), vertex=vertex)\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n # Neighbors are predicates\n for pred in kg.get_neighbors(vertex):\n for obj in kg.get_neighbors(pred):\n nx_graph.add_edge(\n str(vertex), str(obj), name=str(pred)\n )\n\n # Create a dictionary that maps the URI on a community\n partition = community.best_partition(\n nx_graph, resolution=self.resolution\n )\n self.labels_per_community = defaultdict(list)\n\n self.communities = {}\n vertices = nx.get_node_attributes(nx_graph, \"vertex\")\n for node in partition:\n if node in vertices:\n self.communities[vertices[node]] = partition[node]\n\n for node in self.communities:\n self.labels_per_community[self.communities[node]].append(node)",
"def create_network(self, community_detection, wt_steps, n_clust, network_from, neighbors, top):\n \n if network_from == 'top_n':\n sort_by_scores = []\n\n for pair, score in scores_update.items():\n sort_by_scores.append([pair[0], pair[1], score[2]])\n top_n = sorted(sort_by_scores, reverse=False, key=lambda x: x[2])[:top]\n\n # Convert from distance to similarity for edge\n for score in top_n: \n c = 1/(1 + score[2])\n score[2] = c\n\n flat = [tuple(pair) for pair in top_n]\n\n elif network_from == 'knn': \n flat = []\n projection_knn = nearest_neighbors(neighbors=neighbors)\n\n for projection, knn in projection_knn.items():\n for n in knn:\n flat.append((projection, n[0], abs(n[3]))) # p1, p2, score\n\n clusters = {}\n g = Graph.TupleList(flat, weights=True)\n\n if community_detection == 'walktrap':\n try:\n wt = Graph.community_walktrap(g, weights='weight', steps=wt_steps)\n cluster_dendrogram = wt.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n elif community_detection == 'betweenness':\n try:\n ebs = Graph.community_edge_betweenness(g, weights='weight', directed=True)\n cluster_dendrogram = ebs.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n\n for community, projection in enumerate(cluster_dendrogram.subgraphs()):\n clusters[community] = projection.vs['name']\n\n #convert node IDs back to ints\n for cluster, nodes in clusters.items():\n clusters[cluster] = sorted([int(node) for node in nodes])\n \n remove_outliers(clusters)\n\n clustered = []\n for cluster, nodes in clusters.items():\n for n in nodes:\n clustered.append(n)\n\n clusters['singles'] = [] # Add singles to clusters if not in top n scores\n clusters['removed'] = []\n \n for node in projection_2D:\n if node not in clustered and node not in drop:\n clusters['singles'].append(node)\n elif node in drop:\n clusters['removed'].append(node)\n \n G = nx.Graph()\n\n for pair in flat:\n G.add_edge(int(pair[0]), int(pair[1]), weight=pair[2])\n\n #if you want to see directionality in the networkx plot\n #G = nx.MultiDiGraph(G)\n\n #adds singles if not in top n scores\n for node_key in projection_2D:\n if node_key not in G.nodes:\n G.add_node(node_key)\n\n return flat, clusters, G",
"def detection_algorithm(G, edge_weight):\n Gc = G.copy()\n set_node_attributes(Gc, attr_name='k-index')\n seed_node2communities = {}\n\n from operator import itemgetter\n while Gc.number_of_nodes() > 0:\n seed_node = max(list(Gc.nodes(data='k-index')), key=itemgetter(1))[0]\n nodes_in_community, modularity = find_local_community(Gc, seed_node=seed_node, weight=edge_weight)\n seed_node2communities[seed_node] = (nodes_in_community, modularity)\n Gc.remove_nodes_from(nodes_in_community)\n return seed_node2communities",
"def greedy_coloring(*args):\r\n # get arguments\r\n G = args[0]\r\n n = G.nodes()\r\n m = G.arcs()\r\n \r\n # check if it a valid Graph\r\n if not G.is_correct_type('u'):\r\n print \"ERROR: the graph is not in one of the valid formats for greedy_coloring()\"\r\n return [], []\r\n \r\n # calculate degrees of each node (set as rows per node)\r\n a_nodes = zeros((n,n), int)\r\n for arc in range(m):\r\n i = G.A[arc,0] # tail of the arc\r\n j = G.A[arc,1] # head of the arc\r\n a_nodes[i-1,j-1] = 1\r\n a_nodes[j-1,i-1] = 1\r\n # get degree and add the node number\r\n degree = sum(a_nodes,0)\r\n degree = vstack((degree, array(range(n), int) + 1))\r\n \r\n # initialize coloring vector\r\n coloring = zeros(n, int)\r\n color_step = 1\r\n \r\n # if there are any nodes of degree 0 color them first\r\n while min(degree[0,:]) == 0:\r\n n_i = argmin(degree[0,:]) # get node with zero\r\n i = degree[1,n_i]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_i], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_i], axis=1)\r\n # color it\r\n coloring[i-1] = color_step\r\n \r\n # iterate till all nodes have a color\r\n while size(degree) > 0:\r\n n_i = argmax(degree[0,:]) # get node with largest degree\r\n i = degree[1,n_i]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_i], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_i], axis=1)\r\n \r\n # color it\r\n coloring[i-1] = color_step\r\n \r\n # color the rest of the possible nodes\r\n possible = 1 - array(a_nodes[i-1,:]) # transforms 0 in 1, and 1 in 0\r\n # iterate while there are possible nodes available\r\n while sum(possible) > 0:\r\n # get the node with largest degree among possible ones\r\n n_j = argmax(degree[0,:] * possible)\r\n j = degree[1,n_j]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_j], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_j], axis=1)\r\n possible = delete(possible, n_j)\r\n \r\n # color it\r\n coloring[j-1] = color_step\r\n # eliminate adjacent nodes of j from possible nodes\r\n possible = possible * (1 - a_nodes[j-1,:])\r\n \r\n # update color\r\n color_step += 1\r\n \r\n col_number = max(coloring) # approx chromatic number\r\n \r\n return coloring, col_number",
"def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity",
"def communityGraph(graph):\n\n lapgr = nx.laplacian_matrix(graph)\n\n # Get the eigenvalues and eigenvectors of the Laplacian matrix\n evals, evec = np.linalg.eigh(lapgr.todense())\n\n fiedler = evec[1]\n results = []\n ## \"Fiedler\", fiedler\n median = np.median(fiedler, axis=1) # median of the second eigenvalue\n for i in range(0, fiedler.size): # divide the graph nodes into two\n if(fiedler[0, i] < median):\n results.append(0)\n else:\n results.append(1)\n return results, evals, evec",
"def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0",
"def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in enumerate(communities)\n if node in c][0]\n graph.graph['modularity'] = nx.algorithms.community.quality\\\n .modularity(nx.Graph(graph),\n communities)",
"def which_color(node):\n for i, com in enumerate(communities):\n if node in com:\n return colors[i]\n return nx_helpers.rgb_to_hex((0, 0, 0))",
"def neato_graph_from_corpus( corpus, max_nodes ) :\n\n O, row_dois, column_dois = cites_matrix( corpus )\n neato_cooccurrence_graph( O, column_dois )\n return None\n\n \n v = total_occurrences( O ) \n nv = v.astype( float32 ) / v.max()\n C = cooccurrence_matrix ( O )\n nC = normalized_cooccurrence_matrix( O )\n\n # now find our cutoff!\n # find the max number of cocites and start there\n cocite_cutoff = C.max()\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n # then reduce the number until we exceed max_nodes\n while num_nodes < max_nodes :\n cocite_cutoff = cocite_cutoff - 1\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n\n if num_nodes > max_nodes :\n cocite_cutoff = cocite_cutoff + 1\n \n C = C.copy()\n C[ C < cocite_cutoff ]= 0\n\n graph = pydot.Dot( graph_type = 'graph' )\n graph.set_overlap(\"false\")\n coords = zip(*(C >= cocite_cutoff).nonzero())\n\n # make a dict of all nodes which are mentioned in the coords\n nodes = {}\n index = 1\n for coord in set(chain.from_iterable(coords)) :\n if not nodes.has_key( coord ) :\n node = pydot.Node( str(coord) )\n if v != None :\n doi = column_dois[coord]\n node.set_label( str(index) )\n node.set_penwidth( nv[ coord ] )\n node.set_fixedsize(\"true\")\n node.set_width( 1.0 *nv[ coord ] )\n #node.set_shape(\"circle\")\n nodes[ coord ] = node\n graph.add_node( node )\n index = index + 1\n\n for coord in coords :\n \n edge = pydot.Edge( nodes[coord[0]], nodes[coord[1]] )\n edge.set_weight( nC[coord] )\n edge.set_penwidth( nC[coord]*5 )\n #edge.set_label( str(int(m[coord]) ))\n graph.add_edge(edge)\n\n \n legend = pydot.Node( \"legend\" )\n nodelist = nodes.items()\n nodelist.sort( lambda a,b : cmp(node_index(a[1].get_label()),node_index(b[1].get_label())) )\n legend.set_label( \"\\l\".join([x[1].get_label()+\":\"+column_dois[x[0]] for x in nodelist])+\"\\l\" )\n legend.set_shape(\"box\")\n graph.add_node(legend)\n\n print graph.to_string()\n #graph.write_dot('test.dot', prog='neato' )\n #graph.write_png('test.png', prog='neato' )\n #graph.write_pdf('test.pdf', prog='neato' )",
"def cluster_connectivity(G, weight='weight'):\n\t# 1) indexing the edges by community\n\tsum_edges_dic = { com : {} for com in range(G.nb_communities)}\n\tfor node1, node2 in G.edges():\n\t\tcomm1 = G.nodes[node1]['community']\n\t\tcomm2 = G.nodes[node2]['community']\n\t\tif comm2 not in sum_edges_dic[comm1]:\n\t\t\tsum_edges_dic[comm1][comm2] = 0\n\t\t\tsum_edges_dic[comm2][comm1] = 0\n\t\telse:\n\t\t\tif weight is None:\n\t\t\t\tsum_edges_dic[comm1][comm2] += 1\n\t\t\t\tsum_edges_dic[comm2][comm1] += 1\n\t\t\telse:\t\n\t\t\t\tsum_edges_dic[comm1][comm2] += G.edges[node1, node2][weight]\n\t\t\t\tsum_edges_dic[comm2][comm1] += G.edges[node1, node2][weight]\n\tc_connectivity = {}\n\t# 2) computing the connectivity\n\tfor com in sum_edges_dic:\n\t\tin_out_edges = sum(sum_edges_dic[com].values())\n\t\tc_connectivity[com] = round(- np.log2(sum_edges_dic[com][com] / in_out_edges),3) \n\treturn c_connectivity",
"def detect_community(network_path, network_name, output_path):\n net = network.network()\n net.from_file(network_path)\n output_prefix = output_path + '/{}_recursive_Louvain_{}_{}'.format(network_name, len(net.nodes), len(net.edges))\n logging.info('{} network: {} nodes and {} edges were loaded.'.format(network_name, len(net.nodes), len(net.edges)))\n if os.path.isfile(output_prefix + '.gmt'):\n logging.info('Communities have been detected.')\n else:\n rl = RecursiveLouvain()\n rl.fit(net)\n comms = rl.community\n hierarchy = rl.hierarchy\n os.makedirs(output_path, exist_ok=True)\n commToGmt(comms, output_prefix)\n output_hierarchy(hierarchy, output_prefix)\n logging.info('{} communities were detected.'.format(len(comms)))\n logging.info('Community file was saved as {}.gmt'.format(output_prefix))\n return '{}.gmt'.format(output_prefix)",
"def find_and_print_network_communities(G, code_dict=None):\n\n comm_dict = partition(G)\n\n comm_members = {}\n for comm in set(comm_dict.values()):\n countries = [node for node in comm_dict if comm_dict[node] == comm]\n if code_dict is not None:\n countries = [code_dict[code] for code in countries]\n\n comm_members[comm] = countries\n\n return comm_members, get_modularity(G, comm_dict)",
"def run_connected_components(img, viz=False):\n\n fg = img.foreground_mask(cfg.COLOR_TOL, ignore_black=True)\n if viz:\n cv2.imwrite(\"debug_imgs/mask.png\", fg.data)\n\n groups = get_cluster_info(fg)\n\n if viz:\n display_grasps(img, groups)\n\n return groups",
"def network_topology(voxels, clusters, primaries, edges, mode='sphere'):\n # Define the arrays of node positions (barycenter of voxels in the cluster)\n pos = np.array([voxels[c].cpu().numpy().mean(0) for c in clusters])\n\n # Define the node features (label, color)\n n = len(clusters)\n node_labels = ['%d (%0.1f, %0.1f, %0.1f)' % (i, pos[i,0], pos[i,1], pos[i,2]) for i in range(n)]\n \n node_colors = ['#ff7f0e' if i in primaries else '#1f77b4' for i in range(n)]\n\n # Define the nodes and their connections\n graph_data = []\n edge_vertices = []\n if mode == 'sphere':\n # Define the node size\n logn = np.array([np.log(len(c)) for c in clusters])\n node_sizes = np.interp(logn, (logn.min(), logn.max()), (5, 50))\n \n # Define the nodes as sphere of radius proportional to the log of the cluster voxel content\n graph_data.append(go.Scatter3d(x = pos[:,0], y = pos[:,1], z = pos[:,2],\n name = 'clusters',\n mode = 'markers',\n marker = dict(\n symbol = 'circle',\n size = node_sizes,\n color = node_colors,\n colorscale = 'Viridis',\n line = dict(color='rgb(50,50,50)', width=0.5)\n ),\n text = node_labels,\n hoverinfo = 'text'\n ))\n\n # Define the edges center to center\n edge_vertices = np.concatenate([[pos[i], pos[j], [None, None, None]] for i, j in zip(edges[0], edges[1])])\n\n elif mode == 'hull':\n # For each cluster, add the convex hull of all its voxels\n graph_data += [go.Mesh3d(alphahull =10.0,\n name = '',\n x = voxels[c][:,0],\n y = voxels[c][:,1],\n z = voxels[c][:,2],\n color = node_colors[i],\n opacity = 0.3,\n text = node_labels[i],\n hoverinfo = 'text'\n ) for i, c in enumerate(clusters)]\n\n # Define the edges closest pixel to closest pixel\n import scipy as sp\n edge_vertices = []\n for i, j in zip(edges[0], edges[1]):\n vi, vj = voxels[clusters[i]], voxels[clusters[j]]\n d12 = sp.spatial.distance.cdist(vi, vj, 'euclidean')\n i1, i2 = np.unravel_index(np.argmin(d12), d12.shape)\n edge_vertices.append([vi[i1].cpu().numpy(), vj[i2].cpu().numpy(), [None, None, None]])\n \n edge_vertices = np.concatenate(edge_vertices)\n \n else:\n raise ValueError\n \n # Initialize a graph that contains the edges\n graph_data.append(go.Scatter3d(x = edge_vertices[:,0], y = edge_vertices[:,1], z = edge_vertices[:,2],\n mode = 'lines',\n name = 'edges',\n line = dict(\n color = 'rgba(50, 50, 50, 0.5)',\n width = 1\n ),\n hoverinfo = 'none'\n ))\n\n # Return\n return graph_data",
"def make_graph(imageAnnotated, imageGaussian):\n nodeNumber = imageAnnotated.max() - 1\n distanceDiagonalPixels, distanceDiagonalPixelsCubic = np.sqrt(2.0), np.sqrt(3.0)\n distanceMatrix = np.array([[distanceDiagonalPixelsCubic, distanceDiagonalPixels, distanceDiagonalPixelsCubic], [distanceDiagonalPixels, 1, distanceDiagonalPixels],\n [distanceDiagonalPixelsCubic, distanceDiagonalPixels, distanceDiagonalPixelsCubic]])\n nodePositions = np.transpose(np.where(imageAnnotated > 1))[:, ::-1]\n imagePropagatedNodes = imageAnnotated.copy()\n imageFilamentLength = 1.0 * (imageAnnotated.copy() > 0)\n imageFilamentIntensity = 1.0 * (imageAnnotated.copy() > 0)\n dimensionY, dimensionX = imageAnnotated.shape\n filament = (imagePropagatedNodes == 1).sum()\n while (filament > 0):\n nodePixel = np.transpose(np.where(imagePropagatedNodes > 1))\n for posY, posX in nodePixel:\n xMin, xMax, yMin, yMax = bounds(posX - 1, 0, dimensionX), bounds(posX + 2, 0, dimensionX), bounds(posY - 1, 0, dimensionY), bounds(posY + 2, 0, dimensionY)\n nodeNeighborhood = imagePropagatedNodes[yMin:yMax, xMin:xMax]\n nodeFilamentLength = imageFilamentLength[yMin:yMax, xMin:xMax]\n nodeFilamentIntensity = imageFilamentIntensity[yMin:yMax, xMin:xMax]\n imagePropagatedNodes[yMin:yMax, xMin:xMax] = np.where(nodeNeighborhood == 1, imagePropagatedNodes[posY, posX], nodeNeighborhood)\n imageFilamentLength[yMin:yMax, xMin:xMax] = np.where(nodeFilamentLength == 1, distanceMatrix[0:yMax - yMin, 0:xMax - xMin] + imageFilamentLength[posY, posX], nodeFilamentLength)\n imageFilamentIntensity[yMin:yMax, xMin:xMax] = np.where(nodeFilamentIntensity == 1, imageGaussian[posY, posX] + imageFilamentIntensity[posY, posX], nodeFilamentIntensity)\n filament = (imagePropagatedNodes == 1).sum()\n graph = nx.empty_graph(nodeNumber, nx.MultiGraph())\n filamentY, filamentX = np.where(imagePropagatedNodes > 1)\n for posY, posX in zip(filamentY, filamentX):\n nodeIndex = imagePropagatedNodes[posY, posX]\n xMin, xMax, yMin, yMax = bounds(posX - 1, 0, dimensionX), bounds(posX + 2, 0, dimensionX), bounds(posY - 1, 0, dimensionY), bounds(posY + 2, 0, dimensionY)\n filamentNeighborhood = imagePropagatedNodes[yMin:yMax, xMin:xMax].flatten()\n filamentLength = imageFilamentLength[yMin:yMax, xMin:xMax].flatten()\n filamentIntensity = imageFilamentIntensity[yMin:yMax, xMin:xMax].flatten()\n for index, pixel in enumerate(filamentNeighborhood):\n if (pixel != nodeIndex and pixel > 1):\n node1, node2 = np.sort([nodeIndex - 2, pixel - 2])\n nodeDistance = sp.linalg.norm(nodePositions[node1] - nodePositions[node2])\n filamentLengthSum = imageFilamentLength[posY, posX] + filamentLength[index]\n filamentIntensitySum = imageFilamentIntensity[posY, posX] + filamentIntensity[index]\n minimumEdgeWeight = max(1e-9, filamentIntensitySum)\n edgeCapacity = 1.0 * minimumEdgeWeight / filamentLengthSum\n edgeLength = 1.0 * filamentLengthSum / minimumEdgeWeight\n edgeConnectivity = 0\n edgeJump = 0\n graph.add_edge(node1, node2, edist=nodeDistance, fdist=filamentLengthSum, weight=minimumEdgeWeight, capa=edgeCapacity, lgth=edgeLength, conn=edgeConnectivity, jump=edgeJump)\n return(graph, nodePositions)",
"def run(self, infected_graph):\n pos = nx.spring_layout(infected_graph)\n points = np.zeros((len(pos), 2))\n i = 0\n for p in pos:\n points[i] = pos[p]\n i += 1\n \n hull = ConvexHull(points)\n nodes = list(pos)\n return [nodes[p] for p in hull.vertices]",
"def find_communities(graph):\n visited = set()\n communities = []\n for node in graph:\n if node not in visited:\n community = _find_community(node, graph, visited)\n communities.append(community)\n\n return communities",
"def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)",
"def create_graph(self):\n robot_pix = int(math.ceil(self.robot.size / self.resolution))\n ii = 0\n jj = 0\n for i in range(0, self.height, robot_pix):\n jj = 0\n for j in range(0, self.width, robot_pix):\n block = self.occ_grid[i:i+robot_pix, j:j+robot_pix].flatten()\n avg = np.mean(block)\n robot_block = self.tesselation_image[i:i+robot_pix, j:j+robot_pix].flatten()\n n_occur = np.bincount(robot_block)\n block_id = np.argmax(n_occur)\n \n p = Pose()\n p.position.x = self.resolution * j + self.resolution / 2.0 + self.origin.position.x\n p.position.y = self.height * self.resolution - (self.resolution * i + self.resolution / 2.0) + self.origin.position.y\n node = Node(ii, jj, p)\n idx = np.where(block > 20)\n if block_id == self.robot.robot_id:\n if 0 <= avg <= 20:\n print(\"Node in path\", node)\n node.valid = True\n else:\n node.valid = False\n elif block_id == 0:\n node.valid = False\n else:\n node.belongs = False\n self.nodes[ii,jj] = node\n jj += 1\n ii += 1\n\n\n height, width = self.nodes.shape\n print(\"Node shape: \", self.nodes.shape)\n for i in range(height):\n for j in range(width):\n min_i = max(0, i-1)\n max_i = min(height - 1, i+1) + 1\n min_j = max(0, j-1)\n max_j = min(width - 1, j+1) + 1\n\n node = self.nodes[i,j]\n neighbors = self.nodes[min_i:max_i, min_j:max_j].flatten()\n for n in neighbors:\n if not n or not node:\n print(\"None %d-%d\"%(i,j))\n continue\n if n != node:\n if n.valid:\n print(\"Neighbor appended\")\n self.nodes[i,j].neighbors.append(n)\n else:\n self.nodes[i,j].obstacle_neighbors.append(n)\n print(\"Graph is created!\")",
"def GraphToCommunities(Network):\n comm=community.best_partition(Network)\n clusters={}\n for k in comm.keys():\n if clusters.has_key(comm[k])==False:\n clusters[comm[k]]=[]\n clusters[comm[k]].append(k)\n return (clusters)",
"def brute_force_coloring(*args):\r\n # get arguments\r\n G = args[0]\r\n n = G.nodes()\r\n m = G.arcs()\r\n \r\n # check if it a valid Graph\r\n if not G.is_correct_type('u'):\r\n print \"ERROR: the graph is not in one of the valid formats for brute_force_coloring()\"\r\n return [], []\r\n \r\n coloring = ones(n, int) # initialize with just one color\r\n chrom_n = inf # initialize chromatic number\r\n min_coloring = [] # initialize minimum coloring\r\n \r\n # iterate till you get a coloring (really stupid way)\r\n terminal = array(range(n), int) + 1\r\n while sum(coloring != terminal) > 0:\r\n #print coloring\r\n coloring[n-1] += 1\r\n # correct if some achieve n\r\n for node in range(n-1):\r\n # if one get above n\r\n if coloring[n-1-node] > max(coloring[0:n-1-node]) + 1:\r\n coloring[n-1-node] = 1 # take one and...\r\n coloring[n-2-node] += 1 # ... add it to the previous one\r\n \r\n # if it is a coloring check it\r\n if G.is_coloring(coloring):\r\n col_number = max(coloring) # number of colors\r\n # if it is better, update\r\n if col_number < chrom_n:\r\n chrom_n = col_number\r\n min_coloring = coloring.copy()\r\n print \"current minimum: \", min_coloring, \"with %d colors\" %(chrom_n)\r\n \r\n return min_coloring, chrom_n",
"def node_condense(imageNodes, imageGrayscale, ones):\n imageLabeled, labels = sp.ndimage.label(imageNodes, structure=ones)\n sizes = sp.ndimage.sum(imageLabeled > 0, imageLabeled, range(1, labels + 1))\n centerOfMass = sp.ndimage.center_of_mass(imageGrayscale, imageLabeled, range(1, labels + 1))\n for label in range(labels):\n if (sizes[label] > 1):\n idx = (imageLabeled == label + 1)\n idm = tuple(np.add(centerOfMass[label], 0.5).astype('int'))\n imageLabeled[idx] = 0\n imageLabeled[idm] = label + 1\n imageLabeledNodes, _ = sp.ndimage.label(imageLabeled > 0, structure=ones)\n imageLabeledNodes = imageLabeledNodes.astype('int')\n return(imageLabeledNodes)",
"def _connect_components_analysis(image):\n if len(image.shape) == 3:\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n else:\n gray_image = image\n\n return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)",
"def _find_community(root, graph, visited):\n community = [root]\n visited.add(root)\n next_queue = [root]\n while next_queue:\n node = next_queue.pop(0)\n for child in graph[node]:\n if child not in visited:\n next_queue.append(child)\n community.append(child)\n visited.add(child)\n\n return community",
"def neato_cooccurrence_graph( nC, v, labels, max_nodes = 10, fnam_stem = \"test\", label_nodes_directly = False, scale=1.0, min_node_size = 0.1 ):\n \n nv = v.astype( float32 ) / v.max()\n\n cutoff = cooccur_cutoff( nC, max_nodes );\n\n graph = pydot.Dot( graph_type = 'graph' )\n graph.set_overlap(\"false\")\n coords = zip(*(nC >= cutoff).nonzero())\n\n # make a dict of all nodes which are mentioned in the coords\n nodes = {}\n index = 1\n for coord in set(chain.from_iterable(coords)) :\n if not nodes.has_key( coord ) :\n node = pydot.Node( str(coord) )\n if v != None :\n #print coord\n label = labels[coord]\n if label_nodes_directly :\n node.set_label( label )\n else :\n node.set_label( str(index) )\n #node.set_penwidth( nv[ coord ] )\n node.set_fixedsize(\"true\")\n node.set_width( max(min_node_size,scale *nv[ coord ]) )\n node.set_shape(\"circle\")\n nodes[ coord ] = node\n graph.add_node( node )\n index = index + 1\n\n for coord in coords :\n \n edge = pydot.Edge( nodes[coord[0]], nodes[coord[1]] )\n edge.set_weight( nC[coord] )\n edge.set_penwidth( nC[coord]*5 )\n #edge.set_label( str(int(m[coord]) ))\n graph.add_edge(edge)\n\n if not label_nodes_directly : \n legend = pydot.Node( \"legend\" )\n nodelist = nodes.items()\n nodelist.sort( lambda a,b : cmp(node_index(a[1].get_label()),node_index(b[1].get_label())) )\n legend.set_label( \"\\l\".join([x[1].get_label()+\":\"+labels[x[0]] for x in nodelist])+\"\\l\" )\n legend.set_shape(\"box\")\n graph.add_node(legend)\n\n #print graph.to_string()\n graph.write_dot(fnam_stem+'.dot', prog='neato' )\n graph.write_png(fnam_stem+'.png', prog='neato' )\n #graph.write_pdf(fnam_stem+'.pdf', prog='neato' )",
"def non_pol_neighbours_graph():\n data = pd.read_csv(\"/Users/emg/GitHub/thesis/output/2019_01/1000_residuals_output_utf8.csv\", index_col=0)\n\n labelled = label_subs(data)\n labelled['resid_rank'] = labelled.resid.rank(pct=True)\n top = subset_df(labelled, 'resid', q=0.95)\n\n edges = top.copy()[['source','target','resid']]\n edges_rev = edges.copy()\n edges_rev.columns = ['target','source','resid']\n directed_edges = pd.concat([edges,edges_rev], sort=True)\n directed_edges['resid_rank'] = directed_edges['resid'].rank(pct=True)\n\n df = label_subs(directed_edges)\n\n pol_subs = load_pol_subs()\n pol_names = pol_subs.subreddit.str.replace('\\\\','')\n pol_subs.subreddit=pol_subs.subreddit.str.replace('\\\\','')\n\n pol_neighbours = df[df['source'].isin(pol_names)].sort_values('resid', ascending=False)\n\n top_pol_neigh = pol_neighbours.groupby('source').head(10).sort_values(['source','resid'], ascending=[True,False])\n \n x = top_pol_neigh[~top_pol_neigh.target.isin(pol_names)][['source','target']]\n\n col_dict = pol_subs.set_index('subreddit').col.to_dict()\n for sub in x.target.unique():\n col_dict[sub] = 'gray'\n\n G = nx.from_pandas_edgelist(x)\n nx.set_node_attributes(G, col_dict, 'col')\n\n f = plt.figure(1)\n ax = f.add_subplot(1,1,1)\n\n colors = dict(G.nodes(data='col')).values()\n\n pos = nx.spring_layout(G, k=0.2)\n nx.draw_networkx(G, pos=pos, with_labels=False, node_color=colors, alpha=0.3)\n #nx.draw_networkx_labels(G, pos=pos, with_labels=True)\n\n plt.axis('off')\n f.set_facecolor('w')\n \n f.tight_layout()\n plt.savefig(figures_path(f\"{date}/non_pol_neighbours_graph.png\"))\n plt.close()",
"def FindClumps_graph(self):\n # IMPORT STUFF\n import string\n # END IMPORT\n \n maxima = self['CL_LOC'].copy()\n maxima = num.where(maxima)\n maxima = (maxima[1],maxima[0])\n detectimg = self['STAMP'].copy()\n \n id = self._getGraphId()\n root = 'FindClumps_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n\n doStamp(detectimg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawCross(maxima,length=7,color='green')\n \n strpeaks = string.strip('%i'% (self['M_NUM_CL']))\n text = 'NC=%s' % strpeaks \n \n # Painted.Graffiti(text,commtextpos)\n \n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['FindClumps'] = epsname\n self['figcomms']['FindClumps'] = text",
"def get_communities(num_of_neighbors, is_self_loops, relevant_period_groups, full_confusion_csv, classes_csv_file, priod_group_column, similarty_csv = ''):\n\n # generate class_names dict\n cnt = 0\n class_name_dict = {}\n with open(classes_csv_file, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if cnt > 0:\n class_name_dict[int(row[8])] = row[1]\n cnt = cnt + 1\n\n\n full_conf = np.genfromtxt(full_confusion_csv, delimiter=',')\n relevant_conf = full_conf[:,:num_of_neighbors+1]\n flatten_conf = np.zeros((relevant_conf.shape[0]*num_of_neighbors,2), dtype=np.int32)\n if similarty_csv != '':\n similarity_mat = np.genfromtxt(similarty_csv, delimiter=',')\n similarity_conf_mat = np.zeros((200, 200), dtype=np.float32)\n print(similarity_mat.shape)\n\n\n row = 0\n for k in range(relevant_conf.shape[0]):\n for m in range(num_of_neighbors):\n flatten_conf[row, 0] = relevant_conf[k,0]\n flatten_conf[row,1] = relevant_conf[k,m+1]\n if similarty_csv != '':\n similarity_conf_mat[int(relevant_conf[k,0]), int(relevant_conf[k,m+1]) ] += similarity_mat[k, m]\n\n row = row + 1\n\n confusion_mat = confusion_matrix(flatten_conf[:,0], flatten_conf[:,1])\n if similarty_csv != '':\n confusion_mat = similarity_conf_mat\n\n confusion_mat = confusion_mat.astype('float') / confusion_mat.sum(axis=1)[:, np.newaxis]\n symmetric_confusion = (confusion_mat + np.transpose(confusion_mat)) / 2\n if not is_self_loops:\n np.fill_diagonal(symmetric_confusion, 0)\n\n # taking only the relevant classes\n if relevant_period_groups != -1:\n df = pd.read_csv(classes_csv_file)\n period_groups = df[priod_group_column]\n relevant_classes = []\n for group in relevant_period_groups:\n group_slice = df[period_groups == group]\n relevant_classes.extend(group_slice['id_period_sorted'].values)\n\n L = len(relevant_classes)\n relevant_confusion = np.zeros((L,L), dtype=np.float32)\n class_node_dict = {}\n for m,cls_i in enumerate(relevant_classes):\n class_node_dict[m] = cls_i\n for n,cls_j in enumerate(relevant_classes):\n relevant_confusion[m,n] = symmetric_confusion[cls_i,cls_j]\n else:\n relevant_confusion = symmetric_confusion\n\n G = nx.from_numpy_matrix(relevant_confusion)\n\n # find best communities based on modularity grade\n resolution_vec = np.linspace(0.0,2,50)\n mod_vec = np.zeros_like(resolution_vec)\n best_modularity = -1\n best_communities = -1\n best_res = -1\n for k in range(resolution_vec.size):\n partition = community.best_partition(G, weight='weight', resolution=resolution_vec[k])\n modularity = community.modularity(partition, G, weight='weight')\n mod_vec[k] = modularity\n if (modularity > best_modularity):\n best_modularity = modularity\n best_communities = partition\n best_res = resolution_vec[k]\n\n summary_str = 'best resolution: %.3f\\nbest modularity: %.3f\\nnumber of communities: %d' % (best_res,best_modularity,len(set(best_communities.values())))\n\n #plt.plot(resolution_vec,mod_vec)\n #plt.show()\n\n # generate community summary file\n count = 0\n strr = ''\n summary_file_name = 'community_summary.csv'\n for com in set(best_communities.values()):\n count += 1.\n list_nodes = [nodes for nodes in best_communities.keys() if best_communities[nodes] == com]\n strr += 'community,' + str(com) + '\\n'\n for nd in list_nodes:\n if relevant_period_groups == -1:\n strr += class_name_dict[nd] + ',id,' + str(nd) + '\\n'\n else:\n strr += class_name_dict[class_node_dict[nd]] + ',id,' + str(class_node_dict[nd]) + '\\n'\n strr += '\\n'\n with open(summary_file_name, \"w\") as text_file:\n text_file.write(strr)\n\n 
print(strr)\n # summary for map visualization tool\n strr = ''\n for k in range(relevant_confusion.shape[0]):\n comm = partition[k]\n comm_members = [nodes for nodes in partition.keys() if partition[nodes] == comm]\n if relevant_period_groups == -1:\n strr += 'id,' + str(k) + ',community,' + str(comm) + ',community_members,'\n else:\n strr += 'id,' + str(class_node_dict[k]) + ',community,' + str(comm) + ',community_members,'\n for member in comm_members:\n if relevant_period_groups == -1:\n strr += str(member) + ','\n else:\n strr += str(class_node_dict[member]) + ','\n strr += '\\n'\n with open('nodes_communities.csv', \"w\") as text_file:\n text_file.write(strr)\n\n\n\n return summary_str"
] | [
"0.7544369",
"0.67278713",
"0.67230785",
"0.6600129",
"0.6450803",
"0.62319934",
"0.62119263",
"0.6207066",
"0.62020797",
"0.61163753",
"0.61084396",
"0.6086533",
"0.5949669",
"0.591313",
"0.59082705",
"0.5903655",
"0.58992743",
"0.58878565",
"0.5877019",
"0.58694255",
"0.5868911",
"0.5826663",
"0.5795262",
"0.5783291",
"0.57811725",
"0.5725391",
"0.5702787",
"0.5699948",
"0.56912565",
"0.5661789"
] | 0.7250731 | 1 |
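Aside: the find_communities / _find_community snippets listed among the negatives above implement a plain breadth-first connected-component search over a dict-of-lists adjacency map. A minimal driver sketch, assuming exactly those two definitions are in scope and the graph is stored as {node: [neighbours]}; the graph literal below is made up for illustration:

graph = {
    "a": ["b"], "b": ["a", "c"], "c": ["b"],  # first component
    "x": ["y"], "y": ["x"],                   # second component
}
print(find_communities(graph))
# expected (insertion-ordered dicts, Python 3.7+): [['a', 'b', 'c'], ['x', 'y']]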
finds which community node is in and returns its corresponding color | def which_color(node):
for i, com in enumerate(communities):
if node in com:
return colors[i]
return nx_helpers.rgb_to_hex((0, 0, 0)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_community_colors(graph, communities):\n colors = nx_helpers.generate_colors(len(communities))\n\n def which_color(node):\n \"\"\"finds which community node is in and returns\n its corresponding color\n \"\"\"\n for i, com in enumerate(communities):\n if node in com:\n return colors[i]\n return nx_helpers.rgb_to_hex((0, 0, 0))\n\n node_colors = [which_color(node) for node in graph.nodes()]\n return node_colors",
"def get_color(node, color_map):\r\n if node in color_map:\r\n return color_map[node]\r\n return \"black\"",
"def get_color(self, node: Node) -> str:\n\n idx = hash(node.get_kind_name()) % len(self.colors_)\n return self.colors_[idx]",
"def get_node_color(node_label):\n for NODE_KEY in list(NODE_TYPES.keys()):\n if node_label in NODE_TYPES[NODE_KEY]:\n return NODE_COLOR_DICT[NODE_KEY]\n try:\n x = int(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n x = float(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n node_label = node_label.replace(\"\\'\", \"\\\"\")\n tree = json.loads(node_label)\n for key in tree.keys():\n if key not in NODE_TYPES['Learner Params']:\n return NODE_COLOR_DICT['Uncategorized']\n else:\n try:\n x = int(tree[key])\n except:\n try:\n x = float(tree[key])\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Learner Params']\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Uncategorized']",
"def get_node_color(self, origin_node_id):\n origin_node_id %= 11\n if origin_node_id == 9:\n return 0.753, 0.753, 0.753, 1.\n if origin_node_id == 8:\n return 0.824, 0.412, 0.118, 1.\n if origin_node_id == 7:\n return 1.000, 0.000, 1.000, 1.\n if origin_node_id == 6:\n return 1.000, 1.000, 0.000, 1.\n if origin_node_id == 5:\n return 1.000, 0.627, 0.478, 1.\n if origin_node_id == 4:\n return 0.498, 1.000, 0.000, 1.\n if origin_node_id == 3:\n return 0.000, 1.000, 1.000, 1.\n if origin_node_id == 2:\n return 1.000, 0.922, 0.804, 1.\n if origin_node_id == 1:\n return 0.871, 0.722, 0.529, 1.\n if origin_node_id == 0:\n return 0.000, 0.749, 1.000, 1.\n if origin_node_id == 0:\n return 0.500, 0.549, 1.000, 1.\n\n return 0.8, 0.8, 0.8, 1.0",
"def color_chosen_nodes(network, chosen_node, color):\n\n # Color the node selected randomly by RWR\n network.nodes[chosen_node]['color'] = color\n # Create a list with color for each node\n color_nodes = [network.nodes[node]['color'] for node in network.nodes]\n return color_nodes",
"def get_graph_color ( self, object ):\n return self.graph_color_",
"def getCurrentColor(self):\n if self.__currentnode__ is None:\n return None\n else:\n return self.__currentnode__.getPlayer().getColor()",
"def maximal_color(graph, node):\n return max(get_node_colors(graph, node))",
"def colorNode(node):\n # Try to find the session color manager.\n manager = _findSessionColorManager()\n\n # If one exists, use it to try to color the node.\n if manager is not None:\n manager.colorNode(node)",
"def colorNodeByName(node):\n # Try to find the session color manager.\n manager = _findSessionColorManager()\n\n # If one exists, use it to try to color the node.\n if manager is not None:\n manager.colorNodeByName(node)",
"def _community(G, u, community):\n node_u = G.node[u]\n try:\n return node_u[community]\n except KeyError:\n raise nx.NetworkXAlgorithmError('No community information')",
"def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity",
"def node_colors(self, nodes):\n zmin, zmax = nodes[:, 2].min(), nodes[:, 2].max()\n start_color = np.array(self.background) + 5\n end_color = np.array(self.nodeColor)\n z = (nodes[:, 2] - zmin) / (zmax - zmin)\n # indexing [:, None] is used to explicitly state second axis\n c = (1 - z)[:, None] @ start_color[:, None].T + z[:, None] @ end_color[:, None].T\n self.wireframe_col = c\n # return c",
"def colour_node(instance, reaction_colour='darkgrey', Xc_colour='orange', waste_colour='red', res_colour='limegreen', InPr_colour='lightblue'):\n G, mapping = instance.network()\n\n # relabel\n G = nx.relabel_nodes(G, mapping)\n\n node_dict_mapped = nodes_mapped(instance)\n\n waste, resources, intmed_products = instance.amenities()\n\n colour_map = []\n\n for nd in G:\n # print(\"nd\",nd)\n for nd_label, ammentity in node_dict_mapped.items():\n # print(\"nd_label\",nd_label)\n if nd_label == nd:\n # print(nd, nd_label)\n\n if ammentity == \"r\":\n colour_map.append(reaction_colour)\n\n elif ammentity == \"Xc\":\n colour_map.append(Xc_colour)\n\n elif ammentity == \"w\":\n colour_map.append(waste_colour)\n\n elif ammentity == \"Xr\":\n colour_map.append(res_colour)\n\n elif ammentity == \"InPr\":\n colour_map.append(InPr_colour)\n return colour_map",
"def get_edge_color( row ):\n\n rgb = 0.5 * (\n node_color_dict[ row[ 'source' ] ] + \\\n node_color_dict[ row[ 'target' ] ] )\n\n return rgb2hex( rgb )",
"def get_color(edge, nR):\n R_color, E_color = 'C0', 'C1'\n edge = sorted(edge)\n if edge[0] < nR:\n if edge[1] > nR:\n comp_color = 'gray'\n zorder = 10\n else:\n comp_color = R_color\n zorder = 5\n else:\n comp_color = E_color\n zorder = 5\n return comp_color, zorder",
"def vertex_coloring(self, display = False):\r\n stack = self.SL_algorithm()\r\n color_of_vertex = self.greedily_coloring(stack)\r\n if(display):\r\n self.display_graph(color_of_vertex)\r\n return color_of_vertex\r\n else: \r\n return color_of_vertex",
"def nextColorLedCluster(self):\n hwidRef = YRefParam()\n if YAPI.YISERR(self._nextFunction(hwidRef)):\n return None\n if hwidRef.value == \"\":\n return None\n return YColorLedCluster.FindColorLedCluster(hwidRef.value)",
"def get_color(rank):\n if rank == 1:\n color = int(0xffd700)\n elif rank == 2:\n color = int(0xc0c0c0)\n elif rank == 3:\n color = int(0xcd7f32)\n else:\n color = random.randint(1, 16777215)\n\n return discord.Color(color)",
"def FirstColorLedCluster():\n devRef = YRefParam()\n neededsizeRef = YRefParam()\n serialRef = YRefParam()\n funcIdRef = YRefParam()\n funcNameRef = YRefParam()\n funcValRef = YRefParam()\n errmsgRef = YRefParam()\n size = YAPI.C_INTSIZE\n #noinspection PyTypeChecker,PyCallingNonCallable\n p = (ctypes.c_int * 1)()\n err = YAPI.apiGetFunctionsByClass(\"ColorLedCluster\", 0, p, size, neededsizeRef, errmsgRef)\n\n if YAPI.YISERR(err) or not neededsizeRef.value:\n return None\n\n if YAPI.YISERR(\n YAPI.yapiGetFunctionInfo(p[0], devRef, serialRef, funcIdRef, funcNameRef, funcValRef, errmsgRef)):\n return None\n\n return YColorLedCluster.FindColorLedCluster(serialRef.value + \".\" + funcIdRef.value)",
"def community_detection(net_G):\r\n if list(nx.isolates(net_G)) == []:\r\n part = community.best_partition(net_G)\r\n #values = [part.get(node) for node in net_G.nodes()]\r\n #nx.draw_spring(net_G, cmap = plt.get_cmap('jet'), node_color = values, node_size=30, with_labels=False)\r\n #plt.show()\r\n else:\r\n net_G = net_G.copy()\r\n net_G.remove_nodes_from(list(nx.isolates(net_G)))\r\n part = community.best_partition(net_G)\r\n list_nodes = []\r\n for com in set(part.values()):\r\n list_nodes.append([nodes for nodes in part.keys() if part[nodes] == com])\r\n num_of_communities = len(list_nodes)\r\n partition_performance = nx.algorithms.community.quality.performance(net_G, list_nodes)\r\n net_communities = [[\"Numbers of communities:\", num_of_communities], \\\r\n [\"Partition performance:\", partition_performance]]\r\n return net_communities",
"def _greedy_color(self, source):\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = True\n for c in xrange(self.graph.v()): # check colors\n if not self._color_list[c]:\n self.color[source] = c\n break\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = False\n return c",
"def FindColorLedCluster(func):\n # obj\n obj = YFunction._FindFromCache(\"ColorLedCluster\", func)\n if obj is None:\n obj = YColorLedCluster(func)\n YFunction._AddToCache(\"ColorLedCluster\", func, obj)\n return obj",
"def is_red(node):\n if node is None:\n return False\n return node.colour is True",
"def edge_colors(\n et: pd.DataFrame,\n nt: pd.DataFrame,\n color_by: Hashable,\n node_color_by: Hashable,\n):\n if color_by in (\"source_node_color\", \"target_node_color\"):\n edge_select_by = color_by.split(\"_\")[0]\n return encodings.data_color(\n et[edge_select_by].apply(nt[node_color_by].get),\n nt[node_color_by],\n )\n elif color_by:\n return encodings.data_color(et[color_by], et[color_by])\n return pd.Series([\"black\"] * len(et), name=\"color_by\")",
"def set_colors(graph):\n colors = []\n for n in graph.nodes():\n node = graph.node[n]\n if node['adopter'] == 1:\n colors.append('b')\n else:\n colors.append('r')\n \n return colors",
"def color(self):\n return self.container['color']",
"def get_color(self, coord):\n return self.board[coord[0], coord[1]]",
"def FindColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_FindColor(self, *args)"
] | [
"0.75510806",
"0.69493985",
"0.66909236",
"0.66592944",
"0.6608618",
"0.64989406",
"0.6450056",
"0.64425915",
"0.64138216",
"0.6396164",
"0.6335987",
"0.6323094",
"0.62105125",
"0.6205423",
"0.61846185",
"0.617105",
"0.6087763",
"0.59781784",
"0.5975609",
"0.59191763",
"0.590046",
"0.5883436",
"0.58277667",
"0.58133215",
"0.57972234",
"0.5789967",
"0.57800215",
"0.577198",
"0.57589364",
"0.57484853"
] | 0.83000135 | 0 |
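A minimal, self-contained sketch of how the which_color document above is normally wrapped, mirroring the node_community_colors snippet among the negatives. nx_helpers is not part of this dump, so rgb_to_hex here is a stand-in and the palette generation is hypothetical:

import networkx as nx

def rgb_to_hex(rgb):
    # stand-in for nx_helpers.rgb_to_hex (assumed behaviour)
    return "#%02x%02x%02x" % rgb

def node_community_colors(graph, communities):
    # hypothetical palette; the original nx_helpers.generate_colors is assumed
    # to return one distinguishable colour per community
    colors = [rgb_to_hex(((60 * i) % 256, (110 * i) % 256, 200))
              for i in range(len(communities))]

    def which_color(node):
        # same logic as the document snippet above
        for i, com in enumerate(communities):
            if node in com:
                return colors[i]
        return rgb_to_hex((0, 0, 0))

    return [which_color(node) for node in graph.nodes()]

g = nx.karate_club_graph()
communities = [set(range(0, 17)), set(range(17, 34))]
print(node_community_colors(g, communities)[:5])  # five nodes from the first community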
returns a list of colors for coloring nodes based on which set each node is in | def node_set_colors(nodes, spanset, gapset, preset, postset):
node_colors = []
for n in nodes:
if n in preset:
node_colors.append(nx_helpers.rgb_to_hex((255, 0, 0)))
elif n in postset:
node_colors.append(nx_helpers.rgb_to_hex((255, 255, 0)))
## reads now may be missing the last set of numbers. Account for this in the node naming.
elif n in gapset or any([g for g in gapset if n in g]):
node_colors.append(nx_helpers.rgb_to_hex((0, 10, 250)))
elif n in spanset or any([s for s in spanset if n in s]):
node_colors.append(nx_helpers.rgb_to_hex((0, 250, 10)))
else:
# uncategorized
node_colors.append(nx_helpers.rgb_to_hex((0, 0, 0)))
return node_colors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_groups(nodes):\n return list(set([node.color for node in nodes]))",
"def get_colors(self):\n colors = [\"#244486\", \"#A6A6A6\", \"#B12122\"]\n cmap = LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n color_palette=[cmap(i) for i in np.linspace(0, 1, len(set(self.nodes_list)))]\n return dict(zip(list(set(self.nodes_list)), color_palette))",
"def set_colors(graph):\n colors = []\n for n in graph.nodes():\n node = graph.node[n]\n if node['adopter'] == 1:\n colors.append('b')\n else:\n colors.append('r')\n \n return colors",
"def node_colors(self, nodes):\n zmin, zmax = nodes[:, 2].min(), nodes[:, 2].max()\n start_color = np.array(self.background) + 5\n end_color = np.array(self.nodeColor)\n z = (nodes[:, 2] - zmin) / (zmax - zmin)\n # indexing [:, None] is used to explicitly state second axis\n c = (1 - z)[:, None] @ start_color[:, None].T + z[:, None] @ end_color[:, None].T\n self.wireframe_col = c\n # return c",
"def color_chosen_nodes(network, chosen_node, color):\n\n # Color the node selected randomly by RWR\n network.nodes[chosen_node]['color'] = color\n # Create a list with color for each node\n color_nodes = [network.nodes[node]['color'] for node in network.nodes]\n return color_nodes",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def getColors():\n return ['#8c99fc', '#cacefd', '#fff1d7', '#feda98', '#fda85a', '#fc6647']",
"def get_colors():\n colors = {}\n for h in wn.synset('chromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n for h in wn.synset('achromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n return colors",
"def greedy_coloring(*args):\r\n # get arguments\r\n G = args[0]\r\n n = G.nodes()\r\n m = G.arcs()\r\n \r\n # check if it a valid Graph\r\n if not G.is_correct_type('u'):\r\n print \"ERROR: the graph is not in one of the valid formats for greedy_coloring()\"\r\n return [], []\r\n \r\n # calculate degrees of each node (set as rows per node)\r\n a_nodes = zeros((n,n), int)\r\n for arc in range(m):\r\n i = G.A[arc,0] # tail of the arc\r\n j = G.A[arc,1] # head of the arc\r\n a_nodes[i-1,j-1] = 1\r\n a_nodes[j-1,i-1] = 1\r\n # get degree and add the node number\r\n degree = sum(a_nodes,0)\r\n degree = vstack((degree, array(range(n), int) + 1))\r\n \r\n # initialize coloring vector\r\n coloring = zeros(n, int)\r\n color_step = 1\r\n \r\n # if there are any nodes of degree 0 color them first\r\n while min(degree[0,:]) == 0:\r\n n_i = argmin(degree[0,:]) # get node with zero\r\n i = degree[1,n_i]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_i], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_i], axis=1)\r\n # color it\r\n coloring[i-1] = color_step\r\n \r\n # iterate till all nodes have a color\r\n while size(degree) > 0:\r\n n_i = argmax(degree[0,:]) # get node with largest degree\r\n i = degree[1,n_i]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_i], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_i], axis=1)\r\n \r\n # color it\r\n coloring[i-1] = color_step\r\n \r\n # color the rest of the possible nodes\r\n possible = 1 - array(a_nodes[i-1,:]) # transforms 0 in 1, and 1 in 0\r\n # iterate while there are possible nodes available\r\n while sum(possible) > 0:\r\n # get the node with largest degree among possible ones\r\n n_j = argmax(degree[0,:] * possible)\r\n j = degree[1,n_j]\r\n # eliminate the node column from the list and matrix\r\n degree = delete(degree, s_[n_j], axis=1)\r\n a_nodes = delete(a_nodes, s_[n_j], axis=1)\r\n possible = delete(possible, n_j)\r\n \r\n # color it\r\n coloring[j-1] = color_step\r\n # eliminate adjacent nodes of j from possible nodes\r\n possible = possible * (1 - a_nodes[j-1,:])\r\n \r\n # update color\r\n color_step += 1\r\n \r\n col_number = max(coloring) # approx chromatic number\r\n \r\n return coloring, col_number",
"def __get_color_table(self):\n #Color: Green Yellow Orange Red Distance:\n return [[0.70, 0.15, 0.1, 0.05], # 0\n [0.17, 0.6, 0.17, 0.06], # 1\n [0.06, 0.17, 0.6, 0.17], # 2\n [0.05, 0.12, 0.23, 0.6], # 3\n [0.05, 0.1, 0.15, 0.8]] # >= 4 ",
"def edge_colors(\n et: pd.DataFrame,\n nt: pd.DataFrame,\n color_by: Hashable,\n node_color_by: Hashable,\n):\n if color_by in (\"source_node_color\", \"target_node_color\"):\n edge_select_by = color_by.split(\"_\")[0]\n return encodings.data_color(\n et[edge_select_by].apply(nt[node_color_by].get),\n nt[node_color_by],\n )\n elif color_by:\n return encodings.data_color(et[color_by], et[color_by])\n return pd.Series([\"black\"] * len(et), name=\"color_by\")",
"def which_color(node):\n for i, com in enumerate(communities):\n if node in com:\n return colors[i]\n return nx_helpers.rgb_to_hex((0, 0, 0))",
"def setColorConf(colors,ngroups)->list:\n if colors == \"hcl\":\n try:\n from colorspace import sequential_hcl\n color_repo = sequential_hcl(h=[15,375],l=65,c=70)\n colors_list = color_repo.colors(ngroups + 1)\n except ImportError:\n print('hcl colorspace package has not being installed.')\n print('please try the following command:')\n print('pip install git+https://github.com/retostauffer/python-colorspace')\n else:\n colors = list(plt.get_cmap(colors).colors)\n colors_list = [to_hex(color) for color in colors]\n colors_list = colors_list[:ngroups]\n\n return colors_list",
"def brute_force_coloring(*args):\r\n # get arguments\r\n G = args[0]\r\n n = G.nodes()\r\n m = G.arcs()\r\n \r\n # check if it a valid Graph\r\n if not G.is_correct_type('u'):\r\n print \"ERROR: the graph is not in one of the valid formats for brute_force_coloring()\"\r\n return [], []\r\n \r\n coloring = ones(n, int) # initialize with just one color\r\n chrom_n = inf # initialize chromatic number\r\n min_coloring = [] # initialize minimum coloring\r\n \r\n # iterate till you get a coloring (really stupid way)\r\n terminal = array(range(n), int) + 1\r\n while sum(coloring != terminal) > 0:\r\n #print coloring\r\n coloring[n-1] += 1\r\n # correct if some achieve n\r\n for node in range(n-1):\r\n # if one get above n\r\n if coloring[n-1-node] > max(coloring[0:n-1-node]) + 1:\r\n coloring[n-1-node] = 1 # take one and...\r\n coloring[n-2-node] += 1 # ... add it to the previous one\r\n \r\n # if it is a coloring check it\r\n if G.is_coloring(coloring):\r\n col_number = max(coloring) # number of colors\r\n # if it is better, update\r\n if col_number < chrom_n:\r\n chrom_n = col_number\r\n min_coloring = coloring.copy()\r\n print \"current minimum: \", min_coloring, \"with %d colors\" %(chrom_n)\r\n \r\n return min_coloring, chrom_n",
"def colors(self):\r\n\t\treturn self._colors",
"def color_label_generator(clusters):\n colors = ['green', 'red', 'blue']\n point_colors = [\"\" for i in range(12)]\n for i, cluster in enumerate(clusters):\n for point_num in cluster:\n point_colors[point_num] = colors[i]\n return point_colors",
"def colors(self):\n return self[\"colors\"]",
"def colors(self):\n return self[\"colors\"]",
"def get_node_color(self, origin_node_id):\n origin_node_id %= 11\n if origin_node_id == 9:\n return 0.753, 0.753, 0.753, 1.\n if origin_node_id == 8:\n return 0.824, 0.412, 0.118, 1.\n if origin_node_id == 7:\n return 1.000, 0.000, 1.000, 1.\n if origin_node_id == 6:\n return 1.000, 1.000, 0.000, 1.\n if origin_node_id == 5:\n return 1.000, 0.627, 0.478, 1.\n if origin_node_id == 4:\n return 0.498, 1.000, 0.000, 1.\n if origin_node_id == 3:\n return 0.000, 1.000, 1.000, 1.\n if origin_node_id == 2:\n return 1.000, 0.922, 0.804, 1.\n if origin_node_id == 1:\n return 0.871, 0.722, 0.529, 1.\n if origin_node_id == 0:\n return 0.000, 0.749, 1.000, 1.\n if origin_node_id == 0:\n return 0.500, 0.549, 1.000, 1.\n\n return 0.8, 0.8, 0.8, 1.0",
"def get_color_list(cluster_count):\n color_list = []\n for i in xrange(cluster_count):\n color_list.append(random_color_gen())\n return color_list",
"def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret",
"def node_community_colors(graph, communities):\n colors = nx_helpers.generate_colors(len(communities))\n\n def which_color(node):\n \"\"\"finds which community node is in and returns\n its corresponding color\n \"\"\"\n for i, com in enumerate(communities):\n if node in com:\n return colors[i]\n return nx_helpers.rgb_to_hex((0, 0, 0))\n\n node_colors = [which_color(node) for node in graph.nodes()]\n return node_colors",
"def ordered_colors():\n\n return [(\"yellow\",0.263) ,(\"orange\", 0.047), (\"red\",0.0),(\"green\", 0.444), (\"purple\", 0.972)]",
"def coloring(self):\n labeling = [0]*self.n\n for ik in range(self.loc.index(-1)): # Loop over colors to current depth\n # Find all slots in coloring that are still empty\n freeIndices = [ilc for ilc,jlc in enumerate(labeling) if jlc == 0]\n # Get the current index for ik-th color\n cIdx = self.loc[ik]\n # Get the coloring that corresponds to the current index so that we can add it to the labeling\n clabeling = integer2coloring(cIdx,len(freeIndices),self.colors[ik])\n # Load up labeling with the current color in the corrent slots\n for iIdx,jIdx in enumerate(freeIndices):\n if clabeling[iIdx] !=0:\n labeling[jIdx] = ik + 1\n self.labeling = labeling\n return self.labeling",
"def getColors():\n colors = ['#d53e4f',\n '#fc8d59',\n '#fee08b',\n '#ffffbf',\n '#e6f598',\n '#99d594',\n '#3288bd',\n ]\n return colors",
"def nodes(self):\n\n return list(set(self._graph.keys() + [x for x in itertools.chain.from_iterable(self._graph.values())]))",
"def colors(self):\n return self._colors",
"def addcolors(poptree):\r\n rgbset = ([[0.4,0.4,0.4],\r\n [0.650980392,0.462745098,0.11372549],\r\n [0.4,0.650980392,0.117647059],\r\n [0.121568627,0.470588235,0.705882353],\r\n [0.905882353,0.160784314,0.541176471],\r\n [0.458823529,0.439215686,0.701960784],\r\n [0.850980392,0.37254902,0.007843137],\r\n [0.105882353,0.619607843,0.466666667],\r\n [0.901960784,0.670588235,0.007843137],\r\n [0.984313725,0.603921569,0.6]])\r\n for i in range(numpops):\r\n poptree[i].append(rgbset[i])\r\n for i in range(numpops,2*numpops-1):\r\n poptree[i].append([])\r\n while True:\r\n notdone = False\r\n for i in range(numpops,2*numpops-1):\r\n if poptree[i][5] == []:\r\n ld = poptree[i][2]\r\n rd = poptree[i][3]\r\n if poptree[ld][5] != [] and poptree[rd][5] != []:\r\n acolor = [0,0,0]\r\n acolor = meanrgb(poptree[ld][5],poptree[rd][5])\r\n poptree[i][5] = acolor\r\n else:\r\n notdone = True\r\n if notdone == False:\r\n break\r\n return poptree",
"def generate_cluster_colors(num, grouping):\n\n\tif (grouping == 'dataset' or grouping == 'target_region') and num > 2 and num <= 9:\n\t\tc = cl.scales[str(num)]['qual']['Set1']\n\t\treturn c\n\n\tif num>18:\n\t\t# c = ['hsl('+str(round(h*1.8 % 360))+',50%,50%)' for h in linspace(0, 360, num)]\n\t\tc = ['rgb'+str(colorsys.hls_to_rgb((h*1.8/360), 0.5, 0.5)) for h in linspace(0, 360, num)]\n\telse:\n\t\t# c = ['hsl('+str(round(h*1.3 % 360))+',50%,50%)' for h in linspace(0, 360, num)]\n\t\tc = ['rgb'+str(colorsys.hls_to_rgb((h*1.3 / 360), 0.5, 0.5)) for h in linspace(0, 360, num)]\n\n\tc=c+c\n\treturn c",
"def exploit_chain_coloring(graph, exploit_chain):\n node_color = []\n edge_color = []\n\n for node in graph:\n if node in exploit_chain:\n node_color.append('red')\n else:\n node_color.append('grey')\n\n for edge in graph.edges:\n control = False\n # checks if in exploit chain or not\n for i in range(len(exploit_chain)-1):\n if edge[0]==exploit_chain[i] and edge[1]==exploit_chain[i+1]:\n edge_color.append('red')\n control = True\n elif edge[1]==exploit_chain[i] and edge[0]==exploit_chain[i+1]:\n edge_color.append('red')\n control = True\n\n if control == False:\n edge_color.append('grey')\n\n return node_color, edge_color"
] | [
"0.78957987",
"0.7514314",
"0.712716",
"0.69475394",
"0.6675967",
"0.66652167",
"0.66599905",
"0.66454685",
"0.64734924",
"0.6415016",
"0.64149046",
"0.6397059",
"0.63902277",
"0.63846207",
"0.63146603",
"0.6292402",
"0.627562",
"0.627562",
"0.62653184",
"0.62567157",
"0.62204975",
"0.6208494",
"0.62081605",
"0.6207174",
"0.61238325",
"0.6106372",
"0.60992575",
"0.60924006",
"0.6078123",
"0.607428"
] | 0.7795284 | 1 |
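A minimal usage sketch for the node_set_colors document above, assuming its definition is in scope. nx_helpers is not shown in this dump, so a SimpleNamespace stand-in supplies rgb_to_hex, and the read-name sets are made up for illustration:

import types

# stand-in for the nx_helpers module referenced by node_set_colors (assumed behaviour)
nx_helpers = types.SimpleNamespace(rgb_to_hex=lambda rgb: "#%02x%02x%02x" % rgb)

nodes = ["read1", "read2", "read3", "read4", "read5"]
spanset, gapset = {"read1"}, {"read2"}
preset, postset = {"read3"}, {"read4"}
print(node_set_colors(nodes, spanset, gapset, preset, postset))
# expected: ['#00fa0a', '#000afa', '#ff0000', '#ffff00', '#000000']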
generates and returns overlap graph from .paf files | def generate_graph(params):
alignedreads = read_paf(params, should_filter_paf=True)
aligned = [(t, h) for t, h, _ in alignedreads]
graph = nx.Graph()
graph.add_edges_from(aligned)
return graph | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overlap_graph(input, overlap):\n\n sequence_dict = {}\n seq_id_list = []\n\n for seq_record in SeqIO.parse(input, \"fasta\"): # change input variable.\n sequence_dict[seq_record.id] = seq_record.seq\n seq_id_list.append(seq_record.id)\n\n adjacency_list= []\n\n index = overlap\n\n for id in seq_id_list:\n neg_index = (index * -1)\n for k in sequence_dict:\n\n if id == k:\n pass\n\n elif id != k:\n if sequence_dict[id][neg_index:] == sequence_dict[k][0:index]:\n adjacency_list.append(f\"{id} {k}\".format(id=id, k=k))\n\n #print(\"\\nyes\")\n #print(id, sequence_dict[id][neg_index:], \"and\", k, sequence_dict[k][0:index], \"\\n\")\n else:\n pass\n\n #print(\"no\")\n #print(id, sequence_dict[id][neg_index:], \"and\", k, sequence_dict[k][0:index])\n open(\"output.txt\", \"w\")\n with open(\"output.txt\", \"a\") as out:\n for x in adjacency_list:\n out.write(f\"{x}\\n\".format(x=x))",
"def get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping",
"def check_overlap(gfa_, path, external_file):\n if external_file:\n fasta_dict = fasta_reader(path, external_file)\n if not fasta_dict:\n return None\n\n eid_dict = dict()\n node_dict = dict()\n\n count_consistency = 0\n count_no_defined = 0\n edges_no_consistency = []\n edges_no_calculate = []\n edge_no_text = 0\n\n data_edges = gfa_.edge()\n for node1 in data_edges:\n for node2 in data_edges[node1]:\n for eid in data_edges[node1][node2]:\n if not eid_dict.get(eid):\n eid_dict[eid] = True\n from_id = data_edges[node1][node2][eid]['from_node']\n try:\n node_dict[from_id] = gfa_.node()[from_id]['sequence']\n except KeyError:\n edge_no_text += 1\n break\n to_id = data_edges[node1][node2][eid]['to_node']\n try:\n node_dict[to_id] = gfa_.node()[to_id]['sequence']\n except KeyError:\n edge_no_text += 1\n break\n\n from_orn = data_edges[node1][node2][eid]['from_orn']\n to_orn = data_edges[node1][node2][eid]['to_orn']\n overlap = int(data_edges[node1][node2][eid]['alignment'].rstrip('M'))\n\n if node_dict[from_id] == '*' and external_file:\n node_dict[from_id] = fasta_dict.get(from_id, '*')\n if node_dict[from_id] == '*':\n GRAPH_LOGGER.debug('Node %s has sequence no specify!', from_id)\n if node_dict[to_id] == '*' and external_file:\n node_dict[to_id] = fasta_dict.get(to_id, '*')\n if node_dict[to_id] == '*':\n GRAPH_LOGGER.debug('Node %s has sequence no specify!', to_id)\n\n if not node_dict[from_id] == '*' and not node_dict[to_id] == '*':\n check = consistency((from_id, to_id), \\\n (node_dict[from_id], node_dict[to_id]), (from_orn, to_orn), overlap)\n if check:\n count_consistency += 1\n else:\n edges_no_consistency.append(eid)\n else:\n GRAPH_LOGGER.debug('Can\\'t check overlap consistency between \\\n node %s and %s', from_id, to_id)\n count_no_defined += 1\n edges_no_calculate.append(eid)\n\n GRAPH_LOGGER.debug('%s edge overlap are consistency of total amount of %s', \\\n count_consistency, len(eid_dict)-edge_no_text)\n\n return edges_no_consistency, edges_no_calculate",
"def overlap(fields) :\n\n r13=apload.ApLoad(apred='r13')\n f=[]\n a=[]\n for field in fields :\n f.append(fits.open(field+'/'+field+'_rv.fits'))\n a.append( r13.apFieldVisits(field))\n\n outdir=fields[0]+'_'+fields[1]\n try: os.makedirs(outdir)\n except: pass\n\n fp=open(outdir+'/'+outdir+'.html','w')\n fp.write('<HTML>\\n')\n fp.write('<HEAD><script type=text/javascript src=../html/sorttable.js></script></head>')\n fp.write('<BODY>\\n')\n fp.write('<TABLE BORDER=2>\\n')\n\n matplotlib.use('Agg')\n i1,i2=match.match(f[0][1].data['APOGEE_ID'],f[1][1].data['APOGEE_ID'])\n colors=['g','r','b','m']\n for star in f[0][1].data['APOGEE_ID'][i1] :\n print(star)\n fp.write('<TR><TD>{:s}<BR>\\n'.format(star))\n fp.write('<TABLE BORDER=2>\\n')\n fig,ax=plots.multi(1,1)\n for i,field in enumerate(f) :\n j=np.where(field[2].data['APOGEE_ID'] == star)[0]\n plots.plotp(ax,field[2].data['MJD'][j],field[2].data['VHELIO'][j],color=colors[i],size=10)\n j=np.where(field[1].data['APOGEE_ID'] == star)[0][0]\n fp.write('<TR><TD>Doppler<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.0f}<TD>{:8.2f}<TD>{:8.2f}\\n'.format(\n field[1].data['VHELIO_AVG'][j],field[1].data['VSCATTER'][j],\n field[1].data['RV_TEFF'][j],field[1].data['RV_LOGG'][j],field[1].data['RV_FEH'][j]))\n for i,field in enumerate(a) :\n j=np.where(field[1].data['APOGEE_ID'] == star)[0]\n gd=np.where(np.abs(field[1].data['VHELIO'][j]) < 999)[0]\n plots.plotp(ax,field[1].data['MJD'][j[gd]],field[1].data['VHELIO'][j[gd]],color=colors[i+2],size=10)\n #fp.write('<TR><TD>IDL<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.0f}<TD>{:8.2f}<TD>{:8.2f}\\n'.format(\n # field[1].data['VHELIO_AVG'],field[1].data['VSCATTER'],\n # field[1].data['RV_TEFF'],field[1].data['RV_LOGG'],field[1].data['RV_FEH']))\n plt.draw()\n plt.close()\n fig.savefig(outdir+'/'+star+'.png')\n fp.write('</TABLE>\\n')\n fp.write('<TD><a HREF={:s}.png> <IMG SRC={:s}.png> </a>\\n'.format(star,star))\n fp.write('</TABLE>')\n fp.close()",
"def getConf(ppiFile):\n validpattern = re.compile('^[\\w _\\-.,\\t\"\\':;]+$')\n splitpattern = re.compile('[\\t ;,]+')\n numericpattern = re.compile('^[0-9. \\t,\\-]+')\n\t### collect node names ###\n finfile = open(ppiFile, 'r')\n names = {}\n index = 0\n for temp in finfile:\n temp = temp.strip('\\t \\n\\r')\n if temp == \"\" or (re.search(validpattern, temp) is None):\n continue\n allwords = re.split(splitpattern, temp)\n for i in xrange(0, 2):\n if re.match(numericpattern, allwords[i]) is not None:\n temp = \"Error: file inconsistent\"\n temp = temp + \"(possible node ID is numeric)\"\n temp = temp + \" \" + allwords[i]\n print >> sys.stderr, temp\n exit(1)\n if allwords[0] not in names:\n names[allwords[0]] = index\n index = index + 1\n if allwords[1] not in names:\n names[allwords[1]] = index\n index = index + 1\n finfile.close()\n names = collections.OrderedDict(sorted(names.items(), key=lambda x: x[1]))\n N = index\n ppbConf = np.zeros((N, N))\n\t### collect edges ###\n finfile = open(ppiFile, 'r')\n for temp in finfile:\n temp = temp.strip('\\t \\n\\r')\n if temp == \"\" or (re.search(validpattern, temp) is None):\n continue\n allwords = re.split(splitpattern, temp)\n i = names[allwords[0]]\n j = names[allwords[1]]\n templen = len(temp)\n # allow for tab or space delimiters\n temp = temp.split('\\t')\n if templen == len(temp):\n \ttemp = temp.split(' ')\n \t\n if len(temp) > 2:\n confidence = temp[2]\n else:\n confidence = 1\n if i != j:\n ppbConf[i, j] = confidence\n ppbConf[j, i] = confidence\n finfile.close()\n return (ppbConf, names)",
"def generatePhasingScore(options,phase,cycle):\n score,readcount,readseq=readDataForPhasingScoreComputation(options,phase)\n phased_loci_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".positive_phase_loci\"\n final_phase_loci=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".phasing_score_phase_loci\"\n fhr=open(phased_loci_filename,\"r\")\n out4=open(final_phase_loci,\"w\")\n for line in fhr:\n chromosome,ss,ee=line.strip().split()\n ss=int(ss)\n ee=int(ee)\n #correct=list(range(ss,ee+1,phase))\n phasing_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".phasing_score\"\n abundance_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".abundance\"\n out=open(phasing_score_filename,\"w\")\n out2=open(abundance_score_filename,\"w\")\n score_count={}\n for site in range(ss,ee+1):\n start=site-(phase*4)\n end=site+(phase*5)-1\n max_within_site,max_within_count,all_scores=0,0,0\n for cor in range(start,end+1):\n if cor not in score[chromosome]:continue\n all_scores+=score[chromosome][cor]\n for i in readcount[chromosome][cor]:\n if max_within_count<readcount[chromosome][cor][i]:\n max_within_site=cor\n max_within_count=readcount[chromosome][cor][i]\n all_scores-=max_within_count\n P,k=0,0\n s=start\n while s<end:\n if s not in score[chromosome]:\n s+=phase\n continue\n if score[chromosome][s]!=0:\n P+=score[chromosome][s]\n k+=1\n if s == max_within_site:\n P-=max_within_count \n s+=phase\n U=all_scores-P\n \n #if U<0: continue\n if k>=3:\n #print(P,U,k)\n phas_score=math.log((1+(10*(P/(1+U))))**(k-2))\n \"\"\"if phas_score>max and site in correct:\n max=phas_score\"\"\"\n else:\n phas_score=0\n out.write(str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.write(chromosome+\"\\t\"+str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n if chromosome not in score_count:\n score_count[chromosome]={}\n if site not in score_count[chromosome]:\n score_count[chromosome][site]=phas_score\n if site in readcount[chromosome] and '+' in readcount[chromosome][site] and readcount[chromosome][site]['+']!=0:\n out2.write(str(site)+\"\\t\"+str(readcount[chromosome][site]['+'])+\"\\n\")\n if site in readcount[chromosome] and '-' in readcount[chromosome][site] and readcount[chromosome][site]['-']!=0:\n out2.write(str(site)+\"\\t-\"+str(readcount[chromosome][site]['-'])+\"\\n\")\n out.close()\n out2.close()\n \n #out4.write(chromosome+\"\\t\"+str(ss)+\"\\t\"+str(ee)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.close()",
"def acogps(f_path, min_supp=MIN_SUPPORT, evaporation_factor=EVAPORATION_FACTOR,\n max_iteration=MAX_ITERATIONS, return_gps=False):\n # 0. Initialize and prepare data set\n d_set = DataGP(f_path, min_supp)\n \"\"\":type d_set: DataGP\"\"\"\n d_set.init_attributes()\n # attr_index = d_set.attr_cols\n # e_factor = evaporation_factor\n d, attr_keys = gend(d_set.valid_bins) # distance matrix (d) & attributes corresponding to d\n\n a = d_set.attr_size\n winner_gps = list() # subsets\n loser_gps = list() # supersets\n str_winner_gps = list() # subsets\n repeated = 0\n it_count = 0\n counter = 0\n\n if d_set.no_bins:\n return []\n\n # 1. Remove d[i][j] < frequency-count of min_supp\n fr_count = ((min_supp * a * (a - 1)) / 2)\n d[d < fr_count] = 0\n\n # 3. Initialize pheromones (p_matrix)\n pheromones = np.ones(d.shape, dtype=float)\n\n invalid_count = 0\n # 4. Iterations for ACO\n # while repeated < 1:\n while counter < max_iteration:\n rand_gp, pheromones = genaco(attr_keys, d, pheromones, evaporation_factor)\n if len(rand_gp.gradual_items) > 1:\n # print(rand_gp.get_pattern())\n exits = isduplicate(rand_gp, winner_gps, loser_gps)\n if not exits:\n repeated = 0\n # check for anti-monotony\n is_super = amcheck(loser_gps, rand_gp, subset=False)\n is_sub = amcheck(winner_gps, rand_gp, subset=True)\n if is_super or is_sub:\n continue\n gen_gp = validategp(d_set, rand_gp)\n \"\"\":type gen_gp: GP\"\"\"\n is_present = isduplicate(gen_gp, winner_gps, loser_gps)\n is_sub = amcheck(winner_gps, gen_gp, subset=True)\n if is_present or is_sub:\n repeated += 1\n else:\n if gen_gp.support >= min_supp:\n pheromones = update_pheromones(attr_keys, gen_gp, pheromones)\n winner_gps.append(gen_gp)\n str_winner_gps.append(gen_gp.print(d_set.titles))\n else:\n loser_gps.append(gen_gp)\n invalid_count += 1\n if set(gen_gp.get_pattern()) != set(rand_gp.get_pattern()):\n loser_gps.append(rand_gp)\n else:\n repeated += 1\n else:\n invalid_count += 1\n it_count += 1\n if max_iteration == 1:\n counter = repeated\n else:\n counter = it_count\n # Output\n out = json.dumps({\"Algorithm\": \"ACO-GRAD\", \"Best Patterns\": str_winner_gps, \"Invalid Count\": invalid_count,\n \"Iterations\": it_count})\n \"\"\":type out: object\"\"\"\n if return_gps:\n return out, winner_gps\n else:\n return out",
"def main_load_nps(fileA, fileB):\n \n print \"- Loading the following non-dominated sets of two-dimensional\"\n print \" objective vectors and computing the joint-EAF:\"\n print \" * A:\", fileA\n print \" * B:\", fileB\n print\n \n npsetA = datasets.load_nps(fileA)\n npsetB = datasets.load_nps(fileB)\n assert len(npsetA) == len(npsetB), (\"The npsets must have the same length \"\n \"(i.e. same number of executions)\")\n\n point_ind = eafindicators(npsetA, npsetB)\n return point_ind",
"def constraint_test():\n import itertools, sys\n\n show_analysis = False\n #Generated via grammar\n gr = grammar.Grammar('grammars/test_constraints.bnf')\n inputs = ([1 for _ in range(100)], [ i%3 for i in range(100)])\n for _input in inputs: \n output = gr.generate(_input)\n azr = analyser.Analyser('test',output['phenotype'],True)\n try:\n azr.create_graph()\n except ValueError as e:\n print(__name__, \"ERROR\", _input, e)\n continue\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()\n \n #Fixed generated\n lengths = (1000, 10000)\n levels = (5, 10)\n for length_idx, level_idx in itertools.permutations([0,1]):\n try:\n GRAPH = constrained_offset_graph(lengths[length_idx],\n levels[length_idx])\n except ValueError as e:\n print(__name__, \"ERROR\", lengths[length_idx], levels[length_idx], e)\n continue\n GRAPH.save_graph(\"pylon\")\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()\n #will it blend?\n azr = analyser.Analyser('test',\"moo\",True)\n azr.my_graph = GRAPH\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()",
"def build_from_file(self, topology_file, topology_format):\n with open(topology_file) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n else:\n if topology_format == 0:\n x = line.split(\"\\n\")[0].split(\"|\")\n as1 = int(x[0])\n as2 = int(x[1])\n relationship = int(x[2])\n else:\n x = line.split(\"\\n\")[0].split(\"\\t\")\n if x[2] == \"p2c\":\n as1 = int(x[0])\n as2 = int(x[1])\n relationship = -1\n elif x[2] == \"c2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = -1\n elif x[2] == \"p2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = 0\n else:\n continue\n\n if not self.has_edge(as1, as2):\n self.add_edge(as1, as2, relationship=relationship, as1=as1, as2=as2)",
"def read_ao_overlap(self, path_rwfdump, fn_rwf):\n os.system(path_rwfdump + f\" {fn_rwf} ao_overlap.dat 514R\")\n\n with open('ao_overlap.dat', \"r\") as f:\n log = f.read()\n\n tmp = re.findall('[-]?\\d+\\.\\d+D[+-]\\d\\d', log)\n tmp = [float(x.replace('D', 'e')) for x in tmp]\n \n tmp_ovr = np.zeros((self.nbasis * 2, self.nbasis * 2))\n \n cnt = 0\n for ibasis in range(self.nbasis * 2):\n for jbasis in range(ibasis + 1):\n tmp_ovr[ibasis, jbasis] = tmp[cnt]\n cnt += 1\n\n tmp_ovr += np.transpose(tmp_ovr) - np.diag(np.diag(tmp_ovr))\n\n # Slicing the components between t and t+dt\n return tmp_ovr[:self.nbasis, self.nbasis:]",
"def anchors_to_adjacency(set_path, n_proteomes, mailbox_reader):\n frame_list = []\n for idx in range(n_proteomes):\n with mailbox_reader(idx) as file_handle:\n frame_list.append(\n pd.read_csv(\n file_handle, sep=\"\\t\", index_col=0\n ).convert_dtypes()\n )\n nodes = pd.concat(\n frame_list,\n ignore_index=True,\n )\n del frame_list\n graph = nx.Graph()\n for unused_tuple, subframe in nodes.groupby(\n by=[\"syn.anchor.id\", \"syn.anchor.sub_id\"]\n ):\n ids = subframe[\"member_ids\"]\n n_ids = len(ids)\n graph.add_nodes_from(ids)\n if n_ids > 1:\n edges = combinations(ids, 2)\n graph.add_edges_from(edges, weight=n_ids)\n outpath = set_path / ANCHORS_FILE\n summarypath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_summary.tsv\"\n )\n histpath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_hist.tsv\"\n )\n components = [\n c\n for c in sorted(nx.connected_components(graph), key=len, reverse=True)\n if len(c) > 1\n ]\n fh = outpath.open(\"w\")\n fh.write(\"idx\\tcluster_id\\tsize\\tmembers\\n\")\n n_items = 0\n count_list = []\n hash_list = []\n id_list = []\n for i, comp in enumerate(components):\n component = np.sort(pd.Index(list(comp)).to_numpy())\n id_list.append(i)\n size = len(comp)\n count_list.append(size)\n hash_list.append(hash_array(component))\n for node in component:\n fh.write(f\"{n_items}\\t{i}\\t{size}\\t{node}\\n\")\n n_items += 1\n fh.close()\n n_clusts = len(count_list)\n del graph, components\n cluster_counts = pd.DataFrame({\"size\": count_list})\n largest_cluster = cluster_counts[\"size\"].max()\n cluster_hist = (\n pd.DataFrame(cluster_counts.value_counts()).sort_index().reset_index()\n )\n cluster_hist = cluster_hist.set_index(\"size\")\n cluster_hist = cluster_hist.rename(columns={0: \"n\"})\n cluster_hist[\"item_pct\"] = (\n cluster_hist[\"n\"] * cluster_hist.index * 100.0 / n_items\n )\n cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n cluster_hist[\"cluster_pct\"] = cluster_hist[\"n\"] * 100.0 / n_clusts\n cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n clusters = pd.DataFrame(\n {\"anchor.id\": id_list, \"count\": count_list, \"hash\": hash_list}\n )\n clusters.to_csv(summarypath, sep=\"\\t\")\n stats_dict = {\n \"in_anchor\": n_items,\n \"syn.anchors.n\": n_clusts,\n \"syn.anchors.largest\": largest_cluster,\n }\n return stats_dict",
"def parcel_overlap(parcellation1, parcellation2, outpath):\n p1_dat = nib.load(parcellation1).get_data()\n p2_dat = nib.load(parcellation2).get_data()\n p1regs = np.unique(p1_dat)\n p1regs = p1regs[p1regs > 0]\n p2regs = np.unique(p2_dat)\n\n p1n = get_filename(parcellation1)\n p2n = get_filename(parcellation2)\n\n overlapdat = lil_matrix((p1regs.shape[0], p2regs.shape[0]), dtype=np.float32)\n for p1idx, p1reg in enumerate(p1regs):\n p1seq = p1_dat == p1reg\n N = p1seq.sum()\n poss_regs = np.unique(p2_dat[p1seq])\n for p2idx, p2reg in enumerate(p2regs):\n if p2reg in poss_regs:\n # percent overlap is p1seq and'd with the anatomical region voxelspace, summed and normalized\n pover = np.logical_and(p1seq, p2_dat == p2reg).sum() / float(N)\n overlapdat[p1idx, p2idx] = pover\n\n outf = op.join(outpath, \"{}_{}.csv\".format(p1n, p2n))\n with open(outf, \"w\") as f:\n p2str = [\"%s\" % x for x in p2regs]\n f.write(\"p1reg,\" + \",\".join(p2str) + \"\\n\")\n for idx, p1reg in enumerate(p1regs):\n datstr = [\"%.4f\" % x for x in overlapdat[idx,].toarray()[0,]]\n f.write(str(p1reg) + \",\" + \",\".join(datstr) + \"\\n\")\n f.close()\n return",
"def fasta(args):\n from jcvi.formats.sizes import Sizes\n from jcvi.formats.agp import OO, build\n\n p = OptionParser(fasta.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n bedfile, scffasta, pmolfasta = args\n pf = bedfile.rsplit(\".\", 1)[0]\n bed = Bed(bedfile)\n selected = select_bed(bed)\n oo = OO()\n seen = set()\n sizes = Sizes(scffasta).mapping\n agpfile = pf + \".agp\"\n agp = open(agpfile, \"w\")\n for b in selected:\n scf = range_parse(b.accn).seqid\n chr = b.seqid\n cs = (chr, scf)\n if cs not in seen:\n oo.add(chr, scf, sizes[scf], b.strand)\n seen.add(cs)\n else:\n logging.debug(\"Seen {0}, ignored.\".format(cs))\n\n oo.write_AGP(agp, gaptype=\"contig\")\n agp.close()\n build([agpfile, scffasta, pmolfasta])",
"def overlap(path1, path2):\n DataL1 = BedTool(path1).sort()\n DataL2 = BedTool(path2).sort()\n overlap = DataL1.intersect(DataL2, wao=True)\n Overlap_df = overlap.to_dataframe()\n Strand1 = list(Overlap_df.iloc[:, 5])\n Strand2 = list(Overlap_df.iloc[:, 11])\n p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent = orientation(Strand1, Strand2)\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent",
"def loadFromFile():\n try:\n f1 = open( \"friendshipMap.p\", \"rb\" )\n friendship_map = pickle.load(f1)\n f1.close()\n f2 = open( \"businessReviews.p\", \"rb\" )\n business_reviews = pickle.load(f2)\n f2.close()\n f3 = open( \"degreeCentrality.p\", \"rb\" )\n degree_centrality_map = pickle.load(f3)\n f3.close()\n f4 = open( \"closenessCentrality.p\", \"rb\" )\n closeness_centrality_map = pickle.load(f4)\n f4.close()\n f5 = open( \"betweennessCentrality.p\", \"rb\" )\n betweenness_centrality_map = pickle.load(f5)\n f5.close()\n except IOError as e:\n sys.stderr.write(\"I/O error({0}): {1}\".format(e.errno, e.strerror)+'\\n')\n sys.stderr.write('Try running with -buildClean = clean!\\n')\n\n return (friendship_map, business_reviews, degree_centrality_map, closeness_centrality_map, betweenness_centrality_map, YGraph)",
"def _load_pascal_annotation(self, index):\n imagefile = os.path.join(self._data_path, 'Images', index + self._image_ext)\n labelfile = os.path.join(self._data_path, 'GroundTruth', index + '.txt')\n print 'Loading: image {} and label {}'.format(imagefile, labelfile)\n\n labels = np.loadtxt(labelfile) + 1\n image = cv2.imread(imagefile)\n\n gt_classes = []\n gt_boxes = []\n\n boxes, classes = self._get_boxes(image, labels)\n\n boxes = np.array(boxes)\n classes = np.array(classes)\n\n scores = np.ones(len(boxes), dtype=np.float32)\n dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32)\n\n # keep = nms(dets, 0.2)\n _, keep = non_max_suppression_fast(dets, 0.2)\n boxes = boxes[keep, :]\n classes = classes[keep]\n\n for n, (x, y, w, h) in enumerate(boxes):\n # gt_boxes.append([min(x, w), min(y, h), min(abs(w - x), w), min(abs(h - y), h)])\n gt_boxes.append([x, y, w - x, h - y])\n gt_classes.append(classes[n])\n\n overlaps = np.zeros((len(gt_boxes), len(self._classes)), dtype=np.float32)\n for n, obj in enumerate(gt_boxes):\n overlaps[n, gt_classes[n]] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': np.array(gt_boxes),\n 'gt_classes': np.array(gt_classes, dtype=np.int32),\n 'gt_overlaps': overlaps,\n 'flipped': False}\n\n # def get_data_from_tag(node, tag):\n # return node.getElementsByTagName(tag)[0].childNodes[0].data\n\n # with open(filename) as f:\n # data = minidom.parseString(f.read())\n\n # objs = data.getElementsByTagName('object')\n # num_objs = len(objs)\n\n # boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n # gt_classes = np.zeros((num_objs), dtype=np.int32)\n # overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n # # Load object bounding boxes into a data frame.\n # for ix, obj in enumerate(objs):\n # # Make pixel indexes 0-based\n # x1 = float(get_data_from_tag(obj, 'xmin')) - 1\n # y1 = float(get_data_from_tag(obj, 'ymin')) - 1\n # x2 = float(get_data_from_tag(obj, 'xmax')) - 1\n # y2 = float(get_data_from_tag(obj, 'ymax')) - 1\n # cls = self._class_to_ind[str(get_data_from_tag(obj, \"name\")).lower().strip()]\n # boxes[ix, :] = [x1, y1, x2, y2]\n # gt_classes[ix] = cls\n # overlaps[ix, cls] = 1.0\n\n # overlaps = scipy.sparse.csr_matrix(overlaps)\n\n # return {'boxes': boxes,\n # 'gt_classes': gt_classes,\n # 'gt_overlaps': overlaps,\n # 'flipped': False}",
"def get_overlap():\n\n\tx = 382 * 2\n\ty = x\n\tz = 129 * 2 - 1\n\ttype_size = 8\n\ttab = np.empty(x * y * z)\n\tsize = 4096\n\ti = 0\n\tl = x * y * z * type_size\n\tn = 0\n\tfd = open('omeg5', 'rb')\n\twhile l - n > size:\n\t\ts = fd.read(size)\n\t\ttab[n / type_size : (n + size) / type_size] = np.fromstring(s, dtype = np.float64)\n\t\ti += 1\n\t\tn += size\n\ts = fd.read(l - n)\n\ttab[i * size / type_size:] = np.fromstring(s, dtype = np.float64)\n\tfd.close()\n\ttab = tab.reshape(x, y, z)\n\treturn (tab)",
"def _get_assembly_points(self, agp, weight):\n comps = set()\n prev_obj = \"\"\n seq1 = \"\"\n strand1 = \"\"\n\n # Gap info\n gap_count = 0\n prev_agp_known = None\n prev_gap_size = 0\n prev_gap_type = None\n prev_linkage = \"\"\n prev_evidence = \"\"\n\n # Iterate over the AGP file and yield assembly points\n agp_file = AGPFile(agp)\n for agp_line in agp_file.iterate_lines():\n if not agp_line.is_gap:\n # Add this component to our master list\n if agp_line.comp not in self.component_lens:\n raise RuntimeError(\"{} is in {} but not {}.\".format(agp_line.comp, agp, self.components_fasta_fname))\n comps.add(agp_line.comp)\n\n comp_len = agp_line.comp_end\n if comp_len < self.get_component_len(agp_line.comp):\n raise RuntimeError(\"only complete components can be added to the graph.\")\n\n if comp_len > self.get_component_len(agp_line.comp):\n raise RuntimeError(\"inconsistent component lengths: {} bp in {} and {} bp in {}\". format(comp_len, agp, self.get_component_len(agp_line.comp), self.components_fasta_fname))\n\n if agp_line.obj == prev_obj:\n # Check if these components are bookended (no gap in between)\n if not gap_count:\n prev_evidence = \"bookend\"\n\n # Check if two consecutive gaps preceded this component\n if gap_count > 1:\n raise ValueError(\"Consecutive gaps in the AGP file are not currently supported.\")\n\n yield AssemblyPoint(\n seq1,\n strand1,\n agp_line.comp,\n agp_line.orientation,\n weight,\n agp,\n prev_agp_known,\n prev_gap_size,\n prev_gap_type,\n prev_linkage,\n prev_evidence\n )\n\n # Set this component as the previous component\n seq1 = agp_line.comp\n strand1 = agp_line.orientation\n\n gap_count = 0\n prev_agp_known = None\n prev_gap_size = 0\n prev_gap_type = None\n prev_linkage = \"\"\n prev_evidence = \"\"\n\n else:\n seq1 = agp_line.comp\n strand1 = agp_line.orientation\n\n prev_obj = agp_line.obj\n gap_count = 0\n prev_agp_known = None\n prev_gap_size = 0\n prev_gap_type = None\n prev_linkage = \"\"\n prev_evidence = \"\"\n else:\n if agp_line.obj == prev_obj:\n gap_count += 1\n prev_agp_known = True if agp_line.comp_type == \"N\" else False\n prev_gap_size = agp_line.gap_len\n prev_gap_type = agp_line.gap_type\n prev_linkage = True if agp_line.linkage == \"yes\" else False\n prev_evidence = agp_line.linkage_evidence\n\n if comps != self.components:\n raise ValueError(\"Input AGPs do not have the same set of components.\")",
"def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]",
"def main():\n directed = True\n try:\n opts,args = getopt.getopt(sys.argv[1:], \"\")\n except:\n usage(sys.argv[0])\n for opt,arg in opts:\n usage(sys.argv[0])\n\n if len(args) != 5:\n usage(sys.argv[0])\n\n data_dir = args[0]\n num_samples = int(args[1])\n num_seeds = int(args[2])\n num_waves = int(args[3]) - 1 # -1 for consistency with SPNet\n outputdir = args[4]\n\n print \"directed:\", directed\n print \"number of samples:\", num_samples\n print \"number of seeds:\", num_seeds\n print \"number of waves:\", num_waves\n print \"output directory:\", outputdir\n \n if not os.path.exists(outputdir):\n os.mkdir(outputdir)\n\n sys.stdout.write('loading data from ' + data_dir + '...')\n start = time.time()\n (G, profile, colnames) = load_pokec_data(data_dir)\n print time.time() - start, 's'\n\n snap.PrintInfo(G)\n\n\n # We do not add attributes to nodes as SNAP node attribute as\n # these seem to get lost by varoius operations including subgraph\n # that we need to use, so instead maintain them just in the\n # dictionary mapping the original node ids to the attributes -\n # fortunately the original node ids are maintained by\n # GetSubGraph() so we can used these to index the profile\n # dictoinary in the subgraphs\n\n\n ## https://snap.stanford.edu/data/soc-pokec-readme.txt\n ## region:\n ## string, mostly regions in Slovakia (example: \"zilinsky kraj,\n ## kysucke nove mesto\" means county Zilina, town Kysucke Nove Mesto,\n ## Slovakia), some foreign countries (example: \"zahranicie, \n ## zahranicie - nemecko\" means foreign country Germany (nemecko)),\n ## some Czech regions (example: \"ceska republika, cz - ostravsky \n ## kraj\" means Czech Republic, county Ostrava (ostravsky kraj))\n ## We just make this a factor, looking at the output written by print\n ## below, it looks reasonable, but is is only a categorical variable\n ## allowing us to tell if two users are in the same region or not.\n ## TODO we could recode this so that we can have different variables\n ## for being in a different country, major city, etc.\n # Cannot do this:\n #profile[:][colnames['region']] = convert_to_int_cat(profile[:][colnames['region']]) # like factor in R\n # as get \"TypeError: unhashable type\" so have to do this instead:\n id_regions = [(k, p[colnames['region']]) for (k,p) in profile.iteritems()]\n id_regions_int = convert_to_int_cat([x[1] for x in id_regions])\n for i in xrange(len(id_regions)):\n profile[id_regions[i][0]][colnames['region']] = id_regions_int[i]\n\n for attr in ['region']:\n sys.stdout.write('There are %d NA for %s\\n' % ([p[colnames[attr]] for p in profile.itervalues()].count('NA'), attr))\n\n\n # get num_samples * num_seeds distinct random seed nodes (sample without replacement)\n # and convert to list of lists where each list is seed set for one sample\n allseeds = random.sample([node.GetId() for node in G.Nodes()], num_samples * num_seeds)\n seedsets = [allseeds[i:i+num_seeds] for i in range(0, len(allseeds), num_seeds)]\n\n sampledesc_filename = outputdir + os.path.sep + \"sampledesc\" + os.path.extsep + \"txt\"\n sampledesc_f = open(sampledesc_filename, 'w')\n\n for i in range(num_samples):\n sys.stdout.write( 'generating snowball sample ' + str(i+1) + '... 
' )\n start = time.time()\n # have to convert seedset to TIntV for SNAP\n seedsVec = snap.TIntV()\n for nodeid in seedsets[i]:\n seedsVec.Add(nodeid)\n Gsample = snowball_sample(G, num_waves, seedsVec)\n nodelist = list() # keep this iteration in list so we always use same order in future\n zonedict = dict() # map nodeid : zone\n for node in Gsample.Nodes():\n nodelist.append(node.GetId())\n zonedict[node.GetId()] = Gsample.GetIntAttrDatN(node.GetId(), \"zone\")\n print time.time() - start, 's'\n \n snap.PrintInfo(Gsample)\n subgraph_filename = outputdir + os.path.sep + \"subgraph\" + str(i) + os.path.extsep + \"txt\"\n write_graph_file(subgraph_filename, Gsample, nodelist)\n subzone_filename = outputdir + os.path.sep + \"subzone\" + str(i) + os.path.extsep + \"txt\"\n write_zone_file(subzone_filename, Gsample, nodelist, zonedict)\n subactor_binary_filename = outputdir + os.path.sep + \"subactorbin\" + str(i) + os.path.extsep + \"txt\"\n subactor_categorical_filename = outputdir + os.path.sep + \"subactorcat\" + str(i) + os.path.extsep + \"txt\"\n subactor_continuous_filename = outputdir + os.path.sep + \"subactorcont\" + str(i) + os.path.extsep + \"txt\"\n\n write_subactors_file_binary(subactor_binary_filename, Gsample, nodelist, profile, colnames)\n write_subactors_file_categorical(subactor_categorical_filename, Gsample, nodelist, profile, colnames)\n write_subactors_file_continuous(subactor_continuous_filename, Gsample, nodelist, profile, colnames)\n\n nodeid_filename = outputdir + os.path.sep + \"subnodeid\" + str(i) + os.path.extsep + \"txt\"\n write_subgraph_nodeids(nodeid_filename, nodelist)\n \n # format of sampledesc file is:\n # N subzone_filename subgraph_filename binary_Filename cat_filename cont_filename\n sampledesc_filename = outputdir + os.path.sep + \"sampledesc\" + os.path.extsep + \"txt\"\n sampledesc_f.write(\"%d %s %s %s %s %s\\n\" % (Gsample.GetNodes(), subzone_filename,\n subgraph_filename, subactor_binary_filename,\n subactor_categorical_filename, subactor_continuous_filename))\n\n sampledesc_f.close()",
"def load_pfam():\n data = collections.defaultdict(list)\n ds_loader = specs.make_pfam34_loader(\n root_dir=FLAGS.data_dir, sub_dir='', extra_keys=KEYS, task=TASK)\n for split in SPLITS:\n logging.info('Loading %s Pfam-A seed 34.0 split...', split)\n split_data = collections.defaultdict(list)\n for ex in ds_loader.load(split).prefetch(tf.data.AUTOTUNE):\n split_data['id'].append(ex['id'].numpy().decode('utf-8'))\n split_data['start'].append(ex['start'].numpy())\n split_data['end'].append(ex['end'].numpy())\n split_data['seq'].append(\n vocabulary.alternative.decode(ex['sequence'].numpy()))\n for k, v in split_data.items():\n data[k].append(v)\n result = {k: sum(v, []) for k, v in data.items()}\n\n indices_from_pfam_id = collections.defaultdict(list)\n for i, pfam_id in enumerate(result['id']):\n indices_from_pfam_id[pfam_id].append(i)\n logging.info('Found %d unique IDs among %d Pfam-A seed sequences',\n len(indices_from_pfam_id), len(result['id']))\n return (result['start'], result['end'], result['seq']), indices_from_pfam_id",
"def main():\n filenames = sys.argv[1]\n fdir = sys.argv[2]\n filenames = filenames.split(',')\n\n # print (filenames)\n graph = PGraph(fdir, filenames, \"Multi-Source Foraging\")\n # graph = PGraph(fdir, filenames, \"Cooperative Transport\")\n # graph = PGraph(fdir, filenames, \"Nest Maintenance\")\n # graph = PGraph(\n # fdir, filenames, \"Nest Maintenance \\n with \\n Handcoded behaviors\")\n graph.gen_plot()\n\n # box = BoxGraph(fdir, filenames, \"Single-Source Foraging\")\n # box = BoxGraph(fdir, filenames, False, (-1, 100), \"Multi-Source Foraging\")\n box = BoxGraph(fdir, filenames, False, (-1, 120), \"Nest Maintenance with Handcoded behaviors\")\n # box = BoxGraph(\n # fdir, filenames, \"Nest Maintenance \\n with \\n Handcoded behaviors\")\n box.gen_plot()",
"def write_agp(self, agp_fn, ref_fn, add_suffix_to_unplaced=False):\n used_components = set()\n used_edges = set()\n obj_header_idx = -1\n\n agp = AGPFile(agp_fn, \"w\")\n agp.add_pragma()\n agp.add_comment(\"# AGP created by RagTag {}\".format(get_ragtag_version()))\n\n while True:\n # Find a starting node\n from_node = None\n to_node = None\n cur_ref = None\n for u, v in sorted(self.edges):\n if (u, v) not in used_edges:\n u_base = u[:-2]\n\n u_degree = 0\n if u_base + \"_b\" in self.nodes:\n u_degree += self.graph.degree[u_base + \"_b\"]\n if u_base + \"_e\" in self.nodes:\n u_degree += self.graph.degree[u_base + \"_e\"]\n\n assert u_degree in {2, 4}\n\n # Check if we have found a starting target sequence\n if u_degree == 2:\n cur_ref = u_base\n from_node = u\n to_node = v\n used_edges.add((u, v))\n used_edges.add((v, u))\n break\n\n # If we haven't found a new starting target sequence, we are done\n if from_node is None:\n break\n\n # Initialize this object\n obj_header_idx += 1\n obj_header = \"scf\" + \"{0:08}\".format(obj_header_idx)\n obj_pos = 0\n obj_pid = 1\n\n # Process the first target sequence\n cur_ref_len = self.component_lens[cur_ref]\n cur_ref_strand = \"+\"\n if from_node.endswith(\"_b\"):\n cur_ref_strand = \"-\"\n agp.add_seq_line(obj_header, obj_pos+1, obj_pos+cur_ref_len, obj_pid, \"W\", cur_ref, 1, cur_ref_len, cur_ref_strand)\n obj_pos += cur_ref_len\n obj_pid += 1\n used_components.add(cur_ref)\n\n # Process the remaining sequences.\n next_edge_exists = True\n while next_edge_exists:\n # Process the patch\n patch_aln = self.graph[from_node][to_node][\"alignment\"]\n patch_query = patch_aln.query\n patch_strand = \"+\"\n if patch_aln.strand:\n patch_strand = \"-\"\n\n patch_len = patch_aln.their_query_start - patch_aln.my_query_end\n if patch_len > 0:\n if patch_aln.is_gap:\n agp.add_gap_line(obj_header, obj_pos+1, obj_pos+patch_len, obj_pid, \"N\", patch_len, \"scaffold\", \"yes\", \"align_genus\")\n else:\n agp.add_seq_line(obj_header, obj_pos+1, obj_pos+patch_len, obj_pid, \"W\", patch_query, patch_aln.my_query_end+1, patch_aln.their_query_start, patch_strand)\n used_components.add(patch_query)\n obj_pos += patch_len\n obj_pid += 1\n\n # Next, process the reference sequence\n comp_start = min(0, patch_len)\n cur_ref = to_node[:-2]\n cur_ref_len = self.component_lens[cur_ref]\n cur_ref_strand = \"+\"\n if to_node.endswith(\"_e\"):\n cur_ref_strand = \"-\"\n agp.add_seq_line(obj_header, obj_pos+1, obj_pos+(cur_ref_len + comp_start), obj_pid, \"W\", cur_ref, 1+(-1*comp_start), cur_ref_len, cur_ref_strand)\n obj_pos += cur_ref_len + comp_start\n obj_pid += 1\n used_components.add(cur_ref)\n\n # Look for the next edge\n from_node = to_node[:-2] + \"_b\"\n if to_node.endswith(\"_b\"):\n from_node = to_node[:-2] + \"_e\"\n\n if from_node in self.graph.nodes:\n next_nodes = set(self.graph[from_node])\n assert len(next_nodes) == 1\n to_node = next_nodes.pop()\n used_edges.add((from_node, to_node))\n used_edges.add((to_node, from_node))\n else:\n next_edge_exists = False\n\n # Write unplaced reference sequences\n fai = pysam.FastaFile(ref_fn)\n all_ref_seqs = set(fai.references)\n fai.close()\n remaining_components = all_ref_seqs - used_components\n for c in sorted(remaining_components):\n agp.add_seq_line(\n c + \"_RagTag\" * add_suffix_to_unplaced,\n \"1\",\n str(self.component_lens[c]),\n \"1\",\n \"W\",\n c,\n \"1\",\n str(self.component_lens[c]),\n \"+\"\n )\n\n agp.write()",
"def g_n():\n for gname in os.listdir(sroot):\n if gname != 's1-league1-game1':\n continue\n if gname.startswith('s1'):\n p0 = os.path.join(sroot, gname)\n p1 = os.path.join(p0, 'commitment', 'jperret')\n p2 = os.path.join(p0, 'commitment', 'sa')\n if os.path.isdir(p1) and os.path.isdir(p2):\n for fname in os.listdir(p1):\n if fname.endswith('.aa'):\n bname = fname[:-3]\n #~ if bname == 's1-league1-game2_07':\n #~ continue\n a = ad.Annotations(os.path.join(p1, fname))\n a.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a.gen_full_struct()\n a.commitments = list(u for u in a.units if u.type == 'Commitment')\n a2 = ad.Annotations(os.path.join(p2, fname))\n a2.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a2.gen_full_struct()\n a2.commitments = list(u for u in a2.units if u.type == 'Commitment')\n yield bname, (a, a2)",
"def FindPeaks_graph(self):\n import string\n \n maxima = self['FP_LOC'].copy()\n maxima = num.where(maxima)\n maxima = (maxima[1],maxima[0])\n detectimg = self['FP_DETECT'].copy()\n \n id = self._getGraphId()\n root = 'FindPeaks_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n\n doStamp(detectimg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawCross(maxima,length=7,color='green')\n \n strpeaks = string.strip('%i'% (self['M_NPEAKS']))\n text = 'NP=%s' % strpeaks \n \n # Painted.Graffiti(text,commtextpos)\n \n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['FindPeaks'] = epsname\n self['figcomms']['FindPeaks'] = text",
"def merge_tables():\r\n filename = \"ppxf_results_best.dat\"\r\n s1 = np.genfromtxt(filename, usecols=(0,), dtype=None).tolist()\r\n sref = s1[:]\r\n sref.sort()\r\n x, y = get_positions(sref).T\r\n r = np.sqrt(x * x + y * y)\r\n pa = np.rad2deg(np.arctan2(x, y))\r\n pa[pa < 0.] += 360.\r\n data1 = np.loadtxt(filename, usecols=np.arange(1, 11))\r\n ##########################################################################\r\n # Account for difference in resolution\r\n # Not used anymore because the resolution is now matched in pPXF\r\n # fwhm_dif = (2.5 - 2.1) * c / 5500. / 2.3548\r\n # data1[:,2] = np.sqrt(data1[:,2]**2 - fwhm_dif**2)\r\n ##########################################################################\r\n data1 = match_data(s1, sref, data1)\r\n results = np.column_stack((sref, x, y, r, pa, data1))\r\n header = ['FILE', \"X[kpc]\", \"Y[kpc]\",\r\n \"R[kpc]\", \"PA\",\r\n 'V', 'dV', 'S', 'dS', 'h3', 'dh3',\r\n 'h4', 'dh4', 'chi/DOF', 'S/N']\r\n with open(outtable, \"w\") as f:\r\n for i, field in enumerate(header):\r\n print \"# {0} : {1}\\n\".format(i, field)\r\n f.write(\"# {0} : {1}\\n\".format(i, field))\r\n np.savetxt(f, results, fmt=\"%s\")\r\n return",
"def generate_graph_fca(organism):\n\twith open('./organisms/'+organism+'/'+'fcagraph.txt','r') as gfile:\n\t\trnamelst = [] \n\t\tenamelst = []\n\t\tclst = []\n\t\tfor line in gfile:\n\t\t\te = line.strip().split('\\t')\n\t\t\tfor rname in e[0:2]:\n\t\t\t\tif rname not in rnamelst:\n\t\t\t\t\trnamelst.append(rname)\n\t\t\tenamelst.append((e[0],e[1]))\n\t\t\tclst.append(e[2])\n\t\t\t\n\t\tG = igraph.Graph(0,directed=True)\n\t\tG.add_vertices(rnamelst)\n\t\tG.add_edges(enamelst)\n\t\tG.es['color'] = clst\n\t\n\treturn G",
"def write_output(pfam,\n indices_from_pfam_id,\n uniprot):\n pfam_starts, pfam_ends, pfam_sequences = pfam\n uniprot_ids, uniprot_sequences = uniprot\n\n logging.info('Writing output file %s...', FLAGS.output_file)\n\n n_pfam_entries_found = 0\n n_sequence_mismatches = 0\n n_repeats = 0\n n_start_mismatches = 0\n with tf.io.gfile.GFile(FLAGS.output_file, 'w') as f:\n f.write(','.join(OUTPUT_FIELDS) + '\\n')\n for uniprot_id, uniprot_sequence in zip(uniprot_ids, uniprot_sequences):\n for idx in indices_from_pfam_id[uniprot_id]:\n pfam_start, pfam_end = pfam_starts[idx], pfam_ends[idx]\n pfam_sequence = pfam_sequences[idx]\n\n uniprot_starts = find_all(uniprot_sequence, pfam_sequence)\n\n n_pfam_entries_found += 1\n if uniprot_starts:\n n_repeats += len(uniprot_starts) > 1\n n_start_mismatches += pfam_start not in uniprot_starts\n else:\n n_sequence_mismatches += 1\n\n pfam_id = f'{uniprot_id}/{pfam_start}-{pfam_end}'\n uniprot_starts = ';'.join([str(i) for i in uniprot_starts])\n fields = [pfam_id, uniprot_starts, uniprot_sequence]\n f.write(','.join(fields) + '\\n')\n\n logging.info('Finished writing %d entries to output file.',\n n_pfam_entries_found)\n\n logging.info('%d / %d Pfam-A seed entries have mismatching sequences.',\n n_sequence_mismatches, n_pfam_entries_found)\n logging.info('%d / %d Pfam-A seed entries have repeats.',\n n_repeats, n_pfam_entries_found)\n logging.info('%d / %d Pfam-A seed entries have mismatching starts.',\n n_start_mismatches, n_pfam_entries_found)",
"def overlap_generator(metric, graph):\n edges =[(edge[0], edge[1]) for edge in nx.edges(graph)]\n edges = edges + [(edge[1], edge[0]) for edge in nx.edges(graph)]\n return {edge: metric(graph, edge[0], edge[1]) for edge in tqdm(edges)}"
] | [
"0.6383634",
"0.61519605",
"0.60640895",
"0.5620521",
"0.54954714",
"0.5461694",
"0.5433732",
"0.54307693",
"0.5381156",
"0.53631675",
"0.5311818",
"0.5308905",
"0.52986294",
"0.52673423",
"0.5267266",
"0.52432793",
"0.52378273",
"0.52373457",
"0.5227681",
"0.5227558",
"0.52236855",
"0.5220912",
"0.52207094",
"0.5205544",
"0.5199316",
"0.51859576",
"0.51727813",
"0.514736",
"0.51290923",
"0.5118723"
] | 0.64970756 | 0 |
removes nodes from graph if they are in communities smaller than n (see the usage sketch after this record) | def drop_small_communities(graph, communities, n=4):
for community in communities:
if len(community) < n:
nx_helpers.remove_nodes(graph, community)
communities = [c for c in communities if len(c) >= n]
return graph, communities | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keep_important_nodes(graph, number_of_nodes):\n sorted_dict = np.array(\n [\n [k, v]\n for k, v in sorted(\n dict(graph.degree()).items(),\n key=lambda item: item[1],\n )\n ]\n )\n use_nodes = sorted_dict[-number_of_nodes:, 0]\n graph = graph.subgraph(use_nodes)\n return graph",
"def remove_node(self, n):\r\n keys = self.d.keys()\r\n #check for node in graph\r\n if n not in keys:\r\n raise KeyError(str(n) + \" is not in graph\")\r\n self.d.pop(n)\r\n #discard each occurence of node in the values of others\r\n for k in keys:\r\n edges = self.d[k]\r\n new = edges.discard(n)",
"def prune(neuron,\n number_of_nodes):\n n = len(neuron.nodes_list)\n for i in range(n - number_of_nodes):\n index = shortest_tips(neuron)\n neuron = remove_node(neuron, index)\n return neuron",
"def remove_nodes_connections(self, nodes):\n nodes = ensure_list(nodes)\n for nd in nodes:\n for nd_in in self.successors[nd.name]:\n self.predecessors[nd_in.name].remove(nd)\n self.edges.remove((nd, nd_in))\n self.successors.pop(nd.name)\n self.predecessors.pop(nd.name)\n self._node_wip.remove(nd)",
"def remove(n, c, dnodecomm):\n\n _tot[c] -= k[n]\n _in[c] -= 2 * dnodecomm + network[n][n]\n bl[n] = -1",
"def eliminate_var(n, g,clq_ind,tree):\r\n l = len(clq_ind) # number of nodes eliminated\r\n \r\n new_ind = scipy.array(g.neighbors(n))\r\n new_clique = g.neighbors(n)\r\n new_clique.append(n) \r\n g.add_edges_from( combinations(new_clique,2) )\r\n \r\n for i,clq in enumerate(clq_ind):\r\n if n in clq:\r\n tree.add_edge(l,i)\r\n clq_ind[i] = scipy.setdiff1d(clq,new_clique)\r\n \r\n clq_ind.append(new_ind)\r\n g.remove_node(n)\r\n tree.node[l]['clique'] = new_clique",
"def largest_connected_components(adj, n_components=1):\n _, component_indices = connected_components(adj)\n component_sizes = np.bincount(component_indices)\n components_to_keep = np.argsort(component_sizes)[::-1][:n_components] # reverse order to sort descending\n nodes_to_keep = [\n idx for (idx, component) in enumerate(component_indices) if component in components_to_keep\n ]\n return nodes_to_keep",
"def exclude_nodes_GC(G):\n remove, present = [], []\n # Find giant component\n Gcc = sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)\n G0 = Gcc[0]\n for node in G.nodes():\n if node not in G0.nodes():\n remove.append(node)\n G0.add_node(node,GC= 0)\n else:\n present.append(node)\n G0.add_node(node, GC= 1)\n # Remove nodes not in giant component\n remove_outliers = [node for node in G.nodes() if node not in G0.nodes()]\n G.remove_nodes_from(remove_outliers)\n return G",
"def disconnected_graph(n):\n g = nx.DiGraph()\n for i in range(0, n):\n g.add_node(i)\n return g",
"def remove_nodes(self, count=1):\n for i in range(count):\n dead_guy = self.all_nodes.pop()\n self.log.info(\"Removing node %s\" % dead_guy.name)\n dead_guy.decommission()\n self.log.info(\"Client %s is removed\" % dead_guy.name)\n self.save_cluster()\n self.inject_hosts_files()",
"def trim_cluster( g, cluster, num_edges ) :\n edges = []\n for e in cluster :\n r = sorted( g.edges( e, data = True ), lambda x, y : cmp_edge( g, x, y ) )\n edges.extend( r[-num_edges:] )\n\n sg = networkx.Graph( networkx.subgraph( g, cluster ) )\n for e in sg.edges() :\n sg[e[0]][e[1]][\"reversed_similarity\"] = -sg[e[0]][e[1]][\"similarity\"]\n\n mst_edges = networkx.minimum_spanning_edges( sg, \"reversed_similarity\" )\n \n edges = [(e[0], e[1],) for e in edges ]\n edges += [(e[0], e[1],) for e in mst_edges]\n del_edges = []\n for e in g.edges( cluster ) :\n if (e not in edges and (e[1], e[0],) not in edges) :\n del_edges.append( e )\n g.remove_edges_from( del_edges )\n return g",
"def prune_network(network):\n rand_int = np.random.random_integers\n num_rxns = len(network.reactions)\n num_cmpds = len(network.compounds)\n total = 0\n prune = list()\n for rxn in network.reactions:\n in_deg = network.in_degree(rxn)\n out_deg = network.out_degree(rxn)\n if in_deg == 0:\n if out_deg <= 1:\n prune.append(rxn)\n else:\n targets = network.successors(rxn)\n flips = rand_int(1, len(targets) - 1)\n while (flips > 0):\n target = targets[rand_int(0, len(targets) - 1)]\n factor = network[rxn][target][\"coefficient\"]\n network.remove_edge(rxn, target)\n network.add_edge(target, rxn, coefficient=factor)\n LOGGER.debug(\"flipped direction of link %s -> %s\",\n str(rxn), str(target))\n targets.remove(target)\n flips -= 1\n total += 1\n elif out_deg == 0:\n if in_deg <= 1:\n prune.append(rxn)\n else:\n targets = network.predecessors(rxn)\n flips = rand_int(1, len(targets) - 1)\n while (flips > 0):\n target = targets[rand_int(0, len(targets) - 1)]\n factor = network[target][rxn][\"coefficient\"]\n network.remove_edge(target, rxn)\n network.add_edge(rxn, target, coefficient=factor)\n LOGGER.debug(\"flipped direction of link %s -> %s\",\n str(rxn), str(target))\n targets.remove(target)\n flips -= 1\n total += 1\n for rxn in prune:\n network.remove_node(rxn)\n LOGGER.debug(\"removed reaction %s\", str(rxn))\n prune = list()\n for cmpd in network.compounds:\n if network.degree(cmpd) == 0:\n prune.append(cmpd)\n for cmpd in prune:\n network.remove_node(cmpd)\n LOGGER.debug(\"removed compound %s\", str(cmpd))\n LOGGER.info(\"%d reaction(s) and %d compound(s) removed\",\n (num_rxns - len(network.reactions)),\n (num_cmpds - len(network.compounds)))\n LOGGER.info(\"direction of %d link(s) reversed\", total)",
"def disconnect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n # `discard` ignores non-existing elements (unlike `remove`)\n app.edges[src_id].discard(trg_id)\n self.mark_as_unsaved()\n self.update()",
"def remove_nodes_from(self, nodes):\n for node in nodes:\n self.remove_node(node)",
"def _pair_based_graph_cut(self, graph):\n for node in self._find_paired_nodes(graph):\n graph.remove_node(node)\n return",
"def exclude_nodes_degree(G, min_degree):\n remove = [node for (node, degree) in G.degree().items() if degree < min_degree]\n G.remove_nodes_from(remove)\n #remove new nodes without edges\n remove_zero_degree = [node for (node, degree) in G.degree().items() if degree == 0]\n G.remove_nodes_from(remove_zero_degree)\n return G",
"def delete_node_cascade(self,n):\n # list will get mutated - copy preemptively\n for j in list(self.node_to_edges(n)):\n self.delete_edge_cascade(j)\n self.delete_node(n)",
"def remove_previous_connections(self, nodes):\n nodes = ensure_list(nodes)\n for nd in nodes:\n for nd_out in self.predecessors[nd.name]:\n if nd_out.name in self.successors:\n self.successors[nd_out.name].remove(nd)\n self.edges.remove((nd_out, nd))\n self.successors.pop(nd.name)\n self.predecessors.pop(nd.name)\n self._node_wip.remove(nd)",
"def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)",
"def filter_graph(self, sorted_node, ploidy):\n \n for node in sorted_node:\n \n # while number of prefix edge > ploidy level\n while len(self.prefix[node]) > ploidy:\n min_weight_node = min(self.prefix[node], key=self.prefix[node].get)\n self.remove_edge(min_weight_node, node)\n \n # while number of suffix edge > ploidy level\n while len(self.suffix[node]) > ploidy:\n min_weight_node = min(self.suffix[node], key=self.suffix[node].get)\n self.remove_edge(node, min_weight_node)\n \n print(\"Graph is reduced to best overlap graph.\")",
"def exclude_nodes(self, nodes):",
"def repress_node_removal_old(graph, active_nodes):\n # list_active = list(active_nodes)\n num_neighbors = {node: len(list(graph.neighbors(node))) for node in active_nodes}\n total_neighbors = sum(num_neighbors.values())\n to_remove = set()\n for node in active_nodes:\n if np.random.random() < num_neighbors[node] / total_neighbors:\n to_remove.add(node)\n # only remove nodes at end so that probabilities are from the same time\n graph.remove_nodes_from(to_remove)\n active_nodes -= to_remove",
"def make_complete_graph(num_nodes):\n complete_digraph = {}\n if num_nodes > 0 and type(num_nodes) == int:\n neighbors = set([idx for idx in range(num_nodes)])\n for idx in range(num_nodes):\n complete_digraph[idx] = neighbors.copy() #creates adjacency set\n complete_digraph[idx].remove(idx) # pop out self-loop \n return complete_digraph",
"def delete_orphan_nodes(self):\n used=np.zeros( self.Nnodes(),'b1')\n valid_cells=~self.cells['deleted']\n valid_nodes=self.cells['nodes'][valid_cells,:].ravel()\n valid_nodes=valid_nodes[ valid_nodes>=0 ]\n used[ valid_nodes ]=True\n\n valid_edges=~self.edges['deleted']\n valid_nodes=self.edges['nodes'][valid_edges,:].ravel()\n used[ valid_nodes ]=True\n \n self.log.info(\"%d nodes found to be orphans\"%np.sum(~used))\n\n for n in np.nonzero(~used)[0]:\n self.delete_node(n)",
"def run_removing_edges(self):\n indices = np.where(self.X==1)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)",
"def cutoff_graph( g, simi_cutoff ) :\n g = copy.deepcopy( g )\n edges_to_be_deleted = []\n for e in g.edges() :\n if (g[e[0]][e[1]][\"similarity\"] < simi_cutoff) :\n edges_to_be_deleted.append( e )\n g.remove_edges_from( edges_to_be_deleted )\n return g",
"def remove_nodes(model: onnx.ModelProto,\n predicate: Callable) -> onnx.ModelProto:\n # ! this doesn't handle inputs/outputs\n logger = get_root_logger()\n while True:\n connect = None\n for i, node in enumerate(model.graph.node):\n if predicate(node):\n assert len(node.input) == 1\n assert len(node.output) == 1\n connect = (node.input[0], node.output[0])\n logger.info(f'remove node {node.name}')\n del model.graph.node[i]\n break\n if not connect:\n break\n src, dst = connect\n for node in model.graph.node:\n for i, input in enumerate(node.input):\n if input == dst:\n node.input[i] = src\n return model",
"def _prune_unreached(self):\n swcdict = {}\n for n in self._data: # Hash all the swc nodes\n swcdict[n[0]] = Node(n[0])\n\n # Try to join all the unconnected branches at first\n for i, n in enumerate(self._data):\n if n[6] not in swcdict:\n # Try to match it\n matched, midx = self.match(n[2:5], n[5])\n if matched:\n self._data[i, 6] = self._data[midx, 0]\n\n # Add mutual links for all nodes\n for n in self._data:\n id = n[0]\n pid = n[6]\n if pid >= 0:\n swcdict[id].add_link(swcdict[pid])\n\n groups = connected_components(set(swcdict.values()))\n lenlist = [len(g) for g in groups]\n maxidx = lenlist.index(max(lenlist))\n set2keep = groups[maxidx]\n id2keep = [n.id for n in set2keep]\n self._data = self._data[np.in1d(self._data[:, 0], np.asarray(id2keep)), :]",
"def remove_nodes(self, nodes):\n for node in nodes:\n for arc in node.entries:\n arc.src.exits.remove(arc)\n self.arcs.remove(arc)\n for arc in node.exits:\n arc.dest.entries.remove(arc)\n self.arcs.remove(arc)\n self.nodes.remove(node)\n dangling_nodes = []\n for node in self.nodes:\n if node == self.start or node == self.end:\n pass\n else:\n if not node.exits or not node.entries:\n dangling_nodes.append(node)\n if dangling_nodes:\n self.remove_nodes(dangling_nodes)",
"def disconnect_lowest_ecc(G, num_remove):\n num_removed = []\n spectral_gap = []\n\n g = G.copy()\n vs = np.random.choice(list(g.nodes()), num_remove, replace=False)\n for i, v in enumerate(vs):\n neighbors = list(g.neighbors(v))\n if len(neighbors) == 0:\n continue\n ecc = np.array([nx.eccentricity(G, n) for n in neighbors])\n remove = np.argmin(ecc)\n g.remove_edge(v, neighbors[remove])\n\n num_removed.append(i)\n spectral_gap.append(get_spectral_gap(g))\n\n return num_removed, spectral_gap"
] | [
"0.69280136",
"0.68292296",
"0.67802733",
"0.66563076",
"0.6461734",
"0.64381963",
"0.64123416",
"0.63864523",
"0.6341436",
"0.62839335",
"0.6267461",
"0.6262071",
"0.6231139",
"0.61691225",
"0.6163174",
"0.6140132",
"0.61174977",
"0.61075747",
"0.61025345",
"0.60737944",
"0.60666347",
"0.60567206",
"0.60521644",
"0.6047738",
"0.6041141",
"0.60200036",
"0.601443",
"0.60082656",
"0.5997524",
"0.5968756"
] | 0.8111857 | 0 |
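A minimal usage sketch for the drop_small_communities row above (editorial scaffolding, not part of the dataset row). It assumes nx_helpers.remove_nodes simply wraps Graph.remove_nodes_from; the example graph, community detector, and threshold are illustrative choices only:

    import networkx as nx
    from networkx.algorithms.community import greedy_modularity_communities

    def drop_small_communities(graph, communities, n=4):
        # Same logic as the record above; graph.remove_nodes_from stands in
        # for the original nx_helpers.remove_nodes helper (assumed equivalent).
        for community in communities:
            if len(community) < n:
                graph.remove_nodes_from(community)
        communities = [c for c in communities if len(c) >= n]
        return graph, communities

    g = nx.karate_club_graph()
    comms = [set(c) for c in greedy_modularity_communities(g)]
    g, comms = drop_small_communities(g, comms, n=4)
    print(g.number_of_nodes(), [len(c) for c in comms])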
Determines the quality of the mapping (assignment of edges) based on the "ground truth" of spanset and gapset. Sums up the number of edges between spanset and gapset. Assumes an undirected graph; see comments. (See the usage sketch after this record.) | def mapping_quality(graph, spanset, gapset):
the_sum = sum(sum(1 for edge in graph.edges(node) if edge[1] in gapset) for node in spanset)
# if directed graph, uncomment this:
#the_sum += sum(sum(1 for edge in graph.edges(node) if edge[1] in spanset) for node in gapset)
return the_sum | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n",
"def community_quality(communities, spanset, gapset):\n if len(communities) != 2:\n return -1\n\n com_sets = [set(c) for c in communities]\n spanset = set(spanset)\n gapset = set(gapset)\n\n spanset_0 = len(com_sets[0].difference(spanset))\n spanset_1 = len(com_sets[1].difference(spanset))\n gapset_0 = len(com_sets[0].difference(gapset))\n gapset_1 = len(com_sets[1].difference(gapset))\n\n # used for determining which community corresponds to gapset and spanset\n spanset_i = 1 - np.argmax([spanset_0, spanset_1])\n gapset_i = 1 - np.argmax([gapset_0, gapset_1])\n\n if spanset_i == gapset_i:\n # Error in finding community quality\n return -1\n elif spanset_i == 0:\n return spanset_0 + gapset_1\n elif spanset_i == 1:\n return spanset_1 + gapset_0\n else:\n return -1",
"def test_can_traverse_wide_grid(self):\n grid = [[\"1\", \"0\", \"1\", \"1\", \"0\", \"1\", \"0\", \"0\", \"1\", \"0\"]]\n result = num_islands(grid)\n self.assertEqual(result, 4)",
"def make_set_cover_nr(gRNA_hits, num_sets = 1, target_ids = [], low_coverage_penalty = 0,\n num_lengths_to_track = None, prioritise_3prime = False, optimal_depth = 5,\n suppress_warning = False):\n collapsed_grnas = gRNA_hits.collapse()\n if not target_ids:\n target_ids = set().union(*[set(cg) for cg in collapsed_grnas])\n else:\n target_ids = set(target_ids)\n ## function to regenerate set cover solutions from collapsed_grna object\n collapsed_grnas_original = collapsed_grnas.copy()\n def generate_sc_solutions():\n ## sort in order of smallest set cover size, smallest redundancy, and size of largest set in set cover\n minweight_sc = limited_minweight_SC(collapsed_grnas, num_sets, targets = target_ids,\n low_coverage_penalty = low_coverage_penalty,\n num_lengths_to_track = num_lengths_to_track)\n ## optimal solutions\n max_depth = min(optimal_depth, max(map(len, minweight_sc)))\n max_redundancy = max(map(lambda C:C.redundancy, minweight_sc))/len(target_ids)\n print(max_depth, max_redundancy)\n optimal_sc = limited_optimal_SC(target_ids, collapsed_grnas_original,\n size = max_depth, redundancy = max_redundancy)\n print(\"num unfiltered optimal sc:\", len(optimal_sc))\n ## remove duplicates\n optimal_sc = [C for C in optimal_sc\n if all(map(lambda minweight_C:(len(C) != minweight_C\n and C != minweight_C),\n minweight_sc))]\n print(\"num filtered optimal sc:\", len(optimal_sc))\n return sorted(minweight_sc + optimal_sc,\n key = lambda C:(len(C), C.redundancy, -C.max_coverage))\n sc_solutions = []\n sc_solutions.extend(generate_sc_solutions())\n eliminated_grna = []\n ## function to generate set covers\n def make_set_cover(restore = []):\n ## restore only works if gRNA belonged in the current set cover\n curr_sc = sc_solutions[0]\n for grna in restore:\n curr_sc.add_grna(grna)\n eliminated_grna.remove(grna)\n ## if current set cover solution has at least one CollapsedgRNA with no gRNA left\n while not curr_sc.all_not_empty():\n sink = sc_solutions.pop(0) ## remove set cover solution\n ## generate more possible gRNA sets if no pre-generated set covers are left\n if not sc_solutions:\n collapsed_grnas.remove_grna(*eliminated_grna)\n collapsed_grnas.remove_empty()\n sc_solutions.extend(generate_sc_solutions())\n if not sc_solutions:\n if not suppress_warning:\n print((\"\\nError: The provided gRNA sequences cannot cover all\"\n \" target sequences at least once.\\n\"))\n return []\n ## select next solution\n curr_sc = sc_solutions[0]\n ## consume=True -> remove selected gRNA from CollapsedgRNA\n output = curr_sc.generate_grna_set(prioritise_3prime = prioritise_3prime, consume = True)\n eliminated_grna.extend(output)\n return output\n return make_set_cover",
"def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2",
"def countEdges(self):\n return numpy.count_nonzero(self.supportArray) / 2",
"def cal_expected_map(self, ranking_list, total_rel=0):\r\n s = 0.0\r\n pr = 0\r\n pn = 0\r\n for ele in reversed(ranking_list):\r\n rel_doc_cnt = ele[0]\r\n this_doc_cnt = ele[1]\r\n nonrel_doc_cnt = this_doc_cnt - rel_doc_cnt\r\n s += self.A(pr, pn, rel_doc_cnt, nonrel_doc_cnt)\r\n pr += rel_doc_cnt\r\n pn += nonrel_doc_cnt\r\n total_rel += rel_doc_cnt\r\n #print s/total_rel\r\n if total_rel == 0:\r\n return 0\r\n return s/total_rel",
"def best_pairing(current_end, end_dict, inverse_dict, blast_hits, l_min_score, r_min_score):\n #this duplicates part of trio_hits - should try to rewrite that to use this function\n \n l_flange = int(end_dict[current_end][1])\n l_contig = end_dict[current_end][0]\n \n #first find blast hits for the target scaffold end\n left_matches = []\n for hit in blast_hits:\n if hit[0] == l_contig and int(hit[11]) >= l_min_score:\n left_matches.append(hit)\n \n link_count = {}\n \n #then find other ends with correctly oriented hits adjacent to the target hits\n for slink in end_dict:\n link = end_dict[slink][0]\n \n right_matches = []\n\n for hit in blast_hits:\n if hit[0] == link and int(hit[11]) >= r_min_score: \n right_matches.append(hit)\n \n for lhit in left_matches:\n for rhit in right_matches:\n srhit = inverse_dict[rhit[0]]\n r_flange = end_dict[srhit][1]\n joint_flange = l_flange + r_flange\n \n if lhit[1] == rhit[1]:\n lh_start = int(lhit[8])\n lh_end = int(lhit[9])\n rh_start = int(rhit[8])\n rh_end = int(rhit[9])\n\n if abs(lh_start - rh_start) < joint_flange + 3000:\n if (lh_end - lh_start)/(rh_end - rh_start) < 0:\n if abs(lh_end - rh_end) > abs(lh_start - rh_start):\n link_score = int(lhit[11]) * int(rhit[11])\n if not link in link_count: \n link_count[link] = link_score\n elif link_score > link_count[link]:\n link_count[link] = link_score\n return link_count",
"def n_inequalities(self):\n try: \n return self._n_inequalities\n except AttributeError:\n self._n_inequalities = 0\n for i in self.inequalities(): self._n_inequalities += 1\n return self._n_inequalities",
"def seg_to_affgraph(seg_gt, nhood):\n nhood = np.ascontiguousarray(nhood, np.int32)\n shape = seg_gt.shape\n n_edge = nhood.shape[0]\n aff = np.zeros((n_edge,)+shape,dtype=np.int16)\n\n for e in range(n_edge):\n aff[e, \\\n max(0,-nhood[e,0]):min(shape[0],shape[0]-nhood[e,0]), \\\n max(0,-nhood[e,1]):min(shape[1],shape[1]-nhood[e,1]), \\\n max(0,-nhood[e,2]):min(shape[2],shape[2]-nhood[e,2])] = \\\n (seg_gt[max(0,-nhood[e,0]):min(shape[0],shape[0]-nhood[e,0]), \\\n max(0,-nhood[e,1]):min(shape[1],shape[1]-nhood[e,1]), \\\n max(0,-nhood[e,2]):min(shape[2],shape[2]-nhood[e,2])] == \\\n seg_gt[max(0,nhood[e,0]):min(shape[0],shape[0]+nhood[e,0]), \\\n max(0,nhood[e,1]):min(shape[1],shape[1]+nhood[e,1]), \\\n max(0,nhood[e,2]):min(shape[2],shape[2]+nhood[e,2])] ) \\\n * ( seg_gt[max(0,-nhood[e,0]):min(shape[0],shape[0]-nhood[e,0]), \\\n max(0,-nhood[e,1]):min(shape[1],shape[1]-nhood[e,1]), \\\n max(0,-nhood[e,2]):min(shape[2],shape[2]-nhood[e,2])] > 0 ) \\\n * ( seg_gt[max(0,nhood[e,0]):min(shape[0],shape[0]+nhood[e,0]), \\\n max(0,nhood[e,1]):min(shape[1],shape[1]+nhood[e,1]), \\\n max(0,nhood[e,2]):min(shape[2],shape[2]+nhood[e,2])] > 0 )\n\n return aff",
"def test_metric(self, qset: Iterator[Tuple[str, float]]) -> Dict[str, float]:\n res = dict(mks0=0.0, mks1=0.0, mks2=0.0, sum_weights=0.0, sum_wlen=0.0, n=0)\n hist = {k: {} for k in {\"mks0\", \"mks1\", \"mks2\", \"l\"}} # pylint: disable=C0208\n wei = {k: {} for k in hist}\n res[\"hist\"] = hist\n res[\"histnow\"] = wei\n\n for el, _ in self.enumerate_test_metric(qset):\n le = len(el.value)\n w = el.weight\n res[\"mks0\"] += w * el.mks0\n res[\"mks1\"] += w * el.mks1\n res[\"mks2\"] += w * el.mks2\n res[\"sum_weights\"] += w\n res[\"sum_wlen\"] += w * le\n res[\"n\"] += 1\n\n if el.mks0 not in hist[\"mks0\"]:\n hist[\"mks0\"][el.mks0] = w\n wei[\"mks0\"][el.mks0] = 1\n else:\n hist[\"mks0\"][el.mks0] += w\n wei[\"mks0\"][el.mks0] += 1\n if el.mks1 not in hist[\"mks1\"]:\n hist[\"mks1\"][el.mks1] = w\n wei[\"mks1\"][el.mks1] = 1\n else:\n hist[\"mks1\"][el.mks1] += w\n wei[\"mks1\"][el.mks1] += 1\n if el.mks2 not in hist[\"mks2\"]:\n hist[\"mks2\"][el.mks2] = w\n wei[\"mks2\"][el.mks2] = 1\n else:\n hist[\"mks2\"][el.mks2] += w\n wei[\"mks2\"][el.mks2] += 1\n if le not in hist[\"l\"]:\n hist[\"l\"][le] = w\n wei[\"l\"][le] = 1\n else:\n hist[\"l\"][le] += w\n wei[\"l\"][le] += 1\n return res",
"def calculate_edge_spans(self):\n for edge in self.edges:\n from_index = self.layers.index(edge.from_node.layer)\n to_index = self.layers.index(edge.to_node.layer)\n edge.span = abs(from_index - to_index)",
"def count_automorphisms(g: Graph) -> int:\n\n def generate_mapping(g: Graph, h: Graph):\n \"\"\"\n Generates the corresponding mapping from vertex to vertex for the isomorphism between graphs g and h.\n We map g to h.\n :param g: A graph\n :param h: A graph\n :return: A permutation with the mapping from g to h\n \"\"\"\n mapping = [0] * len(g.vertices)\n for v_g in g:\n for v_h in h:\n if v_g.colornum == v_h.colornum:\n mapping[v_g.label] = v_h.label\n return permutation(len(mapping), mapping=mapping)\n\n def generate_automorphisms(g: Graph, h: Graph, d: list[Vertex], i: list[Vertex]):\n \"\"\"\n Is called recursively to traverse through the branching tree and to find all automorphisms.\n :param g: A copy of the original graph\n :param h: Another copy of the original graph\n :param d: A list with pre-colored vertices for graph g\n :param i: A list with pre-colored vertices for graph h\n \"\"\"\n\n # Refine the graphs g and h.\n color_refinement([g, h])\n\n # Make sure that the colors are balanced, and check for a bijection.\n if not is_balanced(g, h):\n return\n if is_bijection(g, h):\n\n # Generate the mapping from g -> h.\n p = generate_mapping(g, h)\n\n # If the permutation cannot be generated by this generating set, we need to add it.\n if not is_member(generating_set, p):\n generating_set.append(p)\n\n # We can now back to the last trivial ancestor nodes in the branching tree.\n while [v.label for v in d] != [v.label for v in i]:\n # We remove the vertices from d and i and mark them as 'used'.\n # This should prevent the algorithm from trying to re-explore a branch that may be skipped.\n # FIXME: This strategy seems too aggressive, the results are sometimes off by a factor 2 or 4\n d.pop().pre_labeled = True\n i.pop().pre_labeled = True\n\n return\n\n c, next_color = get_c([g, h])\n for v_g in g:\n if v_g.colornum == c:# and not v_g.pre_labeled:\n x = v_g\n break\n\n for v_h in h:\n if v_h.colornum == c and not v_h.pre_labeled:\n g1 = g + Graph(False)\n h1 = h + Graph(False)\n g1.vertices[g.vertices.index(x)].colornum = next_color\n h1.vertices[h.vertices.index(v_h)].colornum = next_color\n d.append(x)\n i.append(v_h)\n generate_automorphisms(g1, h1, d, i)\n\n generating_set = []\n graph_copy_1 = g + Graph(False)\n graph_copy_2 = g + Graph(False)\n for v in graph_copy_1.vertices:\n v.pre_labeled = False\n for v in graph_copy_2.vertices:\n v.pre_labeled = False\n generate_automorphisms(graph_copy_1, graph_copy_2, [], [])\n return compute_order(generating_set)",
"def test_can_traverse_tall_grid(self):\n grid = [\n [\"0\"],\n [\"1\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 4)",
"def matching_score(self,set1, set2):\n set_set1=set(set1)\n set_set2=set(set2)\n '''print(\" set_set12\")\n print(set_set1)\n print(set_set2)'''\n return len(set_set1.intersection(set_set2)) ** 2 / (float(len(set1)) * len(set2))\n #return len(set_set1.intersection(set_set2)) / len(set_set1.union(set_set2))",
"def test_for_grader():\n test_map1 = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 0, 0, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 0, 0, 1, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1]])\n x_spacing1 = 1\n y_spacing1 = 1\n start1 = np.array([[1.5], [1.5], [0]])\n goal1 = np.array([[7.5], [1], [0]])\n path1 = dijkstras(test_map1,x_spacing1,y_spacing1,start1,goal1)\n s = 0\n for i in range(len(path1)-1):\n s += np.sqrt((path1[i][0]-path1[i+1][0])**2 + (path1[i][1]-path1[i+1][1])**2)\n print(\"Path 1 length:\")\n print(s)\n\n\n test_map2 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n start2 = np.array([[0.4], [0.4], [1.5707963267948966]])\n goal2 = np.array([[0.4], [1.8], [-1.5707963267948966]])\n x_spacing2 = 0.2\n y_spacing2 = 0.2\n path2 = dijkstras(test_map2,x_spacing2,y_spacing2,start2,goal2)\n s = 0\n for i in range(len(path2)-1):\n s += np.sqrt((path2[i][0]-path2[i+1][0])**2 + (path2[i][1]-path2[i+1][1])**2)\n print(\"Path 2 length:\")\n print(s)",
"def get_GNS_cut_reduced(self):\n # we build the optimization around the casted digraph instead of multidigraph\n # for simplicity\n G = self.base_digraph\n s_1 = self.sources[0]\n s_2 = self.sources[1]\n t_1 = self.destinations[0]\n t_2 = self.destinations[1]\n edges = G.edges()\n nodes = G.nodes()\n\n try:\n\n # Great an gurobi instance of the optimization model\n m = Model(\"GNS\")\n m.setParam('OutputFlag', False)\n\n x_v = {}\n # vertex variables for s_1, t_1 cut\n for v in nodes:\n x_v[v] = m.addVar(vtype=GRB.BINARY)\n\n y_v = {}\n # vertex variables for s_2, t_2 cut\n for v in nodes:\n y_v[v] = m.addVar(vtype=GRB.BINARY)\n\n z_v = {}\n # vertex variables for s_2, t_1 cut\n for v in nodes:\n z_v[v] = m.addVar(vtype=GRB.BINARY)\n\n e = {}\n # GNS indicator variable\n for (u,v) in edges:\n e[u,v] = m.addVar(vtype=GRB.BINARY, obj=G[u][v]['capacity'])\n\n # Done with decision variable creation\n # update model\n m.update()\n\n # Constraints\n # 1. Constraints for s_1 - t_1 cut\n for (u,v) in edges:\n if (u,v) == (s_1, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_1:\n m.addConstr(x_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-x_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(x_v[v] - x_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_2):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(y_v[v] + e[u,v] >= 1)\n elif v == t_2:\n m.addConstr(-y_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(y_v[v] - y_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(z_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-z_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(z_v[v] - z_v[u] + e[u,v] >= 0)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n #print \"Min GNS cut value = \" + str(m.objVal)\n #print \"GNS cut edges:\"\n cut_set_edges = []\n for u,v in edges:\n if e[u,v].x != 0:\n #print (u,v), str(G[u][v]['capacity'])\n cut_set_edges.append((u,v, G[u][v]['capacity']))\n return (m.objVal, cut_set_edges)\n else:\n # something went wrong...err...\n print \"Something was wrong\"\n return None, None\n\n except GurobiError:\n print ('Error report from Gurobi')",
"def compute_num_edges(graph):\n # return the number of edges\n return sum([len(graph[source_node].keys()) for source_node in graph.keys()]) / 2",
"def test_returns_correct_number_of_islands(self):\n grid = [\n [\"1\", \"1\", \"1\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"1\"],\n [\"1\", \"0\", \"1\", \"0\", \"1\"],\n [\"0\", \"1\", \"1\", \"1\", \"1\"],\n [\"1\", \"0\", \"1\", \"1\", \"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 3)",
"def __used(self):\n tot=0\n assign={}\n for c in self.assigned :\n if not assign.has_key(c.start) :\n assign[c.start]=c.end\n tot+=c.end-c.start+1\n return tot",
"def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))",
"def _compute_packet_heatmap(simulators, span):\n _check_span_keywords(span=span)\n n = simulators[0].model.network.num_nodes\n packet_count = np.zeros([n, n])\n for simulator in simulators:\n if simulator.model.network.num_nodes != n:\n raise ValueError(\"Inconsistent number of nodes are found among the simulators.\")\n if span in overall_range_keywords:\n latency = simulator.latency\n elif span in extended_range_keywords:\n start_index = simulator.batch_stats[-1]['end_index']\n latency = simulator.latency[start_index:]\n elif span in final_batch_range_keywords:\n start_index = simulator.batch_stats[-1]['start_index']\n end_index = simulator.batch_stats[-1]['end_index']\n latency = simulator.latency[start_index:end_index]\n for latency_dict in latency:\n packet_count[latency_dict['Source ID']][latency_dict['Destination ID']] += 1\n max_packet = np.nanmax(packet_count)\n min_packet = np.nanmin(packet_count)\n for i in range(n):\n for j in range(n):\n if i == j:\n packet_count[i][j] = np.nan\n else:\n packet_count[i][j] = (packet_count[i][j] - min_packet) / (max_packet - min_packet)\n return packet_count",
"def EdgeWeights(setOfEdges, multiSetOfEdges):\n weights = dict()\n for edge in setOfEdges:\n weights[edge] = multiSetOfEdges.count(edge)\n # a way to normalize edge weights\n # edgeWeights = map((1 / max(list(weights.values()))), weights.values())\n return weights",
"def test_edges_and_weights():\n edges, weights, state_value = edges_weights_specified_state(\n PROBABILITYMATRIX,\n MARKET,\n SNULL)\n assert len(edges) == len(weights) == (len(state_value)**2)",
"def query_size_of_adjacent_nodes(self, node_curie, source_type, adjacent_type, kp=\"infores:rtx-kg2\", rel_type=None):\n\n res = None\n source_type = ComputeFTEST.convert_string_to_snake_case(source_type.replace('biolink:',''))\n source_type = ComputeFTEST.convert_string_biolinkformat(source_type)\n adjacent_type = ComputeFTEST.convert_string_to_snake_case(adjacent_type.replace('biolink:',''))\n adjacent_type = ComputeFTEST.convert_string_biolinkformat(adjacent_type)\n\n if rel_type is None:\n normalized_nodes = self.nodesynonymizer.get_canonical_curies(node_curie)\n failure_nodes = list()\n mapping = {node:normalized_nodes[node]['preferred_curie'] for node in normalized_nodes if normalized_nodes[node] is not None}\n failure_nodes += list(normalized_nodes.keys() - mapping.keys())\n query_nodes = list(set(mapping.values()))\n query_nodes = [curie_id.replace(\"'\", \"''\") if \"'\" in curie_id else curie_id for curie_id in query_nodes]\n # special_curie_ids = [curie_id for curie_id in query_nodes if \"'\" in curie_id]\n\n # Get connected to kg2c sqlite\n connection = sqlite3.connect(self.sqlite_file_path)\n cursor = connection.cursor()\n\n # Extract the neighbor count data\n node_keys_str = \"','\".join(query_nodes) # SQL wants ('node1', 'node2') format for string lists\n sql_query = f\"SELECT N.id, N.neighbor_counts \" \\\n f\"FROM neighbors AS N \" \\\n f\"WHERE N.id IN ('{node_keys_str}')\"\n cursor.execute(sql_query)\n rows = cursor.fetchall()\n rows = [curie_id.replace(\"\\'\",\"'\").replace(\"''\", \"'\") if \"'\" in curie_id else curie_id for curie_id in rows]\n connection.close()\n\n # Load the counts into a dictionary\n neighbor_counts_dict = {row[0]:eval(row[1]) for row in rows}\n\n res_dict = {node:neighbor_counts_dict[mapping[node]].get(adjacent_type) for node in mapping if mapping[node] in neighbor_counts_dict and neighbor_counts_dict[mapping[node]].get(adjacent_type) is not None}\n failure_nodes += list(mapping.keys() - res_dict.keys())\n\n if len(failure_nodes) != 0:\n return (res_dict, failure_nodes)\n else:\n return (res_dict, [])\n\n else:\n # if kp == 'ARAX/KG1':\n # self.response.warning(f\"Since the edge type '{rel_type}' is from KG1, we still use the DSL expand(kg=ARAX/KG1) to query neighbor count. However, the total node count is based on KG2c from 'nodesynonymizer.get_total_entity_count'. 
So the FET result might not be accurate.\")\n\n # construct the instance of ARAXQuery class\n araxq = ARAXQuery()\n\n # check if node_curie is a str or a list\n if type(node_curie) is str:\n query_node_curie = node_curie\n elif type(node_curie) is list:\n node_id_list_str = \"[\"\n for index in range(len(node_curie)):\n node = node_curie[index]\n if index + 1 == len(node_curie):\n node_id_list_str = node_id_list_str + str(node) + \"]\"\n else:\n node_id_list_str = node_id_list_str + str(node) + \",\"\n\n query_node_curie = node_id_list_str\n else:\n self.response.error(\"The 'node_curie' argument of 'query_size_of_adjacent_nodes' method within FET only accepts str or list\")\n return res\n\n # call the method of ARAXQuery class to query adjacent node\n query = {\"operations\": {\"actions\": [\n \"create_message\",\n f\"add_qnode(ids={query_node_curie}, categories={source_type}, key=FET_n00)\",\n f\"add_qnode(categories={adjacent_type}, key=FET_n01)\",\n f\"add_qedge(subject=FET_n00, object=FET_n01, key=FET_e00, predicates={rel_type})\",\n f\"expand(edge_key=FET_e00,kp={kp})\",\n #\"resultify()\",\n \"return(message=true, store=false)\"\n ]}}\n\n try:\n result = araxq.query(query)\n if result.status != 'OK':\n self.response.error(f\"Fail to query adjacent nodes from infores:rtx-kg2 for {node_curie}\")\n return res\n else:\n res_dict = dict()\n message = araxq.response.envelope.message\n if type(node_curie) is str:\n tmplist = set([edge_key for edge_key in message.knowledge_graph.edges if message.knowledge_graph.edges[edge_key].subject == node_curie or message.knowledge_graph.edges[edge_key].object == node_curie]) ## edge has no direction\n if len(tmplist) == 0:\n self.response.warning(f\"Fail to query adjacent nodes from {kp} for {node_curie} in FET probably because expander ignores node type. For more details, please see issue897.\")\n return (res_dict,[node_curie])\n res_dict[node_curie] = len(tmplist)\n return (res_dict,[])\n else:\n check_empty = False\n failure_nodes = list()\n for node in node_curie:\n tmplist = set([edge_key for edge_key in message.knowledge_graph.edges if message.knowledge_graph.edges[edge_key].subject == node or message.knowledge_graph.edges[edge_key].object == node]) ## edge has no direction\n if len(tmplist) == 0:\n self.response.warning(f\"Fail to query adjacent nodes from {kp} for {node} in FET probably because expander ignores node type. For more details, please see issue897.\")\n failure_nodes.append(node)\n check_empty = True\n continue\n res_dict[node] = len(tmplist)\n\n if check_empty is True:\n return (res_dict,failure_nodes)\n else:\n return (res_dict,[])\n except:\n tb = traceback.format_exc()\n error_type, error, _ = sys.exc_info()\n self.response.error(tb, error_code=error_type.__name__)\n self.response.error(f\"Something went wrong with querying adjacent nodes from {kp} for {node_curie}\")\n return res",
"def test_weighting(self):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\"\n )\n for stat in [\"hs\", \"tp\"]:\n idw = dset.spec.stats([stat])[stat].values\n site0 = self.dset.isel(site=[0]).spec.stats([stat])[stat].values\n site1 = self.dset.isel(site=[1]).spec.stats([stat])[stat].values\n lower = np.array([min(s1, s2) for s1, s2 in zip(site0, site1)])\n upper = np.array([max(s1, s2) for s1, s2 in zip(site0, site1)])\n assert (upper - idw > 0).all() and (idw - lower > 0).all()",
"def compute_m_paths_of_len_back(g, sampl, end_times, path_len=5, cost=1, its=100, max_duration=sys.maxint):\n\n num_of_paths_dict = {}\n\n # for each node in source, generate random paths and count the number of times they end up in each\n # dest. node\n for dest, end_time in zip(sampl, end_times):\n print \"Processing dest node: {} (ending at time {})\".format(dest, end_time)\n num_of_paths_dict[(dest, end_time)] = {source: 0 for source in g.nodes()}\n for _ in range(its):\n d = random_path_backwards(g, dest, path_len, end_time, cost, max_duration)\n if d is not None:\n num_of_paths_dict[(dest, end_time)][d] += 1\n\n return num_of_paths_dict",
"def edge_count(adjList):\n edges = {}\n for id, neigh in enumerate(adjList):\n for n in neigh:\n edges[max(n, id), min(n, id)] = id\n\n return len(edges)",
"def num_cusps_of_regions(self):\n G = self._get_puncturefinder_graph()\n # return [sum(G.subgraph(vertices=region).edge_labels())\n # for region in G.connected_components()]\n return [sum(edge[2]['weight']\n for edge in subgraph.edges(data=True))\n for subgraph in nx.connected_component_subgraphs(G)]",
"def count_property_range_hits(prop, node_dict, hits):\n\tres = []\n\t# sets tuple position to use in dict value\n\tswitcher = {\n \"length\": (0,(0,4000,8000,12000,16000,20000)),\n \"steps\": (1,(0,2,4,8,16,32)),\n \"cov\": (2,(1,10,100,1000,10000,100000)),\n \"cv\": (3, (0,0.05,0.10,0.15,0.20,0.25))\n }\n\tif prop not in switcher:\n\t\treturn res\n\ttup_pos = switcher[prop][0]\n\tnode_cnt = 0\n\tpos_cnt = 0\n\tfor ind in range(len(switcher[prop][1])-1):\n\t\tmin_val = switcher[prop][1][ind]\n\t\tmax_val = switcher[prop][1][ind+1]\n\t\tfor node in node_dict.keys():\n\t\t\tval = node_dict[node][tup_pos]\n\t\t\tif ind < len(switcher[prop][1])-2:\n\t\t\t\trange_test_val = (min_val <= val < max_val)\n\t\t\telse:\n\t\t\t\trange_test_val = (min_val <= val <= max_val)\n\t\t\t# print \"range bool is\", range_test_val\n\t\t\tif range_test_val:\n\t\t\t\tnode_cnt += 1\n\t\t\t\tif node in hits: pos_cnt += 1\n\t\tif node_cnt > 0:\n\t\t\tres.append( (pos_cnt, node_cnt, round(float(pos_cnt)/node_cnt,2)))\n\t\telse:\n\t\t\tres.append((0,0,0))\n\t\tnode_cnt = 0\n\t\tpos_cnt = 0\n\treturn res"
] | [
"0.55912036",
"0.553591",
"0.5377618",
"0.53747284",
"0.53365",
"0.5324719",
"0.5322185",
"0.5320498",
"0.5284785",
"0.52796876",
"0.5255801",
"0.5249715",
"0.5231621",
"0.5206148",
"0.51990473",
"0.519828",
"0.51928836",
"0.51651835",
"0.51341033",
"0.5120622",
"0.511521",
"0.50705624",
"0.50290227",
"0.50163823",
"0.4992436",
"0.4988187",
"0.49855474",
"0.4982166",
"0.49643457",
"0.49609903"
] | 0.8308598 | 0 |
Determines the quality of the communities based on the "ground truth" of spanset and gapset. First determines which community corresponds to the spanset and which to the gapset, then returns the number of wrongly assigned nodes. | def community_quality(communities, spanset, gapset):
if len(communities) != 2:
return -1
com_sets = [set(c) for c in communities]
spanset = set(spanset)
gapset = set(gapset)
spanset_0 = len(com_sets[0].difference(spanset))
spanset_1 = len(com_sets[1].difference(spanset))
gapset_0 = len(com_sets[0].difference(gapset))
gapset_1 = len(com_sets[1].difference(gapset))
# determine which community best matches the spanset and which best matches the gapset
spanset_i = 1 - np.argmax([spanset_0, spanset_1])
gapset_i = 1 - np.argmax([gapset_0, gapset_1])
if spanset_i == gapset_i:
# both ground-truth sets map to the same community, so quality cannot be determined
return -1
elif spanset_i == 0:
return spanset_0 + gapset_1
elif spanset_i == 1:
return spanset_1 + gapset_0
else:
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mapping_quality(graph, spanset, gapset):\n the_sum = sum(sum(1 for edge in graph.edges(node) if edge[1] in gapset) for node in spanset)\n # if directed graph, uncomment this:\n #the_sum += sum(sum(1 for edge in graph.edges(node) if edge[1] in spanset) for node in gapset)\n return the_sum",
"def compare(self):\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n self.stars_length = len(self.cluster_lists[shorter_index]) \n self.starlets_length = len(self.cluster_lists[longer_index]) \n # build the noeds for shorter cluster list, and get the\n # distribution of cluster size.\n for cluster in self.cluster_lists[shorter_index]:\n len_spectra = len(cluster.get_spectra())\n star = ClusterNode(cluster.id, len_spectra) \n self.stars[cluster.id] = star\n\n self.cluster_spectra_num[shorter_index] += len_spectra\n self.cluster_size_dist[shorter_index][len_spectra] = self.cluster_size_dist[shorter_index].get(len_spectra,0) + 1\n # build the noeds for longer cluster list, and get the\n # distribution of cluster size.\n for cluster in self.cluster_lists[longer_index]:\n len_spectra = len(cluster.get_spectra())\n starlet = ClusterNode(cluster.id, len_spectra) \n self.starlets[cluster.id] = starlet\n\n self.cluster_spectra_num[longer_index] += len_spectra\n self.cluster_size_dist[longer_index][len_spectra] = self.cluster_size_dist[longer_index].get(len_spectra,0) + 1\n # do the comparing, and network building\n for i in range (0, len(self.cluster_lists[shorter_index])):\n cluster0 = self.cluster_lists[shorter_index][i] \n for j in range (i, len(self.cluster_lists[longer_index])):\n cluster1 = self.cluster_lists[longer_index][j] \n (shared_spec_num, similarity) = self.calculate_similarity(cluster0, cluster1)\n if similarity == 0:\n continue\n self.similarity_dist[int(similarity*10)] = self.similarity_dist.get(int(similarity*10),0) + 1\n self.shared_spec_num += shared_spec_num\n\n self.stars[cluster0.id].add_nb_node(cluster1.id, similarity, shared_spec_num)\n self.starlets[cluster1.id].add_nb_node(cluster0.id, similarity, shared_spec_num)\n\n self.ave_star_size = self.cluster_spectra_num[shorter_index]/self.stars_length\n self.ave_starlet_size = self.cluster_spectra_num[longer_index]/self.starlets_length",
"def communities_with_protesters(partition, active_nodes):\n return len(set([partition[node] for node in active_nodes]))",
"def community_detection(net_G):\r\n if list(nx.isolates(net_G)) == []:\r\n part = community.best_partition(net_G)\r\n #values = [part.get(node) for node in net_G.nodes()]\r\n #nx.draw_spring(net_G, cmap = plt.get_cmap('jet'), node_color = values, node_size=30, with_labels=False)\r\n #plt.show()\r\n else:\r\n net_G = net_G.copy()\r\n net_G.remove_nodes_from(list(nx.isolates(net_G)))\r\n part = community.best_partition(net_G)\r\n list_nodes = []\r\n for com in set(part.values()):\r\n list_nodes.append([nodes for nodes in part.keys() if part[nodes] == com])\r\n num_of_communities = len(list_nodes)\r\n partition_performance = nx.algorithms.community.quality.performance(net_G, list_nodes)\r\n net_communities = [[\"Numbers of communities:\", num_of_communities], \\\r\n [\"Partition performance:\", partition_performance]]\r\n return net_communities",
"def make_set_cover_nr(gRNA_hits, num_sets = 1, target_ids = [], low_coverage_penalty = 0,\n num_lengths_to_track = None, prioritise_3prime = False, optimal_depth = 5,\n suppress_warning = False):\n collapsed_grnas = gRNA_hits.collapse()\n if not target_ids:\n target_ids = set().union(*[set(cg) for cg in collapsed_grnas])\n else:\n target_ids = set(target_ids)\n ## function to regenerate set cover solutions from collapsed_grna object\n collapsed_grnas_original = collapsed_grnas.copy()\n def generate_sc_solutions():\n ## sort in order of smallest set cover size, smallest redundancy, and size of largest set in set cover\n minweight_sc = limited_minweight_SC(collapsed_grnas, num_sets, targets = target_ids,\n low_coverage_penalty = low_coverage_penalty,\n num_lengths_to_track = num_lengths_to_track)\n ## optimal solutions\n max_depth = min(optimal_depth, max(map(len, minweight_sc)))\n max_redundancy = max(map(lambda C:C.redundancy, minweight_sc))/len(target_ids)\n print(max_depth, max_redundancy)\n optimal_sc = limited_optimal_SC(target_ids, collapsed_grnas_original,\n size = max_depth, redundancy = max_redundancy)\n print(\"num unfiltered optimal sc:\", len(optimal_sc))\n ## remove duplicates\n optimal_sc = [C for C in optimal_sc\n if all(map(lambda minweight_C:(len(C) != minweight_C\n and C != minweight_C),\n minweight_sc))]\n print(\"num filtered optimal sc:\", len(optimal_sc))\n return sorted(minweight_sc + optimal_sc,\n key = lambda C:(len(C), C.redundancy, -C.max_coverage))\n sc_solutions = []\n sc_solutions.extend(generate_sc_solutions())\n eliminated_grna = []\n ## function to generate set covers\n def make_set_cover(restore = []):\n ## restore only works if gRNA belonged in the current set cover\n curr_sc = sc_solutions[0]\n for grna in restore:\n curr_sc.add_grna(grna)\n eliminated_grna.remove(grna)\n ## if current set cover solution has at least one CollapsedgRNA with no gRNA left\n while not curr_sc.all_not_empty():\n sink = sc_solutions.pop(0) ## remove set cover solution\n ## generate more possible gRNA sets if no pre-generated set covers are left\n if not sc_solutions:\n collapsed_grnas.remove_grna(*eliminated_grna)\n collapsed_grnas.remove_empty()\n sc_solutions.extend(generate_sc_solutions())\n if not sc_solutions:\n if not suppress_warning:\n print((\"\\nError: The provided gRNA sequences cannot cover all\"\n \" target sequences at least once.\\n\"))\n return []\n ## select next solution\n curr_sc = sc_solutions[0]\n ## consume=True -> remove selected gRNA from CollapsedgRNA\n output = curr_sc.generate_grna_set(prioritise_3prime = prioritise_3prime, consume = True)\n eliminated_grna.extend(output)\n return output\n return make_set_cover",
"def calc_skill_cluster_sets(blocked_days, GTD, GTD_seas, persis_thresh, SOM_nodes, blocks_one_clusnum, skill_str, seas):\r\n prec_arr, recall_arr, F1_arr, clus_num_arr = [], [], [], []\r\n\r\n prec_vals = sorted(np.unique(blocks_one_clusnum[skill_str].values), reverse = True)\r\n #loop through first element separately so that subsequent values can be appended\r\n node_cluster_set_test_str, ds_arr = [], []\r\n for prec in prec_vals:\r\n node_cluster_set_test_str_app = blocks_one_clusnum['set'][np.where(blocks_one_clusnum[skill_str]==prec)[0]].values\r\n for clus in node_cluster_set_test_str_app:\r\n #add cluster to cluster set\r\n node_cluster_set_test_str = np.append(node_cluster_set_test_str, clus)\r\n node_cluster_set_test_str = np.unique(node_cluster_set_test_str)\r\n node_num = len(node_cluster_set_test_str) # number of nodes in cluster set\r\n clus_num_arr.append(node_num)\r\n #calculate skill score of cluster set by calculating the number of days blocked from the GTD and selecting the season\r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodes, node_cluster_set_test_str)\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims={\"time\": GTD['time']})\r\n blocked_days_clus_xr['time'] = GTD['time']\r\n blocked_days_clus_sel = blocked_days_clus_xr.sel(time=np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_seas, blocked_days_clus_sel)\r\n prec_arr.append(prec)\r\n recall_arr.append(recall)\r\n F1_arr.append(F1)\r\n\r\n return clus_num_arr, prec_arr, recall_arr, F1_arr",
"def g_minority_1_dev(by_grps):\n if by_grps[0][0]==by_grps[0][1]:\n print(\"Failed g_1dev_t2 -- small groups match\")\n return False\n \n cts = 0\n ctn = 0\n cto = 0\n big_letter= \"\"\n \n for item in by_grps[1]:\n if item==\"S\":\n cts+=1\n if item==\"N\":\n ctn+=1 \n if item==\"O\":\n cto+=1\n if(cts==4 or ctn==4 or cto ==4):\n pass\n else:\n print(\"Failed g_1dev_t2 -- no large group consistency\")\n return False\n \n if(cts==4):\n big_letter = \"S\"\n if(cto==4):\n big_letter = \"O\"\n if(ctn == 4):\n big_letter = \"N\"\n \n for item in by_grps[0]:\n if(item==big_letter):\n print(\"Faield g_1dev_t2 -- a small group member and large group letter are the same\")\n return False\n print(\"Confirmed g_1dev_t2 -- small group with 1 deviancy and large group are different\")\n return True",
"def caculate_network_statistics(self):\n divide_factor_sum = 0 \n for key in self.stars.keys():\n star = self.stars[key]\n if star.nb_num == 0 :\n self.standalone_star_num += 1 \n\n divide_factor = star.nb_num + 2 * (star.spec_num - star.shared_spec_num )/self.ave_starlet_size\n divide_factor_sum += divide_factor\n divide_factor_int = round(divide_factor)\n self.star_divide_factor_dist[divide_factor_int] = self.star_divide_factor_dist.get(divide_factor_int,0) + 1\n if star.spec_num < star.shared_spec_num:\n print(\"!!!!!!!!!!!!Becareful, total spectra No is less than Shared Spectra with starlets\")\n print(\"with star \" + star.id + \" \" + str(star.spec_num) + \"is less than\" + str(star.shared_spec_num))\n if star.spec_num > star.shared_spec_num:\n self.star_lost_spec_num += star.spec_num - star.shared_spec_num\n self.ave_divide_factor_star = divide_factor_sum/self.stars_length\n\n divide_factor_sum = 0 \n for key in self.starlets.keys():\n starlet = self.starlets[key]\n if starlet.nb_num == 0 :\n self.standalone_starlet_num += 1 \n\n divide_factor = starlet.nb_num + 2 * (starlet.spec_num - starlet.shared_spec_num )/self.ave_star_size\n divide_factor_sum += divide_factor\n divide_factor_int = round(divide_factor)\n self.starlet_divide_factor_dist[divide_factor_int] = self.starlet_divide_factor_dist.get(divide_factor_int,0) + 1\n if starlet.spec_num < starlet.shared_spec_num:\n print(\"!!!!!!!!!!!!Becareful, total spectra No is less than Shared Spectra with starlets\")\n print(\"with star \" + starlet.id + \" \" + str(starlet.spec_num) + \"is less than\" + str(starlet.shared_spec_num))\n if starlet.spec_num > starlet.shared_spec_num:\n self.starlet_lost_spec_num += starlet.spec_num - starlet.shared_spec_num\n self.ave_divide_factor_starlet = divide_factor_sum/self.starlets_length",
"def get_GNS_cut_reduced(self):\n # we build the optimization around the casted digraph instead of multidigraph\n # for simplicity\n G = self.base_digraph\n s_1 = self.sources[0]\n s_2 = self.sources[1]\n t_1 = self.destinations[0]\n t_2 = self.destinations[1]\n edges = G.edges()\n nodes = G.nodes()\n\n try:\n\n # Great an gurobi instance of the optimization model\n m = Model(\"GNS\")\n m.setParam('OutputFlag', False)\n\n x_v = {}\n # vertex variables for s_1, t_1 cut\n for v in nodes:\n x_v[v] = m.addVar(vtype=GRB.BINARY)\n\n y_v = {}\n # vertex variables for s_2, t_2 cut\n for v in nodes:\n y_v[v] = m.addVar(vtype=GRB.BINARY)\n\n z_v = {}\n # vertex variables for s_2, t_1 cut\n for v in nodes:\n z_v[v] = m.addVar(vtype=GRB.BINARY)\n\n e = {}\n # GNS indicator variable\n for (u,v) in edges:\n e[u,v] = m.addVar(vtype=GRB.BINARY, obj=G[u][v]['capacity'])\n\n # Done with decision variable creation\n # update model\n m.update()\n\n # Constraints\n # 1. Constraints for s_1 - t_1 cut\n for (u,v) in edges:\n if (u,v) == (s_1, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_1:\n m.addConstr(x_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-x_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(x_v[v] - x_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_2):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(y_v[v] + e[u,v] >= 1)\n elif v == t_2:\n m.addConstr(-y_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(y_v[v] - y_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(z_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-z_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(z_v[v] - z_v[u] + e[u,v] >= 0)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n #print \"Min GNS cut value = \" + str(m.objVal)\n #print \"GNS cut edges:\"\n cut_set_edges = []\n for u,v in edges:\n if e[u,v].x != 0:\n #print (u,v), str(G[u][v]['capacity'])\n cut_set_edges.append((u,v, G[u][v]['capacity']))\n return (m.objVal, cut_set_edges)\n else:\n # something went wrong...err...\n print \"Something was wrong\"\n return None, None\n\n except GurobiError:\n print ('Error report from Gurobi')",
"def test_graphs_threshold_omst_global_cost_efficiency2():\n # the function is optmized at the 3rd OMST, so it is going to yeild the same results\n # as the exhaustive search\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/omst_gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n n_msts = 5\n _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(\n graph, n_msts=n_msts\n )\n\n # Test\n np.testing.assert_array_equal(expected, CIJtree)",
"def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: 
{pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")",
"def test_figure4(self):\n\n topics = get_topics('msmarco-passage-dev-subset')\n qrels = get_qrels('msmarco-passage-dev-subset')\n\n self.assertEqual(len(topics), 6980)\n self.assertEqual(len(qrels), 6980)\n\n # Compute the average length of queries:\n avg_qlen = sum([len(topics[t]['title'].split()) for t in topics])/len(topics)\n\n # Compute the average number of relevance judgments per query:\n avg_qrels = sum([len(qrels[t]) for t in topics])/len(topics)\n\n self.assertAlmostEqual(avg_qlen, 5.925, delta=0.001)\n self.assertAlmostEqual(avg_qrels, 1.065, delta=0.001)",
"def branchNBound2(nationtxt, bound, scheme):\n\n\n nation = nationLoader(nationtxt)\n transmitterCosts = scheme\n\n neighborCount = {}\n for province in nation:\n neighborCount.update({province:len(nation.get(province)[0])})\n\n\n neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__)\n\n #~ neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__, reverse=True)\n\n for key in neighborCountSorted:\n provinces.append(key)\n #~ print provinces\n\n upperbound = bound\n #~ print bound\n\n\n\n solution = []\n\n\n counter = 0\n\n\n\n\n while index >= 0:\n\n counter += 1\n if counter % 100000000 == 0:\n print counter\n print \"Now at:\", nation\n\n\n if index == -1:\n break\n\n # Assign transmitter\n if nation[provinces[index]][1] == numTransmitters:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n else:\n costs, index = updateTransmitter(nation, False, scheme, provinces, costs, index)\n\n # Check if costs are above upper bound\n if (costs + (len(provinces) - (index + 1)) * transmitterCosts[0]) > upperbound:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n # Check if a neighbor has the same transmitter\n conflict = False\n for neighbor in nation[provinces[index]][0]:\n if nation[neighbor][1] == nation[provinces[index]][1]:\n conflict = True\n break\n\n if conflict:\n continue\n\n # Check if a solution is found\n if index == len(provinces) - 1:\n #~ print \"\\nSOLUTION:\"\n if costs < upperbound:\n solution = []\n solution.append(json_deep_copy(nation))\n upperbound = costs\n #~ print \"Score:\", upperbound\n #~ print nation\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n index += 1\n\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n for nation in solution:\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in nation:\n\n if nation[province][1] == 1:\n one += 1\n if nation[province][1] == 2:\n two += 1\n if nation[province][1] == 3:\n three += 1\n if nation[province][1] == 4:\n four += 1\n if nation[province][1] == 5:\n five += 1\n if nation[province][1] == 6:\n six += 1\n if nation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if transmitterCosts[3] != transmitterCosts[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n return counter",
"def specificity():\n\tatlas = 'power'\n\tproject='hcp'\n\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\ttasks = ['REST','WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL',]\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tdf = pd.DataFrame(columns = df_columns)\n\tfor task in tasks:\n\t\tprint task\n\t\t# subjects = np.array(hcp_subjects).copy()\n\t\t# subjects = list(subjects)\n\t\t# subjects = remove_missing_subjects(subjects,task,atlas)\n\t\tsubjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))\n\t\tstatic_results = graph_metrics(subjects,task,atlas,'fz')\n\t\tsubject_pcs = static_results['subject_pcs']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tsubject_mods = static_results['subject_mods']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tmatrices = static_results['matrices']\n\t\t#sum of weight changes for each node, by each node.\n\t\thub_nodes = ['WCD']\n\t\t# hub_nodes = ['PC']\n\t\tdriver_nodes_list = ['Q+','Q-']\n\t\t# driver_nodes_list = ['Q+']\n\t\tmean_pc = np.nanmean(subject_pcs,axis=0)\n\t\tmean_wmd = np.nanmean(subject_wmds,axis=0)\n\t\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\t\tfor i in range(subject_pcs.shape[1]):\n\t\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\t\tmod_wmd_corr = np.zeros(subject_wmds.shape[1])\n\t\tfor i in range(subject_wmds.shape[1]):\n\t\t\tmod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]\n\t\tfor hub_node in hub_nodes:\n\t\t\tif hub_node == 'PC':\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_pc_corr>0.0)[0]\n\t\t\t\tlocal_nodes = np.where(mod_pc_corr<0.0)[0]\n\t\t\telse:\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_wmd_corr>0.0)[0]\n\t\t\t\tlocal_nodes = np.where(mod_wmd_corr<0.0)[0]\n\t\t\tedge_thresh_val = 50.0\n\t\t\tedge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh_val)\n\t\t\tpc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh] = np.nan\n\t\t\tfor driver_nodes in driver_nodes_list:\n\t\t\t\tweight_change_matrix_between = np.zeros((num_nodes,num_nodes))\n\t\t\t\tweight_change_matrix_within = np.zeros((num_nodes,num_nodes))\n\t\t\t\tif driver_nodes == 'Q-':\n\t\t\t\t\tdriver_nodes_array = local_nodes\n\t\t\t\telse:\n\t\t\t\t\tdriver_nodes_array = connector_nodes\n\t\t\t\tfor n1,n2 in permutations(range(num_nodes),2):\n\t\t\t\t\tif n1 not in driver_nodes_array:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[n2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tarray = pc_edge_corr[n1][n2]\n\t\t\t\t\tweight_change_matrix_between[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership!=known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\tweight_change_matrix_within[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership==known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\t# for n3 in range(264):\n\t\t\t\t\t# \tif n1 == n3:\n\t\t\t\t\t# \t\tcontinue\n\t\t\t\t\t# \tif known_membership[n3]!= known_membership[n2]:\n\t\t\t\t\t# \t\tweight_change_matrix_between[n1,n2] = np.nansum([weight_change_matrix_between[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tbetween_len = 
between_len + 1\n\t\t\t\t\t# \telse:\n\t\t\t\t\t# \t\tweight_change_matrix_within[n1,n2] = np.nansum([weight_change_matrix_within[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tcommunity_len = community_len + 1\n\t\t\t\t\t# weight_change_matrix_within[n1,n2] = weight_change_matrix_within[n1,n2] / community_len\n\t\t\t\t\t# weight_change_matrix_between[n1,n2] = weight_change_matrix_between[n1,n2] / between_len\n\t\t\t\ttemp_matrix = np.nanmean(matrices,axis=0)\n\t\t\t\tweight_matrix = weight_change_matrix_within-weight_change_matrix_between\n\t\t\t\tweight_matrix[np.isnan(weight_matrix)] = 0.0\n\t\t\t\tif hub_node == 'PC':\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\t\t\t\telse:\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's WCD & j's Q\"]\n\t\t\t\tdf_array = []\n\t\t\t\tfor i,j in zip(temp_matrix[weight_matrix!=0.0].reshape(-1),weight_matrix[weight_matrix!=0.0].reshape(-1)):\n\t\t\t\t\tdf_array.append([task,hub_node,driver_nodes,i,j])\n\t\t\t\tdf = pd.concat([df,pd.DataFrame(df_array,columns=df_columns)],axis=0)\n\t\t\t\tprint hub_node, driver_nodes\n\t\t\t\tprint pearsonr(weight_matrix[weight_matrix!=0.0].reshape(-1),temp_matrix[weight_matrix!=0.0].reshape(-1))\n\t\t\t\t1/0\n\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqminus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqminus_%s.pdf'%(edge_thresh_val))\n\t# \"\"\"\n\t# Are connector nodes modulating the edges that are most variable across subjects?\n\t# \"\"\"\n\t# atlas='power'\n\t# known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\t# for task in tasks:\n\t# \tpc_thresh = 75\n\t# \tlocal_thresh = 25\n\t# \tsubjects = np.array(hcp_subjects).copy()\n\t# \tsubjects = list(subjects)\n\t# \tsubjects = remove_missing_subjects(subjects,task,atlas)\n\t# \tstatic_results = graph_metrics(subjects,task,atlas)\n\t# \tsubject_pcs = static_results['subject_pcs']\n\t# \tsubject_wmds = static_results['subject_wmds']\n\t# \tmatrices = static_results['matrices']\n\t# \tmatrices[:,np.nanmean(matrices,axis=0)<0.0] = np.nan\n\t# \tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t# \t# pc_edge_corr = pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas))\n\t# \tstd_mod = []\n\t# \ttstd = np.std(matrices,axis=0).reshape(-1)\n\t# \tfor i in range(num_nodes):\n\t# \t\tstd_mod.append(nan_pearsonr(pc_edge_corr[i].reshape(-1),tstd)[0])\n\t# \t# print task, pearsonr(np.nanmean(subject_pcs,axis=0),std_mod)\n\t# \tprint task, 
pearsonr(np.nanmean(subject_wmds,axis=0),std_mod)\n\t# \tplot_corr_matrix(np.std(matrices,axis=0),network_names.copy(),out_file=None,plot_corr=True,return_array=False)",
"def branchNBound(nationtxt, bound, scheme):\n provinces = []\n index = 0\n costs = 0\n numTransmitters = 7\n\n transmitterCosts = scheme\n nation = nationLoader(nationtxt)\n\n\n neighborCount = {}\n for province in nation:\n neighborCount.update({province:len(nation.get(province)[0])})\n\n\n #~ neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__)\n\n neighborCountSorted = sorted(neighborCount, key=neighborCount.__getitem__, reverse=True)\n\n for key in neighborCountSorted:\n provinces.append(key)\n #~ print provinces\n\n upperbound = bound\n #~ print bound\n #~ print bound\n\n\n\n solution = []\n\n\n counter = 0\n\n\n\n\n\n while index >= 0:\n\n\n counter += 1\n if counter % 100000000 == 0:\n print counter\n print \"Now at:\", nation\n\n\n if index == -1:\n break\n\n # Assign transmitter\n if nation[provinces[index]][1] == numTransmitters:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n else:\n costs, index = updateTransmitter(nation, False, scheme, provinces, costs, index)\n\n # Check if costs are above upper bound\n if (costs + (len(provinces) - (index + 1)) * transmitterCosts[0]) > upperbound:\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n # Check if a neighbor has the same transmitter\n conflict = False\n for neighbor in nation[provinces[index]][0]:\n if nation[neighbor][1] == nation[provinces[index]][1]:\n conflict = True\n break\n\n if conflict:\n continue\n\n # Check if a solution is found\n if index == len(provinces) - 1:\n #~ print \"\\nSOLUTION:\"\n if costs < upperbound:\n solution = []\n solution.append(json_deep_copy(nation))\n upperbound = costs\n #~ print \"Score:\", upperbound\n #~ print nation\n costs, index = updateTransmitter(nation, True, scheme, provinces, costs, index)\n continue\n\n index += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n for nation in solution:\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in nation:\n\n if nation[province][1] == 1:\n one += 1\n if nation[province][1] == 2:\n two += 1\n if nation[province][1] == 3:\n three += 1\n if nation[province][1] == 4:\n four += 1\n if nation[province][1] == 5:\n five += 1\n if nation[province][1] == 6:\n six += 1\n if nation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if transmitterCosts[3] != transmitterCosts[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n return fivePlus, fivePlusNoDuplicate, usedTrans, upperbound, len(solution), counter\n #~ f.write(\"\\n Used Transmitters: \"+ str(one)+\" \"+ str(two)+\" \"+ str(three)+\" \"+ str(four)+\" \"+ str(five)+\" \"+ str(six)+\" \"+ str(seven)+\"\\n Cost: \"+str(upperbound)+\"\\n Number of solutions: \"+str(len(solution))+\"\\n Iterations: \"+str(counter)+\"\\n\"+\"\\n\"+\"\\n\"+\"\\n\")\n\n #~ print \"transmitter frequecies:\", one, two, three, four, five, six, seven\n #~ print \"Solutions:\", solution\n #~ print \"Cost:\", upperbound\n #~ print \"Number of solutions:\", len(solution)\n #~ print \"Iterations:\", counter",
"def test_graphs_threshold_omst_global_cost_efficiency():\n # the function is optmized at the 3rd OMST.\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/omst_gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(\n graph, n_msts=None\n )\n\n # Test\n np.testing.assert_array_equal(expected, CIJtree)",
"def compute_district_weights(dist_changes, elec_sets, elec_set_dict, state_gdf, partition, prec_draws_outcomes,\\\n geo_id, primary_elecs, runoff_elecs, elec_match_dict, bases, outcomes,\\\n recency_W1, cand_race_dict, min_cand_weights_dict):\n \n black_pref_cands_prim_dist = pd.DataFrame(columns = dist_changes)\n black_pref_cands_prim_dist[\"Election Set\"] = elec_sets\n hisp_pref_cands_prim_dist = pd.DataFrame(columns = dist_changes)\n hisp_pref_cands_prim_dist[\"Election Set\"] = elec_sets\n #store runoff preferences for instances where minority-preferred candidate needs to switch between primary and runoff\n black_pref_cands_runoffs_dist = pd.DataFrame(columns = dist_changes)\n black_pref_cands_runoffs_dist[\"Election Set\"] = elec_sets\n hisp_pref_cands_runoffs_dist = pd.DataFrame(columns = dist_changes)\n hisp_pref_cands_runoffs_dist[\"Election Set\"] = elec_sets \n \n black_conf_W3_dist = np.empty((len(elec_sets),0), float)\n hisp_conf_W3_dist = np.empty((len(elec_sets),0), float)\n neither_conf_W3_dist = np.empty((len(elec_sets),0), float)\n \n for district in dist_changes: \n state_gdf[\"New Map\"] = state_gdf.index.map(dict(partition.assignment))\n dist_prec_list = list(state_gdf[state_gdf[\"New Map\"] == district][geo_id])\n dist_prec_indices = state_gdf.index[state_gdf[geo_id].isin(dist_prec_list)].tolist()\n district_support_all = cand_pref_outcome_sum(prec_draws_outcomes, dist_prec_indices, bases, outcomes)\n \n black_pref_prob_single_dist = []\n hisp_pref_prob_single_dist = []\n \n for elec_set in elec_sets: \n HCVAP_support_elec = district_support_all[('HCVAP', elec_set_dict[elec_set]['Primary'])]\n hisp_pref_cand_dist = max(HCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n hisp_pref_prob_dist = HCVAP_support_elec[hisp_pref_cand_dist]\n hisp_pref_prob_single_dist.append(hisp_pref_prob_dist) \n \n BCVAP_support_elec = district_support_all[('BCVAP', elec_set_dict[elec_set]['Primary'])]\n black_pref_cand_dist = max(BCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n black_pref_prob_dist = BCVAP_support_elec[black_pref_cand_dist]\n black_pref_prob_single_dist.append(black_pref_prob_dist)\n \n black_pref_cands_prim_dist.at[black_pref_cands_prim_dist[\"Election Set\"] == elec_set, district] = black_pref_cand_dist\n hisp_pref_cands_prim_dist.at[hisp_pref_cands_prim_dist[\"Election Set\"] == elec_set, district] = hisp_pref_cand_dist \n \n if 'Runoff' in elec_set_dict[elec_set].keys():\n HCVAP_support_elec = district_support_all[('HCVAP', elec_set_dict[elec_set]['Runoff'])]\n hisp_pref_cand_dist = max(HCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n hisp_pref_cands_runoffs_dist.at[hisp_pref_cands_runoffs_dist[\"Election Set\"] == elec_set, district] = hisp_pref_cand_dist\n \n BCVAP_support_elec = district_support_all[('BCVAP', elec_set_dict[elec_set]['Runoff'])]\n black_pref_cand_dist = max(BCVAP_support_elec.items(), key=operator.itemgetter(1))[0] \n black_pref_cands_runoffs_dist.at[black_pref_cands_runoffs_dist[\"Election Set\"] == elec_set, district] = black_pref_cand_dist\n \n black_pref_conf_single_dist = [prob_conf_conversion(x) for x in black_pref_prob_single_dist]\n black_conf_W3_dist = np.append(black_conf_W3_dist, np.array([black_pref_conf_single_dist]).transpose(), axis = 1) \n \n hisp_pref_conf_single_dist = [prob_conf_conversion(x) for x in hisp_pref_prob_single_dist]\n hisp_conf_W3_dist = np.append(hisp_conf_W3_dist, np.array([hisp_pref_conf_single_dist]).transpose(), axis = 1) \n \n neither_pref_conf_single_dist = 
[prob_conf_conversion(x*y) for x,y in zip(black_pref_prob_single_dist,hisp_pref_prob_single_dist)]\n neither_conf_W3_dist = np.append(neither_conf_W3_dist, np.array([neither_pref_conf_single_dist]).transpose(), axis = 1) \n \n #compute W2 (\"in-group\"-minority-preference weight) \n min_cand_black_W2_dist, min_cand_hisp_W2_dist, min_cand_neither_W2_dist = compute_W2(elec_sets, \\\n dist_changes, min_cand_weights_dict, black_pref_cands_prim_dist, hisp_pref_cands_prim_dist, cand_race_dict)\n ################################################################################ \n #compute final election weights per district\n recency_W1 = recency_W1.copy()[:, dist_changes]\n black_weight_dist = recency_W1*min_cand_black_W2_dist*black_conf_W3_dist\n hisp_weight_dist = recency_W1*min_cand_hisp_W2_dist*hisp_conf_W3_dist \n neither_weight_dist = recency_W1*min_cand_neither_W2_dist*neither_conf_W3_dist\n \n return black_weight_dist, hisp_weight_dist, neither_weight_dist, black_pref_cands_prim_dist, \\\n black_pref_cands_runoffs_dist, hisp_pref_cands_prim_dist, hisp_pref_cands_runoffs_dist",
"def test_local_efficiency_disconnected_graph(self):\n assert_equal(nx.local_efficiency(self.G1), 0)",
"def fit_score(self, solution):\r\n illegal_neighbours = 0\r\n legal_neighbours = 0\r\n for polygon in solution.genetic_units.values():\r\n for neighbour_id in polygon.neighbours_ids:\r\n if polygon.color is solution.genetic_units[neighbour_id].color:\r\n illegal_neighbours += 1\r\n else:\r\n legal_neighbours += 1\r\n if self.sorting_order is ScoresSortingOrder.ASCENDING:\r\n return illegal_neighbours\r\n else:\r\n return legal_neighbours",
"def _majority(data_set):\r\n pair = _count_parties(data_set)\r\n democrats = pair[0]\r\n republicans = pair[1]\r\n if democrats > republicans: return \"D\"\r\n if democrats < republicans: return \"R\"\r\n else: return None",
"def determine_measure_position(self):\n green_probs = []\n net_size = len(self.net)\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in range(0, net_size):\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.net[i].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.net[i].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.net[i].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.net[i].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.net[i].value * self.ct[4][0]\n green_probs.append(accum)\n #Returns the position in which the probability of\n #obtaining green when measuring is the highest.\n return self.net[np.argmax(green_probs)].id",
"def get_communities(num_of_neighbors, is_self_loops, relevant_period_groups, full_confusion_csv, classes_csv_file, priod_group_column, similarty_csv = ''):\n\n # generate class_names dict\n cnt = 0\n class_name_dict = {}\n with open(classes_csv_file, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if cnt > 0:\n class_name_dict[int(row[8])] = row[1]\n cnt = cnt + 1\n\n\n full_conf = np.genfromtxt(full_confusion_csv, delimiter=',')\n relevant_conf = full_conf[:,:num_of_neighbors+1]\n flatten_conf = np.zeros((relevant_conf.shape[0]*num_of_neighbors,2), dtype=np.int32)\n if similarty_csv != '':\n similarity_mat = np.genfromtxt(similarty_csv, delimiter=',')\n similarity_conf_mat = np.zeros((200, 200), dtype=np.float32)\n print(similarity_mat.shape)\n\n\n row = 0\n for k in range(relevant_conf.shape[0]):\n for m in range(num_of_neighbors):\n flatten_conf[row, 0] = relevant_conf[k,0]\n flatten_conf[row,1] = relevant_conf[k,m+1]\n if similarty_csv != '':\n similarity_conf_mat[int(relevant_conf[k,0]), int(relevant_conf[k,m+1]) ] += similarity_mat[k, m]\n\n row = row + 1\n\n confusion_mat = confusion_matrix(flatten_conf[:,0], flatten_conf[:,1])\n if similarty_csv != '':\n confusion_mat = similarity_conf_mat\n\n confusion_mat = confusion_mat.astype('float') / confusion_mat.sum(axis=1)[:, np.newaxis]\n symmetric_confusion = (confusion_mat + np.transpose(confusion_mat)) / 2\n if not is_self_loops:\n np.fill_diagonal(symmetric_confusion, 0)\n\n # taking only the relevant classes\n if relevant_period_groups != -1:\n df = pd.read_csv(classes_csv_file)\n period_groups = df[priod_group_column]\n relevant_classes = []\n for group in relevant_period_groups:\n group_slice = df[period_groups == group]\n relevant_classes.extend(group_slice['id_period_sorted'].values)\n\n L = len(relevant_classes)\n relevant_confusion = np.zeros((L,L), dtype=np.float32)\n class_node_dict = {}\n for m,cls_i in enumerate(relevant_classes):\n class_node_dict[m] = cls_i\n for n,cls_j in enumerate(relevant_classes):\n relevant_confusion[m,n] = symmetric_confusion[cls_i,cls_j]\n else:\n relevant_confusion = symmetric_confusion\n\n G = nx.from_numpy_matrix(relevant_confusion)\n\n # find best communities based on modularity grade\n resolution_vec = np.linspace(0.0,2,50)\n mod_vec = np.zeros_like(resolution_vec)\n best_modularity = -1\n best_communities = -1\n best_res = -1\n for k in range(resolution_vec.size):\n partition = community.best_partition(G, weight='weight', resolution=resolution_vec[k])\n modularity = community.modularity(partition, G, weight='weight')\n mod_vec[k] = modularity\n if (modularity > best_modularity):\n best_modularity = modularity\n best_communities = partition\n best_res = resolution_vec[k]\n\n summary_str = 'best resolution: %.3f\\nbest modularity: %.3f\\nnumber of communities: %d' % (best_res,best_modularity,len(set(best_communities.values())))\n\n #plt.plot(resolution_vec,mod_vec)\n #plt.show()\n\n # generate community summary file\n count = 0\n strr = ''\n summary_file_name = 'community_summary.csv'\n for com in set(best_communities.values()):\n count += 1.\n list_nodes = [nodes for nodes in best_communities.keys() if best_communities[nodes] == com]\n strr += 'community,' + str(com) + '\\n'\n for nd in list_nodes:\n if relevant_period_groups == -1:\n strr += class_name_dict[nd] + ',id,' + str(nd) + '\\n'\n else:\n strr += class_name_dict[class_node_dict[nd]] + ',id,' + str(class_node_dict[nd]) + '\\n'\n strr += '\\n'\n with open(summary_file_name, \"w\") as text_file:\n text_file.write(strr)\n\n 
print(strr)\n # summary for map visualization tool\n strr = ''\n for k in range(relevant_confusion.shape[0]):\n comm = partition[k]\n comm_members = [nodes for nodes in partition.keys() if partition[nodes] == comm]\n if relevant_period_groups == -1:\n strr += 'id,' + str(k) + ',community,' + str(comm) + ',community_members,'\n else:\n strr += 'id,' + str(class_node_dict[k]) + ',community,' + str(comm) + ',community_members,'\n for member in comm_members:\n if relevant_period_groups == -1:\n strr += str(member) + ','\n else:\n strr += str(class_node_dict[member]) + ','\n strr += '\\n'\n with open('nodes_communities.csv', \"w\") as text_file:\n text_file.write(strr)\n\n\n\n return summary_str",
"def get_rank(cutoff: dict, coverage: float, quality: float, length: int, contigs: int, genome_size: int, is_paired: bool) -> list:\n rank = None\n reason = []\n coverage = float(f'{float(coverage):.2f}')\n quality = float(f'{float(quality):.2f}')\n length = round(float(f'{float(length):.2f}'))\n contigs = int(contigs)\n genome_size = int(genome_size)\n gold = cutoff['gold']\n silver = cutoff['silver']\n bronze = cutoff['bronze']\n\n if coverage >= gold['coverage'] and quality >= gold['quality'] and length >= gold['length'] and contigs <= gold['contigs'] and is_paired:\n reason.append('passed all cutoffs')\n rank = 'gold'\n elif coverage >= silver['coverage'] and quality >= silver['quality'] and length >= silver['length'] and contigs <= silver['contigs'] and is_paired:\n if coverage < gold['coverage']:\n reason.append(f\"Low coverage ({coverage:.2f}x, expect >= {gold['coverage']}x)\")\n if quality < gold['quality']:\n reason.append(f\"Poor read quality (Q{quality:.2f}, expect >= Q{gold['quality']})\")\n if length < gold['length']:\n reason.append(f\"Short read length ({length}bp, expect >= {gold['length']} bp)\")\n if contigs > gold['contigs']:\n reason.append(f\"Too many contigs ({contigs}, expect <= {gold['contigs']})\")\n rank = 'silver'\n elif coverage >= bronze['coverage'] and quality >= bronze['quality'] and length >= bronze['length'] and contigs <= bronze['contigs']:\n if coverage < silver['coverage']:\n reason.append(f\"Low coverage ({coverage:.2f}x, expect >= {silver['coverage']}x)\")\n if quality < silver['quality']:\n reason.append(f\"Poor read quality (Q{quality:.2f}, expect >= Q{silver['quality']})\")\n if length < silver['length']:\n reason.append(f\"Short read length ({length}bp, expect >= {silver['length']} bp)\")\n if contigs > silver['contigs']:\n reason.append(f\"Too many contigs ({contigs}, expect <= {silver['contigs']})\")\n if not is_paired:\n reason.append(f\"Single-end reads\")\n rank = 'bronze'\n\n if not rank:\n rank = 'exclude'\n\n if coverage < bronze['coverage']:\n reason.append(f\"Low coverage ({coverage:.2f}x, expect >= {bronze['coverage']}x)\")\n if quality < bronze['quality']:\n reason.append(f\"Poor read quality (Q{quality:.2f}, expect >= Q{bronze['quality']})\")\n if length < bronze['length']:\n reason.append(f\"Short read length ({length:.2f}bp, expect >= {bronze['length']} bp)\")\n if contigs > bronze['contigs']:\n reason.append(f\"Too many contigs ({contigs}, expect <= {bronze['contigs']})\")\n\n if cutoff['min-assembled-size']:\n if genome_size < cutoff['min-assembled-size']:\n reason.append(f\"Assembled size is too small ({genome_size} bp, expect <= {cutoff['min-assembled-size']})\")\n\n if cutoff['max-assembled-size']:\n if genome_size < cutoff['max-assembled-size']:\n reason.append(f\"Assembled size is too large ({genome_size} bp, expect <= {cutoff['max-assembled-size']})\")\n\n reason = \";\".join(sorted(reason))\n return [rank, reason]",
"def check_all_nodes_covered(G, solution_set):\n \n # List of nodes\n nodes_in_graph = list(G.nodes())\n \n # Nodes in solution_set\n edges_in_solution_set = [x for l in solution_set for x in l]\n nodes_in_solution_set = list(set(itertools.chain.from_iterable(edges_in_solution_set)))\n \n # Check for completeness\n for node in nodes_in_graph:\n if node not in nodes_in_solution_set:\n return False, [node]\n \n return True, []",
"def calc_cohesion( g, sg0, sg1, max_csize ) :\n score = 0.0\n n0 = len( sg0 )\n n1 = len( sg1 )\n if (n0 + n1 <= max_csize) :\n boundary_edges = networkx.edge_boundary( g, sg0, sg1 )\n for e in boundary_edges :\n score += g[e[0]][e[1]][\"similarity\"]\n return score / max( n0, n1 )",
"def most_discriminating( features_df, labels_df, top=5):\n \n columns = features_df.shape[1]\n labels_df = labels_df[['file', 'candy_id']].set_index('file')\n qualities = np.zeros(columns)\n \n _left = 0\n _right = 1\n\n _c = 0\n _h = 1\n\n # globals\n cases = float(labels_df['candy_id'].count()) # total cases\n\n p_c_A = (labels_df['candy_id'] == 0).sum() / cases\n p_h_A = 1.0 - p_c_A\n\n\n for feature in range(columns):\n\n branch_cases = np.zeros(2) # total on each branch\n pi = np.zeros(2) # proportion on each branch\n\n split = np.array([\n #c, h\n [0, 0], #left\n [0, 0] #right\n ])\n\n for index, value in features_df[feature].iteritems():\n split[value][labels_df.loc[index][0]] += 1\n\n branch_cases[_left] = split[_left].sum()\n branch_cases[_right] = split[_right].sum()\n \n if branch_cases[_left] == 0.0 or branch_cases[_right] == 0.0:\n qualities[feature] = 0\n continue\n \n pi[_left] = branch_cases[_left] / cases\n pi[_right] = branch_cases[_right] / cases\n\n p_c_B = split[_left][_c] / branch_cases[_left]\n p_h_B = split[_left][_h] / branch_cases[_left]\n\n p_c_C = split[_right][_c] / branch_cases[_right]\n p_h_C = split[_right][_h] / branch_cases[_right]\n\n gini_tree = 1.0 - (math.pow(p_c_A, 2) + math.pow(p_h_A, 2))\n\n gini_left = 1.0 - (math.pow(p_c_B, 2) + math.pow(p_h_B, 2))\n gini_right = 1.0 - (math.pow(p_c_C, 2) + math.pow(p_h_C, 2))\n\n quality = gini_tree - pi[_left] * gini_left - pi[_right] * gini_right\n\n qualities[feature] = quality\n return list(reversed(qualities.argsort()))[:top]",
"def test_edges_and_weights():\n edges, weights, state_value = edges_weights_specified_state(\n PROBABILITYMATRIX,\n MARKET,\n SNULL)\n assert len(edges) == len(weights) == (len(state_value)**2)",
"def test_efficiency_disconnected_nodes(self):\n assert_equal(nx.efficiency(self.G1, 1, 2), 0)",
"def matching_score(self,set1, set2):\n set_set1=set(set1)\n set_set2=set(set2)\n '''print(\" set_set12\")\n print(set_set1)\n print(set_set2)'''\n return len(set_set1.intersection(set_set2)) ** 2 / (float(len(set1)) * len(set2))\n #return len(set_set1.intersection(set_set2)) / len(set_set1.union(set_set2))",
"def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result"
] | [
"0.63994",
"0.58635193",
"0.5778622",
"0.5743064",
"0.55880827",
"0.555476",
"0.53169554",
"0.52722627",
"0.52660155",
"0.52518225",
"0.52481997",
"0.52412534",
"0.5223429",
"0.5208664",
"0.5191661",
"0.5173846",
"0.51651204",
"0.51604426",
"0.51279247",
"0.51213825",
"0.5113577",
"0.51023996",
"0.5090531",
"0.50895363",
"0.50890976",
"0.5086639",
"0.50231844",
"0.5019312",
"0.50101745",
"0.50064147"
] | 0.75630814 | 0 |
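A minimal usage sketch for the community_quality record above. It assumes numpy is imported as np and that community_quality is already defined as in that record; the toy spanset, gapset, and communities values are illustrative and are not taken from the dataset.

import numpy as np  # community_quality relies on np.argmax internally

# Hypothetical ground truth: nodes 0-3 form the spanset, nodes 4-7 the gapset.
spanset = [0, 1, 2, 3]
gapset = [4, 5, 6, 7]

# A detected two-community partition that misplaces node 3 into the gap-side community.
communities = [[0, 1, 2], [3, 4, 5, 6, 7]]

# Community 0 matches the spanset (0 stray nodes) and community 1 matches the
# gapset (1 stray node), so the reported number of wrong nodes is 0 + 1 = 1.
print(community_quality(communities, spanset, gapset))  # -> 1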
Create a producer. Calls pykafka.topic.Topic.get_producer to create the producer. Returns pykafka.producer.Producer. | def create(self):
topic = self.__conn__.create_topic(self.__topic__)
return topic.get_producer(*self.__args__, **self.__kargs__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_producer(self, topic_id: str) -> Producer:\n backend = None\n if self.vendor == 'kafka':\n backend = KafkaClient(topic_id, self.configs['kafka_servers'])\n else:\n project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n subscription_id = os.getenv(\"GOOGLE_PUBSUB_SUB_ID\")\n backend = GooglePubSubClient(project_id=project_id, topic=topic_id,\n subscription_id=subscription_id, gcp_configs=self.configs)\n\n return Producer(backend)",
"def connect_kafka_producer():\n producer = KafkaProducer(bootstrap_servers='0.0.0.0:9092', value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n return producer",
"def get_producer(conf_settings=None, address=Config.INSIGHTS_KAFKA_ADDRESS): # pragma: no cover\n if conf_settings is None:\n conf_settings = {}\n conf = _get_producer_config(address, conf_settings)\n return ProducerSingleton(conf)",
"def connect_kafka_producer():\n _producer = None\n kafka_url = 'localhost:9092'\n try:\n _producer = KafkaProducer(bootstrap_servers=kafka_url,\n value_serializer=lambda value: json.dumps(value).encode())\n except Exception as ex:\n print('Exception while connecting to Kafka..')\n print(str(ex))\n finally:\n return _producer",
"def create_sender(self):\n sender = kafka.KafkaProducer(bootstrap_servers=['%s:%s' % (self._host, self._port)])\n return sender",
"def kafka_producer_factory(kafka_broker, request):\n _producer = [None]\n\n def factory(**kafka_producer_params):\n params = {} if kafka_producer_params is None else kafka_producer_params.copy()\n params.setdefault('client_id', 'producer_%s' % (request.node.name,))\n _producer[0] = next(kafka_broker.get_producers(cnt=1, **params))\n return _producer[0]\n\n yield factory\n\n if _producer[0]:\n _producer[0].close()",
"def producer(self):\n return Producer(app=self.app, client=self.client)",
"def _create_producer(self, yaml_producer):\n producer = None\n if \"args\" in yaml_producer:\n producer = partial(\n self.data_factory.get_producer(\n function_name=yaml_producer[\"function_name\"]\n ),\n **yaml_producer[\"args\"]\n )\n else:\n producer = partial(\n self.data_factory.get_producer(\n function_name=yaml_producer[\"function_name\"]\n )\n )\n return producer",
"def connect(self):\n # First close any existing producer if already connected\n if hasattr(self, 'producer') and getattr(self, 'connected', False) == True:\n try:\n self.producer.close()\n except Exception:\n pass\n # Create the producer\n try:\n self.producer = self.client.create_producer(self.control_topic, schema=pulsar.schema.StringSchema(),\n block_if_queue_full=True)\n self.connected = True\n except Exception as e:\n logger.warn('Cannot connect a producer to publish commands ' + str(e))\n self.connected = False",
"def kafka_producer(kafka_producer_factory):\n yield kafka_producer_factory()",
"def test_producer(self):\n try:\n producer = Producer()\n producer.send()\n except (Exception) as error:\n logging.error(\"\\n\\nProducer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)",
"def kafka_connect(self):\n self.kf_producer = KafkaProducer(bootstrap_servers=self.kf_servers)",
"def create_producer(self, stream, isCollectionStream=False, local=False, producer_name=None,\n initial_sequence_id=None, send_timeout_millis=30000,\n compression_type=COMPRESSION_TYPES.NONE,\n max_pending_messages=1000,\n batching_enabled=False,\n batching_max_messages=1000,\n batching_max_allowed_size_in_bytes=131072,\n batching_max_publish_delay_ms=10,\n message_routing_mode=ROUTING_MODE.ROUND_ROBIN_PARTITION\n ):\n if isCollectionStream is False:\n if local is True:\n type_constant = constants.STREAM_LOCAL_NS_PREFIX\n elif local is False:\n type_constant = constants.STREAM_GLOBAL_NS_PREFIX\n\n stream = type_constant.replace(\".\", \"\")+\"s.\"+stream\n elif isCollectionStream is False:\n stream = stream\n print(\"Calling has steram from create_producer: \", stream, local)\n flag = self.fabric.has_stream(stream, local=local, isCollectionStream=isCollectionStream)\n if flag:\n namespace = type_constant + self.fabric_name\n topic = \"producer/persistent/%s/%s/%s\" % (self.tenant_name, namespace,\n stream)\n params = {\n \"producerName\":producer_name,\n \"initialSequenceId\":initial_sequence_id,\n \"sendTimeoutMillis\":send_timeout_millis,\n \"compressionType\":compression_type,\n \"maxPendingMessages\":max_pending_messages,\n \"batchingEnabled\":batching_enabled,\n \"batchingMaxMessages\":batching_max_messages,\n \"batchingMaxPublishDelay\":batching_max_publish_delay_ms,\n \"messageRoutingMode\":message_routing_mode\n }\n\n params = {k: v for k, v in params.items() if v is not None}\n url = self._ws_url + topic \n print(url)\n return websocket.create_connection(url, header={'Authorization' : self.header['Authorization']}, class_=Base64Socket)\n\n raise ex.StreamProducerError(\n \"No stream present with name:\" + stream +\n \". Please create a stream and then stream producer\"\n )",
"def producer(self, topic, msg, e=None):\n producer = KafkaProducer(bootstrap_servers=['HOST_IP', 'HOST_IP', 'HOST_IP']\n ,api_version=(2, 2, 1),security_protocol='SSL',\n ssl_check_hostname=True,\n ssl_cafile='/home/oulu/certs/ca-cert',\n ssl_certfile='/home/oulu/certs/cutler-p3-c1-00.crt',\n ssl_keyfile='/home/oulu/certs/cutler-p3-c1-00.key')\n\n msg_b = str.encode(msg)\n producer.send(topic, msg_b).get(timeout=30)\n\n if (e):\n logging.exception('exception happened')",
"def __init__(self,\n hostname,\n port,\n topic_name,\n level=logging.NOTSET,\n _producer_class=None):\n\n super().__init__(level=level)\n\n if _producer_class is None:\n _producer_class = kafka.KafkaProducer\n\n class KafkaProducer(_producer_class):\n isend = functools.partialmethod(func=kafka.KafkaProducer.send,\n topic=topic_name)\n\n self._producer = KafkaProducer(\n bootstrap_servers=[hostname + ':' + str(port)],\n value_serializer=self._serialize_value)",
"def Connect_Producer(server_address): \n producer = None\n try:\n producer = KafkaProducer(bootstrap_servers=server_address, api_version=(0, 10))\n except Exception as ex:\n print('Exception while connecting Kafka')\n print(str(ex))\n finally:\n return(producer)",
"def getProducer():\r\n\r\n # get the config and a producer\r\n config = ecommerce.config.getConfig()\r\n return ecommerce.queue.queue(config, queuePrefix)",
"def registerProducer(producer, streaming):\n pass",
"def setup_own_producer(self) -> None:\n producer = Producer(self.name, self.call, self.input, self.output, self.scopes)\n for scope in self.scopes:\n self.producers[scope].append(producer)",
"def produce(self):\r\n print(\"Artist checking if Producer is available...\")\r\n\r\n if self.occupied == 'No':\r\n #If the producer is available, create a producer objects\r\n self.producer = Producer()\r\n time.sleep(2)\r\n # make the prducer meet the guest!\r\n self.producer.meet()\r\n\r\n else:\r\n #Otherwise, dont instanciate a producer\r\n time.sleep(2)\r\n print(\"Producer es busy!\")",
"def registerProducer(producer, streaming):",
"def get_publisher():\n return Publisher(\n topic=os.environ[\"TOPIC\"],\n **get_kafka_connection_params(),\n )",
"def verify_producer():\n\n # Producer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'error_cb': error_cb,\n 'api.version.request': api_version_request,\n 'default.topic.config': {'produce.offset.report': True}}\n\n # Create producer\n p = confluent_kafka.Producer(**conf)\n print('producer at %s' % p)\n\n headers = [('foo1', 'bar'), ('foo1', 'bar2'), ('foo2', b'1')]\n\n # Produce some messages\n p.produce(topic, 'Hello Python!', headers=headers)\n p.produce(topic, key='Just a key and headers', headers=headers)\n p.produce(topic, key='Just a key')\n p.produce(topic, partition=1, value='Strictly for partition 1',\n key='mykey', headers=headers)\n\n # Produce more messages, now with delivery report callbacks in various forms.\n mydr = MyTestDr()\n p.produce(topic, value='This one has a dr callback',\n callback=mydr.delivery)\n p.produce(topic, value='This one has a lambda',\n callback=lambda err, msg: MyTestDr._delivery(err, msg))\n p.produce(topic, value='This one has neither')\n\n # Try producing with a timestamp\n try:\n p.produce(topic, value='with a timestamp', timestamp=123456789000)\n except NotImplementedError:\n if confluent_kafka.libversion()[1] >= 0x00090400:\n raise\n\n # Produce even more messages\n for i in range(0, 10):\n p.produce(topic, value='Message #%d' % i, key=str(i),\n callback=mydr.delivery)\n p.poll(0)\n\n print('Waiting for %d messages to be delivered' % len(p))\n\n # Block until all messages are delivered/failed\n p.flush()\n\n #\n # Additional isolated tests\n #\n test_producer_dr_only_error()",
"def producer():\n\n connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit'))\n channel = connection.channel()\n\n channel.queue_declare(queue=QUEUE_NAME)\n\n # Create two unique device ids to provide more example data\n timestamp = arrow.now().timestamp\n device_name = b'A' if timestamp % 2 == 0 else b'B'\n '''\n This creates the same hash value each time so we can use the Raspberry Pi\n serial number to create a unique ID for each device\n '''\n device_id = hashlib.sha1(device_name).hexdigest()\n\n # Currently a python dict\n data = {\n 'device_id': device_id,\n 'timestamp': timestamp,\n 'data': {\n 'key': 'value'\n }\n }\n\n channel.basic_publish(exchange='',\n routing_key=QUEUE_NAME,\n body=json.dumps(data)) # Encode as a JSON string\n msg = f' [x] Sent {data}'\n print(msg)\n logging.info(msg)\n connection.close()",
"def push_with_producer(self, producer):\n self.producer_fifo.append(producer)\n self.initiate_send()",
"def create_data_producer(name='', description=''):\n pass",
"async def start(self) -> None:\n while self.producer is None:\n try:\n self.producer = self._producer_factory(\n bootstrap_servers=self.bootstrap_servers,\n ssl_cafile=self.ssl_cafile,\n ssl_certfile=self.ssl_certfile,\n ssl_keyfile=self.ssl_keyfile,\n security_protocol='SSL',\n value_serializer=lambda v: json.dumps(v).encode('utf-8'),\n )\n except kafka.errors.NoBrokersAvailable:\n await trio.sleep(self.connect_interval_secs)\n else:\n logger.info('kafka-ready: %s', self.producer)\n async with self.has_producer:\n self.has_producer.notify_all()",
"async def setup(self, **kwargs):\n async with self.lock:\n # make configuration is locked so multiple tasks can't attempt\n if self.is_ready:\n return\n self.config = {**{\n 'bootstrap_servers': app_settings['kafka']['brokers'],\n 'value_serializer': lambda data: json.dumps(data).encode('utf-8')\n }, **kwargs}\n self.config.setdefault(\n 'loop', self.loop or asyncio.get_event_loop())\n if self.producer is None:\n producer = AIOKafkaProducer(**self.config)\n await producer.start()\n # delay setting the value until after the producer object\n # is setup; otherwise, other async tasks will attempt\n # to use this object before it is ready and get errors\n self.producer = producer\n return self.producer",
"def produce():\n # argument parsing\n args = parse_args()\n broker = args.broker_host + ':9092'\n topic = args.kafka_topic\n print 'Starting up ... Broker: ' + broker\n # connect to Kafka\n producer = KafkaProducer(bootstrap_servers=broker)\n counter = 1\n while True:\n # send messages\n for user in users:\n user_activity = generate_activity(user)\n producer.send(topic, user_activity)\n print 'Message ' + str(counter) + ' send...'\n time.sleep(0.5)\n counter += 1",
"def as_producer(func):\n\n # only decorate generator functions\n if not inspect.isgeneratorfunction(func):\n msg = 'as_producer requires a generating function not {}'\n raise TypeError(msg.format(type(func)))\n\n @functools.wraps(func)\n def decorated(pro, *args, **kwargs):\n \"\"\"Returns a producer using values from generating func.\"\"\"\n\n if not isinstance(pro, Producer):\n msg = (\"First positional argument of decorated function\"\n \" must be of type {} not {}\")\n msg.format('Producer', type(pro))\n\n genfunc = functools.partial(func, pro, *args, **kwargs)\n return producer(genfunc, pro.chunksize, pro.axis, shape=pro.shape)\n\n return decorated"
] | [
"0.77851456",
"0.72549456",
"0.6957942",
"0.6930621",
"0.69090855",
"0.68014026",
"0.66531676",
"0.66403145",
"0.6623068",
"0.6583257",
"0.6494753",
"0.6437796",
"0.63862807",
"0.6300328",
"0.6213889",
"0.6204735",
"0.60279506",
"0.59126776",
"0.58620876",
"0.58424014",
"0.58340997",
"0.5735275",
"0.57207274",
"0.5711068",
"0.5687609",
"0.5684477",
"0.56753695",
"0.55081904",
"0.54829705",
"0.53941125"
] | 0.77106583 | 1 |
Check that broken __unicode__/__str__ actually raises an error. | def test_force_text_exception(self):
class MyString(object):
def __str__(self):
return b'\xc3\xb6\xc3\xa4\xc3\xbc'
__unicode__ = __str__
# str(s) raises a TypeError on python 3 if the result is not a text type.
# python 2 fails when it tries converting from str to unicode (via ASCII).
exception = TypeError if six.PY3 else UnicodeError
self.assertRaises(exception, force_text, MyString()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_unicode(data, err=\"Input not valid unicode\"):\n try:\n if not isinstance(data, str) and not isinstance(data, str):\n raise UnicodeError(err)\n # In some cases we pass the above, but it's still inappropriate utf-8.\n str(data)\n except UnicodeError:\n raise UnicodeError(err) # lint-amnesty, pylint: disable=raise-missing-from",
"def test_to_unicode_raises_on_non_string():\n with pytest.raises(TypeError):\n to_unicode(999)",
"def test_unicode_warnings(self):\n\n unicodedata = u\"Alors vous imaginez ma surprise, au lever du jour, quand \"\\\n u\"une drôle de petit voix m’a réveillé. \"\\\n u\"Elle disait: « S’il vous plaît… dessine-moi un mouton! »\"\n\n u = Unicode()\n uni = u.dialect_impl(testing.db.dialect).bind_processor(testing.db.dialect)\n if testing.db.dialect.supports_unicode_binds:\n # Py3K\n #assert_raises(exc.SAWarning, uni, b'x')\n #assert isinstance(uni(unicodedata), str)\n # Py2K\n assert_raises(exc.SAWarning, uni, 'x')\n assert isinstance(uni(unicodedata), unicode)\n # end Py2K\n\n eq_(uni(unicodedata), unicodedata)\n else:\n # Py3K\n #assert_raises(exc.SAWarning, uni, b'x')\n #assert isinstance(uni(unicodedata), bytes)\n # Py2K\n assert_raises(exc.SAWarning, uni, 'x')\n assert isinstance(uni(unicodedata), str)\n # end Py2K\n \n eq_(uni(unicodedata), unicodedata.encode('utf-8'))\n \n unicode_engine = engines.utf8_engine(options={'convert_unicode':True,})\n unicode_engine.dialect.supports_unicode_binds = False\n \n s = String()\n uni = s.dialect_impl(unicode_engine.dialect).bind_processor(unicode_engine.dialect)\n # Py3K\n #assert_raises(exc.SAWarning, uni, b'x')\n #assert isinstance(uni(unicodedata), bytes)\n # Py2K\n assert_raises(exc.SAWarning, uni, 'x')\n assert isinstance(uni(unicodedata), str)\n # end Py2K\n \n eq_(uni(unicodedata), unicodedata.encode('utf-8'))",
"def test_nonASCIIUnicodeToString(self):\n self.assertRaises(UnicodeError, nativeString, u\"\\u1234\")",
"def test_unicode(self):\n if _PY3:\n expected = str\n else:\n expected = unicode\n self.assertTrue(unicodeCompat is expected)",
"def test_unexpectedType(self):\n self.assertRaises(TypeError, nativeString, 1)",
"def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)",
"def test_assert_unicode():\n if backwards.PY2: # pragma: Python 2\n # backwards.assert_unicode(unicode('hello'))\n # nt.assert_raises(AssertionError, backwards.assert_unicode, 'hello')\n backwards.assert_unicode('hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n unicode('hello'))\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n bytearray('hello', 'utf-8'))\n else: # pragma: Python 3\n backwards.assert_unicode('hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode, b'hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n bytearray('hello', 'utf-8'))",
"def testNonAsciiStr(self):\n class Thing(messages.Message):\n string_field = messages.StringField(2)\n\n thing = Thing()\n self.assertRaisesWithRegexpMatch(\n messages.ValidationError,\n 'Field string_field encountered non-ASCII string',\n setattr, thing, 'string_field', test_util.BINARY)",
"def test_unicodeToString(self):\n self.assertNativeString(u\"Good day\", \"Good day\")",
"def test_string_attribute_errors(self):\n self.inventory = Inventory()\n with self.assertRaises(AttributeError):\n self.inventory.add_coconut('south asian')",
"def testStr_NoFieldName(self):\n self.assertEquals('Validation error',\n str(messages.ValidationError('Validation error')))",
"def test_unicode(self):\n self.assertValue({\n 'snowman': six.u('\\N{SNOWMAN}'),\n 'something': 'not a snowman',\n },\n six.u(\"snowman: \\N{SNOWMAN} something: not_a_snowman\\n\"))",
"def test_raw_unicode_escape_dashes(self):\n ok = True\n try:\n unicode(b'hey', 'raw_unicode-escape')\n except LookupError:\n ok = False\n\n self.assertTrue(ok, \"dashes and underscores should be interchangable\")",
"def ensure_unicode_string(value):\n if not isinstance(value, six.string_types):\n raise TypeError(u'Expected string value, got: {}'.format(value))\n return six.text_type(value)",
"def test_other(self):\n self.assertRaises(ValueError, isStringTrue, \"dog\")\n return",
"def test_string_conversion():\n ob = ConversionTest()\n\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = \"eggs\"\n assert ob.StringField == \"eggs\"\n assert ob.StringField == u\"eggs\"\n\n ob.StringField = u\"spam\"\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = u'\\uffff\\uffff'\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = System.String(\"spam\")\n assert ob.StringField == \"spam\"\n assert ob.StringField == u\"spam\"\n\n ob.StringField = System.String(u'\\uffff\\uffff')\n assert ob.StringField == u'\\uffff\\uffff'\n\n ob.StringField = None\n assert ob.StringField is None\n\n with pytest.raises(TypeError):\n ConversionTest().StringField = 1\n\n world = UnicodeString()\n test_unicode_str = u\"안녕\"\n assert test_unicode_str == str(world.value)\n assert test_unicode_str == str(world.GetString())\n assert test_unicode_str == str(world)",
"def test_ustr(self):\n # unicode(self.bs) fails\n self.assertEqual(unicode, type(unicode(self.wbs)))\n self.assertEqual(unicode(self.us), unicode(self.wus))\n # unicode(self.be) fails\n self.assertEqual(unicode, type(unicode(self.wbe)))\n # unicode(ue) fails in Python < 2.6 (issue2517_)\n self.assertEqual(unicode, type(unicode(self.wue)))\n self.assertEqual(self.us, unicode(self.wue))",
"def test_py2_transaction_exception_message_unicode():\n try:\n raise ValueError(UNICODE_MESSAGE)\n except ValueError:\n notice_error()",
"def test_noUnicode(self):\n s = proto_helpers.StringTransport()\n self.assertRaises(TypeError, s.write, \"foo\")",
"def test_invalid_str(self):\n with self.assertRaises(ValidationError):\n lowercase_validator('hg213i75%^&$Efg')",
"def test_py3_transaction_exception_message_bytes_non_english_unicode():\n try:\n raise ValueError(UNICODE_MESSAGE)\n except ValueError:\n notice_error()",
"def test_cast_string_failure(self) -> None:\n self.flag.flag_type = None\n self.assertRaises(ValueError, self.flag.cast_string, '42')",
"def test_unicode_insert_error():\n# In addition, we should use vagrant or azure deployments of the scanner to Ubuntu and Windows virtual machines\n# to ensure cross-platform behavior.\n pass",
"def test_alias_has_unicode_method(self):\n expected = u\"Book: {0}, ID Scheme: {1}, Value: {2}\".format(unicode(self.book), u\"ISBN-10\", u\"1000000001\")\n self.assertEqual(expected, unicode(self.alias))",
"def _validate_string(display_name, input_value):\n\n if not isinstance(input_value, str):\n raise ValueError(display_name + \" must be a string type\")\n if input_value == '':\n raise ValueError(display_name + \" cannot be empty\")",
"def check_dog_name(dog):\n if not isinstance(dog.name, str):\n raise NotStringError(\"Dog name entered is not a string\")",
"def test_unicode_username(self):\n assert str(LoginFailures.objects.get(user=self.user)) == f'§: 10 - {self.user_lockout_until.isoformat()}'\n assert str(LoginFailures.objects.get(user=self.user2)) == 'Zażółć gęślą jaźń: 2 - -'",
"def test_badyvaluewithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, \"foo\", 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n # Handle the common case first, saves 30-40% in performance when s\r\n # is an instance of unicode. This function gets called often in that\r\n # setting.\r\n if isinstance(s, unicode):\r\n return s\r\n if strings_only and is_protected_type(s):\r\n return s\r\n try:\r\n if not isinstance(s, basestring,):\r\n if hasattr(s, '__unicode__'):\r\n s = unicode(s)\r\n else:\r\n try:\r\n s = unicode(str(s), encoding, errors)\r\n except UnicodeEncodeError:\r\n if not isinstance(s, Exception):\r\n raise\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII data without special\r\n # handling to display as a string. We need to handle this\r\n # without raising a further exception. We do an\r\n # approximation to what the Exception's standard str()\r\n # output should be.\r\n s = u' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n elif not isinstance(s, unicode):\r\n # Note: We use .decode() here, instead of unicode(s, encoding,\r\n # errors), so that if s is a SafeString, it ends up being a\r\n # SafeUnicode at the end.\r\n s = s.decode(encoding, errors)\r\n except UnicodeDecodeError, e:\r\n if not isinstance(s, Exception):\r\n raise DjangoUnicodeDecodeError(s, *e.args)\r\n else:\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII bytestring data without a\r\n # working unicode method. Try to handle this without raising a\r\n # further exception by individually forcing the exception args\r\n # to unicode.\r\n s = u' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n return s"
] | [
"0.7820335",
"0.712562",
"0.7090174",
"0.6981514",
"0.6951635",
"0.67255706",
"0.66341805",
"0.6598654",
"0.6582145",
"0.6511867",
"0.6404797",
"0.6359545",
"0.6332364",
"0.6303237",
"0.6299061",
"0.6286056",
"0.6273976",
"0.62645805",
"0.6254592",
"0.6227654",
"0.62090826",
"0.62063944",
"0.6206074",
"0.6191317",
"0.6189229",
"0.61839026",
"0.6181274",
"0.61632687",
"0.61555785",
"0.61450595"
] | 0.74112284 | 1 |
Test that force_bytes knows how to convert to bytes an exception containing nonASCII characters in its args. | def test_force_bytes_exception(self):
error_msg = "This is an exception, voilà"
exc = ValueError(error_msg)
result = force_bytes(exc)
self.assertEqual(result, error_msg.encode('utf-8')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_nonASCIIBytesToString(self):\n self.assertRaises(UnicodeError, nativeString, b\"\\xFF\")",
"def test_py2_transaction_exception_message_bytes_implicit_encoding_non_english():\n try:\n\n # Bytes literal with non-ascii compatible characters only allowed in\n # python 2\n\n raise ValueError('I💜🐍')\n except ValueError:\n notice_error()",
"def test_py2_transaction_exception_message_bytes_utf8_encoding_non_english():\n try:\n\n # Bytes literal with non-ascii compatible characters only allowed in\n # python 2\n\n raise ValueError('I💜🐍')\n except ValueError:\n notice_error()",
"def test_py2_application_exception_message_bytes_implicit_encoding_non_english():\n try:\n\n # Bytes literal with non-ascii compatible characters only allowed in\n # python 2\n\n raise ValueError('I💜🐍')\n except ValueError:\n app = application()\n notice_error(application=app)",
"def enforce_bytes(value: Union[bytes, str], *, name: str) -> bytes:\n if isinstance(value, str):\n try:\n return value.encode(\"ascii\")\n except UnicodeEncodeError:\n raise TypeError(f\"{name} strings may not include unicode characters.\")\n elif isinstance(value, bytes):\n return value\n\n seen_type = type(value).__name__\n raise TypeError(f\"{name} must be bytes or str, but got {seen_type}.\")",
"def test_bytes_encoding_arg_non_kwarg(self):\n u = u'Unicode string: \\u5b54\\u5b50'\n b = py23_bytes(u, 'utf-8')\n self.assertEqual(b, u.encode('utf-8'))",
"def test_assert_bytes():\n if backwards.PY2: # pragma: Python 2\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes('hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n unicode('hello'))\n else: # pragma: Python 3\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes(b'hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n 'hello')",
"def test_py2_transaction_exception_message_bytes_non_english():\n try:\n raise ValueError(BYTES_UTF8_ENCODED)\n except ValueError:\n notice_error()",
"def test_py2_application_exception_message_bytes_utf8_encoding_non_english():\n try:\n\n # Bytes literal with non-ascii compatible characters only allowed in\n # python 2\n\n raise ValueError('I💜🐍')\n except ValueError:\n app = application()\n notice_error(application=app)",
"def test_force_text_exception(self):\n class MyString(object):\n def __str__(self):\n return b'\\xc3\\xb6\\xc3\\xa4\\xc3\\xbc'\n\n __unicode__ = __str__\n\n # str(s) raises a TypeError on python 3 if the result is not a text type.\n # python 2 fails when it tries converting from str to unicode (via ASCII).\n exception = TypeError if six.PY3 else UnicodeError\n self.assertRaises(exception, force_text, MyString())",
"def test_py3_transaction_exception_message_bytes_non_english():\n try:\n raise ValueError(BYTES_UTF8_ENCODED)\n except ValueError:\n notice_error()",
"def ensure_bytes(value: AnyStr) -> bytes:\n if isinstance(value, bytes):\n return value\n if isinstance(value, str):\n return value.encode('utf-8')\n raise TypeError(f\"input must be str or bytes, got {type(value).__name__}\")",
"def test_bytes(self):\n self.assertRaises(\n UnicodeDecodeError,\n lambda: bytes_to_str(\"\\N{SNOWMAN}\".encode(\"utf-8\")),\n )\n decoded = bytes_to_str(b\"hello world\")\n self.assertIsInstance(decoded, str)\n self.assertEqual(decoded, \"hello world\")",
"def test_bytes_encoding_arg(self):\n u = u'Unicode string: \\u5b54\\u5b50'\n b = py23_bytes(u, encoding='utf-8')\n self.assertEqual(b, u.encode('utf-8'))",
"def test_ignore(self):\n self.assertEqual(unicode(b'', 'ascii', 'ignore'), '')\n self.assertEqual(unicode(b'\\xff', 'ascii', 'ignore'), '')\n self.assertEqual(unicode(b'a\\xffb\\xffc\\xff', 'ascii', 'ignore'), 'abc')",
"def test_invalid_base64_to_bytes(self):\n @converters.wrap\n def inner_test(param: bytes):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(\n lambda: inner_test(param='foobar='), 3112\n )",
"def test_unicode2bytes():\n if backwards.PY2: # pragma: Python 2\n res = backwards.bytes_type('hello')\n backwards.assert_bytes(res)\n nt.assert_equal(backwards.unicode2bytes('hello'), res)\n nt.assert_equal(backwards.unicode2bytes(unicode('hello')), res)\n nt.assert_equal(backwards.unicode2bytes(bytearray('hello', 'utf-8')), res)\n nt.assert_raises(TypeError, backwards.unicode2bytes, 1)\n else: # pragma: Python 3\n res = backwards.bytes_type('hello', 'utf-8')\n backwards.assert_bytes(res)\n nt.assert_equal(backwards.unicode2bytes('hello'), res)\n nt.assert_equal(backwards.unicode2bytes(b'hello'), res)\n nt.assert_equal(backwards.unicode2bytes(bytearray('hello', 'utf-8')), res)\n nt.assert_raises(TypeError, backwards.unicode2bytes, 1)",
"def test_bytes_to_string():\n\n @type_checked\n def _run_test(something:str):\n assert something == \"yep\"\n\n _run_test(bytes(\"yep\", \"utf-8\"))",
"def bytes_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (bytes, collections.abc.ByteString)):\n name = type(var).__name__\n raise BytesError(\n 'Function {} expected bytes, {} got instead.'.format(func, name))",
"def test_encoders_strings(encoder):\n assert \"\" == encoding.decode(b\"\", encoder)\n\n assert \"string\" == encoding.decode(\n encoding.encode(\n \"string\",\n encoder\n ),\n encoder\n )\n\n with pytest.raises(TypeError):\n encoding.encode(b\"string\", encoder)\n\n with pytest.raises(TypeError):\n encoding.decode(\"foobar\", encoder)",
"def test_py3_transaction_exception_message_bytes_non_english_unicode():\n try:\n raise ValueError(UNICODE_MESSAGE)\n except ValueError:\n notice_error()",
"def test_py2_application_exception_message_bytes_non_english():\n try:\n raise ValueError(BYTES_UTF8_ENCODED)\n except ValueError:\n app = application()\n notice_error(application=app)",
"def test_bytes_to_bytes(self):\n @converters.wrap\n def inner_test(param: bytes):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, b'Test bytes.')\n inner_test(param=b'Test bytes.')",
"def test_py3_application_exception_message_bytes_non_english():\n try:\n raise ValueError(BYTES_UTF8_ENCODED)\n except ValueError:\n app = application()\n notice_error(application=app)",
"def test_bytes_to_pretty_hexinvalid_data():\n try:\n cmds._bytes_to_pretty_hex(data=[1, 2, 3, 4, \"500\"])\n except Exception:\n # The exception that bubbles up from IntelHex is implementation detail\n # from that library, so it could be anything\n assert True, \"Exception raised\"\n else:\n raise AssertionError(\"Exception NOT raised\")",
"def ensure_bytes(s, encoding):\n if isinstance(s, bytes):\n return s\n return s.encode(encoding)",
"def test_assert_unicode():\n if backwards.PY2: # pragma: Python 2\n # backwards.assert_unicode(unicode('hello'))\n # nt.assert_raises(AssertionError, backwards.assert_unicode, 'hello')\n backwards.assert_unicode('hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n unicode('hello'))\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n bytearray('hello', 'utf-8'))\n else: # pragma: Python 3\n backwards.assert_unicode('hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode, b'hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n bytearray('hello', 'utf-8'))",
"def testNoForceEncodeValueError(self):\n test_cases = [\n ('aaabbb', '3a 3b'),\n ('\\n\\n\\n', '3\\n'),\n ('aaaaaaaaaa', '10a'),\n ('aaaaaaaaaabbbbbbbbbbb', '10a 11b'),\n ('a'*1001, '1001a'),\n (''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2]), '1001a 909b 65c 2d'),\n ('aaaa1111\\nbbbb2222', '4a 41 1\\n 4b 42'),\n ]\n for data, expected in test_cases:\n obj = ASCIITransportFormat(\n ASCIITransportFormat.SupportedTypes.STRING,\n data,\n )\n self.assertEqual(obj.data, data)\n self.assertFalse(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n obj.encode()\n self.assertEqual(obj.data, expected)\n self.assertTrue(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n with self.assertRaises(ValueError):\n obj.encode()\n\n self.assertEqual(obj.data, expected)\n self.assertTrue(obj.encoded)\n self.assertFalse(obj.pseudo_encode)",
"def ensure_bytes(data, encoding=\"utf8\"):\n return data if isinstance(data, bytes) else unicode_type(data).encode(encoding)",
"def test_to_unicode_raises_on_non_string():\n with pytest.raises(TypeError):\n to_unicode(999)"
] | [
"0.77594966",
"0.7754345",
"0.748256",
"0.74188924",
"0.7351609",
"0.7313225",
"0.7176732",
"0.71660197",
"0.71446127",
"0.71294063",
"0.7124105",
"0.71056265",
"0.7031364",
"0.70092523",
"0.6898177",
"0.6873759",
"0.68179154",
"0.6805716",
"0.6797002",
"0.67729616",
"0.6755971",
"0.6749294",
"0.6729973",
"0.6725475",
"0.6720231",
"0.6708509",
"0.6674283",
"0.6664664",
"0.664174",
"0.6630601"
] | 0.81311226 | 0 |
Get response from Cloud Vision API. | def get_response_from_cv_api(data):
url = 'https://vision.googleapis.com/v1/images:annotate?key={}'.format(API_KEY)
response = requests.post(url=url, data=data, headers={'Content-Type': 'application/json'})
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_response(image):\n encoded = base64.b64encode(image.read())\n GOOGLE_CLOUD_VISION_API_URL = 'https://vision.googleapis.com/v1/images:annotate?key='\n API_KEY = 'AIzaSyCKFsYnfYoLFeD2OHpvcjky9opfhHKFnP0'\n api_url = GOOGLE_CLOUD_VISION_API_URL + API_KEY\n header = {'Content-Type': 'application/json'}\n body = json.dumps({\n\t\t\t'requests': [{\n\t\t\t\t'image': {\n\t\t\t\t\t'content': encoded.decode(\"utf-8\"),\n\t\t\t\t},\n\t\t\t\t'features': [{\n\t\t\t\t\t'type': 'DOCUMENT_TEXT_DETECTION',\n\t\t\t\t}]\n\t\t\t}]\n\t\t})\n d = requests.post(api_url,data=body).json()\n return d",
"def get_initial_response():\n # Message to the user\n message = {\n 'apiVersion': 'v1.0',\n 'status': '200',\n 'message': 'Flask API - Doubtnut - OPENCV'\n }\n # Making the message looks good\n resp = jsonify(message)\n # Returning the object\n return resp",
"def get_response(image_path):\n try:\n # Obtain response from OpenALP API for image_path input\n response = CloudTools.api_call_platesv2(image_path)\n if len(response['results']) > 0:\n plateDict = response['results'][0]\n plate = plateDict['candidates'][0]\n prob = plateDict['confidence']\n prob = str(round(float(prob) / 100, 2))\n box = plateDict['coordinates']\n return [plate, prob, box]\n else:\n return []\n except Exception as e:\n print(' No internet Connection or: {}', e)\n return []",
"def detect_object():\n response = None\n try:\n # logger.info(request.Form)\n if request.files['base_image'] is not None:\n base_img = cv2.imdecode(np.fromstring(request.files['base_image'].read(), np.uint8), cv2.IMREAD_UNCHANGED)\n\n if base_img is not None:\n response = predictionService.verify(base_img=base_img)\n else:\n response = BaseResponse(code=400, reason='base_image cannot be null')\n except Exception as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Internal server error occurred. refer to logs\")\n\n return response.toJSON()",
"def get_pose():\n files = {'file': ('image.jpg', open(\n 'assets/image.jpg', 'rb'), 'images/jpeg')}\n result = requests.post(URL, files=files).json()\n img = cv2.imread('assets/image.jpg')[:, :, ::-1]\n return result, img",
"def vr_http_classify(self, img):\n\n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n return self.get_predictions(img)",
"def detect_object_json():\n response = None\n try:\n logger.info(request)\n req_json = request.get_json()\n logger.info(req_json)\n\n if req_json is not None:\n base_img_url = req_json.get('base_image_url')\n\n if base_img_url is not None:\n\n base_img = cv2.imdecode(\n np.asarray(bytearray(urllib.request.urlopen(base_img_url).read()), dtype=\"uint8\"), cv2.IMREAD_COLOR)\n\n if base_img is not None:\n response = predictionService.verify(base_img=base_img)\n else:\n response = BaseResponse(code=400, reason='base_image cannot be null')\n except urllib.error.URLError as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Could not read from image URL provided for base and target\")\n except cv2.error as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"URL provided is not a valid image\")\n except Exception as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Internal server error occurred. refer to logs\")\n\n return response.toJSON()",
"def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response",
"def get(self):\n resp = Response()\n return resp",
"def GetResponse(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def KvGet(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_response_for_api(self):\n coll_1 = \"I'm from Telegram\"\n coll_2 = \" Controller Class\"\n result = coll_1 + coll_2\n return {\n 'response': result\n }",
"async def detect(self, request: Request) -> Response:\n raw_data = await request.body()\n as_str = raw_data.decode(\"utf-8\")\n\n try:\n body = orjson.loads(as_str)\n except orjson.JSONDecodeError as e:\n raise InferenceError(\"Unrecognized request format: %s\" % e)\n\n request_handler = get_request_handler(\n Protocol(self.alibi_detect_settings.protocol), body\n )\n request_handler.validate()\n input_data = request_handler.extract_request()\n\n y = await self.predict_fn(input_data)\n output_data = orjson.dumps(y, option=orjson.OPT_SERIALIZE_NUMPY)\n\n return Response(content=output_data, media_type=\"application/json\")",
"def cloud_information(self):\n url = \"%s/state/teams/%s/cloud\" % (self.url, self.identifier, )\n return perform_request(url)",
"def main_picamera():\n #takephoto() # First take a picture\n\n credentials = GoogleCredentials.get_application_default()\n service = discovery.build('vision', 'v1', credentials=credentials)\n\n with open('image.jpg', 'rb') as image:\n # image_content = base64.b64encode(image.read())\n image_content = image.read()\n service_request = service.images().annotate(body={\n 'requests': [{\n 'image': {\n 'content': image_content.decode('UTF-8')\n },\n 'features': [{\n 'type': 'LOGO_DETECTION',\n 'maxResults': 1\n }]\n }]\n })\n response = service_request.execute()\n\n try:\n label = response['responses'][0]['logoAnnotations'][0]['description']\n except:\n label = \"No response.\"\n\n print(label)",
"def get_response(text: str):\n # Step 01: Initialize the response.\n response = dict()\n results = dict()\n\n vectorized_text = dict()\n vectorized_text['test'] = (PredictionService.__vc.transform([text])) # see options in the above cell\n\n print ('DONE - [EMBEDDING] Apply Chosen Embeddings to the Tweets')\n # Step 02: Predict the label/class of the received text.\n predicted_sentiment = PredictionService.__model.predict(vectorized_text['test']).tolist()\n\n # Step 03: Parse the prediction result.\n if (predicted_sentiment[0] == 1):\n results[\"label\"] = \"Relevant\"\n else:\n results[\"label\"] = \"Not Relevant\"\n\n # Step 04: Prepare the response.\n response[\"status\"] = 200\n response[\"results\"] = results\n\n # Step 05: Return the response.\n return response",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def get_Response(self):\n return self._output.get('Response', None)",
"def cloud_ai_document(self) -> 'outputs.GoogleCloudDocumentaiV1DocumentResponse':\n return pulumi.get(self, \"cloud_ai_document\")",
"def get_result(params):\n global PublicKey, ProjectId, url\n params[\"PublicKey\"] = PublicKey\n params[\"Signature\"] = verfy_ac(params)\n\n if ProjectId != '':\n params[\"ProjectId\"] = ProjectId\n\n r = requests.post(url, params)\n response = json.dumps(r.json(), indent=4)\n\n return response"
] | [
"0.6588146",
"0.641112",
"0.61037785",
"0.57874006",
"0.571129",
"0.5638207",
"0.5509912",
"0.54499793",
"0.5273775",
"0.52694327",
"0.5254997",
"0.52453876",
"0.5232203",
"0.52142817",
"0.519771",
"0.51782495",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.51781756",
"0.5160523",
"0.51159453"
] | 0.7367046 | 0 |
Determines whether a given user from the authentication server has a certain right or not. | def has_right(self, username: str, right: str) -> bool:
form: str = urlencode({'username': username, 'right': right})
headers: dict = {
'Content-type': 'application/x-www-form-urlencoded'
}
connection: HTTPConnection = self.__get_connection()
connection.request('GET', '/users/'+str(username)+'/rights/'+str(right), form, headers)
response: HTTPResponse = connection.getresponse()
if response.status == 200:
return True
if response.status == 404:
raise NotFoundError()
if response.status == 500:
raise HTTPException('Server error')
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_roles_check(request):\n logger.debug('right_user_check')\n options = {\n 'api_file': {'GET': True, 'POST': False}\n }\n url_name = request.request.resolver_match.url_name\n if not request.request.user.is_authenticated:\n return False\n user_have_right = options[url_name][request.request.method]\n if user_have_right:\n return True\n raise PermissionDenied",
"def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False",
"def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0",
"def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw",
"def is_correct_user(self, login, password):\n pass",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)",
"def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)",
"def checkIfAllowed(self, user):\n\n # Default case if mod access is not needed everyone has access\n if not self.modOnlyAccess:\n return True\n\n # Otherwise check the user's access level\n if user.modAccess == self.modOnlyAccess:\n return True\n else:\n return False",
"def checkRights(self,entry):\n if not self.session.isLoggedin():\n self.logger.debug('Not logged in, we leave checkRights')\n return False\n \n # Ist Eintrag Public (z.B. Authen)\n if entry.get('public'):\n return True\n \n \n rights = entry.get('rights')\n \n if rights is None: \n self.logger.debug('Rights are net set (None), we leave checkRights')\n return True\n\n self.logger.debug('Entryrights: {}'.format(repr(rights)))\n\n found = False\n userRights = self.session.getAttribute('rights')\n self.logger.debug('Userrights: {}'.format(repr(userRights)))\n\n # wurden Rechte gesetzt\n if rights is not None or rights==[]:\n if isinstance(rights,str): rights = rights.split(',')\n \n for right in rights:\n if right.startswith('-'):\n right = right[1:]\n if right in userRights: \n self.logger.debug('Negative righths found: {} is forbidden'.format(right))\n return False\n else:\n if right in (userRights or []):\n found = True \n else:\n # Wenn keine Rechte im Eintrag\n # auf jeden Fall anzeigen\n found = True\n \n self.logger.debug('Result is \"{}\"'.format(found))\n return found",
"def check_my_users(user):\n user_data = my_users.get(user['username'])\n if not user_data:\n return False # <--- invalid credentials\n elif user_data.get('password') == user['password']:\n return True # <--- user is logged in!\n\n return False # <--- invalid credentials",
"def has_user(self, user, allow_superusers=True):\n return self.has_student(user, allow_superusers) or self.has_ta(user, False) or self.has_instructor(user, False)",
"def check_auth(username, password):\n return username == 'admin' and password == 'worcester'",
"def check_auth(username, password):\n return username == 'nicholas' and password == ADMIN_PASS",
"def check_privilege(self, username):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT isAdmin \\\n FROM users WHERE username = %s\", (username,))\n privilege = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if (privilege[0] is True):\n return True\n else:\n return False",
"def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'",
"def check_auth(username, password):\n return username == 'jeffkoons' and password == 'likesweirdbaloons'",
"def check_auth(username, password):\n return username == 'admin' and password == 'secret'",
"def check_auth(username, password):\n return username == 'admin' and password == 'secret'",
"def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)",
"def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1",
"def privilege_check(user, *required_privileges):\n for perm in required_privileges:\n if user.has_property(perm):\n return True\n return False",
"def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())",
"def check_auth(username, password):\n return username == 'admin' and password == 'pebble'",
"def checkright(self, r):\n return r in self.server.getrights(self.idstring(), self)",
"def check_reply(user):\n if not user.is_authenticated():\n return 'not_auth'\n\n return 'ok'",
"def check_auth(username, password):\n return username == 'admin' and password == 'root'",
"def check_auth(username, password):\n return username == USERNAME and password == PASSWORD",
"def has_authority(self, user):\n UserModel = get_user_model()\n ADMINISTRATOR = UserModel.ROLE_MAP[UserModel.ADMINISTRATOR]\n result = True\n\n if not (user.is_superuser or user.role == ADMINISTRATOR):\n try:\n self.memberships.get(user=user)\n except Membership.DoesNotExist:\n result = False\n\n return result",
"def check_auth(username, password):\n return username == 'admin' and password == 'password'"
] | [
"0.6864294",
"0.6832401",
"0.6566839",
"0.65181917",
"0.650468",
"0.6478974",
"0.64769286",
"0.64769286",
"0.6463664",
"0.64017993",
"0.6368617",
"0.634587",
"0.6326526",
"0.63013756",
"0.6293979",
"0.62584096",
"0.62548155",
"0.62485784",
"0.62485784",
"0.6244719",
"0.62446195",
"0.6239497",
"0.6234003",
"0.62323624",
"0.61692524",
"0.61569357",
"0.6149219",
"0.6146343",
"0.6143246",
"0.61344784"
] | 0.7994539 | 0 |
Write data to transport. | def send_data(self, data):
self._transport.write(data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, data, timeout_ms=None, **kwargs):\n raise NotImplementedError(\"implement in derived transport class\")",
"def send_message(self, data):\n self.transport.write(data)",
"def write(self, data):\n if self.closed:\n raise ConnectionResetError(\n 'Transport closed - cannot write on %s' % self\n )\n else:\n t = self.transport\n if self._paused or self._buffer:\n self._buffer.appendleft(data)\n self._buffer_size += len(data)\n self._write_from_buffer()\n if self._buffer_size > 2 * self._b_limit:\n if self._waiter and not self._waiter.cancelled():\n self.logger.warning(\n '%s buffer size is %d: limit is %d ',\n self._buffer_size, self._b_limit\n )\n else:\n t.pause_reading()\n self._waiter = self._loop.create_future()\n else:\n t.write(data)\n self.changed()\n return self._waiter",
"def write(self, data, handle=None):\n if not isinstance(data, (bytes, bytearray, memoryview)):\n raise TypeError(\"data: expecting a bytes-like instance, got {!r}\"\n .format(type(data).__name__))\n if handle is not None and not isinstance(self._handle, pyuv.Pipe):\n raise ValueError('handle: can only be sent over pyuv.Pipe')\n self._check_status()\n if not self._writable:\n raise TransportError('transport is not writable')\n if self._closing:\n raise TransportError('transport is closing')\n try:\n if handle:\n self._handle.write(data, self._on_write_complete, handle)\n else:\n self._handle.write(data, self._on_write_complete)\n except pyuv.error.UVError as e:\n self._error = TransportError.from_errno(e.args[0])\n self.abort()\n raise compat.saved_exc(self._error)\n # We only keep track of the number of outstanding write requests\n # outselves. See note in get_write_buffer_size().\n self._write_buffer_size += 1\n self._maybe_pause_protocol()",
"def write(self, data):\n raise NotImplementedError()",
"def write(self, data):\n try:\n self._conn.send(data)\n except OSError as exc:\n raise TS3ConnectionClosedException(OSError) from exc",
"def write(self, data):\n try:\n self.ser.write(data)\n except SerialException as se:\n log.debug('Serial connection write error: {}'.format(se))",
"def write(self, data):\n with self._write_lock:\n self.socket.send(data)",
"def _write(self, data):\n self._writer.write(data)",
"def write(self, data):\n self._write_lock.acquire()\n try:\n self.socket.sendall(data)\n finally:\n self._write_lock.release()",
"def send(self, data):\r\n\r\n self._serial_object.write(data)",
"def async_write(self, data) -> None:\n if data and self.__is_active:\n # logging.info('async_write: ' + str(data))\n self.__client_socket.async_write_all(data, self.__async_write_callback)\n\n # logging.info('async_write done')",
"def write(self, data):\n with self.writing:\n raise NotImplementedError()",
"def write(self, data: bytes) -> None:\n pass",
"def _write(self, location, data):\n self._connector.write(location=location, data=data)",
"def write(self, data):\n\t\tself.outputbuffer.write(data)",
"def write(self, data):\n self._check_not_closed()\n raise io.UnsupportedOperation(\"Write not supported\")",
"def sendData(self, data):\n self.transport.write(zlib.compress(rencode.dumps(data)))",
"def sendto(self, data, addr=None):\n if not isinstance(data, (bytes, bytearray, memoryview)):\n raise TypeError(\"data: expecting a bytes-like instance, got {!r}\"\n .format(type(data).__name__))\n self._check_status()\n if not self._writable:\n raise TransportError('transport is not writable')\n try:\n self._handle.send(addr, data, self._on_send_complete)\n except pyuv.error.UVError as e:\n error = TransportError.from_errno(e.args[0])\n # Try to discern between permanent and transient errors. Permanent\n # errors close the transport. This list is very likely not complete.\n if error.errno != pyuv.errno.UV_EBADF:\n raise error\n self._error = error\n self.abort()\n self._write_buffer_size += 1\n self._maybe_pause_protocol()",
"def write(self, data: bytes):\n self._writer.write(data)",
"def send_data(self):\n data = self.datastore.use(self.data_name)\n if data is None:\n self.dbg(\"sockets_warning\", \"Data is none for {}\", [self.data_name])\n encoded_data = json.dumps(data).encode()\n self.conn.sendall(encoded_data)\n self.dbg(\"sockets_verbose\", \"Data sent\")",
"def write( data ):",
"def write(self, data):\n return 0",
"def write_data(self, data, response_required=None, timeout=5.0, raw=False):\n if self._transport is None:\n return\n\n if self._paused:\n return\n\n if self._waiting_for_response:\n LOG.debug(\"queueing write %s\", data)\n self._queued_writes.append((data, response_required, timeout))\n return\n\n if response_required:\n self._waiting_for_response = response_required\n if timeout > 0:\n self._timeout_task = self.loop.call_later(\n timeout, self._response_required_timeout\n )\n\n if not raw:\n cksum = 256 - reduce(lambda x, y: x + y, map(ord, data)) % 256\n data = data + \"{:02X}\".format(cksum)\n if int(data[0:2], 16) != len(data) - 2:\n LOG.debug(\"message length wrong: %s\", data)\n\n LOG.debug(\"write_data '%s'\", data)\n self._transport.write((data + \"\\r\\n\").encode())",
"def write(data):",
"def write(self, data: Union[str, bytes]) -> None:\n ...",
"def write_data(self, data):\n print('Wrote %d bytes' % (len(data)))",
"def s_write(self, data):\n self.s.flushOutput()\n\n if self.s.is_open:\n try:\n self.s.write(data)\n if self.log_output:\n self.logfile.write('\\nIN :' + str(len(data)) + '[' + hexlify(data) + ']' + '\\n')\n except Exception as e:\n print(\"Could not write to port \" + str(e))\n else:\n raise IOError('Comport is not open, use ctl_connect()')",
"def __write(self, data):\n return self.__descriptor.write(data.encode(\"utf-8\") + b'\\n')",
"def sendData(self, data):\n self.tx.sendBuffer(data)"
] | [
"0.7657331",
"0.76417875",
"0.76139855",
"0.7602521",
"0.7583665",
"0.75793827",
"0.75037515",
"0.7470128",
"0.74523485",
"0.739304",
"0.73432237",
"0.7279804",
"0.7269122",
"0.7266278",
"0.72515565",
"0.72349596",
"0.7186108",
"0.71751815",
"0.7171012",
"0.7165056",
"0.7093704",
"0.70894516",
"0.70618606",
"0.70502365",
"0.7018334",
"0.7005885",
"0.6993712",
"0.6952333",
"0.69153357",
"0.69088566"
] | 0.8111547 | 0 |
Pause writing callback from transport. | def pause_writing(self):
self._stream.pause_writing() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resume_writing(self):\n self._stream.resume_writing()",
"def _write(self, chunk):\n if self.consumer and not self.stop_event.is_set():\n self.consumer.write(chunk)",
"def write(self, data):\n if self.closed:\n raise ConnectionResetError(\n 'Transport closed - cannot write on %s' % self\n )\n else:\n t = self.transport\n if self._paused or self._buffer:\n self._buffer.appendleft(data)\n self._buffer_size += len(data)\n self._write_from_buffer()\n if self._buffer_size > 2 * self._b_limit:\n if self._waiter and not self._waiter.cancelled():\n self.logger.warning(\n '%s buffer size is %d: limit is %d ',\n self._buffer_size, self._b_limit\n )\n else:\n t.pause_reading()\n self._waiter = self._loop.create_future()\n else:\n t.write(data)\n self.changed()\n return self._waiter",
"def testTriggerPause(self):\n\n # Pause the proxy so data sent to it builds up in its buffer.\n self.proxy.pauseProducing()\n self.assertFalse(self.parentProducer.paused, \"don't pause yet\")\n self.proxy.write(\"x\" * 51)\n self.assertFalse(self.parentProducer.paused, \"don't pause yet\")\n self.proxy.write(\"x\" * 51)\n self.assertTrue(self.parentProducer.paused)",
"def pause_reading(self):\n raise NotImplementedError",
"def _pause(self):\n data_paused = None\n while self.target.is_active and data_paused != '01':\n data_paused = self._mem_read(self.data['paused'][0], 1)\n time.sleep(self.pause_time)\n self.data['paused'][1] = data_paused\n return",
"def endWrite(self):\n self.writing = False\n if len(self.okToRead._waiters) > 0:\n self.okToRead.notify()\n else:\n self.okToWrite.notify()\n self.okToRead.release()\n self.okToWrite.release()",
"def __async_write_callback(self, err) -> None:\n if err != 0:\n logging.info('async_write: disconnected')\n self.close()\n # elif self.__is_active:\n # Data was writen to socket. just handle errors if any.\n # logging.info('async_write: OK')",
"def pause(self, instance, callback):\n pass",
"def async_write(self, data) -> None:\n if data and self.__is_active:\n # logging.info('async_write: ' + str(data))\n self.__client_socket.async_write_all(data, self.__async_write_callback)\n\n # logging.info('async_write done')",
"def flush( finishing=False, callback=None ):",
"def pause(self):\n raise NotImplementedError()",
"def pause(self):\n raise NotImplementedError()",
"def hw_pause(self):\n self.logger.debug(\"Pause called (no-op)\")",
"def pause(self):\n\t\tpass",
"def write( chunk, callback=None ):",
"def beginWrite(self):\n self.okToWrite.acquire()\n self.okToRead.acquire()\n while self.writing or self.readerCount != 0:\n self.okToWrite.wait()\n self.writing = True",
"def write(self, batch):\n time.sleep(self.WRITE_DELAY)",
"def pause(self):\n pass",
"def pause(self):\n pass",
"def paused():\n pause_time = time()\n cache.set('paused', pause_time)\n socketio.emit('paused', pause_time)",
"def connection_made(self, transport):\n self.transport = transport\n self.buf = bytes()\n print('Writer connection created')\n asyncio.ensure_future(self.send())\n print('Writer.send() scheduled')",
"def handle_write(self):\n self.initiate_send()",
"def delay_writing_for(self, ms, soc):\n self._log(\"waiting %sms before responding...\" % ms)\n\n def resume_writing():\n self._write_list.append(soc)\n\n self._write_list.remove(soc)\n self._timer_list.append((_nowms() + ms, resume_writing))",
"def pause(self) :\n raise NotImplementedError(\"pause not implemented\")",
"def setWriteOp(self, writeBuffer):\r\n try:\r\n self._checkAssert(0)\r\n self.writer = self.tlsConnection.writeAsync(writeBuffer)\r\n self._doWriteOp()\r\n except:\r\n self._clear()\r\n raise",
"def whenWriteReady(self, channel, call):",
"def pauseProducing(self):\n pass",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]"
] | [
"0.6722218",
"0.6428597",
"0.6101962",
"0.6073248",
"0.60059124",
"0.59187335",
"0.59112674",
"0.5894187",
"0.58766466",
"0.5876161",
"0.5810171",
"0.5754038",
"0.5754038",
"0.57490945",
"0.5720258",
"0.57075626",
"0.5706053",
"0.5704085",
"0.56786174",
"0.56786174",
"0.56483597",
"0.5630552",
"0.56183773",
"0.55624574",
"0.55499727",
"0.55471677",
"0.5546594",
"0.5544589",
"0.5531049",
"0.5531049"
] | 0.7620428 | 0 |
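Note on the record above: its negative snippets all circle the same producer-side backpressure idea — queue outgoing data and pause the peer once a buffer limit is crossed. A minimal self-contained sketch of that pattern (the class name and limit are invented for illustration, not taken from any snippet in the record):

from collections import deque

class BufferedWriter:
    """Queue writes and apply backpressure past a high-water mark (illustrative)."""

    def __init__(self, transport, high_water=64 * 1024):
        self.transport = transport
        self.high_water = high_water
        self._buffer = deque()
        self._buffered = 0

    def write(self, data):
        self._buffer.append(data)
        self._buffered += len(data)
        if self._buffered > self.high_water:
            self.transport.pause_reading()   # ask the peer to stop producing

    def flush(self):
        while self._buffer:
            chunk = self._buffer.popleft()
            self._buffered -= len(chunk)
            self.transport.write(chunk)
        self.transport.resume_reading()      # buffer drained, let the peer resume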
Resume writing callback from transport. | def resume_writing(self):
self._stream.resume_writing() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resume_reading(self):\n raise NotImplementedError",
"def resume_reading(self):\n if not self._paused_reading:\n raise RuntimeError('Not paused')\n self._paused_reading = False\n if not self._closing:\n self._loop.add_reader(self._sock_fd)",
"def pause_writing(self):\n self._stream.pause_writing()",
"def _write(self, chunk):\n if self.consumer and not self.stop_event.is_set():\n self.consumer.write(chunk)",
"def resume(self):\n raise NotImplementedError()",
"def resume(self):\n raise NotImplementedError()",
"def resume(self):\n\t\tpass",
"def resume(self):\n pass",
"def resume(self):\n pass",
"def resume(self):\n pass",
"def endWrite(self):\n self.writing = False\n if len(self.okToRead._waiters) > 0:\n self.okToRead.notify()\n else:\n self.okToWrite.notify()\n self.okToRead.release()\n self.okToWrite.release()",
"def resume(self):\n self.stdin_queue.put(\"resume\")",
"def beginWrite(self):\n self.okToWrite.acquire()\n self.okToRead.acquire()\n while self.writing or self.readerCount != 0:\n self.okToWrite.wait()\n self.writing = True",
"def handle_write(self):\n # without overriding this we would get an \"unhandled write event\"\n # message from asyncore once connection occurs.",
"def handle_write(self):\n self.initiate_send()",
"def flush( finishing=False, callback=None ):",
"def connection_made(self, transport):\n self.transport = transport\n self.buf = bytes()\n print('Writer connection created')\n asyncio.ensure_future(self.send())\n print('Writer.send() scheduled')",
"def resume(self):\n\n self.shm_command.write({'cmd': 'resume', 'data': {}})",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]",
"def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]",
"def on_resume(self, userdata):\n pass",
"def peek_write(self):\n ...",
"def test_resumeFromResumeOffsetInTheMiddleOfAlreadyWrittenData(self):\n fp = FilePath(self.mktemp())\n fp.setContent(b\"Twisted is amazing!\")\n protocol = self.makeConnectedDccFileReceive(fp.path, resumeOffset=11)\n\n self.allDataReceivedForProtocol(protocol, b\"cool!\")\n\n self.assertEqual(fp.getContent(), b\"Twisted is cool!\")",
"def resume():\n cache.set('is_paused', 'False')\n socketio.emit('resume', {})",
"def __async_write_callback(self, err) -> None:\n if err != 0:\n logging.info('async_write: disconnected')\n self.close()\n # elif self.__is_active:\n # Data was writen to socket. just handle errors if any.\n # logging.info('async_write: OK')",
"def on_write_needed(self, nbytes, underflow):",
"def whenWriteReady(self, channel, call):",
"def write_eof(self):\n self._check_status()\n if not self._writable:\n raise TransportError('transport is not writable')\n if self._closing:\n raise TransportError('transport is closing')\n try:\n self._handle.shutdown(self._on_write_complete)\n except pyuv.error.UVError as e:\n self._error = TransportError.from_errno(e.args[0])\n self.abort()\n raise compat.saved_exc(self._error)\n self._write_buffer_size += 1",
"def handle_write(self):\n if self.established:\n return self.initiate_send()\n self._handshake()"
] | [
"0.66734207",
"0.6322468",
"0.6029015",
"0.60129535",
"0.5927665",
"0.5927665",
"0.5844039",
"0.5816916",
"0.5816916",
"0.5816916",
"0.5802818",
"0.57859164",
"0.578517",
"0.57731336",
"0.5732798",
"0.5677215",
"0.56752896",
"0.5654607",
"0.5628291",
"0.5628291",
"0.5628291",
"0.56281483",
"0.5619409",
"0.5605809",
"0.5547175",
"0.55324507",
"0.55292886",
"0.5504854",
"0.54613787",
"0.54412496"
] | 0.76703745 | 0 |
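Context for the record above: pause_writing() and resume_writing() are the protocol-side callbacks asyncio transports invoke when their write buffer crosses the high and low water marks. A minimal sketch of a protocol honouring them with a drain-style future (illustrative only, not the code of any library quoted in this record):

import asyncio

class FlowControlProtocol(asyncio.Protocol):
    """Block writers on a future while the transport asks us to back off."""

    def __init__(self):
        self._paused = False
        self._drain_waiter = None

    def pause_writing(self):
        # Called by the event loop when the transport buffer is too full.
        self._paused = True

    def resume_writing(self):
        # Called once the buffer has drained below the low-water mark.
        self._paused = False
        if self._drain_waiter is not None and not self._drain_waiter.done():
            self._drain_waiter.set_result(None)
        self._drain_waiter = None

    async def drain(self):
        # Writers await this between writes to respect backpressure.
        if self._paused:
            self._drain_waiter = asyncio.get_running_loop().create_future()
            await self._drain_waiter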
Report the member's replica set state. Submit a service check. Create an event on state change. | def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):
last_state = self._last_state_by_server.get(clean_server_name, -1)
self._last_state_by_server[clean_server_name] = state
if last_state != state and last_state != -1:
return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_member_status():\n notify_member_status()\n logger.info('signal sent for status report')",
"def send_event(instance_config, status, output):\n # This function assumes the input is a string like \"mumble.main\"\n monitoring_overrides = instance_config.get_monitoring()\n if 'alert_after' not in monitoring_overrides:\n monitoring_overrides['alert_after'] = '2m'\n monitoring_overrides['check_every'] = '1m'\n monitoring_overrides['runbook'] = monitoring_tools.get_runbook(\n monitoring_overrides,\n instance_config.service, soa_dir=instance_config.soa_dir,\n )\n\n check_name = (\n 'check_marathon_services_replication.%s' %\n instance_config.job_id\n )\n monitoring_tools.send_event(\n service=instance_config.service,\n check_name=check_name,\n overrides=monitoring_overrides,\n status=status,\n output=output,\n soa_dir=instance_config.soa_dir,\n cluster=instance_config.cluster,\n )\n _log(\n service=instance_config.service,\n line='Replication: %s' % output,\n component='monitoring',\n level='debug',\n cluster=instance_config.cluster,\n instance=instance_config.instance,\n )",
"def create_event(self, last_state, state, clean_server_name, replset_name, agentConfig):\n\n status = self.get_state_description(state)\n short_status = self.get_state_name(state)\n last_short_status = self.get_state_name(last_state)\n hostname = self.hostname_for_event(clean_server_name, agentConfig)\n msg_title = \"%s is %s for %s\" % (hostname, short_status, replset_name)\n msg = \"MongoDB %s (%s) just reported as %s (%s) for %s; it was %s before.\" % (hostname, clean_server_name, status, short_status, replset_name, last_short_status)\n\n self.event({\n 'timestamp': int(time.time()),\n 'source_type_name': self.SOURCE_TYPE_NAME,\n 'msg_title': msg_title,\n 'msg_text': msg,\n 'host': hostname,\n 'tags': [\n 'action:mongo_replset_member_status_change',\n 'member_status:' + short_status,\n 'previous_member_status:' + last_short_status,\n 'replset:' + replset_name,\n ]\n })",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def test_check_conciliation(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_conciliation()\n self.assertListEqual([call([3])], mocked_check.call_args_list)",
"def _call_proc_state_changed(self, svc, state):\n #log.debug(\"Proc State Changed (%s): %s\", ProcessStateEnum._str_map.get(state, state), svc)\n for cb in self._proc_state_change_callbacks:\n cb(svc, state, self.container)",
"def run(self):\n \n # Loop through all checkers to do an initial state check\n for checker in self.checkers:\n checker.update_last_state()\n\n # Send initial heartbeat\n self._send_heartbeat()\n \n # Main loop\n while True: \n html = \"\"\n for checker in self.checkers:\n if checker.just_changed_state():\n log.warn(\"Checker {} has changed state.\"\n .format(checker.name))\n html += \"<li>\" + checker.html() + \"</li>\\n\"\n \n if isinstance(checker, Process) and checker.state() == FAIL:\n log.warn(\"Process {} is not running.\"\n .format(checker.name))\n html += (\"<li>Attempting to restart \" + \n escape(checker.name) + \"...</li>\\n\")\n try:\n checker.restart()\n except MaxRetriesError, e:\n self.shutdown_reason = str(e)\n return\n time.sleep(5)\n html += (\"<li>State after restart: \" + \n checker.html() + \"</li>\\n\")\n\n if html:\n html = \"<h2>STATE CHANGED:</h2>\\n<ul>\\n\" + html + \"</ul>\\n\" \n html += self.html()\n html += run_commands(self.state_change_cmds)\n self.send_email_with_time(html=html,\n subject=\"Babysitter detected\"\n \" state change.\")\n\n if self._need_to_send_heartbeat():\n self._send_heartbeat()\n\n # Check if a new data subdir has been created\n if self.base_data_dir and self.sub_data_dir:\n if self._find_last_numeric_subdir() != self.sub_data_dir:\n self._send_heartbeat(\"<p>New subdir found so about to restart \"\n \"babysitter. Below are the last stats \"\n \"for the old data subdirectory.</p>\\n\")\n raise NewDataDirError()\n \n time.sleep(UPDATE_PERIOD)",
"def notify_subscribers(self, instance, domain, state=None):\n if not self.notifier:\n return\n\n if not state:\n state = instance.state\n\n tups = domain.get_subscribers()\n for subscriber_name, subscriber_op in tups:\n properties = {'hostname': instance.public_ip}\n content = {'node_id': instance.instance_id, 'state': state,\n 'domain_id': domain.domain_id,\n 'properties': properties}\n self.notifier.notify_by_name(subscriber_name, subscriber_op, content)",
"def start(self):\n self.server.request(\"post\", \"/jobs/%s/%s/state\" % (self.sessionid,\n self.name), body=\"1\")\n return True",
"def monitor(self, membership_set=frozenset(), callback=None):",
"def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()",
"def test_check_operating_conciliation(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating_conciliation()\n self.assertListEqual([call([2, 3])], mocked_check.call_args_list)",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def test_supvisors_state(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.fsm.serial.return_value = 'RUNNING'\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertEqual('RUNNING', rpc.get_supvisors_state())",
"def zk_state_listener(state):\n if state == KazooState.CONNECTED:\n persistent_create_server_node = retry_data_watch_coroutine(\n server_node, create_server_node)\n IOLoop.instance().add_callback(persistent_create_server_node)",
"def monitor_behavior_status(self):\n self._flexbe_status_subscriber = rospy.Subscriber('/flexbe/status', BEStatus, self.callback_flexbe_status)",
"def check_event_status(self):\n pass",
"def test_check_from_deployment(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_from_deployment()\n self.assertListEqual([call([1, 2, 3, 4, 5])], mocked_check.call_args_list)",
"def postcheck(self):\n logger.debug(\"Postcheck status is %s\" % self.status)\n return self.status",
"def test_starts_cluster_state_service(self):\n options = ControlOptions()\n options.parseOptions(\n [b\"--port\", b\"tcp:8001\", b\"--data-path\", self.mktemp()])\n reactor = MemoryCoreReactor()\n ControlScript().main(reactor, options)\n server = reactor.tcpServers[0]\n service = server[1].resource._v1_user.cluster_state_service\n self.assertEqual((service.__class__, service.running),\n (ClusterStateService, True))",
"def test_update_instances_schedule_state(self):\n pass",
"def run_app(self, set_desired_state=False):\n self.shadowClient.connect()\n start_payload = {\n 'state': {\n 'reported': self.light.current_settings(),\n 'desired': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(start_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n self.shadowClient.subscribe(update_docs_topic, 1,\n self._subscribe_update_callback)\n while True:\n pass",
"def test_check_state(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.fsm.state = 1\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test there is no exception when internal state is in list\n rpc._check_state([0, 1, 2])\n # test there is an exception when internal state is not in list\n with self.assertRaises(RPCError) as exc:\n rpc._check_state([0, 2])\n self.assertEqual(Faults.BAD_SUPVISORS_STATE, exc.exception.code)\n self.assertEqual(\"BAD_SUPVISORS_STATE: Supvisors (state=DEPLOYMENT) \"\n \"not in state ['INITIALIZATION', 'OPERATION'] to perform request\",\n exc.exception.text)",
"def StatusChanged(self, state, info):\n pass",
"def _publish_status(self, state='complete'):\n self.logger.debug('Recording catalog status: \"{}\"'.format(state))\n self.status_table.update_item(\n {'api_version': self.api_version},\n {\n 'state': state,\n 'timestamp': time.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'catalog_url': '{0}/v{1}/catalog.json'.format(self.api_url, self.api_version)\n }\n )",
"def service( self ):\n\n self.alive = time.time()",
"def status(server: str):\n\n if isUp(server):\n log.info(f'{server} is running.')\n else:\n log.info(f'{server} is not running.')",
"def test_check_operating(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating()\n self.assertListEqual([call([2])], mocked_check.call_args_list)",
"def is_on(self, in_call):\n # print(\"is_on here\", self.dname, self.values[self.dname + '.' + self.cnd['chans'][0]])\n self.error_code = self.cnd['err_code']\n if self.values[self.dname + '.' + self.cnd['chans'][0]]:\n self.fail_count['is_on'] = 0\n else:\n self.fail_count['is_on'] = 1\n self.log_manager('is_on')",
"def get_check_events(self):\n yield self.config[\"id\"], \"salt/master/{id}/start\".format(**self.config)"
] | [
"0.6125251",
"0.60838777",
"0.59746987",
"0.544692",
"0.54446894",
"0.5343415",
"0.5315016",
"0.5272492",
"0.5270251",
"0.52520496",
"0.52330935",
"0.5200511",
"0.519126",
"0.5157797",
"0.51535314",
"0.5149566",
"0.5147222",
"0.51382697",
"0.5132011",
"0.5124594",
"0.5110116",
"0.51069605",
"0.5085767",
"0.50829816",
"0.50822586",
"0.5058732",
"0.5050181",
"0.50473005",
"0.5046334",
"0.50411963"
] | 0.6736493 | 0 |
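Aside on the document above: the event is only created when the cached per-server state differs from the newly observed one. A stripped-down sketch of that change-detection idiom (names invented for illustration):

class StateTracker:
    """Remember the last state per server and report only transitions."""

    def __init__(self):
        self._last_state = {}

    def update(self, server, state):
        previous = self._last_state.get(server)
        self._last_state[server] = state
        if previous is not None and previous != state:
            return {"server": server, "from": previous, "to": state}
        return None

tracker = StateTracker()
tracker.update("db-1:27017", "SECONDARY")  # None: first observation, no event
tracker.update("db-1:27017", "PRIMARY")    # dict describing the SECONDARY -> PRIMARY transition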
Return a reasonable hostname for a replset membership event to mention. | def hostname_for_event(self, clean_server_name, agentConfig):
uri = urlsplit(clean_server_name)
if '@' in uri.netloc:
hostname = uri.netloc.split('@')[1].split(':')[0]
else:
hostname = uri.netloc.split(':')[0]
if hostname == 'localhost':
hostname = self.hostname
return hostname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hostname(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"hostname\", _args)\n return _ctx.execute_sync(str)",
"def get_host_name():\n return socket.gethostname()",
"def get_hostname(self):\n return self.name",
"def get_hostname(self):\n prompt = self.session.find_prompt()\n backup_logger.info(f\"Getting hostname configured for {self.current_device}:\")\n hostname_configured = re.search(r'.*?[:@]?([\\w\\-_]*)[#>]', prompt, re.MULTILINE).group(1)\n self.hostname = hostname_configured",
"def hostname():\n return socket.gethostname()",
"def get_hostname(self):\n raise NotImplementedError('get_hostname')",
"def fullHostname(self) -> str:\n\t\treturn self.hostname[1]",
"def Hostname(self):\n return self._get_attribute('hostname')",
"def _getHostname(fqdn):\n\treturn fqdn.split('.')[0]",
"def hostname():\n hostname = socket.gethostname()\n if '.' in hostname:\n hostname = hostname.split('.')[0]\n return hostname",
"def server_hostname(self):\n return dns.future_hostname(\n future_gethostbyaddr=self._server_host,\n fallback_ip=self.server_ip)",
"def get_hostname(self):\n # We set a default in install.py in case it isn't preseeded but when we\n # preseed, we are looking for None anyhow.\n return ''",
"def host_name(self):\n return self._host_name",
"def shortHostname(self) -> str:\n\t\treturn self.hostname[0]",
"def get_host(self) -> str:\n return self.socket.getsockname()[0]",
"def userhost(self):\n if self.user:\n return u\"%s@%s\" % (self.user, self.host)\n else:\n return self.host",
"def hostname(self) -> Optional[str]:\n return pulumi.get(self, \"hostname\")",
"def get_host_name(self):\n return self.get_command_output(\"hostname\").strip(\"\\n\")",
"def remote_hostname(self):\n return pn_connection_remote_hostname(self._impl)",
"def nickname(self):\n if (self.__email and self.__auth_domain and\n self.__email.endswith('@' + self.__auth_domain)):\n suffix_len = len(self.__auth_domain) + 1\n return self.__email[:-suffix_len]\n else:\n return self.__email",
"def hostname(name: str = \"\") -> str:\n ...",
"def get_hostname():\n global HOST\n if '.' in HOST:\n HOST = HOST.split('.')[0]\n return HOST",
"def hostname(self):\n return self._hostname",
"def get_hostname():\n\thostname = socket.gethostname()\n\n\treturn hostname",
"def get_hostname():\n return re.split(\"\\.\", env.host)[0]",
"def get_host_name(self):\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True\n\n try:\n return self.keyinfo['tracking_id'].attrs['hostname']\n except:\n return None\n\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True",
"def get_fqdn():\n return socket.getfqdn()",
"def address_string(self):\n\n if self.server.log_ip_activated:\n host = self.client_address[0]\n else:\n host = '127.0.0.1'\n if self.server.resolve_clients:\n return socket.getfqdn(host)\n else:\n return host",
"def node_name(self, name_node):\n return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']",
"def node_name(self, name_node):\n return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']"
] | [
"0.6662921",
"0.6660988",
"0.6594623",
"0.6580937",
"0.65463096",
"0.65075785",
"0.650404",
"0.64858186",
"0.64726937",
"0.64697844",
"0.64234984",
"0.64135784",
"0.6406523",
"0.63904727",
"0.6379771",
"0.63460785",
"0.6342044",
"0.6332148",
"0.63308704",
"0.63301355",
"0.63200957",
"0.6309902",
"0.630247",
"0.6292037",
"0.6278695",
"0.6272035",
"0.62660515",
"0.6244636",
"0.62431693",
"0.62431693"
] | 0.73296475 | 0 |
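Side note on the record above: the document parses the host out of a MongoDB connection string with urlsplit plus manual '@' and ':' splitting; urlsplit's own hostname attribute yields the same result while also stripping credentials and port. An illustrative helper (function name and fallback value are assumptions, not part of the original check):

from urllib.parse import urlsplit

def hostname_from_uri(uri, fallback="agent-hostname"):
    """Extract the bare host from a MongoDB-style URI, ignoring credentials and port."""
    host = urlsplit(uri).hostname or ""
    # Mirror the document's special case: replace localhost with a configured name.
    return fallback if host in ("localhost", "127.0.0.1") else host

hostname_from_uri("mongodb://user:secret@db-1.example.com:27017/admin")  # 'db-1.example.com'
hostname_from_uri("mongodb://localhost:27017")                           # 'agent-hostname'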
Create an event with a message describing the replication state of a mongo node | def create_event(self, last_state, state, clean_server_name, replset_name, agentConfig):
status = self.get_state_description(state)
short_status = self.get_state_name(state)
last_short_status = self.get_state_name(last_state)
hostname = self.hostname_for_event(clean_server_name, agentConfig)
msg_title = "%s is %s for %s" % (hostname, short_status, replset_name)
msg = "MongoDB %s (%s) just reported as %s (%s) for %s; it was %s before." % (hostname, clean_server_name, status, short_status, replset_name, last_short_status)
self.event({
'timestamp': int(time.time()),
'source_type_name': self.SOURCE_TYPE_NAME,
'msg_title': msg_title,
'msg_text': msg,
'host': hostname,
'tags': [
'action:mongo_replset_member_status_change',
'member_status:' + short_status,
'previous_member_status:' + last_short_status,
'replset:' + replset_name,
]
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def printstatechange(self, event):\n print('printstatechange; event: %s, %s->%s' % (event.event, event.src, event.dst))\n try:\n self.mqttclient.publish(\"{}/{}/events\".format(self.config['MQTT']['TOPICBASE'],\n self.config['CLIENTID']),\n event.event)\n self.mqttclient.publish(\"{}/{}/state\".format(self.config['MQTT']['TOPICBASE'],\n self.config['CLIENTID']),\n event.dst)\n except:\n pass",
"def replication_status(self):\n psql = postgresql_svc.PSQL()\n try:\n query_out = psql.execute(self.replication_status_query)\n except PopenError, e:\n if 'function pg_last_xact_replay_timestamp() does not exist' in str(e):\n raise BaseException('This version of PostgreSQL server does not support replication status')\n else:\n raise e\n query_result = self._parse_query_out(query_out)\n\n is_master = int(__postgresql__[OPT_REPLICATION_MASTER])\n\n if query_result['xlog_delay'] is None:\n if is_master:\n return {'master': {'status': 'up'}}\n return {'slave': {'status': 'down',\n 'error': query_result['error']}}\n return {'slave': {'status': 'up',\n 'xlog_delay': query_result['xlog_delay']}}",
"def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):\n last_state = self._last_state_by_server.get(clean_server_name, -1)\n self._last_state_by_server[clean_server_name] = state\n if last_state != state and last_state != -1:\n return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig)",
"def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary",
"def send_event(instance_config, status, output):\n # This function assumes the input is a string like \"mumble.main\"\n monitoring_overrides = instance_config.get_monitoring()\n if 'alert_after' not in monitoring_overrides:\n monitoring_overrides['alert_after'] = '2m'\n monitoring_overrides['check_every'] = '1m'\n monitoring_overrides['runbook'] = monitoring_tools.get_runbook(\n monitoring_overrides,\n instance_config.service, soa_dir=instance_config.soa_dir,\n )\n\n check_name = (\n 'check_marathon_services_replication.%s' %\n instance_config.job_id\n )\n monitoring_tools.send_event(\n service=instance_config.service,\n check_name=check_name,\n overrides=monitoring_overrides,\n status=status,\n output=output,\n soa_dir=instance_config.soa_dir,\n cluster=instance_config.cluster,\n )\n _log(\n service=instance_config.service,\n line='Replication: %s' % output,\n component='monitoring',\n level='debug',\n cluster=instance_config.cluster,\n instance=instance_config.instance,\n )",
"def or_conn_status_event(self, event):\r\n pass",
"def shiftr_event_listener(event):\n state = event.data.get(\"new_state\")\n topic = state.entity_id.replace(\".\", \"/\")\n\n try:\n _state = state_helper.state_as_number(state)\n except ValueError:\n _state = state.state\n\n try:\n mqttc.publish(topic, _state, qos=0, retain=False)\n\n if state.attributes:\n for attribute, data in state.attributes.items():\n mqttc.publish(\n f\"/{topic}/{attribute}\", str(data), qos=0, retain=False\n )\n except RuntimeError:\n pass",
"def MongoLog(self, request_number, process, log_message):\n try:\n print(\"Attempting to connect to MongoDB...\")\n client = MongoClient('localhost', 27017)\n db = client.database\n collection = db.logging_database\n\n status_log = {\"Request_No\": request_number, \"Brewing_Process\": process, \"Log_Message\": log_message,\n \"Time\": datetime.datetime.now()}\n\n try:\n collection.insert_one(status_log)\n except TypeError: # Error Handling for MongoDB versions that do not implement insert_one() method\n collection.insert(status_log)\n\n print(status_log)\n except Exception as e:\n print(\"MongoDB connection Error:\" + str(e))",
"def mmo_configsrv_replication_status(self, mmo_connection):\n replication_state = []\n if self.mmo_is_mongos(mmo_connection):\n configsrv = self.mmo_config_servers(mmo_connection)[0]\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n c = self.mmo_connect_mongod(hostname=configsrv[\"hostname\"],\n \t port=configsrv[\"port\"],\n \t username=auth_dic[\"username\"],\n \t password=auth_dic[\"password\"],\n \tauthentication_db=auth_dic[\"authentication_database\"]\n \t)\n if self.mmo_is_cfg_rs(c):\n command_output = c[\"admin\"].command(\"replSetGetStatus\")\n shard = command_output[\"set\"]\n replication_state.append({\"hostname\": configsrv[\"hostname\"], \"port\": configsrv[\"port\"], \"shard\": shard, \"command_output\": command_output})\n else:\n raise Exception(\"Not a mongos process\")\n return replication_state",
"def mmo_replication_status(self, mmo_connection):\n replication_state = []\n if self.mmo_is_mongos(mmo_connection):\n #o = self.mmo_execute_on_primaries(mmo_connection, \"replSetGetStatus\")\n o = self.mmo_execute_on_secondary_or_primary(mmo_connection, \"replSetGetStatus\", \"all\", True)\n #print o2;\n return o\n else:\n raise Exception(\"Not a mongos process\")",
"def initState(currentState):\n\n global client , db \n\n print(\"<<INIT>>\")#DEBUG\n print(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = False\n client = None\n while not connected:\n client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = not client == None\n db = client.texet\n return 'watch'",
"def zk_state_listener(state):\n if state == KazooState.CONNECTED:\n persistent_create_server_node = retry_data_watch_coroutine(\n server_node, create_server_node)\n IOLoop.instance().add_callback(persistent_create_server_node)",
"def __init__(self):\n command_ports = list()\n replicaed_times = 0\n visited_times = 1\n is_replicated = False",
"def replication(self, replication):\n self._replication = replication",
"def get_mongo_config ( subnets ) :\n replications = \"\"\n primary_ip = get_primary_node(subnets)\n for subnet in subnets :\n if primary_ip != subnet.cidr_block :\n replication = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n replication = replication.replace(\".\", \"-\")\n replications = replications + \"\\nrs.add(\\\"ip-\"+replication+\":27017\\\");\"\n \n \n return \"\"\"#!/bin/bash -ex\n exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n:>/etc/replication.js\necho 'rs.initiate();\"\"\"+replications+\"\"\"\n\n'>>/etc/replication.js\n\"\"\"",
"def test_redis_increase_replica_count_usual_case():",
"def publishState(self, msg, suffix = \"\"):\n # generate state topic based off config\n topic = self._formatTopic(\"state_prefix\", suffix)\n self.publish(topic, msg)",
"def mmo_replica_state(self, mmo_connection):\n\n # https://docs.mongodb.org/manual/reference/replica-states/\n replica_states = [\n { \"id\": 0, \"name\": \"STARTUP\", \"description\": \"Not yet an active member of any set. All members start up in this state. The mongod parses the replica set configuration document while in STARTUP.\" },\n { \"id\": 1, \"name\": \"PRIMARY\", \"description\": \"The member in state primary is the only member that can accept write operations.\" },\n { \"id\": 2, \"name\": \"SECONDARY\", \"description\": \"A member in state secondary is replicating the data store. Data is available for reads, although they may be stale.\" },\n { \"id\": 3, \"name\": \"RECOVERING\", \"description\": \"Can vote. Members either perform startup self-checks, or transition from completing a rollback or resync.\" },\n { \"id\": 5, \"name\": \"STARTUP2\", \"description\": \"The member has joined the set and is running an initial sync.\" },\n { \"id\": 6, \"name\": \"UNKNOWN\", \"description\": \"The member's state, as seen from another member of the set, is not yet known.\" },\n { \"id\": 7, \"name\": \"ARBITER\", \"description\": \"Arbiters do not replicate data and exist solely to participate in elections.\" },\n { \"id\": 8, \"name\": \"DOWN\", \"description\": \"The member, as seen from another member of the set, is unreachable.\" },\n { \"id\": 9, \"name\": \"ROLLBACK\", \"description\": \"This member is actively performing a rollback. Data is not available for reads.\" },\n { \"id\": 10, \"name\": \"REMOVED\", \"description\": \"This member was once in a replica set but was subsequently removed.\" }\n ]\n\n if self.mmo_is_mongod(mmo_connection):\n return replica_states[mmo_connection[\"admin\"].command(\"replSetGetStatus\")[\"myState\"]]\n else:\n raise Exception(\"Not a mongod process\")",
"def post_database_node_create(self, resource_dict):\n pass",
"def on_created(self, event):\n\n # the absolute path of the event file/folder\n abs_path = event.src_path\n # replace the root path with a '.' to build a relative path to be sent to server\n relative_event_path = abs_path.replace(self.root_path, \".\")\n\n # retrieve event type and the flag for directory/folder\n event_type = event.event_type\n is_directory = event.is_directory\n\n # only propagate changes if there is a connection with the server\n if self.protocol.connected:\n self.protocol.send_event(event_type, is_directory, relative_event_path)\n else:\n logging.warning(\"Connection with server has not been established, 'create' changes will not be propagated.\")",
"def test_mongodb_oplog_origin(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n time_now = int(time.time())\n mongodb_oplog = pipeline_builder.add_stage('MongoDB Oplog')\n database_name = get_random_string(ascii_letters, 10)\n # Specify that MongoDB Oplog needs to read changes occuring after time_now.\n mongodb_oplog.set_attributes(collection='oplog.rs', initial_timestamp_in_secs=time_now, initial_ordinal=1)\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_oplog >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Insert documents in MongoDB using PyMongo.\n # First a database is created. Then a collection is created inside that database.\n # Then documents are inserted in that collection.\n mongodb_database = mongodb.engine[database_name]\n mongodb_collection = mongodb_database[get_random_string(ascii_letters, 10)]\n input_rec_count = 6\n inserted_list = mongodb_collection.insert_many([{'x': i} for i in range(input_rec_count)])\n assert len(inserted_list.inserted_ids) == input_rec_count\n\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(snapshot[mongodb_oplog].output) == input_rec_count\n for record in list(enumerate(snapshot[mongodb_oplog].output)):\n assert record[1].value['value']['o']['value']['x']['value'] == str(record[0])\n # Verify the operation type is 'i' which is for 'insert' since we inserted the records earlier.\n assert record[1].value['value']['op']['value'] == 'i'\n assert record[1].value['value']['ts']['value']['timestamp']['value'] > time_now\n\n finally:\n logger.info('Dropping %s database...', database_name)\n mongodb.engine.drop_database(database_name)",
"def to_replication_dict(self):\n return {}",
"def send(self, event):\r\n try:\r\n self.collection.insert(event, manipulate=False)\r\n except PyMongoError:\r\n # The event will be lost in case of a connection error.\r\n # pymongo will re-connect/re-authenticate automatically\r\n # during the next event.\r\n msg = 'Error inserting to MongoDB event tracker backend'\r\n log.exception(msg)",
"def test_create_event(self):\n event_type = 'SERVICE NOTIFICATION'\n fields = EVENT_FIELDS.get(event_type, None)\n parts = [\n 'nagiosadmin',\n 'nagios4',\n 'Root Partition',\n 'CRITICAL',\n 'notify-service-by-email',\n 'DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]\n event = create_event(\n timestamp=1603813628, event_type=event_type, hostname='docker-desktop', fields=fields._make(parts)\n )\n\n assert event['timestamp'] == 1603813628\n assert event['event_type'] == 'SERVICE NOTIFICATION'\n assert event[\"msg_title\"] == 'Root Partition'\n assert event[\"source_type_name\"] == 'SERVICE NOTIFICATION'\n assert event[\"msg_text\"] == 'CRITICAL'\n assert event['tags'] == [\n 'contact:nagiosadmin',\n 'host:nagios4',\n 'check_name:Root Partition',\n 'event_state:CRITICAL',\n 'notification_type:notify-service-by-email',\n 'payload:DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]",
"def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )",
"def test_create_log(self):\n message = \"Message is {0}\".format(random.random())\n resp = gracedb.writeLog(eventId, message)\n self.assertEqual(resp.status, 201)\n new_log_uri = resp.getheader('Location')\n new_log = resp.json()\n self.assertEqual(new_log_uri, new_log['self'])\n check_new_log = gracedb.get(new_log_uri).json()\n self.assertEqual(check_new_log['comment'], message)",
"def __init__(self, **kwargs):\n super(MongodbHistorian, self).__init__(**kwargs)\n\n self._data_collection = 'data'\n self._meta_collection = 'meta'\n self._topic_collection = 'topics'\n self._initial_params = connection['params']\n self._client = None\n\n self._topic_id_map = {}\n self._topic_name_map = {}\n self._topic_meta = {}",
"def action_summary(self, request, action_info):\n replicate_info = {\n 'volume_id': action_info['id'],\n 'replicate_status': action_info['replicate_status']\n }\n\n if 'snapshot_id' in action_info.keys():\n replicate_info['snapshot_id'] = action_info['snapshot_id']\n info = {\n 'replicate': replicate_info\n }\n return info",
"def replication(self):\n return self._replication",
"def onReadNodeCreated():\n ..."
] | [
"0.53126144",
"0.5274733",
"0.5268226",
"0.52290887",
"0.52170926",
"0.5165227",
"0.51233536",
"0.5103651",
"0.50933707",
"0.50899076",
"0.50673026",
"0.4976149",
"0.49485195",
"0.48921156",
"0.48767117",
"0.48721194",
"0.48523217",
"0.4849296",
"0.48433527",
"0.47988123",
"0.47947156",
"0.47879604",
"0.47682634",
"0.47449437",
"0.46995682",
"0.4684156",
"0.46706122",
"0.46694544",
"0.46620485",
"0.46353596"
] | 0.61032283 | 0 |
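For orientation: MongoDB's replSetGetStatus reports member states as numeric codes (one of the snippets above lists the mapping: 1 PRIMARY, 2 SECONDARY, 8 DOWN, and so on). A hedged sketch of turning a code transition into an event payload shaped like the one the document builds:

STATE_NAMES = {0: "STARTUP", 1: "PRIMARY", 2: "SECONDARY", 3: "RECOVERING",
               5: "STARTUP2", 6: "UNKNOWN", 7: "ARBITER", 8: "DOWN",
               9: "ROLLBACK", 10: "REMOVED"}

def state_change_event(host, replset, old_code, new_code):
    """Build a minimal event dict for a replica-set member state transition."""
    old_name = STATE_NAMES.get(old_code, "UNKNOWN")
    new_name = STATE_NAMES.get(new_code, "UNKNOWN")
    return {
        "msg_title": "%s is %s for %s" % (host, new_name, replset),
        "msg_text": "%s moved from %s to %s" % (host, old_name, new_name),
        "tags": ["replset:" + replset, "member_status:" + new_name],
    }

state_change_event("db-1", "rs0", 2, 1)  # SECONDARY -> PRIMARY event payload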
Build the metric list to collect based on the instance preferences. | def _build_metric_list_to_collect(self, additional_metrics):
metrics_to_collect = {}
# Default metrics
for default_metrics in self.DEFAULT_METRICS.itervalues():
metrics_to_collect.update(default_metrics)
# Additional metrics
for option in additional_metrics:
additional_metrics = self.AVAILABLE_METRICS.get(option)
if not additional_metrics:
if option in self.DEFAULT_METRICS:
self.log.warning(
u"`%s` option is deprecated."
u" The corresponding metrics are collected by default.", option
)
else:
self.log.warning(
u"Failed to extend the list of metrics to collect:"
u" unrecognized `%s` option", option
)
continue
self.log.debug(
u"Adding `%s` corresponding metrics to the list"
u" of metrics to collect.", option
)
metrics_to_collect.update(additional_metrics)
return metrics_to_collect | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]",
"def __get_metrics_list(self):\n metrics = metrics_calculator.MetricsCalculator(self.processor)\n metric_list = []\n # Populate the list\n for key in metrics.get_raw_metrics().keys():\n name = metrics.get_raw_metrics()[key][\"NAME\"]\n formula = metrics.get_raw_metrics()[key][\"FORMULA\"]\n description = metrics.get_raw_metrics()[key][\"DESCRIPTION\"]\n metric = Metric(name, formula, description)\n metric_list.append(metric)\n return metric_list",
"def get_metric_list(self) -> List[str]:\n ...",
"def _build_eval_metrics(self, results, features, labels):\n metrics = {}\n for metric in self.metrics:\n metrics[metric.IDENTIFIER] = getters.get_metric(\n metric.IDENTIFIER, results, labels, **metric.to_dict())\n return metrics",
"def configure_metrics(self):\n allowed = list(METRIC_LOOKUP.keys()) + [None]\n metrics = nn.ModuleDict()\n for k, m in self.branch_metrics.items():\n for metric_name in m:\n if metric_name not in allowed:\n raise ValueError(\n f\"Illegal metric given. Got: {metric_name}. Allowed: {allowed}.\"\n )\n\n if metric_name is not None:\n metric = METRIC_LOOKUP[metric_name]()\n else:\n metric = None\n\n metrics[f\"{k}_{metric_name}\"] = metric\n\n return metrics",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def list_metrics(self):\n pass",
"def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results",
"def supported_metrics(cls) -> List[str]:\n ...",
"def _init_metrics(self, mp_type, namespace, unreachable=False):\n kind = ResourceKind.METRIC if namespace else ResourceKind.GLOBALMETRIC\n metrics_info = (\n self._METRICS_PROVIDER_INFO.get(mp_type, {})\n .get(namespace, {})\n .get(\"metrics\", [])\n )\n\n # We will collect metrics from all metrics providers of the correct type\n # and in the correct namespace in the metrics list.\n metrics = []\n\n # all metrics providers of the correct type in the correct namespace\n if unreachable:\n mps_list = self.unreachable_metrics_providers\n else:\n mps_list = self.metrics_providers\n mps = mps_list.get(mp_type, {}).get(namespace, {})\n\n # Mapping from metrics provider resource definition to its metrics.\n # Initially empty.\n metrics_for_mp = dict.fromkeys([mp for mp in mps], [])\n for metric_info in metrics_info:\n # check if metric has the correct reachability. Skip if not.\n mp_name = metric_info[\"mp_name\"]\n reachability_matches = True if mp_name in [mp.name for mp in mps] else False\n if reachability_matches:\n # Create and collect the metric\n metric_name = metric_info[\"name\"]\n mp_metric_name = metric_info.get(\"mp_metric_name\", None)\n metric = BaseMetricDefinition(\n metric_name,\n kind,\n namespace,\n metric_info[\"allowed_values\"],\n metric_info[\"min\"],\n metric_info[\"max\"],\n mp_name,\n mp_metric_name=mp_metric_name,\n )\n metrics.append(metric)\n\n # remember its metrics provider for performance reasons\n mps_w_correct_name = [mp for mp in mps if mp.name == mp_name]\n if len(mps_w_correct_name) != 1:\n msg = (\n f\"Expected 1 metrics provider with the name {mp_name}. \"\n f\"Found {len(mps_w_correct_name)}.\"\n )\n raise ValueError(msg)\n mp = mps_w_correct_name[0]\n self._metric_to_metrics_provider[metric] = mp\n\n # save this metric to the metrics provider so it can be added later.\n metrics_for_mp[mp].append(metric)\n\n # The metrics providers need their metrics, so we add them here - also for\n # non-static metrics providers, since information about the metrics they\n # provide is needed in the tests.\n sanity_check_number_of_metrics = 0\n for mp, mp_metrics in metrics_for_mp.items():\n sanity_check_number_of_metrics += len(mp_metrics)\n self._add_metrics_to_metrics_provider(mp, mp_metrics)\n if len(metrics) != sanity_check_number_of_metrics:\n msg = (\n f\"Expected {len(metrics)} and {sanity_check_number_of_metrics} \"\n f\"to be equal.\"\n )\n raise ValueError(msg)\n\n return metrics",
"def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []",
"def collect_metrics(params, start, uuid=str(uuid4())):\n list_of_metrics = ['AWS/EC2/CPUUtilization',\n 'CGCloud/MemUsage',\n 'CGCloud/DiskUsage_mnt_ephemeral',\n 'CGCloud/DiskUsage_root',\n 'AWS/EC2/NetworkIn',\n 'AWS/EC2/NetworkOut',\n 'AWS/EC2/DiskWriteOps',\n 'AWS/EC2/DiskReadOps']\n\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n\n while ids:\n # metrics = {metric: [] for metric in list_of_metrics}\n for instance_id in ids:\n for metric in list_of_metrics:\n averages = []\n try:\n s = start\n while s < stop:\n e = s + (4 * 24 * 3600)\n aws_start = datetime.utcfromtimestamp(s)\n aws_stop = datetime.utcfromtimestamp(e)\n met_object = get_metric(metric, instance_id, aws_start, aws_stop)\n averages.extend([x['Average'] for x in get_datapoints(met_object)])\n s = e\n if averages:\n metrics[metric].append(averages)\n logging.info('# of Datapoints for metric {} is {}'.format(metric, len(metrics[metric][0])))\n except RuntimeError:\n if instance_id in instance_ids:\n instance_ids.remove(instance_id)\n # Remove metrics if no datapoints were collected\n metrics = dict((k, v) for k, v in metrics.iteritems() if v)\n # Save CSV of data\n mkdir_p('{}_{}'.format(uuid, str(datetime.utcnow()).split()[0]))\n for metric in metrics:\n with open('{}_{}/{}.csv'.format(uuid, str(datetime.utcnow()).split()[0], metric.rsplit('/', 1)[1]), 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(metrics[metric])",
"def init_metrics():\n metrics = defaultdict(list)\n metrics['best_acc'] = 0.0\n metrics['best_loss'] = float('inf')\n metrics['best_epoch'] = 0\n return metrics",
"def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options",
"def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()",
"def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def build_metrics_counter_data(count_metrics):\n return [{'name': name, 'delta': delta} for name, delta in iteritems(count_metrics)]",
"def init_metric_definitions():\n metric_definitions = []\n\n # add info to list in memory, one by one, following signature values\n metric_def_ID = 1\n metric_def_name = \"Recovery Time\"\n metric_def_info = \"Measures time taken by ONAP to restore a VNF\"\n metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n metric_def_ID = 2\n metric_def_name = \"Uptime Percentage\"\n metric_def_info = \"Measures ratio of uptime to reference time, not counting planned downtime\"\n metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n\n # write list to binary file\n write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)\n\n return metric_definitions",
"def define_metrics(config):\n metrics = []\n if config.get(\"data.output.label.choice\") == \"segmentation\":\n metrics = [\n ext_sm.metrics.IOUScore(),\n ext_sm.metrics.FScore(beta=0.5),\n ext_sm.metrics.FScore(beta=2),\n ]\n metrics = []\n elif config.get(\"data.output.label.choice\") == \"inversion\":\n metrics = [\n rmae\n ]\n return metrics",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def generate_metrics(self, cfg):\n # filter available metrics based on cfg\"[filter\"]\n for metric in self.list_metrics(cfg[\"filter\"]):\n LOGGER.info(\"Generating metrics for: %s\", metric)\n dg = usmqe_cmdg.data_generators.DataGenerators(metric, cfg[\"since\"], cfg[\"until\"])\n # iterate through the time period\n for timestamp in range(cfg[\"since\"], cfg[\"until\"], cfg[\"interval\"]):\n dg.prev_value = 0\n # combine all listed generators to one value\n for generator in cfg[\"generators\"]:\n value = dg.data_generator( \\\n generator['name'], \\\n timestamp, \\\n generator)\n # push generated value into carbon\n self.__push_metric(metric, value, timestamp)",
"def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def metrics(self) -> list:\n my_metrics = [\n FramesMetric(\"frames\"),\n FPSMetric(\"fps\"),\n EpisodeRewardMetric('PMM:episode_rewards'),\n EpisodeRewardMetricQuantile('P09:episode_rewards', quantile=0.9),\n EpisodeRewardMetricQuantile('P01:episode_rewards', quantile=0.1),\n EpisodeLengthMetric(\"episode_length\")\n ]\n\n return my_metrics + self.algo.metrics() + self.env_roller.metrics()",
"def _create_load_stats(self, context, instance=None):\n values = {}\n\n if instance:\n instances = [instance]\n else:\n self.stats.clear() # re-generating all, so clear old stats\n\n # grab all instances that are not yet DELETED\n filters = {'host': self.host, 'deleted': False}\n instances = db.instance_get_all_by_filters(context,\n {'host': self.host})\n\n for instance in instances:\n self.stats.add_stats_for_instance(instance)\n\n values['current_workload'] = self.stats.calculate_workload()\n values['running_vms'] = self.stats.num_instances\n values['vcpus_used'] = self.stats.num_vcpus_used\n values['stats'] = self.stats\n return values",
"def metrics_group():",
"def __init__(self, replication_num, metric_name_array, metric_collection_types = None, detailed_metric_assembly = False):\n self.replication_num = replication_num\n self.metrics = metric_name_array\n self.metric_collection_types = metric_collection_types # can be a string array elements of which can be one of ('STRING_LIST', 'COUNT_MAX', 'MEAN_STD','MIN','MAX', 'MIN_MAX') \n self.detailed_metric_assembly = detailed_metric_assembly\n self.replication_counter = 0\n self.metric_final_results = {}\n # initialize results array for each metric\n for metric in metric_name_array:\n self.metric_final_results[metric] = []",
"def init_metric_dict(self, metrics=[\"\"], phases=[\"train\", \"val\"]):\n metric_dict = {phase: {metric: [] for metric in metrics} for phase in phases}\n return metric_dict",
"def __init__(self, included_metrics: List[str]):\n self.included_metrics = included_metrics\n self.metrics = self._initialize_metrics()"
] | [
"0.65387714",
"0.6310117",
"0.62119526",
"0.5793617",
"0.57547987",
"0.57211876",
"0.56583136",
"0.5615191",
"0.55901927",
"0.5546556",
"0.5533608",
"0.5482031",
"0.5463035",
"0.5445631",
"0.5387678",
"0.53774405",
"0.5351017",
"0.53399146",
"0.5310214",
"0.52838415",
"0.5282751",
"0.5282483",
"0.5277659",
"0.5242874",
"0.5240562",
"0.52397764",
"0.5233354",
"0.5232137",
"0.52268237",
"0.52249837"
] | 0.6790621 | 0 |
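Reading aid for the record above: the method folds every default metric group into one dict, then adds each optional group the instance asked for, warning on names it does not recognize. A compact illustrative version of that merge (stand-alone, with print standing in for self.log.warning; the group names are example data):

def build_metric_list(default_groups, available_groups, requested):
    """Merge default metric groups with explicitly requested optional ones."""
    metrics = {}
    for group in default_groups.values():
        metrics.update(group)
    for name in requested:
        group = available_groups.get(name)
        if group is None:
            print("unrecognized metric group: %s" % name)
            continue
        metrics.update(group)
    return metrics

defaults = {"base": {"connections.current": "gauge"}}
optional = {"tcmalloc": {"tcmalloc.generic.heap_size": "gauge"}}
build_metric_list(defaults, optional, ["tcmalloc", "bogus"])  # warns on 'bogus', keeps both groups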
Return the submit method and the metric name to use. | def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""):
submit_method = metrics_to_collect[original_metric_name][0] \
if isinstance(metrics_to_collect[original_metric_name], tuple) \
else metrics_to_collect[original_metric_name]
metric_name = metrics_to_collect[original_metric_name][1] \
if isinstance(metrics_to_collect[original_metric_name], tuple) \
else original_metric_name
return submit_method, self._normalize(metric_name, submit_method, prefix) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def method(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"method\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def _get_eval_metric(self):\n self._validate_eval_metric()\n if isinstance(self.eval_metric, types.FunctionType):\n UserDefinedEvalMetric().set_metric(self.eval_metric)\n return \"user_defined_metric\"\n\n if self.eval_metric == \"auto\":\n if self._get_ml_task() == BINARY_CLASSIFICATION:\n return \"logloss\"\n elif self._get_ml_task() == MULTICLASS_CLASSIFICATION:\n return \"logloss\"\n elif self._get_ml_task() == REGRESSION:\n return \"rmse\"\n else:\n return deepcopy(self.eval_metric)",
"def get_method_name(self):\n\t\treturn self.method_name",
"def get_submit_policy(self) -> SubmitPolicyStr:\n return SUBMIT_POLICY.inverse[self.submitPolicy()]",
"def getSubmitTime():",
"def build_metric_submit_file(self, metric):\n\n log_dir = self.rsv.get_metric_log_dir()\n environment = \"PATH=/usr/bin:/bin\\n\"\n condor_id = metric.get_unique_name()\n arguments = \"-v 3 -r -u %s %s %s\" % (metric.host, metric.name, metric.get_settings())\n timestamp = strftime(\"%Y-%m-%d %H:%M:%S %Z\")\n\n probe_interval = metric.get_probe_interval()\n if not probe_interval:\n cron = metric.get_cron_entry()\n if not cron:\n self.rsv.log(\"ERROR\", \"Invalid cron time for metric %s on host %s. Will not start.\" %\n (metric.name, metric.host))\n return \"\"\n\n submit = \"\"\n submit += \"######################################################################\\n\"\n submit += \"# Temporary submit file generated by rsv-control\\n\"\n submit += \"# Generated at %s \" % timestamp\n submit += \"######################################################################\\n\"\n submit += \"Environment = %s\\n\" % environment\n\n if probe_interval:\n submit += \"DeferralPrepTime = ifThenElse(%d - ScheddInterval + 31 > 0, %d - ScheddInterval + 31, 180) \\n\" % (probe_interval, probe_interval)\n submit += \"DeferralTime = (CurrentTime + %d + random(30))\\n\" % probe_interval\n submit += \"DeferralWindow = 99999999\\n\"\n submit += \"+OSGRSVProbeInterval = %d\\n\" % probe_interval\n else:\n submit += \"CronPrepTime = 180\\n\"\n submit += \"CronWindow = 99999999\\n\"\n submit += \"CronMonth = %s\\n\" % cron[\"Month\"]\n submit += \"CronDayOfWeek = %s\\n\" % cron[\"DayOfWeek\"]\n submit += \"CronDayOfMonth = %s\\n\" % cron[\"DayOfMonth\"]\n submit += \"CronHour = %s\\n\" % cron[\"Hour\"]\n submit += \"CronMinute = %s\\n\" % cron[\"Minute\"]\n submit += \"Executable = %s\\n\" % self.rsv.get_wrapper()\n submit += \"Error = %s/%s.err\\n\" % (log_dir, condor_id)\n submit += \"Output = %s/%s.out\\n\" % (log_dir, condor_id)\n submit += \"Log = %s/%s.log\\n\" % (log_dir, condor_id)\n submit += \"Arguments = %s\\n\" % arguments\n submit += \"Universe = local\\n\"\n submit += \"Notification = never\\n\"\n submit += \"OnExitRemove = false\\n\"\n submit += \"PeriodicRelease = HoldReasonCode =!= 1\\n\"\n submit += \"+OSGRSV = \\\"metrics\\\"\\n\"\n submit += \"+OSGRSVHost = \\\"%s\\\"\\n\" % metric.host\n submit += \"+OSGRSVMetric = \\\"%s\\\"\\n\" % metric.name\n submit += \"+OSGRSVUniqueName = \\\"%s\\\"\\n\" % condor_id\n submit += \"Queue\\n\"\n \n return submit",
"def metric_name(self) -> str:\n return self._metric_name",
"def _get_eval_metric(self):\n raise NotImplementedError",
"def __getattr__(self, name):\n try:\n return super(\n SubmittedJob,\n self).__getattribute__('submission')[str(name)]\n\n except KeyError:\n raise AttributeError(\"'SubmittedJob' object has no attribute or \"\n \"parameter: {atr}\".format(atr=name))",
"def get_evaluation_metric(self):\n\t\treturn self.metric",
"def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")",
"def metric(self) -> str:\r\n return self._metric",
"def emit_submit_event(self, *args, **kwargs):\n return self._onsubmit(*args, **kwargs)",
"def method(self):\n return self._method",
"def method(self):\n return self._method",
"def method_name(self):\n pass",
"def submit(self):\n raise NotImplementedError()",
"def metric_name(self) -> str:\n return self._values.get('metric_name')",
"def metric_name(self) -> str:\n return self._values.get('metric_name')",
"def submit(self, **kwargs):\n pwd = curdir\n wd = dirname(self.logFile)\n chdir(wd)\n d = OrderedDict()\n #d['universe'] = 'vanilla'\n #d['executable'] = self.command\n\td['job-name'] = self.name\n\td['nodes'] = 1\n\td['partition'] = defaults.get('queue')\n\td['time'] = defaults.get(\"cputime\")\n\td['mem'] = defaults.get(\"memory\")\n d['output'] = op_join(wd,\"output.log\")\n d['error'] = op_join(wd,\"output.err\")\n csi_file = open(\"submit.sh\", \"w\")\n\tcsi_file.write(\"#!/bin/bash\\n\")\n data = [\"#SBATCH --%s=%s\\n\" % (k, v) for k, v in d.iteritems()]\n csi_file.write(\"\".join(data))\n\tcsi_file.write(\"export DAMPE_WORKFLOW_SERVER_URL=%s\\n\"%DAMPE_WORKFLOW_URL)\n csi_file.write(\"bash script\\n\")\n csi_file.close()\n output = self.__run__(\"sbatch submit.sh\")\n chdir(pwd)\n return self.__regexId__(output)"
] | [
"0.60242224",
"0.5900693",
"0.5900693",
"0.5900693",
"0.5900693",
"0.5900693",
"0.5900693",
"0.5900693",
"0.5900693",
"0.5900693",
"0.5779224",
"0.5709764",
"0.56761044",
"0.5673215",
"0.56213224",
"0.5533348",
"0.550876",
"0.54920304",
"0.5490709",
"0.5478249",
"0.5478249",
"0.5464403",
"0.54516685",
"0.54350233",
"0.54350233",
"0.54065377",
"0.53699434",
"0.5365224",
"0.5365224",
"0.53328604"
] | 0.6122732 | 0 |
Replace case-sensitive metric name characters, normalize the metric name, prefix and suffix according to its type. | def _normalize(self, metric_name, submit_method, prefix):
metric_prefix = "mongodb." if not prefix else "mongodb.{0}.".format(prefix)
metric_suffix = "ps" if submit_method == RATE else ""
# Replace case-sensitive metric name characters
for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():
metric_name = re.compile(pattern).sub(repl, metric_name)
# Normalize, and wrap
return u"{metric_prefix}{normalized_metric_name}{metric_suffix}".format(
normalized_metric_name=self.normalize(metric_name.lower()),
metric_prefix=metric_prefix, metric_suffix=metric_suffix
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sanitize_metric_name(name: str) -> str:\n name = name.replace(\":\", \"-\")\n return name",
"def normalize_label(label: str) -> str:\n label = re.sub(r\"['\\\"`]+\", \"\", label) # remove apostrophes\n label = re.sub(r\"[-/\\\\ \\t_]+\", \" \", label) # normalize separators\n lower_count = sum(map(str.islower, label))\n upper_count = sum(map(str.isupper, label))\n if \" \" not in label and lower_count > 0 and upper_count > 0:\n # camel case to \"normal case\"\n label = re.sub(r\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", label)\n label = re.sub(r\"(^[Tt]he |^[Aa] )\", \"\", label) # drop determiner\n return label.lower()",
"def normalize_key(metric_key):\n metric_key = SPACES.sub(\"_\", metric_key)\n metric_key = SLASHES.sub(\"-\", metric_key)\n metric_key = NON_ALNUM.sub(\"\", metric_key)\n return metric_key",
"def convert_metric_name(self, key, metric):\n return 'Custom/' + key.replace('.', '/') + '/' + metric",
"def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n return name",
"def normalize_label(label):\n label = normalize('NFKD', label)\n label = re.sub('/[^a-z0-9-_:.]/g', '-', label)\n label = label.lower()\n return label",
"def _camel_killer(attr):\n try:\n attr = str(attr)\n except UnicodeEncodeError:\n attr = attr.encode('utf-8', 'ignore')\n s1 = _first_cap_re.sub('\\\\1_\\\\2', attr)\n s2 = _all_cap_re.sub('\\\\1_\\\\2', s1)\n return re.sub('_+', '_', s2.casefold() if hasattr(s2, 'casefold') else s2.lower())",
"def normalize_name(field_name):\n fixes = (\n (r\"/\", \"_per_\"),\n (r\"%\", \"_pct_\"),\n (r\"\\W\", \"_\"),\n (r\"^_+\", \"\"), # remove '_' if field_name begins with '_'\n (r\"_+$\", \"\"),\n (r\"__+\", \"_\"),\n )\n result = field_name.strip().lower() or None\n # result = field_name.strip().upper() or None\n if result:\n if result.endswith(\"?\"):\n if not re.match(r\"is[_\\W]\", result):\n result = \"is_\" + result\n for pattern, replacement in fixes:\n result = re.sub(pattern, replacement, result)\n return result",
"def normalize_name(self, value):\n import unicodedata\n import re\n\n self.log('Converting string %s' % value)\n \n # Double try in name conversion\n try:\n value = unicodedata.normalize('NFKD', u'%s' % value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n except:\n self.log('Conversion error: \\n%s' % traceback.format_exc())\n\n value = unicode(value, 'ascii', errors='ignore')\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n\n\n self.log('Conversion finished to %s' % value)\n\n return value",
"def symbolize_sensorname(name):\n return name.lower().replace(\" \", \"_\")",
"def __normalize_name(self):\n self.normalized_name = normalizeSimplified(self.name)",
"def standardize_name_for_look_up(name: Any) -> str:\n if not isinstance(name, str):\n return name\n\n name = name.lower().strip()\n name = \" \".join(name.split(\"_\"))\n name = name.translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ) # remove punctuation\n name = \" \".join(\n [part for part in name.split(\" \") if part]\n ) # ensure there is only a single space between words\n return name",
"def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())",
"def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())",
"def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())",
"def normalize(self, name):\n\n\t\t# label emojis, specifically :) and :( as @artist, then apply \n\t\t# base normalization\n\n\t\tname = super().normalize(re.sub(r'\\s*:[\\(\\)]\\s*',' @artist ', name))\n\t\t\n\t\t# if now name is ? it may be an artist, so label as @artist\n\t\tif name.strip() in {'?','...'}:\n\t\t\treturn '@artist'\n\t\t\n\t\t# fix ! - remove if at the end of a word, otherwise replace with i\n\t\tname = re.sub(r'\\!+$','', re.sub(r'\\!+(?=[^\\b\\w])','', name)).replace('!','i')\n\t\t\n\t\t# remove the and a\n\t\tname = re.sub(r'^(the|a)\\s+','', name)\n\t\t \n\t\t# remove multiple white spaces\n\t\tname = re.sub(r'\\s{2,}', ' ', name).strip()\n\t\t\n\t\treturn name",
"def _normalize_show_name(name):\n\tname = name.casefold()\n\tname = re.sub(\"[^a-z0-9]\", \" \", name)\n\tname = re.sub(\"_\", \" \", name)\n\tname = re.sub(\"season \\d( part \\d)?\", \" \", name)\n\tname = re.sub(\"\\s+\", \" \", name)\n\treturn name",
"def normalize_var_name(var_name):\n var_case = detect_case(var_name)\n if var_case == SNAKE_CASE:\n return normalize_snake(var_name)\n elif var_case == CAMEL_CASE:\n return normalize_camel(var_name)\n elif var_case == KEBAB_CASE:\n return normalize_kebab(var_name)\n elif var_case == CONST_CASE:\n return normalize_const(var_name)\n else:\n raise ValueError('unknown case {}'.format(var_case))",
"def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)",
"def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n",
"def sanitize_name(self, value):\n if self.sanitize_names:\n new_value = re.sub('[^a-zA-Z0-9_]', '_', value[:127])\n else:\n new_value = value\n return new_value",
"def make_name2(u):\n\treturn re.sub(r'\\s+', '', u).lower()",
"def convert(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def unmangle_measurement_name(measurement_name):\n measurement_name = measurement_name.replace('_sp_', ' ')\n measurement_name = measurement_name.replace('_dsh_', '-')\n return measurement_name",
"def normalize_name(word):\n return word.strip(\"0123456789!@#$%^&*_() +=\\/?<>,.`~;:\").lower().replace(\" \",\"_\")",
"def _normalize_class_name(self, name):\n class_name = ''.join(\n word.capitalize()\n for word in re.sub('[^A-Za-z0-9]+', ' ', name).split()\n )\n\n if not class_name.endswith('Extension'):\n class_name += 'Extension'\n\n return class_name",
"def clean_name(s):\n return re.sub('[\\W_]+', '', s).lower()",
"def standardizeMapName(mapName):\n newName = os.path.basename(mapName)\n newName = newName.split(\".\")[0]\n newName = newName.split(\"(\")[0]\n newName = re.sub(\"[LTE]+$\", \"\", newName)\n return re.sub(' ', '', newName, flags=re.UNICODE)",
"def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")",
"def _sanitizeName(name):\n\n name = name.lower() # lower.\n name = name.replace('.','') # remove periods.\n name = name.replace('-','') # remove dashes.\n name = name.replace(\"'\",'') # remove apostrophies.\n # return it.\n return name"
] | [
"0.6782942",
"0.6727968",
"0.6660268",
"0.65970635",
"0.6492408",
"0.63663113",
"0.63579243",
"0.62790793",
"0.6254779",
"0.6217546",
"0.6214412",
"0.61892366",
"0.6164987",
"0.6164987",
"0.6164987",
"0.60686266",
"0.602515",
"0.60172975",
"0.5997466",
"0.59772354",
"0.59716785",
"0.5968486",
"0.59553707",
"0.59348065",
"0.5933862",
"0.5921028",
"0.59082925",
"0.5892693",
"0.5882946",
"0.58787644"
] | 0.7554459 | 0 |
Collect index statistics for all collections in the configuration. This uses the "$indexStats" command. | def _collect_indexes_stats(self, instance, db, tags):
for coll_name in instance.get('collections', []):
try:
for stats in db[coll_name].aggregate([{"$indexStats": {}}], cursor={}):
idx_tags = tags + [
"name:{0}".format(stats.get('name', 'unknown')),
"collection:{0}".format(coll_name),
]
self.gauge('mongodb.collection.indexes.accesses.ops', int(stats.get('accesses', {}).get('ops', 0)), idx_tags)
except Exception as e:
self.log.error("Could not fetch indexes stats for collection %s: %s", coll_name, e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_index_stats(self):\n #Create Index\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False)\n #Check Index Stats\n self.sleep(30)\n index_map = self.get_index_stats()\n self.log.info(index_map)\n for query_definition in self.query_definitions:\n index_name = query_definition.index_name\n for bucket in self.buckets:\n bucket_name = bucket.name\n check_keys = ['items_count', 'total_scan_duration', 'num_docs_queued',\n 'num_requests', 'num_rows_returned', 'num_docs_queued',\n 'num_docs_pending','delete_bytes' ]\n map = self._create_stats_map(items_count=2016)\n self._verify_index_stats(index_map, index_name, bucket_name, map, check_keys)",
"def index_stats(self):\r\n request = http.Request('GET', '/metadata/index_stats')\r\n return request, parsers.parse_json",
"def all_statistics(self):\n statistics_database = CurrentProject().db_client.statistics\n collections = [getattr(statistics_database, name) for name in statistics_database.collection_names()]\n return AttributeDict.attributize_dict({collection.name: list(collection.find()) for collection in collections})",
"def stats():\n stats = es.indices.stats(\n index='webpages',\n metric=[\"docs\", \"store\"],\n fields=[\"count\"],\n human='true'\n )\n \n return stats",
"def index_all(self, index_name):\n oks = 0\n notoks = 0\n for ok, item in streaming_bulk(\n self.es_client,\n self._iter_documents(index_name)\n ):\n if ok:\n oks += 1\n else:\n notoks += 1\n logging.info(\n \"Import results: %d ok, %d not ok\",\n oks,\n notoks\n )",
"def create_indexes_with_stats(self) -> float:\n query_nodes_per_cluster = self.cluster_spec.servers_by_cluster_and_role('n1ql')\n index_nodes_per_cluster = self.cluster_spec.servers_by_cluster_and_role('index')\n\n t0 = time.time()\n for cluster_query_nodes in query_nodes_per_cluster:\n self.create_indexes(query_node=cluster_query_nodes[0])\n\n # Wait for index build to complete on first cluster, and record time\n logger.info('Waiting for index build on primary cluster')\n self.wait_for_indexing(index_nodes=index_nodes_per_cluster[0])\n index_build_time = time.time() - t0\n logger.info(\"Index build completed in {} sec\".format(index_build_time))\n\n # Wait for index build to complete on remaining clusters\n logger.info('Waiting for index build to complete on remaining clusters')\n remaining_index_nodes = [node for nodes in index_nodes_per_cluster[1:] for node in nodes]\n self.wait_for_indexing(index_nodes=remaining_index_nodes)\n\n return index_build_time",
"def stats_indexing(self, host):\n\n s = self.get_stats(host, 'indexing')\n\n data = {\n 'delete_time_in_millis': s['delete_time_in_millis'],\n 'delete_total': s['delete_total'],\n 'delete_current': s['delete_current'],\n 'index_time_in_millis': s['index_time_in_millis'],\n 'index_total': s['index_total'],\n 'index_current': s['index_current']\n }\n\n return data",
"def mmo_collection_stats(self, mmo_connection, execution_database, collection):\n command = { \"collStats\": collection }\n return self.mmo_execute_on_mongos(mmo_connection, command, execution_database)",
"def fetch_metrics(self):\n\n self.explain_all_indices()",
"def get_index_stats(self):\n\n assert self._check_idx, 'No index available'\n idx_stats = []\n for ref in range(self._header.n_refs):\n try:\n mapped = self._index.unmapped[ref].n_mapped\n unmapped = self._index.unmapped[ref].n_unmapped\n idx_stats.append((mapped, unmapped, mapped + unmapped))\n except KeyError:\n idx_stats.append((0, 0, 0))\n return idx_stats",
"def main():\n\n database = MongoDbUtil('ro').database()\n\n tag = 'Px1id'\n daemons = ['daq_files_watcher', 'jobs_validator', 'submitter']\n colls = ['%s_%s'%(coll, tag) for coll in daemons]\n\n datas = []\n for daemon, coll in zip(daemons, colls):\n last_doc = database[coll].find().skip(database[coll].count()-1)[0]\n accum_stats = last_doc['accum_stats']\n\n vals = {}\n timestamps = []\n for key in accum_stats.keys():\n vals[key] = []\n\n for doc in database[coll].find():\n timestamps.append(doc['date'])\n for key in vals:\n vals[key].append(doc['accum_stats'][key])\n\n urls = []\n for key in vals:\n urls.append(draw(timestamps, vals[key], daemon, key))\n\n datas.append({'title': daemon, 'urls': urls})\n\n make_index_file(tag, datas)",
"def test_get_all_stats(self):\n response = self.client.get_all_stats()\n assert isinstance(response, object)\n assert 'databaseSize' in response\n assert isinstance(response['databaseSize'], int)\n assert 'lastUpdate' in response\n assert 'indexes' in response\n assert 'indexUID' in response['indexes']\n assert 'indexUID2' in response['indexes']",
"def list_indexes(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/indexes/\", self.timeout)",
"def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()",
"def indexes(self):\n return getattr(self, '_indexes', None)",
"def profile_index(func, args, kwargs, func_result):\n collection = args[0]\n\n report_kvs = _profile_query(collection)\n\n if len(args) > 1:\n report_kvs['Index'] = _to_json(args[1])\n\n return report_kvs",
"def stats(self):\n ret = super(DiskCache, self).stats()\n ret[\"root\"] = (self.__env.stat(),)\n for name, database in self.__databases.items():\n with self.__env.begin(database, write=False) as txn:\n ret[name] = txn.stat(database)\n\n return ret",
"def rebuild_all_indexes():\n response = _get_lambda_client().invoke(\n FunctionName=indexer_function_name,\n InvocationType=\"Event\",\n )",
"def update_server_stats(self):\n try:\n aio.run(self.client.execute, 'ANALYZE')\n except Exception:\n pass # swallow; CrateDB 4.1.0+ is required to run ANALYZE",
"def get_all_index_servers(self):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM host WHERE type = 'Index Server';\")\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)",
"def _statistics_queries(self):\n queries = {}\n for stats in self._statistics:\n queries[stats.COLLECTION_NAME] = stats.query\n return queries",
"def execute(self):\n for coll in list(self.__bulks):\n try:\n bulkOp = self.__bulks[coll]\n curr_result = Counter(bulkOp.execute())\n self.update_results(coll, curr_result)\n except BulkWriteError as bwe:\n sys.stderr.write(str(bwe.details))",
"async def list_indexes(self, app_id):\n solr_collections, broken = await self.solr.list_collections()\n indexes_metadata = []\n for collection in solr_collections:\n _, app, namespace, index = collection.split('_')\n if app != app_id:\n continue\n metadata = IndexMetadata(app, namespace, index)\n indexes_metadata.append(metadata)\n return indexes_metadata",
"def get_statistics(\n self, index_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any\n ) -> _models.GetIndexStatisticsResult:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n api_version: str = kwargs.pop(\"api_version\", _params.pop(\"api-version\", self._config.api_version))\n cls: ClsType[_models.GetIndexStatisticsResult] = kwargs.pop(\"cls\", None)\n\n _x_ms_client_request_id = None\n if request_options is not None:\n _x_ms_client_request_id = request_options.x_ms_client_request_id\n\n request = build_get_statistics_request(\n index_name=index_name,\n x_ms_client_request_id=_x_ms_client_request_id,\n api_version=api_version,\n template_url=self.get_statistics.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n path_format_arguments = {\n \"endpoint\": self._serialize.url(\"self._config.endpoint\", self._config.endpoint, \"str\", skip_quote=True),\n }\n request.url = self._client.format_url(request.url, **path_format_arguments)\n\n _stream = False\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n deserialized = self._deserialize(\"GetIndexStatisticsResult\", pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized",
"def stats_page():\n import alltheitems.stats\n return alltheitems.stats.index()",
"def list_documents(\n self, index: str, query: Dict[str, Any] = None\n ) -> Iterable[Dict[str, Any]]:\n return es_scan(self.__client__, query=query or {}, index=index)",
"def _create_indexes(self):\r\n # WARNING: The collection will be locked during the index\r\n # creation. If the collection has a large number of\r\n # documents in it, the operation can take a long time.\r\n\r\n # TODO: The creation of indexes can be moved to a Django\r\n # management command or equivalent. There is also an option to\r\n # run the indexing on the background, without locking.\r\n self.collection.ensure_index([('time', pymongo.DESCENDING)])\r\n self.collection.ensure_index('event_type')",
"def indexes(self):\n indexes = self.execute(self.commands.get_indexes(self.name))\n return [Index(*tup) for tup in indexes]",
"def mmo_database_stats(self, mmo_connection, database):\n command = { \"dbstats\" : 1 }\n return self.mmo_execute_on_mongos(mmo_connection, command, database)",
"def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)"
] | [
"0.7033326",
"0.6837621",
"0.66362005",
"0.65313584",
"0.626018",
"0.62069684",
"0.6138566",
"0.612696",
"0.6071093",
"0.58983314",
"0.58849525",
"0.58647007",
"0.5845978",
"0.58452255",
"0.5805989",
"0.57855815",
"0.577376",
"0.5711172",
"0.5703516",
"0.56850964",
"0.56765705",
"0.56547934",
"0.5641601",
"0.5607561",
"0.55791277",
"0.5568652",
"0.5552156",
"0.555061",
"0.5548154",
"0.5520845"
] | 0.78779423 | 0 |
Test case for upload_area | def test_upload_area(self):
area = areamsg
query_string = [('deliveryAckEndPoint', 'https://localhost:8002')]
response = self.client.open('/area',
method='POST',
data=area,
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_multi_area(self):\n pass",
"def test_upload_file(self):\n pass",
"def test_upload_file1(self):\n pass",
"def test_upload_job_description_file_post(self):\n pass",
"def test_upload(self):\n with self.client:\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.jpg\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))\n self.assertIn('foto.jpg', [photo.filename for photo in Photo.query.all()])",
"def test_file_field():",
"def test_upload_job_description_context_post(self):\n pass",
"def test_image_uploads_on_save(self):\n \n files_count = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n with open('media/test_images/test.jpg') as f:\n self.client.post(reverse('edit'), {'ava': f})\n files_count_after = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n # added file and thumbnail\n self.assertEquals(files_count_after - files_count, 2) \n \n # test image scales \n from PIL import Image\n im = Image.open(settings.MEDIA_ROOT + '/persons/test.thumbnail.jpg')\n thumbnail_size = Person.thumbnail_size\n self.assertEquals((thumbnail_size,thumbnail_size), im.size)",
"def test_should_file_field(self):\n self.assertIn(\"image\", self.fields)",
"def test_FlexCrop1(self):",
"def test_absolute_shape_areas(self):\n\n assert self.test_shape.area == pytest.approx((math.pi * (10**2) * 2) + (math.pi * (2 * 10) * 30))\n assert len(self.test_shape.areas) == 3\n assert self.test_shape.areas.count(pytest.approx(math.pi * (10**2))) == 2\n assert self.test_shape.areas.count(pytest.approx(math.pi * (2 * 10) * 30)) == 1",
"def test_areas_locked_ok(self):",
"def test_uploaded_files_from_editor(self):\n # TODO: If you upload you result to the site, someone else cannot guess the\n # url and get this. Instead the user is barred from this.\n \n project = self.testproject \n \n name1 = self.giverandomfilename(self.root)\n name2 = self.giverandomfilename(self.projectadmin)\n name3 = self.giverandomfilename(self.participant)\n \n resp1 = self._upload_test_file_ckeditor(self.root,self.testproject,name1)\n resp2 = self._upload_test_file_ckeditor(self.projectadmin,self.testproject,name2)\n resp3 = self._upload_test_file_ckeditor(self.participant,self.testproject,name3)\n \n \n # TODO: verify that files uploaded in editor can be served directly.\n # This is not possible now because no files get saved, returning \n # HttpResponseNotFound for any url get.. Can we fake this somehow?\n url = reverse(\"project_serve_file\",\n kwargs={\"project_name\":project.short_name,\n \"path\":project.public_upload_dir_rel()+\"/\"+name1})",
"def test_preview_post(self):\n pass",
"def test_local_uploader_upload_correct_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return True, \\\r\n as this extension is allowed\")\r\n assert res is True, err_msg",
"def test_absolute_areas(self):\n\n assert len(self.test_shape.areas) == 4\n assert len(set([round(i) for i in self.test_shape.areas])) == 3\n assert self.test_shape.areas.count(pytest.approx(60 * math.pi * 2 * 1000)) == 2\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 970)) == 1\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 1030)) == 1",
"def _upload_test_file_ckeditor(self, user, project,testfilename=\"\"): \n \n if testfilename == \"\":\n testfilename = self.giverandomfilename(user)\n \n url = reverse(\"ckeditor_upload_to_project\", \n kwargs={\"site_short_name\":self.testproject.short_name})\n \n factory = RequestFactory()\n request = factory.get(url,\n {\"CKEditorFuncNum\":\"1234\"}) # CKEditorFuncNum is \n # used by ckeditor, \n # normally filled by \n # js but faked here.\n \n request.user = user\n \n fakefile = File(StringIO(\"some uploaded content for\" + testfilename))\n \n fakecontent = \"some uploaded content for\" + testfilename\n request.FILES['upload'] = SimpleUploadedFile(name=testfilename,\n content=fakecontent)\n \n request.method = \"POST\"\n \n # Some magic code to fix a bug with middleware not being found,\n # don't know what this does but if fixes the bug.\n from django.contrib.messages.storage.fallback import FallbackStorage\n setattr(request, 'session', 'session')\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n \n response = upload_to_project(request, project.short_name) \n \n self.assertEqual(response.status_code, 200, \"Uploading file %s as \"\n \"user %s to project %s in ckeditor did not return expected result\"\n % (testfilename, user.username, project.short_name))\n \n errors = re.search('<ul class=\"errorlist\">(.*)</ul>',\n response.content,\n re.IGNORECASE)\n \n self.assertFalse(\"Uploading failed\" in response.content,\n \"Uploading file %s as user %s to project %s in \"\n \"ckeditor return javascript containing 'uploading failed'\"\n % (testfilename, user.username, project.short_name))\n \n \n return response",
"def test_access_cartography_upload_form(self):\n\t\tc = Client()\n\t\tUser.objects.create_superuser('bobby', '[email protected]', 'bob')\n\t\tlog = c.login(username='bobby', password='bob')\n\t\tself.assertTrue(log)\n\t\tresponse = c.get(\"/cartography/upload\")\n\t\tself.assertTrue('Add document' in response.content)",
"def test_rectangle(self):\n result = shape_area.rectangle_area(6,7)\n self.assertEqual(result,26)",
"def test_upload_image(self):\n with open('apps/upload/tests/media/test.jpg') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(200, r.status_code)\n json_r = json.loads(r.content)\n eq_('success', json_r['status'])\n file = json_r['file']\n eq_('test.jpg', file['name'])\n eq_(90, file['width'])\n eq_(120, file['height'])\n name = '098f6b.jpg'\n message = 'Url \"%s\" does not contain \"%s\"' % (file['url'], name)\n assert (name in file['url']), message\n\n eq_(1, ImageAttachment.objects.count())\n image = ImageAttachment.objects.all()[0]\n eq_('pcraciunoiu', image.creator.username)\n eq_(150, image.file.width)\n eq_(200, image.file.height)\n eq_('question', image.content_type.model)\n eq_(1, image.object_id)",
"def test_area(self):\n r1 = Rectangle(3, 2)\n self.assertEqual(r1.area(), 6)\n\n r2 = Rectangle(2, 10)\n self.assertEqual(r2.area(), 20)\n\n r3 = Rectangle(10, 10)\n self.assertEqual(r3.area(), 100)",
"def test_crop(self):\r\n u = Uploader()\r\n size = (100, 100)\r\n im = Image.new('RGB', size)\r\n folder = tempfile.mkdtemp()\r\n u.upload_folder = folder\r\n im.save(os.path.join(folder, 'image.png'))\r\n coordinates = (0, 0, 50, 50)\r\n file = FileStorage(filename=os.path.join(folder, 'image.png'))\r\n with patch('pybossa.uploader.Image', return_value=True):\r\n err_msg = \"It should crop the image\"\r\n assert u.crop(file, coordinates) is True, err_msg\r\n\r\n with patch('pybossa.uploader.Image.open', side_effect=IOError):\r\n err_msg = \"It should return false\"\r\n assert u.crop(file, coordinates) is False, err_msg",
"def testMediaUpload(self):\n self._testUpload(DefaultStorage(), 'media')\n self._testUpload(StaticStorage(), 'static')",
"def test_area1(self):\n r1 = Rectangle(3, 2)\n self.assertEqual(r1.area(), 6)",
"def test_cartography_isuploaded(self):\n\t\timgfile = StringIO.StringIO('GIF87a\\x01\\x00\\x01\\x00\\x80\\x01\\x00\\x00\\x00\\x00ccc,\\x00'\n\t\t '\\x00\\x00\\x00\\x01\\x00\\x01\\x00\\x00\\x02\\x02D\\x01\\x00;')\n\t\tf = SimpleUploadedFile('test_img_file.gif', imgfile.read(), 'image/gif')\n\t\tm, __ = Map.objects.get_or_create(id=1, title='foo', projection='4326', zoom=2, center_x=0, center_y=0,\n\t\t owner=User.objects.get_or_create(username='foo')[0])\n\t\tc = Client()\n\t\tUser.objects.create_superuser('bobby', '[email protected]', 'bob')\n\t\tc.login(username='bobby', password='bob')\n\t\tresponse = c.post(\"/cartography/upload\", {'file': f, 'title': 'uploaded_cartography', 'map': m.id},\n\t\t follow=True)\n\t\tself.assertEquals(response.status_code, 200)",
"def test_upload_photo(self):\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)\n\n url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])\n\n photo_file = self.generate_photo_file()\n\n data = {\n 'photo':photo_file\n }\n\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_has_form(self):\n form = self.response.context['form']\n self.assertIsInstance(form, UploadFileForm)",
"def test_upload_file(self):\n data = dict(additional_metadata='additional_metadata_example',\n file='file_example')\n response = self.client.open(\n '/pet/{petId}/uploadImage'.format(pet_id=789),\n method='POST',\n data=data,\n content_type='multipart/form-data')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_file_upload_fail(self):\r\n module = self.get_module_from_location(self.problem_location)\r\n\r\n # Simulate a student saving an answer\r\n response = module.handle_ajax(\"save_answer\", {\"student_answer\": self.answer_text})\r\n response = json.loads(response)\r\n self.assertFalse(response['success'])\r\n self.assertIn('error', response)",
"def test_procedure_picture_upload(self):\n image_upload_url = PROCEDURE_URL\n\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': ntf,\n 'overview': 'bla bla bla'\n }\n\n res = self.client.post(\n image_upload_url,\n payload,\n format=\"multipart\"\n )\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertIn('image', res.data)"
] | [
"0.68717897",
"0.67497677",
"0.64811105",
"0.6158702",
"0.61027944",
"0.60922873",
"0.59541386",
"0.59376353",
"0.5898766",
"0.5843938",
"0.58039546",
"0.5802995",
"0.5782079",
"0.5776933",
"0.5713552",
"0.56940377",
"0.5679234",
"0.5669817",
"0.56505054",
"0.56200916",
"0.5615791",
"0.5615363",
"0.56006974",
"0.5599945",
"0.55977345",
"0.55969757",
"0.5552817",
"0.55515844",
"0.5512286",
"0.54967844"
] | 0.67986137 | 1 |
Test case for upload_area cleanup | def test_upload_area_cleanup(self):
vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'
p = Path('import')
files = list(p.glob('**/urn:mrn:s124:*'))
for item in files:
print(item)
os.remove(str(item))
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_final_cleanup():\n cleanup_file(\"tfsaves\")",
"def tearDown(self):\n self.tmp.cleanup()",
"def cleanup():",
"def _cleanup(self):\n pass",
"def test_cleanup(self):\n imgurl = \"{}spei03.nc\".format(self.processor.base_url)\n httpretty.register_uri(httpretty.GET, imgurl,\n body=get_mock_image())\n self.processor.download(imgurl, 'spei03.tif')\n self.assertNotEqual([], glob.glob(os.path.join(\n self.processor.tmp_dir, self.processor.prefix + '*')))\n self.processor.cleanup()\n self.assertEquals([], glob.glob(os.path.join(\n self.processor.tmp_dir, self.processor.prefix + '*')))",
"def after_scenario(context, _):\n context.backup_root_raw.cleanup()",
"def post_cleanup(self):\n pass",
"def cleanup(self):",
"def cleanup(self):",
"def cleanup(self):",
"def cleanup(self, *args, **kwargs):",
"def tearDown(self):\n\t del self.user\n\t del self.created_image",
"def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)",
"def tearDown(self):\n\t\tdel self.user\n\t\tdel self.image",
"def tearDown(self):\n self.image.delete()",
"def test_delete_image(self):\n pass",
"def tearDown(self):\n del self.location",
"def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n shutil.rmtree(settings.MEDIA_ROOT)",
"def tearDown(self):\n if os.path.exists(settings.MEDIA_ROOT):\n shutil.rmtree(settings.MEDIA_ROOT)",
"def tearDownClass(self):\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")",
"def test_multi_area(self):\n pass",
"def test_ospf_area_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', ospf_area.delete,\n {'area': {}}\n )",
"def cleanup(self):\r\n pass",
"def final_cleanup(self):\n raise NotImplementedError()",
"def horde_cleanup(self):",
"def tearDown(self):\n self.recipe.image.delete()",
"def cleanup (self):\n pass",
"def cleanup(self):\n super(Test200SmartSanityDownload004, self).cleanup()",
"def test_delete_collection_image(self):\n pass",
"def teardown(self):\n super(TestCisAsciiFileInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)"
] | [
"0.6206157",
"0.6140681",
"0.6071183",
"0.6062651",
"0.6050911",
"0.6030279",
"0.6009838",
"0.6004148",
"0.6004148",
"0.6004148",
"0.6001869",
"0.5971952",
"0.5948387",
"0.59369427",
"0.59284383",
"0.5869602",
"0.5861927",
"0.5838485",
"0.5838485",
"0.5813883",
"0.5788908",
"0.5786021",
"0.5772909",
"0.5767063",
"0.5765127",
"0.5729434",
"0.572611",
"0.572051",
"0.5702556",
"0.5698381"
] | 0.6698037 | 0 |
Takes in a trajectory file and uploads it to the database | def upload_trajectory(cls, trajectory_file):
# extract the attributes from the file name
pitch, roll, u0 = cls.extract_pitch_roll_velocity(
trajectory_file)
# get or create a database object with those attributes
trajectory, created = SolvedTrajectory.objects.get_or_create(
file_name = trajectory_file)
# set the attributes
trajectory.pitch = pitch
trajectory.roll = roll
trajectory.u0_string = u0
# save the object
trajectory.save()
return created | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_trajectory(trajectory):\n query = \"INSERT INTO trajectories (idUser, startTime, endTime) VALUES (%(id_user)s, %(start_time)s, %(end_time)s)\"\n\n trajectory_data = {\n 'id_user': trajectory.id_user,\n 'start_time': trajectory.start_time,\n 'end_time': trajectory.end_time\n }\n\n inserted_id = qr.run_write_query(query, trajectory_data)\n trajectory.id = inserted_id",
"def process_file(cur, conn, table, filepath):\n\n taxi_table_insert = (\"\"\"\n INSERT INTO {} (trip_id, taxi_id, trip_sec, trip_mile)\n VALUES (%s, %s, %s, %s);\n \"\"\".format(table))\n\n # open csv file\n # https://stackoverflow.com/questions/17444679/reading-a-huge-csv-file\n df = pd.read_csv(filepath)\n\n df = df[['Trip ID', 'Taxi ID', 'Trip Seconds', 'Trip Miles']]\n\n df.dropna(inplace=True)\n\n # insert trip records\n for index, row in df.iterrows():\n cur.execute(taxi_table_insert, row)\n conn.commit()",
"def upload(self, filename, file_path):\n return",
"def upload(det_file):\n db = DatabaseSession()\n\n try:\n LOG.info(f\"Copying REDCap DET records from {det_file.name}\")\n\n row_count = db.copy_from_ndjson((\"receiving\", \"redcap_det\", \"document\"), det_file)\n\n LOG.info(f\"Received {row_count:,} DET records\")\n LOG.info(\"Committing all changes\")\n db.commit()\n\n except:\n LOG.info(\"Rolling back all changes; the database will not be modified\")\n db.rollback()\n raise",
"def upload_csv_data(self, upload_file):\n db = DataBase(self.DATABASE_DATA)\n db.insert_data_from_file(\n 'triagedata.historicdata',\n ('clinic_id', 'severity', 'date_received', 'date_seen'),\n upload_file,\n ','\n )",
"def upload_training_file(self):\n\n file_path = os.getcwd() + \"/\" + self.console_label.training_file_name\n\n with open(file_path, 'r') as f:\n r = requests.post(self.upload_url, files={'file': f})\n\n if r.status_code != requests.codes.ok:\n messagebox.showerror(\"Error\", \"The training file could not be uploaded!\")",
"def upload_shape(shapepath):\n\n conn = None\n cur = None\n\n try:\n # first create the sqlstring with inserts\n # call PGSQL2SHP with some parameters, -s 4326 to set lat/lon srid, -I to create a spatial index on the geometry column\n params = [settings.SHP2PGSQL, \"-s\", \"4326\", \"-I\", shapepath, settings.STATES_TABLE_NAME]\n sqlstring,info = utils.run_tool(params)\n if not sqlstring:\n raise Exception(\"cannot upload file to database\")\n\n #then use the sqlstring\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n cur.execute(sqlstring)\n conn.commit()\n\n finally:\n if cur:\n cur.close()\n if conn:\n conn.close()",
"def save_trajectory(self, file: Path) -> None:\n if self.__layout.count() > 0 and self.__trajectory_writer is not None:\n traj = self.__trajectory_writer.get_trajectory()\n write_trajectory_to_file(traj, file)",
"def upload_mission(aFileName, vehicle):\n #Read mission from file\n missionlist = readmission(aFileName, vehicle)\n\n print \"\\nUpload mission from a file: %s\" % aFileName\n #Clear existing mission from vehicle\n print ' Clear mission'\n cmds = vehicle.commands\n cmds.clear()\n #Add new mission to vehicle\n for command in missionlist:\n cmds.add(command)\n print ' Upload mission'\n vehicle.commands.upload()",
"def parseUpload(dbconnection, fileName):\n nhContent = ParseText.nohupTranscriptionContent(fileName)\n count = 0\n while count < len(nhContent[0]):\n try:\n rtf = nhContent[0][count]\n transcription = nhContent[1][count].replace(\"'\", \"''\").replace(\"_\", \"\")\n dbID = nhContent[2][count].replace(\".\", \"\")\n duration = nhContent[3][count]\n DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)\n count += 1\n except:\n print(\"couldnt upload one at index \" + str(count))\n count += 1",
"def _upload_to_brain(self, data):\n self._log.info(\"Posting triples\")\n\n return self._connection.upload(data)",
"def write_trajectory(self, environmnent, pdb_filename):\n # TODO\n pass",
"def _upload_file_to_file_system(upload_details):\n upload_url = \"%s%s\" % (main_url, upload_details['upload_path'])\n fsysparams = {\n 'qqfile': upload_filepath,\n 'import_record': upload_dataset_id,\n 'source_type': upload_datatype\n }\n return requests.post(upload_url,\n params=fsysparams,\n files={'file': open(upload_filepath, 'rb')},\n headers=upload_header)",
"def upload_data(self):\n labeled_ids = self.get_labeled_ids()\n\n users = []\n users_ids = []\n\n activities = []\n last_activities = []\n\n trackpoints = []\n\n for root, dirs, files in os.walk(DATASET_PATH, topdown=True):\n path_parts = root.split(\"/\")\n if len(path_parts) < 4: # check if inside user folder\n continue\n user_id = path_parts[3]\n\n if user_id not in labeled_ids:\n continue\n\n if user_id not in users_ids:\n users_ids.append(user_id)\n users.append({\"id\": user_id, \"has_labels\": user_id in labeled_ids})\n\n if 'labels.txt' in files:\n last_activities = self.get_activities(user_id, root + \"/labels.txt\")\n activities.extend(last_activities)\n\n if 'Trajectory' in root:\n files.sort()\n for file_path in files:\n trackpoints.extend(self.get_trackpoints(root + \"/\" + file_path, last_activities))\n print(len(trackpoints))\n\n\n print(\"Uploading data\")\n self.insert_data_bulk(\"User\", users)\n print(\" > Users done\")\n self.insert_data_bulk(\"Activity\", activities)\n print(\" > Activities done\")\n self.insert_data_bulk(\"TrackPoint\", trackpoints)\n print(\" > TrackPoints done\")\n self.cursor.close()",
"def train_projector(self, train_files, projector_file):\n # save something\n bob.io.base.save(_data, projector_file)",
"def saveUploadedTopology(self, file):\r\n filename = str(file)\r\n with open(os.path.join(main.settings.TOPOLOGY_DIR, filename), 'wb+') as destination:\r\n for chunk in file.chunks():\r\n destination.write(chunk)",
"def upload_file(self, file_path, file_name, output_path):",
"def upload():\n\n # TODO: decorator to check token\n token = request.headers.get(\"Authorization\")\n\n has_text = bool(request.get_json())\n has_file = request.files and request.files[\"file\"]\n if not has_text and not has_file:\n error = \"No text input and no file provided\"\n return jsonify({\"success\": False, \"message\": error})\n\n filename, error = save_text(request)\n if error:\n return jsonify({\"success\": False, \"message\": error})\n\n job_id = schedule(filename, token)\n add_user_job(job_id, token)\n\n return jsonify({\"success\": True, \"data\": {\"jobId\": job_id}})",
"def upload_file( processor, user, local_path ):\n operations.publish_work_item(\n operations.create_asset_from_file(\n file_name = local_path,\n owner = user,\n producer = processor,\n child_number = 0,\n asset_class = models.AssetClass.UPLOAD ))",
"def process_song_file(cur, filepath: str) -> None:\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n for song_record in df[\n [\n \"song_id\",\n \"title\",\n \"artist_id\",\n \"year\",\n \"duration\",\n ]\n ].values:\n cur.execute(sql_queries.song_table_insert, song_record)\n\n # insert artist record\n for artist_record in df[\n [\n \"artist_id\",\n \"artist_name\",\n \"artist_location\",\n \"artist_latitude\",\n \"artist_longitude\",\n ]\n ].values:\n cur.execute(sql_queries.artist_table_insert, artist_record)",
"def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise",
"def upload_file(filename, server, account, projname, language=None,\n username=None, password=None,\n append=False, stage=False, date_format=None):\n stream = transcode_to_stream(filename, date_format)\n upload_stream(stream_json_lines(stream),\n server, account, projname, language=language,\n username=username, password=password,\n append=append, stage=stage)",
"def process_song_file(cur, filepath):\n \n # open song file\n \n df = pd.read_json(filepath,lines=True)\n \n # insert song record\n song_data = df[['song_id', 'title', 'artist_id','year',\n 'duration']].values[0].tolist()\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = df[['artist_id','artist_name',\n 'artist_location', 'artist_latitude',\n 'artist_longitude']].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)",
"def train(self, trainfile):",
"def uploader():\n\tif request.method == 'POST':\n\t\t\n\t\tif \"file\" not in request.files:\n\t\t\treturn \"No data in file.\"\n\n\t\tFile = request.files['file']\n\t\t\n\t\tif File.filename == \"\":\n\t\t\treturn \"No file selected.\"\n\t\t\n\t\tfilename, ext = secure_filename(File.filename).split('.')\n\t\t#Check if file stream exists and file tpye correct.\n\t\tif File and ext == \"hepmc\":\n\t\t\t#The file is a byte stream by default which is not compatible with the current version of hepmcio.\n\t\t\tstring_stream = io.StringIO(File.read().decode('utf-8'))\n\n\t\t\t#Get all events from file and jsonify them.\n\t\t\tevents = hepmcio.HepMCReader(string_stream).all_events()\n\t\t\thepMCEncoder = hepmcio_json.HepMCJSONEncoder()\n\t\t\tjsonified = [hepMCEncoder.encode(event) for event in events]\n\n\t\t\t#Each collection contains all the data in a file.\n\t\t\tif filename not in mongo.db.collection_names():\n\t\t\t\tcollection = mongo.db[filename]\n\t\t\t\tjsonDecoder = json.JSONDecoder()\n\n\t\t\t\t#MongoDB takes in Python objects and not JSON strings, so have to decode before adding documents.\n\t\t\t\tfor jsonObject in jsonified:\n\t\t\t\t\tjsonEvent = jsonDecoder.decode(jsonObject.evt)\n\t\t\t\t\tjsonParticles = [jsonDecoder.decode(p) for p in jsonObject.particles]\n\t\t\t\t\tjsonVertices = [jsonDecoder.decode(v) for v in jsonObject.vertices]\n\n\t\t\t\t\tcollection.insert_one(jsonEvent)\n\t\t\t\t\tcollection.insert_many(jsonParticles)\n\t\t\t\t\tcollection.insert_many(jsonVertices)\n\t\t\n\t\t\t\treturn \"Succesfully uploaded file.\"\n\t\t\t\n\t\t\treturn \"File already in database.\"\n\n\t\treturn \"Incorrect file type.\"",
"def save_model_file_path_to_db(self, file_path, clinic_id, severity, accuracy, in_use):\n db = DataBase(self.DATABASE_DATA)\n query = \"INSERT INTO triagedata.models (file_path, clinic_id, severity, accuracy, in_use) \"\n query += \"VALUES ('%s', %s, %s, %s, %s) \" % (file_path, clinic_id, severity, accuracy, in_use)\n query += \"RETURNING id\"\n return db.insert(query, returning=True)",
"def store(self, filename):",
"def process_song_file(cur, filepath):\n # open song file\n data_frame = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = list(data_frame[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0])\n cur.execute(song_table_insert, song_data)\n\n # insert artist record\n artist_data = list(\n data_frame[['artist_id', 'artist_name', 'artist_location',\n 'artist_latitude', 'artist_longitude']].values[0])\n cur.execute(artist_table_insert, artist_data)",
"def process_song_file(cur, filepath):\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = list(df[[\"song_id\", \"title\", \"artist_id\", \"year\", \"duration\"]].values[0])\n try:\n cur.execute(song_table_insert, song_data)\n except psycopg2.Error as e:\n print(\"Error: Unable to insert record in songs table\")\n print(e)\n\n # insert artist record\n artist_data = list(df[[\"artist_id\", \"artist_name\", \"artist_location\", \"artist_latitude\", \"artist_longitude\"]].values[0])\n try:\n cur.execute(artist_table_insert, artist_data)\n except psycopg2.Error as e:\n print(\"Error: Unable to insert record in artists table\")\n print(e)",
"def upload_file(self, f):\n return self._telegraph.upload_file(f)"
] | [
"0.67590135",
"0.6199504",
"0.60810244",
"0.5968449",
"0.5886125",
"0.5864965",
"0.58335364",
"0.5823321",
"0.58050644",
"0.5799841",
"0.57324755",
"0.57312024",
"0.57259333",
"0.57164156",
"0.5702718",
"0.569988",
"0.5675204",
"0.5665265",
"0.5634949",
"0.5631133",
"0.5630338",
"0.56173027",
"0.5614619",
"0.5611624",
"0.56012607",
"0.55997264",
"0.559104",
"0.5590249",
"0.55723464",
"0.5569719"
] | 0.7878746 | 0 |
Iterates through every trajectory that has been solved for and uploads each one that has not been uploaded yet | def sync_dir(self):
# mark the trajectories that we have seen
trajectories = os.listdir(self.trajectory_dir)
for trajectory_file in trajectories:
if trajectory_file not in self.seen_trajectories:
created = self.upload_trajectory(trajectory_file)
self.seen_trajectories.add(trajectory_file)
if created is True:
print "Total of %s solved trajectories" % \
SolvedTrajectory.objects.count(), created | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self, trajectory):\n #TODO Implement?",
"def build(self, trajectory):\n pass",
"def compute_trajectory():\n pass",
"def _insertAllSteps(self): \n self.uMics = self.inputCoordinatesTiltedPairs.get().getUntilted().getMicrographs()\n self.tMics = self.inputCoordinatesTiltedPairs.get().getTilted().getMicrographs()\n\n self.inputMics = self._createSetOfParticles('auxMics')\n self.inputMics.copyInfo(self.uMics)\n self.inputMics.setStore(False)\n \n for micU, micT in izip(self.uMics, self.tMics):\n micU.cleanObjId()\n micT.cleanObjId()\n self.inputMics.append(micU)\n self.inputMics.append(micT)\n\n self.samplingInput = self.uMics.getSamplingRate()\n \n\n if self.downsampleType.get() != OTHER:\n # If 'same as picking' or 'original' get sampling rate from input micrographs\n #TODO: Review this when downsampling before picking is possible\n self.samplingFinal = self.samplingInput\n else:\n # If 'other' multiply the input sampling rate by the factor provided\n self.samplingFinal = self.samplingInput*self.downFactor.get()\n \n # Write pos files for each micrograph\n firstStepId = self._insertFunctionStep('writePosFilesStep')\n \n # For each micrograph insert the steps\n #run in parallel\n \n deps = []\n for mic in self.inputMics:\n localDeps = [firstStepId]\n micrographToExtract = mic.getFileName()\n micName = removeBaseExt(mic.getFileName())\n micId = mic.getObjId()\n\n # If downsample type is 'other' perform a downsample\n if self.downsampleType == OTHER:\n fnDownsampled = self._getTmpPath(micName+\"_downsampled.xmp\")\n downFactor = self.downFactor.get()\n args = \"-i %(micrographToExtract)s -o %(fnDownsampled)s --step %(downFactor)f --method fourier\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_downsample\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnDownsampled\n \n # If remove dust \n if self.doRemoveDust:\n fnNoDust = self._getTmpPath(micName+\"_noDust.xmp\")\n \n thresholdDust = self.thresholdDust.get() #TODO: remove this extra variable\n args=\" -i %(micrographToExtract)s -o %(fnNoDust)s --bad_pixels outliers %(thresholdDust)f\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_filter\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnNoDust\n \n #self._insertFunctionStep('getCTF', micId, micName, micrographToExtract)\n micName = removeBaseExt(mic.getFileName())\n \n # Actually extract\n deps.append(self._insertFunctionStep('extractParticlesStep', micId, micName, \n None, micrographToExtract, prerequisites=localDeps))\n # TODO: Delete temporary files\n \n # Insert step to create output objects \n self._insertFunctionStep('createOutputStep', prerequisites=deps)",
"def propagateTau(self):\n\n # prepare cases folder\n TauSample_thin = self._thiningSamples(self.nSample_propagate)\n self.intf.genFolders(self.nSample_propagate, self.caseName, self.caseNameRun, TauSample_thin, self.writeInterval)\n caseCount = np.linspace(1, self.nSample_propagate, self.nSample_propagate)\n isample = 0\n for case in caseCount:\n tmpCaseName = self.caseName + \"-tmp_\" + str(case)\n print \"#\", case, \"/\", self.nSample_propagate, \" solving PDE equations to propagate to velocity\"\n self._writeAllFieldToOpenFOAM(tmpCaseName, isample)\n if (self.enablePP):\n self.jobs.append(\n # invoke the dynamic core\n self.job_server.submit(foamOp.callFoam, (tmpCaseName, self.caseSolver, False))\n )\n else:\n foamOp.callFoam(tmpCaseName, self.caseSolver, False)\n isample = isample + 1\n\n print \"Waiting for all OpenFOAM runs to finish ...\"\n # Barrier to make sure all cases are finished before moviing on\n if(self.enablePP):\n part_sum1 = [job() for job in self.jobs]\n self.jobs = []\n\n part_sum1 = 0\n\n # finished all ensemble propagation. print stats\n if(self.enablePP):\n self.job_server.print_stats()\n # propagate baseline\n foamOp.callFoam(self.caseNameRun, self.caseSolver, False)",
"def start_solving(self):\n self.mesh.output_vtk_mesh(self.model_name + \"0\", \n [self.current_pressure, \n self.mesh.get_cell_domain_all()], \n [\"pressure\", \"domain\"])\n\n self.time_step_output(0., 0)\n\n for time_step in range(1,self.number_of_time_steps+1):\n current_time = time_step*self.delta_t\n print(time_step)\n\n self.update_pressure()\n self.find_upwinding_direction()\n self.update_concentration()\n \n if time_step%self.output_frequency == 0:\n self.mesh.output_vtk_mesh(self.model_name+str(time_step), \n [self.current_pressure,\n self.current_concentration, \n self.mesh.get_cell_domain_all()],\n [\"pressure\", \"concentration\" , \"domain\"])\n\n self.time_step_output(current_time, time_step)",
"def __init__(self,replicas,config): \n \n\tself.trjqueue = []\t# A queue of SWAPEVERY coordinates\n self.enequeue = []\t# A queue of SWAPEVERY ene fields\n\t\n self.BUFFERSIZE = 500\t# number of points to keep in queues\n\t\t\t\t# before dumping to file\n\t\n for i in range(0,len(replicas)):\n\t self.trjqueue.append([])\n\t self.enequeue.append([])\n\t \t \n\t\n self.repfiles_trj = []\t\t# file handles for coord trajs for each replica\n self.repfiles_ene = []\t\t# file handles for ene trajs for each replica\n\t\n self.bytemp_trj = []\t\t# file handles for coord trajs arranged by temp\n self.bytemp_ene = []\t\t# file handles for ene trajs arranged by temp\n self.bytemp_replica = []\t# file handle - keeps track of replica number\n\t\n self.byreplica_trj = []\t\t# file handles for coord trajs arranged by temp\n self.byreplica_ene = []\t\t# file handles for ene trajs arranged by temp\n self.byreplica_temp = []\t# file handle - keeps track of temp number\n\n self.mkdir(config.EXPDIR)\n\t \n # make /data /anal and /setup if they don't exist\n\tself.mkdir(config.DATADIR)\n\tself.mkdir(config.ANALDIR)\n\tself.mkdir(config.SETUPDIR)\n\t\n # make /workspace, /by_temp, /by_replica\n # Open trajectory files for writing in the worksapce\n\tself.mkdir(os.path.join(config.DATADIR, 'workspace') )\n\tself.mkdir(os.path.join(config.DATADIR, 'by_temp') )\n\tself.mkdir(os.path.join(config.DATADIR, 'by_replica') )\n\t\n\t\n # Open trajectory files for writing in the worksapce\n\tfor i in range(0,len(replicas)):\n\t\n\t workspacedir = os.path.join(config.DATADIR, 'workspace', str(replicas[i].repnum))\n\t self.mkdir(workspacedir)\n\t \n\t trjname = os.path.join(workspacedir, 'mc.trj')\n\t enename = os.path.join(workspacedir, 'mc.ene')\n\n\t # fout = open(trjname,'w')\n\t self.repfiles_trj.append(open(trjname,'w'))\n\t # fout = open(enename,'w')\n\t self.repfiles_ene.append(open(enename,'w'))\n\n ### write a header file explaining what the ene fields are\n\t eneheader = os.path.join(workspacedir, 'header.ene')\n\t self.write_eneheader(eneheader,replicas[i])\n\t \n\t\n ### Open trajectory files for writing in the by_temp directory\n\tfor i in range(0,len(replicas)):\n\t\n\t bytempdir = os.path.join(config.DATADIR, 'by_temp' )\n\t self.mkdir(bytempdir)\n\t \n\t trjname = bytempdir + '/' + str(replicas[i].repnum) + '.trj'\n\t enename = bytempdir + '/' + str(replicas[i].repnum) + '.ene'\n\t replicaname = bytempdir + '/' + str(replicas[i].repnum) + '.replica'\n\t \n\t self.bytemp_trj.append(open(trjname,'w'))\n\t self.bytemp_ene.append(open(enename,'w'))\n\t self.bytemp_replica.append(open(replicaname,'w'))\n\t \n\t ### write a header file explaining what the ene fields are\n\t eneheader = bytempdir + '/header.ene'\n\t self.write_eneheader(eneheader,replicas[i])\n\n\t \n\t\n ### Open trajectory files for writing in the by_replica directory\n\tfor i in range(0,len(replicas)):\n\t\n\t byreplicadir = os.path.join(config.DATADIR, 'by_replica')\n\t self.mkdir(byreplicadir)\n\t \n\t trjname = byreplicadir + '/' + str(replicas[i].repnum) + '.trj'\n\t enename = byreplicadir + '/' + str(replicas[i].repnum) + '.ene'\n\t tempname = byreplicadir + '/' + str(replicas[i].repnum) + '.temp'\n\t \n\t self.byreplica_trj.append(open(trjname,'w'))\n\t self.byreplica_ene.append(open(enename,'w'))\n\t self.byreplica_temp.append(open(tempname,'w'))\n\n\t ### write a header file explaining what the ene fields are\n\t eneheader = byreplicadir + '/header.ene'\n\t self.write_eneheader(eneheader,replicas[i])\n\t \t\n ### print 'REpfiles:', 
self.repfiles_trj",
"def upload_data(self):\n labeled_ids = self.get_labeled_ids()\n\n users = []\n users_ids = []\n\n activities = []\n last_activities = []\n\n trackpoints = []\n\n for root, dirs, files in os.walk(DATASET_PATH, topdown=True):\n path_parts = root.split(\"/\")\n if len(path_parts) < 4: # check if inside user folder\n continue\n user_id = path_parts[3]\n\n if user_id not in labeled_ids:\n continue\n\n if user_id not in users_ids:\n users_ids.append(user_id)\n users.append({\"id\": user_id, \"has_labels\": user_id in labeled_ids})\n\n if 'labels.txt' in files:\n last_activities = self.get_activities(user_id, root + \"/labels.txt\")\n activities.extend(last_activities)\n\n if 'Trajectory' in root:\n files.sort()\n for file_path in files:\n trackpoints.extend(self.get_trackpoints(root + \"/\" + file_path, last_activities))\n print(len(trackpoints))\n\n\n print(\"Uploading data\")\n self.insert_data_bulk(\"User\", users)\n print(\" > Users done\")\n self.insert_data_bulk(\"Activity\", activities)\n print(\" > Activities done\")\n self.insert_data_bulk(\"TrackPoint\", trackpoints)\n print(\" > TrackPoints done\")\n self.cursor.close()",
"def _upload_to_brain(self, data):\n self._log.info(\"Posting triples\")\n\n return self._connection.upload(data)",
"def write_trajectory(self, environmnent, pdb_filename):\n # TODO\n pass",
"def upload_analysis(list_of_contents, list_of_names, list_of_dates, session_id, job_id, clean_input_dir):\n\n clean_input_dir = len(clean_input_dir) != 0\n\n print('UPLOAD')\n\n if session_id is not None and list_of_contents is None:\n print(f'Running in session {session_id}')\n\n # make a subdirectory for this session if one doesn't exist\n input_dir = join(BASE_DIR, 'input', f'input_{session_id}')\n try:\n os.mkdir(input_dir)\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(input_dir, 'analysis'))\n except FileExistsError:\n pass\n\n # Create an output directory for this session if it doesn't exist\n output_dir = join(BASE_DIR, 'output', f'output_{session_id}')\n try:\n os.mkdir(output_dir)\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'analysis'))\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'analysis', 'images'))\n except FileExistsError:\n pass\n\n def _clean_input_dir():\n \"\"\"\n Clean the input directory by removing every existing file.\n \"\"\"\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))\n\n try:\n\n # If the user isn't uplaoding anything and\n # hasn't uploaded anything, ask them to do so.\n # print(os.listdir(input_dir))\n if list_of_contents is None and len(os.listdir(join(input_dir, 'analysis'))) == 0:\n return 'Please upload some files.'\n\n # if the user is uploading something, first clean the input directory,\n # then write the uploaded files to BASE_DIR/input/input_{session_id}\n if list_of_contents:\n\n if clean_input_dir:\n _clean_input_dir()\n\n # Save successfully uploaded filenames here\n written = list()\n\n # Write uploaded files to BASE_DIR/input/input_{session_id}\n # If any of the files do not end in .txt,\n # or cannot be decoded properly, or cannot be parsed\n # into Voigt models, then clean the input directory and print\n # the error message. Otherwise, show a bullet list of files\n # uploaded to the input directory.\n\n if not clean_input_dir:\n old_peaks = pd.read_csv(join(input_dir, 'peaks.csv'))\n old_models = pd.read_csv(join(input_dir, 'models.csv'))\n else:\n old_peaks = pd.DataFrame()\n old_models = pd.DataFrame()\n \n new_peaks = pd.DataFrame()\n\n for i, c in enumerate(list_of_contents):\n\n if not list_of_names[i].endswith('.txt'):\n raise Exception(f'File {list_of_names[i]} must be .txt')\n\n s = c.split(',')[1]\n\n try:\n s = base64.b64decode(s).decode()\n except UnicodeDecodeError:\n raise Exception(f'Error uploading file {list_of_names[i]}.\\\n Please check file format and try again.')\n\n with open(join(input_dir, 'analysis', list_of_names[i]), 'w') as f:\n f.write(s)\n\n try:\n parsed_file = parse_file(join(input_dir, 'analysis', list_of_names[i]))\n new_peaks = pd.concat([new_peaks, parsed_file], sort=True)\n except Exception as e:\n import traceback\n traceback.print_exc()\n raise Exception(f'Cannot parse file {list_of_names[i]}: {e}')\n\n written.append(list_of_names[i])\n\n res = [html.Li(x) for x in written]\n res.insert(0, html.P(f'Success! 
{len(written)} \\\n .txt files were uploaded.'))\n\n # peaks = read_input(session_id)\n id_vars = pd.Series(new_peaks.columns)\n mask = ~(id_vars.str.contains('(p|n)m', regex=True) &\n id_vars.str.contains('center'))\n id_vars = id_vars.loc[mask]\n new_peaks = new_peaks.melt(id_vars=id_vars)\n new_peaks = new_peaks.loc[new_peaks.value.notnull()]\n\n def compute_models(DATA):\n res = pd.DataFrame([], columns=['filename', 'peak_name', 'peak_position', 'amplitude'])\n for idx, (_, model) in enumerate(DATA.iterrows()):\n\n row = pd.Series()\n row['filename'] = model.filename\n row['peak_name'] = model.variable\n row['peak_position'] = model.value\n \n amp_col = model.variable[:model.variable.index('_')] + '_amplitude'\n row['amplitude'] = model[amp_col]\n\n res.loc[idx] = row\n\n return res\n\n new_models = compute_models(new_peaks)\n\n models = pd.concat([old_models, new_models])\n peaks = pd.concat([old_peaks, new_peaks])\n\n models.to_csv(join(input_dir, 'models.csv'))\n\n peaks.to_csv(join(input_dir, 'peaks.csv'))\n\n return res\n\n except Exception as e:\n # If any of the files raise an error (wrong extension,\n # decoding error, error parsing into models),\n # then print the error message.\n _clean_input_dir()\n import traceback; traceback.print_exc()\n return f'An error occurred while uploading files: {e}'",
"def writePosFilesStep(self): \n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getUntilted())\n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getTilted())",
"def submission():\n logging.info('Loading and compiling models...')\n model_systole = get_model()\n model_diastole = get_model()\n\n logging.info('Loading models weights...')\n model_systole.load_weights('../models/weights/weights_systole_best.hdf5')\n model_diastole.load_weights('../models/weights/weights_diastole_best.hdf5')\n\n logging.info('Loading validation data...')\n X, ids = load_validation_data()\n\n logging.info('Pre-processing images...')\n X = preprocess(X)\n\n batch_size = 32\n logging.info('Predicting on validation data...')\n pred_systole = model_systole.predict(X, batch_size=batch_size, verbose=1)\n pred_diastole = model_diastole.predict(X, batch_size=batch_size, verbose=1)\n\n # real predictions to CDF\n cdf_pred_systole = correct_cdf(pred_systole)\n cdf_pred_diastole = correct_cdf(pred_diastole)\n\n logging.info('Accumulating results...')\n sub_systole = accumulate_study_results(ids, cdf_pred_systole)\n sub_diastole = accumulate_study_results(ids, cdf_pred_diastole)\n\n # write to submission file\n logging.info('Writing submission to file...')\n fi = csv.reader(open('../input/sample_submission_validate.csv'))\n f = open('../submissions/submission_13.csv', 'w')\n fo = csv.writer(f, lineterminator='\\n')\n fo.writerow(next(fi))\n for line in fi:\n idx = line[0]\n key, target = idx.split('_')\n key = int(key)\n out = [idx]\n if key in sub_systole:\n if target == 'Diastole':\n out.extend(list(sub_diastole[key][0]))\n else:\n out.extend(list(sub_systole[key][0]))\n else:\n logging.info('Miss {0}'.format(idx))\n fo.writerow(out)\n f.close()\n\n logging.info('Done.')",
"def find_trajectory(self):\n\n translation,_ = self.trans_listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.x = translation[0]\n self.y = translation[1]\n \n cell_x = int(np.floor(self.x / self.metadata.resolution) + self.w / 2) - self.x_offset\n cell_y = int(np.floor(self.y / self.metadata.resolution) + self.h / 2) - self.y_offset\n\n visited = np.zeros(self.costmap.shape)\n visited[cell_y,cell_x] = 1\n\n to_explore = self.add_neighbors(visited, Node(cell_x,cell_y,0,None))\n to_explore.sort(key=operator.attrgetter('cost'))\n\n # Run modified Dijkstra algorithm\n while to_explore: \n next_node = to_explore.pop(0)\n if next_node.cost == -1:\n print(\"Found goal!\")\n\t\tself.send_final_pose(next_node)\n self.number_of_fails = 0\n self.get_trajectory(next_node)\n return\n \n to_explore = to_explore + self.add_neighbors(visited, next_node)\n to_explore.sort(key=operator.attrgetter('cost'))\n\n self.number_of_fails += 1\n print(\"Failed: %d times % self.number_of_fails\")\n\n if self.number_of_fails >= NUMBER_OF_FAILS:\n print(\"Exiting!\")\n msg = Bool()\n msg.data = True\n self.exp_complete_pub.publish(msg)",
"def upload_trajectory(cls, trajectory_file):\n # extract the attributes from the file name\n pitch, roll, u0 = cls.extract_pitch_roll_velocity(\n trajectory_file)\n\n # get or create a database object with those attributes\n trajectory, created = SolvedTrajectory.objects.get_or_create(\n file_name = trajectory_file)\n\n # set the attributes\n trajectory.pitch = pitch\n trajectory.roll = roll\n trajectory.u0_string = u0\n\n # save the object\n trajectory.save()\n return created",
"def execute_tp(self):\n self.status_message = \"State: Execute TP- Executing Motion Plan with trajectory planner\"\n self.current_state = \"execute_tp\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n target=self.write_joint_pos, args=(\"tp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n self.tp.set_initial_wp()\n self.tp.set_final_wp(full_wp)\n\n if self.next_state == \"estop\":\n break\n # TODO: Set the positions and break if estop is needed\n self.tp.go()\n # self.rexarm.set_positions(wp)\n # time.sleep(1.5)",
"def process_datasets(self):\n\n with open(self.mappings, \"r+\") as json_file:\n emsl_to_jgi = json.load(json_file)\n emsl_to_jgi_copy = copy.deepcopy(emsl_to_jgi)\n\n contaminant_file_loc = emsl_to_jgi[\"contaminant_file_loc\"]\n # run for each dataset\n for dataset_id, values in emsl_to_jgi.items():\n if dataset_id not in [\n \"contaminant_file_loc\",\n \"analysis_activity_file_loc\",\n \"data_objects_file_loc\",\n \"STUDY\",\n \"tools_used\",\n ]:\n raw_file_loc = values[\"raw_file_loc\"]\n self.dataset_name = values[\"dataset_name\"]\n # dataset search against a fasta file\n for genome_directory, locations in values[\n \"genome_directory\"\n ].items():\n # clear object to prepare next job\n ANALYSIS_JOBS_OBJECT.clear()\n\n # create log_dir\n self.save_job_results = os.path.join(\n self.result_loc, dataset_id, genome_directory\n )\n self.log_collected_at = os.path.join(\n os.path.abspath(self.save_job_results), \"analysis_jobs_logs\"\n )\n if not os.path.exists(self.log_collected_at):\n os.makedirs(self.log_collected_at)\n\n files = [locations[\"faa_file_loc\"], contaminant_file_loc]\n contaminated_faa_file_loc = self.contaminate_fasta(files)\n\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"contaminated_faa_file_loc\",\n contaminated_faa_file_loc,\n emsl_to_jgi_copy,\n )\n # convert .faa to .txt\n faa_txt_file = self.convert_faa2txt(\n dataset_id, contaminated_faa_file_loc\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"txt_faa_file_loc\",\n faa_txt_file,\n emsl_to_jgi_copy,\n )\n\n # log & run job\n self.run_n_log_job(\n dataset_id,\n genome_directory,\n contaminated_faa_file_loc,\n raw_file_loc,\n emsl_to_jgi_copy,\n )\n\n # merge analysis\n resultant_file = self.merge_analysis_jobs(\n dataset_id, genome_directory\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"resultant_file_loc\",\n resultant_file,\n emsl_to_jgi_copy,\n )\n\n # capture the job metadata object\n logger.info(\"Jobrun\", extra=LOGGED_ANALYSIS_JOB)\n\n # update emsl_to_jgi.json\n json_file.seek(0) # move back to BOF.\n json_file.truncate()\n json_file.write(json.dumps(emsl_to_jgi_copy, default=str, indent=4))\n pass",
"def run(self):\n\t\tdf_iter = self.file_to_df(50000)\n\t\tdf_airport = self.airport_file_to_df()\n\t\tfor df in df_iter: # type: pd.DataFrame\n\t\t\tdf.drop_duplicates(inplace=True)\n\t\t\tdf = self.transform(df, df_airport)\n\n\t\t\tdf_result = self.get_only_new_records(\n\t\t\t\tdf=df,\n\t\t\t\tdf_columns=self.join_columns,\n\t\t\t\ttable_columns=self.join_columns\n\t\t\t)\n\n\t\t\tif len(df_result) > 0:\n\t\t\t\t# df_result.drop(self.table_columns, axis=1)\n\n\t\t\t\tself.save(\n\t\t\t\t\tdf=df_result,\n\t\t\t\t\ttable_name=\"travel_dimension\",\n\t\t\t\t\tdf_columns=self.table_columns,\n\t\t\t\t\ttable_colums=self.table_columns\n\t\t\t\t)",
"def public_transit_import(request, simulation):\n try:\n # Create a set with all existing OD pairs in the OD matrix.\n matrix = simulation.scenario.supply.pttimes\n pairs = get_query('public_transit', simulation)\n existing_pairs = set(pairs.values_list('p_id', 'q_id'))\n # Create a dictionary to map the centroid user ids with the centroid\n # objects.\n centroids = get_query('centroid', simulation)\n centroid_mapping = dict()\n centroid_id_mapping = dict()\n for centroid in centroids:\n centroid_mapping[centroid.user_id] = centroid\n centroid_id_mapping[centroid.user_id] = centroid.id\n # Convert the imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n if encoded_file.name.split(\".\")[-1] == 'tsv':\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n else:\n reader = csv.DictReader(tsv_file, delimiter=',')\n # For each imported OD pair, if the pair already exists in the OD Matrix,\n # it is stored to be updated, else it is stored to be created.\n to_be_updated = set()\n to_be_created = list()\n for row in reader:\n pair = (\n centroid_id_mapping[int(row['origin'])],\n centroid_id_mapping[int(row['destination'])]\n )\n if pair in existing_pairs:\n to_be_updated.add((*pair, float(row['travel time'])))\n else:\n to_be_created.append(\n Matrix(p=centroid_mapping[int(row['origin'])],\n q=centroid_mapping[int(row['destination'])],\n r=float(row['travel time']),\n matrices=matrix)\n )\n if to_be_updated:\n # Create a mapping between the values (p, q, r) and the ids.\n pair_values = set(pairs.values_list('id', 'p_id', 'q_id'))\n pair_mapping = dict()\n for pair in pair_values:\n pair_mapping[pair[1:]] = pair[0]\n # Find the pairs that really need to be updated (i.e. r is also\n # different).\n pair_values = set(pairs.values_list('p_id', 'q_id', 'r'))\n to_be_updated = to_be_updated.difference(pair_values)\n # Retrieve the ids of the pairs to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [pair_mapping[pair[:2]] for pair in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)]\n for chunk in chunks:\n cursor.execute(\n \"DELETE FROM Matrix \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the centroids ids and the centroid objects.\n centroid_id_mapping = dict()\n for centroid in centroids:\n centroid_id_mapping[centroid.id] = centroid\n # Now, create the updated pairs with the new values.\n to_be_created += [\n Matrix(p=centroid_id_mapping[pair[0]],\n q=centroid_id_mapping[pair[1]],\n r=pair[2],\n matrices=matrix)\n for pair in to_be_updated\n ]\n # Create the new OD pairs in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 20000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n for chunk in chunks:\n Matrix.objects.bulk_create(chunk, chunk_size)\n return HttpResponseRedirect(reverse(\n 'metro:public_transit_view', args=(simulation.id,)\n ))\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': 'public_transit',\n }\n return render(request, 'metro_app/import_error.html', context)",
"def process(self):\r\n\r\n\t\ttot_scenes = len(self.scenes)\r\n\r\n\t\t# motion intensity is how much the objects inside the frame move\r\n\t\t# this holds how much intensity each scene has\r\n\t\tself.scene_motion_intensity = [0]*tot_scenes\r\n\r\n\t\t# camera motion occurs when the recording camera itself moves\r\n\t\t# this holds the number of frames in each scene that have\r\n\t\t# camera motion\r\n\t\tself.scene_camera_motion = [0]*tot_scenes\r\n\r\n\t\t# for multithreading\r\n\t\ttmp_file1 = cv2.VideoCapture(self.file_path)\r\n\t\ttmp_file2 = cv2.VideoCapture(self.file_path)\r\n\r\n\t\tf1 = lambda : self.estimate(0, tot_scenes//3, self.file)\r\n\t\tf2 = lambda : self.estimate(tot_scenes//3, 2*tot_scenes//3, tmp_file1)\r\n\t\tf3 = lambda : self.estimate(2*tot_scenes//3, tot_scenes, tmp_file2)\r\n\r\n\t\tt1 = HelperThread(\"Optical Flow 1\", f1)\r\n\t\tt2 = HelperThread(\"Optical Flow 2\", f2)\r\n\t\tt3 = HelperThread(\"Optical Flow 3\", f3)\r\n\r\n\t\tt1.start(), t2.start(), t3.start()\r\n\t\tt1.join(), t2.join(), t3.join()\r\n\r\n\t\ttmp_file2.release()\r\n\t\ttmp_file1.release()",
"def execute(self):\n self.status_message = \"State: Execute - Executing Motion Plan\"\n self.current_state = \"execute\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n target=self.write_joint_pos, args=(\"notp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n # TODO: Send the waypoints to the trajectory planner and break if estop\n if self.next_state == \"estop\":\n break\n self.rexarm.set_positions(full_wp)\n time.sleep(1.5)",
"def upload_current_stack(self):\n dir_path = self.upload_path\n Path(dir_path).mkdir(parents=True, exist_ok=True)\n\n # Last image in current stack is target (y), the others are features (X)\n X = self.current_stack[:-1]\n y = self.current_stack[-1]\n base_file_name, extension = self.file_name.split('.')\n\n file_name_x = f\"{base_file_name}/X.{extension}\"\n file_name_y = f\"{base_file_name}/y.{extension}\"\n\n # Create directory\n Path(f\"{dir_path}/{base_file_name}\").mkdir(parents=True, exist_ok=True)\n \n for array, file_name in zip([X,y], [file_name_x, file_name_y]):\n full_file_path = f\"{dir_path}/{file_name}\"\n\n # Save X and y\n np.save(\n full_file_path,\n array.data\n )\n \n self.gstorage.put(\n local_path=full_file_path,\n remote_path=full_file_path\n )\n\n Path(full_file_path).unlink()",
"def save_run(self):\n values = get_serializeArray_form_values(self.request)\n\n plates, ichips, aliquots = self.transmogrify_inputs(values['plates'])\n plates = self.remove_empty_plates(plates)\n plates = self.reorder_plates(plates)\n\n solutions = [values[x] for x in values if x.startswith('solution-')]\n\n run = self.context\n run.plates = plates\n run.run_date = values.get('run_date', run.run_date)\n run.solutions = solutions",
"def queue_trj(self,replica):\n\tpoint = []\n\tfor i in range(0, len(replica.chain.coords) ):\n\t point.append((replica.chain.coords[i][0],replica.chain.coords[i][1]))\n\tself.trjqueue[replica.repnum].append(point)\n\tif len(self.trjqueue[replica.repnum]) >= self.BUFFERSIZE:\n\t self.dump_trjqueue(replica)",
"def upload_all(self):\n adapter = self.get_adapter()\n hostname = socket.gethostname()\n for node in self.__nodes:\n if node['hostname'] != hostname:\n adapter(node,self.__local_dir).upload(self.__dataset_name)",
"def _upload_all(self, uploads):\n for upload in uploads:\n if isinstance(upload, dict):\n self._upload(upload)\n elif upload in uploads and isinstance(uploads[upload], dict):\n self._upload(uploads[upload])\n else:\n raise Exception('invalid upload object')",
"def run(self,*args):\n \n for i in xrange(self.K):\n theta = [self.q1theta[n][i] for n in self.q1theta.dtype.names]\n r = self.po.apply_async(self.model, theta)\n self.phi[i]= r.get()[-1]#self.model(*theta)[-1] #phi is the last point in the simulation\n\n self.done_running = True",
"def traj_pipeline(self, prev_trmat=None):\n # image_seq = [image(frame_idx-2), image(frame_idx-1), image(frame_idx)]\n # egomotion update\n egomo = self.est.get_egomotion(self.image_seq)\n\n # egomotion transformation\n assert self.frame_idx >= 2, 'invalid self.frame_idx'\n if prev_trmat is None:\n assert self.frame_idx == 2, 'invalid self.frame_idx'\n # initialization of ego transformation matrix\n init_trmat = egomo_vec2mat(self.init_egomo_vec)\n prev_trmat = np.matmul(init_trmat, egomo_vec2mat(egomo[0])) # frame 0 to 1\n egomo_trmat = np.matmul(prev_trmat, egomo_vec2mat(egomo[1]))\n\n # tracker list update\n for t in self.t_list:\n # skip lost trackers\n if t.get_status()==False:\n continue\n # bounding box & depth\n bbox, depth = t.get_bbox(), t.get_depth()\n # project to 3d camera coordinate\n p3d_cam = cam_proj(self.k_mat, bbox, depth)\n # transform to world coordinate\n p3d = coord_transform(egomo_trmat, p3d_cam)\n t.add_attr_to_est_dict('traj', p3d)\n \n return egomo_trmat",
"def upload_fitting(list_of_contents, list_of_names, list_of_dates, session_id, job_id, format):\n\n min_ = 60\n max_ = 950\n\n print('UPLOAD')\n\n if session_id is not None and list_of_contents is None:\n print(f'Running in session {session_id}')\n\n # make a subdirectory for this session if one doesn't exist\n input_dir = join(BASE_DIR, 'input', f'input_{session_id}')\n input_dir_session = os.path.join(BASE_DIR, 'input', f'input_{session_id}', 'fitting')\n try:\n os.mkdir(input_dir)\n os.mkdir(join(input_dir, 'fitting'))\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(input_dir, 'fitting'))\n except FileExistsError:\n pass\n\n # Create an output directory for this session if it doesn't exist\n output_dir = join(BASE_DIR, 'output', f'output_{session_id}')\n try:\n os.mkdir(output_dir)\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'fitting'))\n os.mkdir(join(output_dir, 'fitting', 'images'))\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'fitting', 'images'))\n except FileExistsError:\n pass\n\n def _clean_input_dir():\n \"\"\"\n Clean the input directory by removing every existing file.\n \"\"\"\n for existing_file in os.listdir(join(input_dir, 'fitting')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'fitting', existing_file))\n\n try:\n\n # If the user isn't uplaoding anything and\n # hasn't uploaded anything, ask them to do so.\n if list_of_contents is None and len(os.listdir(join(input_dir, 'fitting'))) == 0:\n return 'Please upload some files. Note: Refreshing page will \\\n remove input files. \\\n Uploading multiple times will first \\\n remove all existing files.', [], min_, max_\n\n # if the user is uploading something, first clean the input directory,\n # then write the uploaded files to BASE_DIR/input/input_{session_id}\n if list_of_contents:\n\n _clean_input_dir()\n\n # Save successfully uploaded filenames here\n written = list()\n\n # Write uploaded files to BASE_DIR/input/input_{session_id}\n # If any of the files do not end in .txt,\n # or cannot be decoded properly, or cannot be parsed\n # into Voigt models, then clean the input directory and print\n # the error message. Otherwise, show a bullet list of files\n # uploaded to the input directory.\n\n for i, c in enumerate(list_of_contents):\n\n if not list_of_names[i].endswith('.txt'):\n raise Exception(f'File {list_of_names[i]} must be .txt')\n\n s = c.split(',')[1]\n\n try:\n\n s = base64.b64decode(s)\n import chardet\n encoding = chardet.detect(s)['encoding']\n if encoding == 'UTF-16':\n s = s.decode('utf-16')\n elif encoding == 'ascii':\n s = s.decode('utf-8')\n else:\n s = s.decode(encoding)\n except UnicodeDecodeError as e:\n print(e)\n raise Exception(f'Error uploading file {list_of_names[i]}.\\\n Please check file format and try again.')\n\n with open(join(input_dir, 'fitting', list_of_names[i]), 'w') as f:\n f.write(s)\n\n try:\n d = read_data(join(input_dir, 'fitting',\n list_of_names[i]), format)\n temp = d[2]\n print(temp[0])\n min_ = max(min_, int(temp[0]) + 1)\n max_ = min(max_, int(temp[-1]) - 1)\n except Exception as e:\n raise Exception(f'Cannot parse file {list_of_names[i]}: {e}')\n\n written.append(list_of_names[i])\n\n res = [html.Li(x) for x in written]\n res.insert(0, html.P(f'Success! 
{len(written)} \\\n .txt files were uploaded.'))\n return res, [{'label': f, 'value': f} for f in os.listdir(input_dir_session)], min_, max_\n\n except Exception as e:\n # If any of the files raise an error (wrong extension,\n # decoding error, error parsing into models),\n # then print the error message.\n _clean_input_dir()\n import traceback\n traceback.print_exc()\n return f'An error occurred while uploading files: {e}', [], min_, max_",
"def add_solution(self, paths):\n assert self.benchmark.status[\"state\"] == \"RUNNING\", print(\n f\"Benchmark seems to be inactive. state: {self.benchmark.status(['state'])}\")\n assert self.benchmark.status[\"data\"][\"problem_states\"][self.batch_pos] == 0, print(\n \"Problem seems to be already solved\")\n\n self.paths = paths\n self.time = time()-self.start_time\n self.benchmark.status[\"data\"][\"problem_states\"][self.batch_pos] = 1\n\n if all(self.benchmark.status[\"data\"][\"problem_states\"]):\n self.status = {\"state\": \"SUBMITTING\", \"data\": None}\n self.benchmark.submit()"
] | [
"0.59522563",
"0.59471226",
"0.5912256",
"0.5899517",
"0.5856624",
"0.5813247",
"0.57508117",
"0.57315934",
"0.5581307",
"0.55410546",
"0.5502698",
"0.54654527",
"0.5458855",
"0.53929365",
"0.53702384",
"0.5347481",
"0.5322975",
"0.53141",
"0.52880365",
"0.5242809",
"0.5241041",
"0.5234087",
"0.5208282",
"0.5193665",
"0.51918745",
"0.5173756",
"0.5171959",
"0.5162413",
"0.5142477",
"0.513176"
] | 0.6943155 | 0 |
returns a list of classes and titles, parsing through 'html' | def get_classes(html):
# elements = html.find_all("span", "code")
# titles = html.find_all("span", "title")
# classes = []
# for i in range(len(elements)):
# item = elements[i]
# tit = titles[i]
# classes += [(item.text.replace('\xa0', ' '), tit.text.replace('\xa0', ' '))]
# return classes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def EnrolledClasses(self,html): \n classes = []\n soup = BeautifulSoup(html)\n for element in soup.find_all(\"input\"):\n if element[\"name\"] == \"TITLE\" and element[\"value\"]:\n classes.append(element.get(\"value\"))\n return classes",
"def extract_all_tags(final_link, driver):\n\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(str(final_link))\n classes = []\n tags = ['div', 'td', 'li', 'a']\n for tag in tags:\n a = driver.find_elements_by_tag_name(str(tag))\n b = len(a)\n for i in range(b):\n try:\n if a[i].get_attribute(\"class\") == None or a[i].get_attribute(\"class\") == '' or a[i].get_attribute(\"class\") == ' ' or a[i].get_attribute(\"class\") == ' ':\n continue\n else:\n className = a[i].get_attribute(\"class\").strip().split(\" \")\n for classN in className:\n classes.append(str(tag) + '.' + str(classN))\n\n except:\n continue\n\n #driver.quit()\n classes = list(dict.fromkeys(classes))\n return(classes)",
"def parseSearchHtml(self):\n pass",
"def parseSearchHtml(self):\n pass",
"def __g1(soup):\n news = []\n container = soup.select('ul.highlights > li')\n\n for item in container:\n news.append(dict(title=item.a.span.string, link=item.a['href']))\n return news",
"def parse_soup(self, soup):\n # find all class_='gs_r gs_or gs_scl' => each result\n return soup.find_all('li', class_='ais-InfiniteHits-item')",
"def get_tags(html_soup):\n \n tags = html_soup.findAll('a', attrs = {\"class\" : \"tag\"})\n all_tags = []\n for i in tags:\n all_tags.append(i.get_text())\n \n return all_tags",
"def find_text_content_by_class(bs, tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n item_text = strip_tags(str(item))\n result.append(\" \".join(item_text.split()))\n return result",
"def scrape_html(html):\n return YoutubeScrape(BeautifulSoup(html))",
"def get_tags(html):\n\ttitle = re.findall('\"title\":\"(.*?)\",', html)[0]\n\ttitle = codecs.getdecoder(\"unicode_escape\")(title)[0]\n\n\tartist = re.findall('\"username\":\"(.*?)\",', html)[0]\n\tartist = codecs.getdecoder(\"unicode_escape\")(artist)[0]\n\n\tgenre = re.findall('\"genre\":\"(.*?)\",', html)[0]\n\tgenre = codecs.getdecoder(\"unicode_escape\")(genre)[0]\n\n\treturn title, artist, genre",
"def parse_page(html):\n\n soup = BeautifulSoup(html, \"html.parser\")\n review_soups = soup.find_all(\"script\", type=\"application/ld+json\")\n\n description_list = []\n for soup in review_soups:\n text = soup.string\n # decode the json into python dict\n js_dict = json.loads(text)\n\n if \"review\" in js_dict:\n review_list = js_dict[\"review\"]\n\n for i in range(len(review_list)):\n review_dict = review_list[i]\n description_list.append(review_dict[\"description\"])\n\n return description_list",
"def extractSearchResults(self, html):\n results = list()\n soup = BeautifulSoup(html, 'html.parser')\n div = soup.find('div', id='main')\n if (type(div) == types.NoneType):\n div = soup.find('div', id='center_col')\n if (type(div) == types.NoneType):\n div = soup.find('body')\n if (type(div) != types.NoneType):\n lis = div.findAll('a')\n if(len(lis) > 0):\n for link in lis:\n if (type(link) == types.NoneType):\n continue\n \n url = link['href']\n if url.find(\".google\") > 6:\n continue\n \n url = self.extractUrl(url)\n if(cmp(url, '') == 0):\n continue\n title = link.renderContents()\n title = re.sub(r'<.+?>', '', title)\n result = SearchResult()\n result.setURL(url)\n print '### URL: ' + url\n result.setTitle(title)\n span = link.find('div')\n if (type(span) != types.NoneType):\n content = span.renderContents()\n content = re.sub(r'<.+?>', '', content)\n result.setContent(content)\n results.append(result)\n return results",
"def classes(attrs):\n return attrs.get('class', '').split()",
"def get_tags_with_class(self, class_name: str):\n return self.soup.find_all(attrs={'class': class_name})",
"def parse_list_page_html(html):\n episode_list = []\n\n lines = html.split('\\n')\n for line in lines:\n if 'class=\"topictitle\"' in line and ' - ' in line and 'x' in line:\n datum = {}\n query = line.split('/viewtopic.php?f=177&t=')[1].split('&')[0]\n episode_season_str = line.split('class=\"topictitle\">')[1].split(' - ')[0]\n season_str = episode_season_str.split('x')[0]\n episode_str = episode_season_str.split('x')[1]\n datum['query'] = query\n datum['season'] = int(season_str)\n datum['episode'] = int(episode_str)\n episode_list.append(datum)\n\n return episode_list",
"def parse_top_movies(html: str) -> ResultSet:\n\n soup = BeautifulSoup(html, \"html.parser\")\n return soup.find_all(\"div\", class_=\"lister-item-content\")",
"def __cnn(soup): \n news = []\n headers = soup.find_all('h3', class_='most__read__title') \n for h3 in headers:\n title = h3.a['title']\n link = h3.a['href'] \n news.append(dict(title=title, link=link))\n \n return news",
"def extract_classes(soup):\r\n select = soup.find('select', id='dnn_ctr11396_TimeTableView_ClassesList')\r\n return {option['value']: option.text for option in select.findChildren('option')}",
"def _scrape(self):",
"def classes(self):\n if self.classname:\n return [self.classname]\n return []",
"def _extract_tags(html):\n tags = re.findall(r'<[^>]+>', html)\n\n return tags",
"def archive_parse_for_posts(page_html):\n # <div\\s+class=\"post.+data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\n post_info_regex = \"\"\"<div\\s+class=\"post.+?data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\"\"\"\n post_info = re.findall(post_info_regex, page_html, re.IGNORECASE|re.DOTALL)\n return post_info",
"def get_headers():\n soup = get_html()\n titles = []\n for i in soup.find_all('i'):\n header = str(i.text)\n titles.append(header.strip())\n return titles",
"def html_class(cls):\n return ' '.join(cls.html_classes)",
"def classes(self):\n return self.browser.classes(self)",
"def print_class_dict_rough(self):\n for tag in self.post_div.find_all(\"b\"):\n if tag.next_sibling is not None and tag.next_sibling.name == \"br\":\n text = str(tag.text).lower()\n while \" \" in text:\n text = text.replace(\" \", \"-\")\n i = 0\n while i < len(text):\n if not text[i].isalpha() and text[i] != \"-\":\n text = text[:i] + text[i + 1:]\n else:\n i += 1\n if len(text) > 0:\n if tag.find_next(\"a\") is not None:\n link = tag.find_next(\"a\")[\"href\"]\n else:\n link = \"\"\n print(\"\\\"\" + text + \"\\\":\\\"\" + link + \"\\\",\")",
"def parse_movies(self):\n soup = super().get_soup()\n return soup.find_all(\"div\", class_=\"lister-item\")",
"def find_ahref_by_class(tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n href = str(item.find('a'))\n href = href.split('\"')[1]\n result.append(href)\n return result",
"def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()",
"def class_frequencies(url):\n links_list_list = []\n try:\n request = requests.get(url)\n soup = BeautifulSoup(request.content, \"lxml\")\n classes = []\n for element in soup.find_all(class_=True):\n list_class = element.get(\"class\")\n classe = \"\"\n for elt in list_class:\n classe += elt + \" \"\n classe = classe[: -1]\n classes.append(classe)\n # print(\"Class:\", classes, \":\", len(classes))\n dict_frequencies = Counter(classes)\n list_frequencies = list(dict_frequencies.values())\n list_frequencies = list(set(list_frequencies))\n list_frequencies = sorted(list_frequencies, reverse=True)\n # list_frequencies = list_frequencies[: 5]\n # print(\"List frequency:\", list_frequencies)\n for classe in dict_frequencies.keys():\n if dict_frequencies[classe] in list_frequencies and dict_frequencies[classe] > 2:\n # print(\"Classes:\", classe, \"|\", dict_frequencies[classe])\n is_project_class = True\n for classes_removed in list_html_classes_removed:\n if classes_removed in classe:\n is_project_class = False\n links_projects_list = []\n soup2 = soup.find_all(class_=classe)\n for i in soup2:\n linl = i.find('a', href=True)\n links_projects_list.append(linl)\n if linl is None:\n is_project_class = False\n\n if is_project_class:\n for i in range(len(links_projects_list)):\n links_projects_list[i] = links_projects_list[i].get('href')\n # print('Projects Links Found : ', links_projects_list)\n links_list_list += [links_projects_list]\n b_set = set(map(tuple, links_list_list))\n list_unique_lists = list(map(list, b_set))\n domain = url.replace('http://', '')\n domain = domain.replace('https://', '')\n ndx = domain.find('/')\n domain = domain[: ndx]\n # print(\"b:\", list_unique_lists, \"| domain:\", domain)\n count_good = 0\n list_good_list = []\n for list_urls_possibles in list_unique_lists:\n is_a_good_list = True\n for i, url_possible in enumerate(list_urls_possibles):\n if url_possible[: 4] == \"http\":\n if domain not in url_possible[: -2] or \".jpg\" in url_possible or url_possible[: -2] in url:\n is_a_good_list = False\n else:\n new_url_possible = domain + \"/\" + url_possible\n if \".jpg\" in new_url_possible or new_url_possible[: -2] in url:\n is_a_good_list = False\n else:\n list_urls_possibles[i] = new_url_possible\n if is_a_good_list:\n count_good += 1\n list_good_list.append(list_urls_possibles)\n # print(list_urls_possibles)\n # print(count_good)\n if count_good > 0:\n return \"Found by class\", from_lists_to_list(list_good_list)\n else:\n url_test = url + \"/\"\n index_projects = url_test.find(\"projects\")\n index_slash = url_test.find(\"/\", index_projects)\n if len(url) > index_slash + 2:\n return \"Direct project\", [url]\n else:\n return \"List of non clickable projects\", [url]\n except requests.exceptions.ConnectionError:\n print(\"Error requests:\", url)\n return \"Nothing\", []"
] | [
"0.8052008",
"0.6688938",
"0.6265724",
"0.6265724",
"0.61887735",
"0.6152137",
"0.60088265",
"0.5969638",
"0.5957513",
"0.59506553",
"0.59392065",
"0.59321856",
"0.5919949",
"0.5894865",
"0.5883525",
"0.5863221",
"0.5795686",
"0.5791473",
"0.57911634",
"0.5790925",
"0.5789725",
"0.57255834",
"0.57207644",
"0.57099706",
"0.56998724",
"0.5698746",
"0.5697803",
"0.5685878",
"0.5683335",
"0.56809545"
] | 0.80175537 | 1 |
Download master cdf file from cdaweb for 'dataset' | def _download_metafile(dataset, path=None):
if not path:
path = sunpy.config.get('downloads', 'sample_dir')
base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'
fname = dataset.lower() + '_00000000_v01.cdf'
url = base_url + fname
try:
downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)
except ModuleNotFoundError:
downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)
return downloaded_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")",
"def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")",
"def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")",
"def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df",
"def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)",
"def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)",
"def download_dataset(url=DATASET_URL):\n # disable insecure https warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n c = urllib3.PoolManager()\n with c.request(\"GET\", url, preload_content=False) as res, open(\n LOCAL_FILE_NAME, \"wb\"\n ) as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")",
"def download_dataset(url=DATASET_URL):\n df = pd.read_csv(url, index_col=0)\n \n # ディレクトリが無ければ,作成する\n if not os.path.isdir(BASE_DIR):\n os.makedirs(BASE_DIR)\n \n df.to_csv(LOCAL_FILE_NAME)",
"def _download_from_web(*, ds_name: str, ds_path: Path):\n import cgi\n import zipfile\n import httpx\n from tqdm import tqdm\n\n url = DATASET_OPTIONS[ds_name]['web']\n if ds_path.exists():\n print('Dataset directory already exists; remove it if you wish to '\n 're-download the dataset')\n return\n\n ds_path.mkdir(parents=True, exist_ok=True)\n\n with httpx.Client() as client:\n with client.stream('GET', url=url) as response:\n if not response.is_error:\n pass # All good!\n else:\n raise RuntimeError(\n f'Error {response.status_code} when trying '\n f'to download {url}')\n\n\n header = response.headers['content-disposition']\n _, params = cgi.parse_header(header)\n # where to store the archive\n outfile = ds_path / params['filename']\n remote_file_size = int(response.headers['content-length'])\n\n with open(outfile, mode='wb') as f:\n with tqdm(desc=params['filename'], initial=0,\n total=remote_file_size, unit='B',\n unit_scale=True, unit_divisor=1024,\n leave=False) as progress:\n num_bytes_downloaded = response.num_bytes_downloaded\n\n for chunk in response.iter_bytes():\n f.write(chunk)\n progress.update(response.num_bytes_downloaded -\n num_bytes_downloaded)\n num_bytes_downloaded = (response\n .num_bytes_downloaded)\n\n assert outfile.suffix == '.zip'\n\n with zipfile.ZipFile(outfile) as zip:\n for zip_info in zip.infolist():\n path_in_zip = Path(zip_info.filename)\n # omit top-level directory from Zip archive\n target_path = str(Path(*path_in_zip.parts[1:]))\n if str(target_path) in ('.', '..'):\n continue\n if zip_info.filename.endswith('/'):\n (ds_path / target_path).mkdir(parents=True, exist_ok=True)\n continue\n zip_info.filename = target_path\n print(f'Extracting: {target_path}')\n zip.extract(zip_info, ds_path)\n\n outfile.unlink()",
"def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def download_dataset(self):\n raise NotImplementedError",
"def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)",
"def _download_cxr_model(self):\n file_id = \"1KIsLmVv8jKTVG_LxchMZAvR7rugHy7uB\"\n download_from_google_drive(file_id=file_id, folder=\"data/\", name=\"covid_cxr.zip\")",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()",
"def cma_bst(redownload: bool = False) -> Dataset:\n return Dataset.get(\"cma_bst\", redownload=redownload)",
"def download_coco(): \n file_type = '.zip'\n img_to_download = ['val','test','train']\n ann_to_download = ['annotations_trainval','image_info_test']\n base_url_images = 'http://images.cocodataset.org/zips/'\n base_url_ann = 'http://images.cocodataset.org/annotations/'\n\n\n click.echo(click.style(f\"\\n DOWNLOAD ANNOTATIONS \\n\", bg='green', bold=True, fg='white'))\n for ann in ann_to_download:\n\n ## build Urls\n ann_url = base_url_ann + ann + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\nDownloading of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be downloaded')\n\n zip_filename_location = save_zip_from_url(ann_url,cfg.PATH_ANNOTATIONS)\n #zip_filename_location = \"/home/kamgo-gpu/Schreibtisch/stuff_annotations_trainval2017.zip\"\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n\n click.echo(click.style(f'\\n Extraction of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be extracted and the zip-file will be deleted')\n\n # Extract zip to annotation directory\n Extract_zip_file(zip_filename_location,cfg.PATH_ANNOTATIONS)\n\n click.echo(click.style(f\"\\n DOWNLOAD IMAGES \\n\", bg='green', bold=True, fg='white'))\n for dataset in img_to_download:\n ## build Urls\n dataset_img_url = base_url_images + dataset + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\n Downloading of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be downloaded')\n\n zip_filename_location = save_zip_from_url(dataset_img_url,cfg.PATH_IMAGES)\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n click.echo(click.style(f'\\n Extraction of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be extracted and the zip-File will be deleted')\n\n # set complet Path to save images\n Extract_zip_file(zip_filename_location,cfg.PATH_IMAGES)\n\n click.echo(click.style(f'\\n Download and extraction termined successfull {dataset} ...\\n', bg='green', bold=True, fg='white'))",
"def _download_data(self):\n logger.info('Downloading ChemIDplus data...')\n outfile_path = self._src_data_dir / self._src_fname\n\n self._ftp_download(self._src_server,\n self._src_dir_path,\n self._src_data_dir,\n self._src_fname)\n\n parser = ET.iterparse(outfile_path, ('start', 'end'))\n date = next(parser)[1].attrib['date']\n version = date.replace('-', '')\n outfile_path.rename(self._src_data_dir / f'chemidplus_{version}.xml')\n logger.info('Finished downloading ChemIDplus data')",
"def download_hess_dr1_data():\n download_data_files(FILENAMES_HESS_DR1)",
"def download(args):\n with_dataset(args, Dataset._download)",
"def download(directory: str) -> None:\n path = f'{directory}/m5/datasets'\n if not os.path.exists(path):\n download_file(directory=path,\n source_url=M5.source_url,\n decompress=True)",
"def download_data_and_save():\n url = 'https://github.com/djay/covidthailand/wiki/combined.csv'\n s=requests.get(url).content\n global df\n global last_updated\n df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n df.to_parquet(file_name, compression='UNCOMPRESSED')\n df.to_csv('jaydata.csv')\n last_updated = df['Date'][df.index[-1]].strftime(\"%d %B %Y\")\n\n url = 'https://raw.githubusercontent.com/wiki/djay/covidthailand/vaccinations.csv'\n s=requests.get(url).content\n global vac_df\n vac_df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n vac_df.to_parquet('vaccination.parquet', compression='UNCOMPRESSED')\n\n print(\"Data downloaded and saved successfully. Data up to \" + last_updated)",
"def cli(ctx, dataset_collection_id, file_path):\n return ctx.gi.dataset_collections.download_dataset_collection(dataset_collection_id, file_path)",
"def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )",
"def download_dataset(self):\n dataset_name = ADE20K_URL.split(\"/\")[-1].split(\".\")[0]\n req = urllib.request.Request(ADE20K_URL, method=\"HEAD\")\n size_file = urllib.request.urlopen(req).headers[\"Content-Length\"]\n download = \"n\"\n while download != \"y\":\n if not self.yes_all:\n download = input(f\"You are about to download {dataset_name} ({size_file} bytes) to the temporary folder {self.tmp_path}. Do you want to continue? [y/n] \\n\")\n if self.yes_all or download == \"y\":\n logger.info(f\"Downloading dataset {dataset_name} at {ADE20K_URL} to temporary folder {self.tmp_path}...\")\n zip_path, hdrs = urllib.request.urlretrieve(ADE20K_URL, f\"{self.tmp_path}/{dataset_name}.zip\")\n logger.info(f\"Extracting {zip_path} to temporary folder {self.tmp_path}...\")\n with zipfile.ZipFile(f\"{zip_path}\", 'r') as z:\n z.extractall(f\"{self.tmp_path}\")\n self.input_data_path = zip_path[:-4]\n break\n elif download == \"n\":\n logger.error(f\"Cannot pursue without downloading the dataset.\")\n sys.exit()\n else:\n logger.error(\"Please enter a valid answer (y or n).\")",
"def download_and_prepare_dmipy_example_dataset(self):\r\n subject_ID = 100307\r\n self.download_subject(subject_ID)\r\n self.prepare_example_slice(subject_ID)"
] | [
"0.6694223",
"0.6588644",
"0.65666646",
"0.65666646",
"0.6565688",
"0.64363176",
"0.64334583",
"0.6366243",
"0.63625884",
"0.63276845",
"0.6297841",
"0.6201097",
"0.61550426",
"0.61423147",
"0.61154634",
"0.6100518",
"0.6091457",
"0.60131127",
"0.5990933",
"0.5977136",
"0.5911065",
"0.5902159",
"0.59016865",
"0.58982104",
"0.5893955",
"0.5869874",
"0.5862128",
"0.5855925",
"0.58549774",
"0.58470196"
] | 0.71092033 | 0 |
Downloads Wind/3DP CDF files via SunPy/Fido from CDAWeb | def wind3dp_download_fido(dataset, startdate, enddate, path=None, max_conn=5):
trange = a.Time(startdate, enddate)
cda_dataset = a.cdaweb.Dataset(dataset)
try:
result = Fido.search(trange, cda_dataset)
filelist = [i[0].split('/')[-1] for i in result.show('URL')[0]]
filelist.sort()
if path is None:
filelist = [sunpy.config.get('downloads', 'download_dir') + os.sep + file for file in filelist]
elif type(path) is str:
filelist = [path + os.sep + f for f in filelist]
downloaded_files = filelist
for i, f in enumerate(filelist):
if os.path.exists(f) and os.path.getsize(f) == 0:
os.remove(f)
if not os.path.exists(f):
downloaded_file = Fido.fetch(result[0][i], path=path, max_conn=max_conn)
# downloaded_files = Fido.fetch(result, path=path, max_conn=max_conn)
# downloaded_files.sort()
except (RuntimeError, IndexError):
print(f'Unable to obtain "{dataset}" data for {startdate}-{enddate}!')
downloaded_files = []
return downloaded_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def download_fermi_crab_3fhl():\n download_data_files(FILENAMES_FERMI_3FHL_CRAB)",
"def dascasi_download():\n p = argparse.ArgumentParser(description=\"download DASC all-sky camera data\")\n p.add_argument(\"site\", choices=[\"EAA\", \"FYU\", \"KAK\", \"PKR\", \"TOO\", \"VEE\"])\n p.add_argument(\n \"startend\", help=\"start/end times UTC e.g. 2012-11-03T06:23 2012-11-03T07\", nargs=2\n )\n p.add_argument(\"odir\", help=\"directory to write downloaded FITS to\")\n p.add_argument(\"-w\", \"--wavelen\", help=\"request specific wavelength(s)\", nargs=\"+\")\n p.add_argument(\"-host\", default=\"ftp://optics.gi.alaska.edu\")\n p = p.parse_args()\n\n # host = \"ftp://mirrors.arsc.edu/AMISR/PKR/DASC/RAW/\"\n download(p.startend, p.site, p.odir, p.host, p.wavelen)",
"def main(cdl_ws, cdl_year='', overwrite_flag=False):\r\n logging.info('\\nDownload and extract CONUS CDL rasters')\r\n site_url = 'ftp://ftp.nass.usda.gov/download/res'\r\n\r\n cdl_format = '{}_30m_cdls.{}'\r\n\r\n for cdl_year in list(util.parse_int_set(cdl_year)):\r\n logging.info('Year: {}'.format(cdl_year))\r\n zip_name = cdl_format.format(cdl_year, 'zip')\r\n zip_url = site_url + '/' + zip_name\r\n zip_path = os.path.join(cdl_ws, zip_name)\r\n\r\n cdl_path = os.path.join(cdl_ws, cdl_format.format(cdl_year, 'img'))\r\n if not os.path.isdir(cdl_ws):\r\n os.makedirs(cdl_ws)\r\n\r\n if os.path.isfile(zip_path) and overwrite_flag:\r\n os.remove(zip_path)\r\n if not os.path.isfile(zip_path):\r\n logging.info(' Download CDL files')\r\n logging.debug(' {}'.format(zip_url))\r\n logging.debug(' {}'.format(zip_path))\r\n try:\r\n urllib.urlretrieve(zip_url, zip_path)\r\n except IOError as e:\r\n logging.error(' IOError, skipping')\r\n logging.error(e)\r\n\r\n if os.path.isfile(cdl_path) and overwrite_flag:\r\n util.remove_file(cdl_path)\r\n if os.path.isfile(zip_path) and not os.path.isfile(cdl_path):\r\n logging.info(' Extracting CDL files')\r\n with zipfile.ZipFile(zip_path) as zf:\r\n zf.extractall(cdl_ws)",
"def wind3dp_single_download(file, path=None):\n\n # add a OS-specific '/' to end end of 'path'\n if path:\n if not path[-1] == os.sep:\n path = f'{path}{os.sep}'\n else:\n path = sunpy.config.get('downloads', 'download_dir') + os.sep\n\n data = file.split('_')[1] # e.g. 'sfsp'\n year = file.split('_')[3][:4]\n base = f\"https://sprg.ssl.berkeley.edu/wind3dp/data/wi/3dp/{data}/{year}/\"\n\n url = base+'/'+file\n\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=False)\n except requests.HTTPError:\n print(f'No corresponding data found at {url}')\n downloaded_file = []\n\n return downloaded_file",
"def _download_to_flc(self):\n self.communicator.download_to_flc()",
"def wind3dp_download(dataset, startdate, enddate, path=None, **kwargs):\n\n trange = a.Time(startdate, enddate)\n cda_dataset = a.cdaweb.Dataset(dataset)\n try:\n result = Fido.search(trange, cda_dataset)\n filelist = [i[0].split('/')[-1] for i in result.show('URL')[0]]\n filelist.sort()\n files = filelist\n if path is None:\n filelist = [sunpy.config.get('downloads', 'download_dir') + os.sep + file for file in filelist]\n elif type(path) is str:\n filelist = [path + os.sep + f for f in filelist]\n downloaded_files = filelist\n\n for i, f in enumerate(filelist):\n if os.path.exists(f) and os.path.getsize(f) == 0:\n os.remove(f)\n if not os.path.exists(f):\n # downloaded_file = Fido.fetch(result[0][i], path=path, max_conn=max_conn)\n downloaded_file = wind3dp_single_download(files[i], path=path)\n\n except (RuntimeError, IndexError):\n print(f'Unable to obtain \"{dataset}\" data for {startdate}-{enddate}!')\n downloaded_files = []\n return downloaded_files",
"def download(path):\n\n # Check if directory exists\n if not os.path.isdir(path + \"birdvox_dcase_20k\"):\n print(\"Creating birdvox_dcase_20k Directory\")\n os.mkdir(path + \"birdvox_dcase_20k\")\n base = \"https://zenodo.org/record/1208080/files/\"\n filename = \"BirdVox-DCASE-20k.zip\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n url = base + filename + \"?download=1\"\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)\n url = \"https://ndownloader.figshare.com/files/10853300\"\n filename = \"data_labels.csv\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)",
"def _download(self) -> bytes:\n\n self.log.info(\"Downloading FCC facilities..\")\n # Disabling weak dh check. FCC should update their servers.\n ciphers = requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'\n r = requests.get(FACILITIES_URL)\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS = ciphers\n r.raise_for_status()\n return r.content",
"def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)",
"def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()",
"def main():\n # the url for african daily and global daily\n african_dialy_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p25/\"\n global_daily_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_daily/tifs/p25/\"\n\n\n each_year_list = GetRasterYears(url=african_dialy_url)\n new_path = makenewdir(each_year_list)\n years_new_list = fecthrasterurl(url=african_dialy_url)\n downloadwithwget(each_year_list, years_new_list, new_path)",
"def download(form, year=None):\n if form == \"2552-96\" and year is not None:\n r = requests.get(f\"http://downloads.cms.gov/Files/hcris/HOSPFY{year}.zip\")\n elif form == \"2552-10\" and year is not None:\n r = requests.get(f\"http://downloads.cms.gov/Files/hcris/HOSP10FY{year}.zip\")\n elif form == \"2552-96\" and year is None:\n r = requests.get(\"http://downloads.cms.gov/files/hcris/HOSP-REPORTS.ZIP\")\n elif form == \"2552-10\" and year is None:\n r = requests.get(\"http://downloads.cms.gov/files/hcris/hosp10-reports.zip\")\n\n # Read content stream of Zip file and extract to data directory\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(f\"{data_dir}/\")\n z.close()",
"def download_goes_hotspot_characterization(folder, start, end, satellite=\"G17\", full_disk=False):\n if full_disk:\n product = 'ABI-L2-FDCF'\n else:\n product = 'ABI-L2-FDCC'\n \n return download_goes_data(folder, start, end, product, satellite)",
"def download_dicom_series(uid, output_folder):\n filename_zipped = os.path.join(output_folder, uid + \".zip\")\n filename = re.sub(\".zip\", \"\", filename_zipped)\n if not (os.path.exists(filename_zipped) or os.path.isdir(filename)):\n client.get_image(\n seriesInstanceUid=uid, downloadPath=output_folder, zipFileName=uid + \".zip\"\n )\n return filename",
"def disk_fxt(request):\n disk = request.param\n disk.download()\n return disk",
"def download(self, download) -> None:\n path_cifarh = path.join(self.root, self.filename_cifarh)\n path_cifar = path.join(self.root, self.filename_cifar)\n is_there = path.isfile(path_cifarh) and path.isfile(path_cifar)\n if is_there:\n print(\"Files already exist.\")\n if download == \"force\" or not is_there:\n download_and_extract_archive(\n self.url_cifar, self.root, filename=self.filename_cifar\n )\n download_and_extract_archive(\n self.url_cifarh, self.root, filename=self.filename_cifarh\n )",
"def download_flight_data():\n response = requests.get(FlightAndRoutConst.URL_FLIGHTS_OF_SPRING_AIRLINE,\n headers={'Content-type': 'text/html; charset=utf-8'})\n output_path = build_file_name()\n # Using response.text instead of response.content for chinese string display\n IOUtils.write_to_file(str(response.text), output_path)\n return output_path",
"def download_dailydialog(daily_raw_fname: str, data_path: str):\n wget.download(daily_raw_fname, data_path)\n # Manually unzip the train/dev/test files",
"def download_dilbert(s, u):\n with open(\"comicfile.jpg\", \"wb\") as file:\n response = s.get(u)\n file.write(response.content)",
"def download_sd_doc(pii, view='full'):\n\n file_path = os.path.join(outdir, 'sd-download', pii+'-'+view+'.xml')\n if not os.path.exists(file_path):\n print(' Download:', pii + '-' + view + '.xml')\n\n url = 'http://api.elsevier.com/content/article/pii:' + pii\n vals = {'view': view,\n 'apikey': SD_API_KEY}\n r = requests.get(url, params=vals)\n\n if r.status_code != requests.codes.ok:\n print('!! ScienceDirect server error:', r.status_code,\n file=sys.stderr)\n print(r.text, file=sys.stderr)\n return\n\n with io.open(file_path, 'w', encoding='utf8') as out:\n out.write(r.text)",
"def _download_data(self):\n logger.info('Downloading ChemIDplus data...')\n outfile_path = self._src_data_dir / self._src_fname\n\n self._ftp_download(self._src_server,\n self._src_dir_path,\n self._src_data_dir,\n self._src_fname)\n\n parser = ET.iterparse(outfile_path, ('start', 'end'))\n date = next(parser)[1].attrib['date']\n version = date.replace('-', '')\n outfile_path.rename(self._src_data_dir / f'chemidplus_{version}.xml')\n logger.info('Finished downloading ChemIDplus data')",
"def dl_pdb(url_dom, pdb_id, dom_sid):\n good_url = re.sub(r'(output=html)', 'output=txt', url_dom)\n\n print(\"Dowloading the good domain of \" + pdb_id + \".pdb from the SCOP \" +\n \"website...\")\n urlreq.urlretrieve(good_url, \"data/\" + dom_sid + '.pdb')\n print(\"Download finished !\\n\")",
"def download_LIDC(output_folder, debug=False):\n\n # Creating config file with path to dataset\n _, config_file = create_config(output_folder, debug, \"fed_lidc_idri\")\n\n # Get patient X study\n patientXstudy = pd.read_json(\n client.get_patient_study(collection=\"LIDC-IDRI\").read().decode(\"utf-8\")\n )\n\n # Get study X series\n series = pd.read_json(\n client.get_series(modality=\"CT\", collection=\"LIDC-IDRI\").read().decode(\"utf-8\")\n )\n\n # Join both of them\n patientXseries = patientXstudy.merge(series).iloc[:]\n\n # there are some images with missing slices. We remove them\n # for reference their loc: 385, 471, 890, 129, 110, 245, 80, 618, 524\n bad_patientID = [\n \"LIDC-IDRI-0418\",\n \"LIDC-IDRI-0514\",\n \"LIDC-IDRI-0672\",\n \"LIDC-IDRI-0146\",\n \"LIDC-IDRI-0123\",\n \"LIDC-IDRI-0267\",\n \"LIDC-IDRI-0085\",\n \"LIDC-IDRI-0979\",\n \"LIDC-IDRI-0572\",\n ]\n patientXseries = patientXseries[~patientXseries[\"PatientID\"].isin(bad_patientID)]\n\n if debug:\n patientXseries = patientXseries[:10]\n\n # Download associated DICOMs\n pool = multiprocessing.Pool(processes=n_cpus)\n downloaded_paths = pool.starmap(\n download_dicom_series,\n zip(patientXseries.SeriesInstanceUID.tolist(), itertools.repeat(output_folder)),\n )\n\n # Download XML annotations\n annotations_path = download_zip_from_url(ANNOTATION_URL, output_folder)\n\n # Unzip everything and remove archives\n zipped_folders = [\n str(p) for p in Path(output_folder).glob(\"./*/\") if str(p).endswith(\".zip\")\n ]\n\n # Check zip integrity, and download corrupted files again\n for zipped_f in zipped_folders:\n try:\n while zipfile.ZipFile(zipped_f).testzip() is not None:\n os.remove(zipped_f)\n download_dicom_series(os.path.splitext(zipped_f)[0], output_folder)\n except zipfile.BadZipFile:\n os.remove(zipped_f)\n download_dicom_series(os.path.splitext(zipped_f)[0], output_folder)\n print(f\"Bad zip file: {zipped_f}\")\n\n for zipped_f in zipped_folders:\n with zipfile.ZipFile(zipped_f, \"r\") as zip_ref:\n zip_file_name = re.sub(\".zip\", \"\", zipped_f)\n # extract only if it does not exist or it is empty\n if not os.path.isdir(zip_file_name) or len(os.listdir(zip_file_name)) == 0:\n os.makedirs(re.sub(\".zip\", \"\", zipped_f), exist_ok=True)\n zip_ref.extractall(zip_file_name)\n os.remove(zipped_f)\n\n # For each patient we record the location of its DICOM\n patientXseries[\"extraction_location\"] = downloaded_paths\n\n # We tie back annotations to the original DICOMS\n xmlfiles = glob.glob(os.path.join(annotations_path, \"tcia-lidc-xml\", \"*\", \"*.xml\"))\n df = pd.DataFrame()\n df[\"annotation_file\"] = xmlfiles\n # We initialize a dask dataframe to speed up computations\n ddf = dd.from_pandas(df, npartitions=8)\n df[\"SeriesInstanceUID\"] = ddf.map_partitions(\n lambda d: d[\"annotation_file\"].apply(get_SeriesUID_from_xml)\n ).compute(scheduler=\"processes\")\n df = df[df.SeriesInstanceUID != \"not found\"]\n df = df[df.SeriesInstanceUID != \"notfound\"]\n # there are several xml files which have the same seriesInstanceUID\n # but the same content, therefore here df has len of 1026.\n # Next, we are removing the duplicates. The correct number of files will be now 1018\n df = df.drop_duplicates(subset=[\"SeriesInstanceUID\"], keep=\"first\")\n patientXseries = df.merge(patientXseries, on=\"SeriesInstanceUID\")\n # Update yaml file\n write_value_in_config(config_file, \"download_complete\", True)\n return patientXseries",
"def get_drms_files(self):\n import drms\n client = drms.Client(email=self.email,verbose=True)\n fmt = '%Y.%m.%d_%H:%M'\n self.t_qstr = self.series+'[{0}_TAI-{1}_TAI@{2}]'.format(self.start.strftime(fmt),self.end.strftime(fmt),self.cadence) \n\n\n #create wavelength query string\n self.w_qstr = '[' \n for i in self.wav: self.w_qstr = self.w_qstr+'{0},'.format(int(i.value))\n #remove last , and add bracket\n self.w_qstr = self.w_qstr[:-1]+']'\n \n #make the series string\n self.s_qstr = '{'+self.segment+'}'\n\n #the full query\n self.qstr = self.t_qstr+self.w_qstr+self.s_qstr\n\n #IF ERRORS WITH URL ERROR IT IS BECAUSE THE DOWNLOAD FILE SIZE IS TOO LARGE\n #export the data file list \n self.expt = client.export(self.qstr)\n#create an array of indexes to download\n index = np.arange(np.size(self.expt.urls.url))\n# get file from JSOC\n #set directory to current if no path set\n outf = self.expt.download(self.odir,index,fname_from_rec=True)",
"def download_file(year: str, month: str, career: str, kind: str = \"ativo\") -> str:\n\n career = career.lower()\n downloaded_files = []\n existing_files = []\n res = \"\"\n for file_ in CGU_FILES[career][kind]:\n url = f\"{URL_CGU_DOWNLOADS}/{year}{month}_{file_}\"\n try:\n division = file_.split(\"_\")[-1] if career == \"civil\" else None\n if not file_exists(year, month, career, kind, division):\n print(f\"Downloading {url}\")\n sleep(10)\n req = requests.get(url, stream=True, timeout=90)\n req.raise_for_status()\n filename = (\n req.headers.get(\"Content-Disposition\")\n .split(\"=\")[-1]\n .replace('\"', \"\")\n )\n saved = save_file(filename, req.content)\n unzipped_file = unzip_salary_file(saved, year, month, career, kind)\n downloaded_files.append(unzipped_file)\n else:\n print(f\"Arquivo {url} já existe\")\n existing_files.append(file_)\n res = f\"Arquivos baixados: {', '.join(downloaded_files)} \\nArquivos existentes: {', '.join(existing_files)}\"\n except requests.exceptions.ConnectionError as err:\n res = f\"Erro de conexão: {err}\"\n # pylint: disable=line-too-long\n # Erro de conexão: HTTPConnectionPool(host='www.portaltransparencia.gov.br', port=80): Max retries\n # exceeded with url: /download-de-dados/servidores/202202_Reserva_Reformas_Militares (Caused by\n # NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f9cd019cd90>: Failed to establish\n # a new connection: [Errno -3] Temporary failure in name resolution')\n except requests.exceptions.HTTPError as err:\n res = f\"Arquivo inexistente: {url.split('/')[-1]}.zip - {err}\"\n # pylint: disable=line-too-long\n # Erro no download: 404 Client Error: Not Found for url:\n # https://www.portaltransparencia.gov.br/download-de-dados/servidores/202202_Reserva_Reformas_Militares\n\n print(res)\n return res",
"def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")",
"def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file"
] | [
"0.6992597",
"0.6586604",
"0.6586604",
"0.62284845",
"0.62214994",
"0.6183709",
"0.6142462",
"0.61349034",
"0.60533696",
"0.6041575",
"0.6032721",
"0.601811",
"0.5998015",
"0.5983272",
"0.59466475",
"0.5895252",
"0.5893261",
"0.5883837",
"0.58763474",
"0.58670187",
"0.5820259",
"0.5817156",
"0.57951325",
"0.5792582",
"0.57692045",
"0.5764055",
"0.57182527",
"0.57044387",
"0.5664252",
"0.5655608"
] | 0.6681552 | 1 |
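
A minimal usage sketch for wind3dp_download_fido above (an illustration added for clarity, not a dataset row); it assumes the function is defined in the current session, uses "WI_SFSP_3DP" as an example CDAWeb dataset ID, and "/tmp/wind3dp" as a hypothetical download directory.

# Sketch: fetch one day of Wind/3DP CDF files with the Fido-based loader above.
files = wind3dp_download_fido(dataset="WI_SFSP_3DP",    # example CDAWeb dataset ID
                              startdate="2021/04/17",
                              enddate="2021/04/18",
                              path="/tmp/wind3dp",      # hypothetical local directory
                              max_conn=5)
print(files)  # list of local CDF paths; empty list if the search failed
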
Download a single Wind/3DP level 2 data file from SRL Berkeley to local path | def wind3dp_single_download(file, path=None):
    # add an OS-specific '/' to the end of 'path'
if path:
if not path[-1] == os.sep:
path = f'{path}{os.sep}'
else:
path = sunpy.config.get('downloads', 'download_dir') + os.sep
data = file.split('_')[1] # e.g. 'sfsp'
year = file.split('_')[3][:4]
base = f"https://sprg.ssl.berkeley.edu/wind3dp/data/wi/3dp/{data}/{year}/"
url = base+'/'+file
try:
downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=True)
except ModuleNotFoundError:
downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=False)
except requests.HTTPError:
print(f'No corresponding data found at {url}')
downloaded_file = []
return downloaded_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_hess_dr1_data():\n download_data_files(FILENAMES_HESS_DR1)",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')",
"def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")",
"def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)",
"def download_data():\n # Download Unihan meta data for radical-stroke analysis\n os.system(' mkdir Unihan')\n os.system(' curl -O http://unicode.org/Public/UCD/latest/ucd/Unihan.zip')\n os.system(' apt-get -y install unzip')\n os.system(' unzip Unihan.zip -d Unihan/')\n os.system(' rm Unihan.zip')\n\n data_path = 'Unihan/Unihan_RadicalStrokeCounts.txt'\n assert(os.path.isfile(data_path))\n\n return data_path",
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def download_data(origin_time, net, sta, loc, chan):\n \n dataDir_get = '/import/netapp-m-02-bay200/mseed_online/archive/'\n \n fileName = \".\".join((net, sta, \".\" + chan + \".D\",\n origin_time.strftime(\"%Y.%j\")))\n filePath = os.path.join(dataDir_get, origin_time.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName)\n o_time2 = origin_time + 86400\n fileName2 = \".\".join((net, sta, \".\" + chan + \".D\",\n o_time2.strftime(\"%Y.%j\")))\n filePath2 = os.path.join(dataDir_get, o_time2.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName2)\n\n if os.path.isfile(filePath):\n if origin_time.hour > 21:\n st = Stream()\n st.extend(read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600))\n st.extend(read(filePath2, \n starttime = UTCDateTime(o_time2.year, o_time2.month, \n o_time2.day, 0, 0),\n endtime = origin_time + 3 * 3600))\n st.merge(method=-1)\n else:\n st = read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600)\n else:\n print \"++++ cannot find the following file: \\n %s \\n++++\" % filePath\n\n if not st:\n raise RotationalProcessingException('Data not available for this'\n ' event...')\n st.trim(starttime=origin_time-180, endtime=origin_time+3*3600)\n\n print 'Download of', st[0].stats.station, st[0].stats.channel, \\\n 'data successful!'\n\n return st",
"def download(path):\n\n # Check if directory exists\n if not os.path.isdir(path + \"birdvox_dcase_20k\"):\n print(\"Creating birdvox_dcase_20k Directory\")\n os.mkdir(path + \"birdvox_dcase_20k\")\n base = \"https://zenodo.org/record/1208080/files/\"\n filename = \"BirdVox-DCASE-20k.zip\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n url = base + filename + \"?download=1\"\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)\n url = \"https://ndownloader.figshare.com/files/10853300\"\n filename = \"data_labels.csv\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)",
"def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )",
"def download() -> Path:\n rts_downloader.download()\n rts_gmlc_dir = Path(rts_downloader.rts_download_path) / \"RTS-GMLC\"\n return rts_gmlc_dir",
"def _download_data_from_nfs_connection(self) -> 'DataFrame':\n\n # note: as we need to load a data into the memory,\n # we are using pure requests and helpers from the WML client\n data_path = self.location.path\n connection_id = self.connection.asset_id\n\n return self._download_data_from_nfs_connection_using_id_and_path(connection_id, data_path)",
"def _download_to_flc(self):\n self.communicator.download_to_flc()",
"def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()",
"def download_fermi_crab_3fhl():\n download_data_files(FILENAMES_FERMI_3FHL_CRAB)",
"def download_stops_data(path):\n request_result = requests.get(STOPS_URL)\n with open(path, \"wb\") as output_file:\n output_file.write(request_result.content)",
"def _download_from_web(*, ds_name: str, ds_path: Path):\n import cgi\n import zipfile\n import httpx\n from tqdm import tqdm\n\n url = DATASET_OPTIONS[ds_name]['web']\n if ds_path.exists():\n print('Dataset directory already exists; remove it if you wish to '\n 're-download the dataset')\n return\n\n ds_path.mkdir(parents=True, exist_ok=True)\n\n with httpx.Client() as client:\n with client.stream('GET', url=url) as response:\n if not response.is_error:\n pass # All good!\n else:\n raise RuntimeError(\n f'Error {response.status_code} when trying '\n f'to download {url}')\n\n\n header = response.headers['content-disposition']\n _, params = cgi.parse_header(header)\n # where to store the archive\n outfile = ds_path / params['filename']\n remote_file_size = int(response.headers['content-length'])\n\n with open(outfile, mode='wb') as f:\n with tqdm(desc=params['filename'], initial=0,\n total=remote_file_size, unit='B',\n unit_scale=True, unit_divisor=1024,\n leave=False) as progress:\n num_bytes_downloaded = response.num_bytes_downloaded\n\n for chunk in response.iter_bytes():\n f.write(chunk)\n progress.update(response.num_bytes_downloaded -\n num_bytes_downloaded)\n num_bytes_downloaded = (response\n .num_bytes_downloaded)\n\n assert outfile.suffix == '.zip'\n\n with zipfile.ZipFile(outfile) as zip:\n for zip_info in zip.infolist():\n path_in_zip = Path(zip_info.filename)\n # omit top-level directory from Zip archive\n target_path = str(Path(*path_in_zip.parts[1:]))\n if str(target_path) in ('.', '..'):\n continue\n if zip_info.filename.endswith('/'):\n (ds_path / target_path).mkdir(parents=True, exist_ok=True)\n continue\n zip_info.filename = target_path\n print(f'Extracting: {target_path}')\n zip.extract(zip_info, ds_path)\n\n outfile.unlink()",
"def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()",
"def https_download_file(**data):\n import os\n import requests\n\n ##minimal data inputs payload\n server_url = data.get('server_url', '')\n file_name = data.get('file_name', '')\n file_path = data.get('file_path', '')\n headers = data.get('headers', '')\n ##extra data inputs payload\n ##\n ##\n\n if server_url==None:\n raise(NameError('No `server URL` specified'))\n \n if file_name==None:\n raise(NameError('No `file_name` specified'))\n\n file_url = os.path.join(server_url,file_name)\n\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n\n full_name = os.path.join(file_path,file_name)\n \n if not os.path.isfile(full_name):\n r = requests.get(file_url, headers=headers)\n if not r.status_code==200: \n raise r.raise_for_status()\n open(full_name , 'wb').write(r.content)\n\n return full_name",
"def write_file(req, file_type, download, dataset, stream, period, root_name):\n# ~~~~ Loading up the GRIB file~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n head, _ = path.splitext(root_name)\n\n if file_type == 'grib':\n\n if download:\n raise TelemacException(\\\n '... I am not programmed to '\n 'download grib files directly.\\n\\n')\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nLoading essentials from the GRIB\\n')\n grb2slf = Grib(dataset, req, stream)\n\n grb2slf.set_geometry()\n\n if stream == 'spec':\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nSpecial case for spectral file\\n')\n grb2slf.put_geometry('geo_'+head+'.slf')\n grb2slf.set_spectral()\n\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nConverting grib file(s) into SELAFIN\\n')\n grb2slf.put_content(root_name)\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Downloading the NetCDF file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Unfortunately, I did not manage to access the NetCDF file remotely\n elif file_type == 'netcdf':\n\n ecmwf2slf = Ecmwf(period, req)\n if download:\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nMaking an ECMWF request\\n')\n ecmwf2slf.connect_to_ecmwf(\"datasets/%s\" % (req['dataset']))\n\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nHaving to download the ECMWF file first\\n')\n ecmwf2slf.download_ecmwf()\n print(\" ~> download completed.\")\n\n ecmwf2slf.open_ecmwf()\n ecmwf2slf.set_geometry()\n\n if stream == 'spec':\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nSpecial case for spectral file\\n')\n ecmwf2slf.put_geometry('geo_'+head+'.slf')\n ecmwf2slf.set_spectral()\n\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nConverting netcdf file into SELAFIN\\n')\n ecmwf2slf.put_content(root_name, stream)",
"def download(uri: str) -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # create destination dirs\n destination = project_dir / 'data' / 'raw'\n destination.mkdir(exist_ok=True, parents=True)\n\n # download the file\n urllib.request.urlretrieve(uri, destination / \"original.zip\")",
"def _download_spin_data(system_name, boundary_condition, nspins, data_dir):\n # Set default storage location.\n if data_dir is None:\n data_dir = os.path.expanduser(\"~/tfq-datasets\")\n\n # Use Keras file downloader.\n file_path = tf.keras.utils.get_file(\n fname=system_name + '.zip',\n cache_dir=data_dir,\n cache_subdir='spin_systems',\n origin=\"https://storage.googleapis.com/download\"\n \".tensorflow.org/data/quantum/\"\n \"spin_systems/\" + system_name + \".zip \",\n extract=True)\n\n file_path = os.path.splitext(file_path)[0]\n\n data_path = os.path.join(file_path, boundary_condition, str(nspins))\n return data_path",
"def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file",
"def download_flight_data():\n response = requests.get(FlightAndRoutConst.URL_FLIGHTS_OF_SPRING_AIRLINE,\n headers={'Content-type': 'text/html; charset=utf-8'})\n output_path = build_file_name()\n # Using response.text instead of response.content for chinese string display\n IOUtils.write_to_file(str(response.text), output_path)\n return output_path",
"def wind3dp_download(dataset, startdate, enddate, path=None, **kwargs):\n\n trange = a.Time(startdate, enddate)\n cda_dataset = a.cdaweb.Dataset(dataset)\n try:\n result = Fido.search(trange, cda_dataset)\n filelist = [i[0].split('/')[-1] for i in result.show('URL')[0]]\n filelist.sort()\n files = filelist\n if path is None:\n filelist = [sunpy.config.get('downloads', 'download_dir') + os.sep + file for file in filelist]\n elif type(path) is str:\n filelist = [path + os.sep + f for f in filelist]\n downloaded_files = filelist\n\n for i, f in enumerate(filelist):\n if os.path.exists(f) and os.path.getsize(f) == 0:\n os.remove(f)\n if not os.path.exists(f):\n # downloaded_file = Fido.fetch(result[0][i], path=path, max_conn=max_conn)\n downloaded_file = wind3dp_single_download(files[i], path=path)\n\n except (RuntimeError, IndexError):\n print(f'Unable to obtain \"{dataset}\" data for {startdate}-{enddate}!')\n downloaded_files = []\n return downloaded_files",
"def download(data_root, version):\n if version not in GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys():\n raise ValueError(\n f\"A valid dataset version is required. Available versions are:\"\n f\"{GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys()}\"\n )\n dest_path = os.path.join(\n data_root, GroceriesReal.LOCAL_PATH, f\"{version}.zip\"\n )\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n extract_folder = os.path.join(data_root, GroceriesReal.LOCAL_PATH)\n if os.path.exists(dest_path):\n logger.info(\"The dataset file exists. Skip download.\")\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError:\n logger.info(\n \"The checksum of the previous dataset mismatches. \"\n \"Delete the previously downloaded dataset.\"\n )\n os.remove(dest_path)\n if not os.path.exists(dest_path):\n source_uri = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].source_uri\n GroceriesReal._download_http(source_uri, dest_path, version)\n GroceriesReal._extract_file(dest_path, extract_folder)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)"
] | [
"0.67398626",
"0.6583231",
"0.6520613",
"0.6331625",
"0.6304204",
"0.6291874",
"0.6226741",
"0.62254614",
"0.62199074",
"0.61869997",
"0.60891455",
"0.60750973",
"0.6024186",
"0.60179967",
"0.60074246",
"0.5984744",
"0.59611046",
"0.5960039",
"0.5954429",
"0.5950052",
"0.5932688",
"0.5914161",
"0.59139746",
"0.5910417",
"0.59095806",
"0.5904948",
"0.5888352",
"0.58872455",
"0.5885092",
"0.5885092"
] | 0.69402546 | 0 |
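
A minimal usage sketch for wind3dp_single_download above (an illustration, not a dataset row); the file name and target directory are assumptions chosen to match the wi_<data>_3dp_<YYYYMMDD>_vXX.cdf pattern the function parses.

# Sketch: download one Wind/3DP 'sfsp' daily CDF from the Berkeley SSL server.
f = wind3dp_single_download("wi_sfsp_3dp_20210417_v01.cdf",  # assumed example file name
                            path="/tmp/wind3dp")             # hypothetical local directory
print(f)  # local path of the downloaded file, or [] on an HTTP error
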
Returns the current state of the gameboard and the two players' colors. Returns np.ndarray: a flat 1D representation of the gameboard followed by the two players' colors | def get_state(self):
return np.append(self.game.game_board.get_board(),
[self.game.player_1.color, self.game.player_2.color])[None, :] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_current_game_state(board):\n return np.concatenate((_get_pieces_one_hot(board, color=False),\n _get_pieces_one_hot(board, color=True)),\n axis=-1)",
"def return_state(board):\r\n state = np.array(INIT_ARRAY)\r\n for pos in BOARD_POSITIONS:\r\n state[pos[0]][pos[1]] = board[pos[0]][pos[1]].color\r\n\r\n return state",
"def current_state(self):\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n for i in range(8):\n for j in range(8):\n if self.board_value[i][j]==self.current_player:\n square_state[0][i][j]=1\n elif self.board_value[i][j]!=self.current_player and self.board_value[i][j]!= 0:\n square_state[1][i][j]=1\n # indicate the last move location\n square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]",
"def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]",
"def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]",
"def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.state:\n moves, players = np.array(list(zip(*self.state.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.state) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state",
"def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width, move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width, move_oppo % self.height] = 1.0\n\n # last move indication\n square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0\n\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0\n\n return square_state[:, ::-1, :]",
"def blue_matrix(self):\n return np.vstack(np.where(self.np_image_matrix() == 2))",
"def get_color_options(self):\n mask = (self.all_colors != self.player_1.color) & (self.all_colors != self.player_2.color)\n return self.all_colors[mask]",
"def __getColors(self):\n colors = {\"leftSideHighColor\" : \"\", \"leftSideDownColor\" : \"\",\\\n \"rightSideHighColor\" : \"\", \"rightSideDownColor\" : \"\"}\n for team, nestedDict in self.playerPositions.items():\n for player, position in nestedDict.items():\n if 1 == position:\n colors[\"leftSideHighColor\"] = self.playerColors[team][player]\n elif 2 == position:\n colors[\"leftSideDownColor\"] = self.playerColors[team][player]\n elif 3 == position:\n colors[\"rightSideDownColor\"] = self.playerColors[team][player]\n elif 4 == position:\n colors[\"rightSideHighColor\"] = self.playerColors[team][player]\n for key, color in colors.items():\n colors[key] = color.capitalize()\n return colors",
"def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS",
"def get_palace_board_red(self):\n\n return self._palace_board_red",
"def __get_whose_turn_in_history(self, time_index: int) -> chess.Color:\n\n # get player from history\n side = self.history[time_index].split(\" \")[1]\n\n if side == \"w\":\n return chess.WHITE\n elif side == \"b\":\n return chess.BLACK",
"def get_colors(self):\n x = np.linspace(0, 1, self.length)\n y = x**self.gamma\n\n value = np.linspace(0, 1, len(self.colors))\n r = np.interp(y, value, self.colors[:,0])\n g = np.interp(y, value, self.colors[:,1])\n b = np.interp(y, value, self.colors[:,2])\n\n return np.dstack((r, g, b)).reshape(len(r), 3).astype(np.uint8)",
"def __init__(self, player1, player2):\n # # players of the game {player1name: {color: , red_marbles:}}\n # self._players = {player1[0]: {\"name\": player1[0], \"color\": player1[1]},\n # player2[0]: {\"name\": player2[0], \"color\": player2[1]}}\n # # empty board, no marbles yet\n # self._board = self.create_board()\n # # current player's turn\n # self._turn = None\n # # winner state\n # self._winner = None\n # # red marbles captured for each player, needs addition of black and white marbles\n # self._captured = {player1[0]: 0, player2[0]: 0}\n pass",
"def to_numpy(self) -> Tuple[np.ndarray, np.ndarray]:\n state = self._zero_rgb_image(round(self.height), round(self.width))\n rendering = self._zero_rgb_image(round(self.height), round(self.width))\n\n for sprite in self.sprites[1:]: # skip self\n sprite_state, sprite_rendering = sprite.to_numpy(self.height, self.width)\n state[sprite_state != 0] = sprite_state[sprite_state != 0]\n rendering[sprite_rendering != 0] = sprite_rendering[sprite_rendering != 0]\n return state, rendering",
"def get_palace_board_blue(self):\n\n return self._palace_board_blue",
"def state_arr(board, player1=1, player2=2):\n width = board.width\n height = board.height\n\n state_str = ''\n\n for i in range(height - 1, -1, -1):\n for j in range(width):\n loc = i * width + j\n p = board.states.get(loc, -1)\n if p == player1:\n state_str += 'X'\n elif p == player2:\n state_str += 'O'\n else:\n state_str += '.'\n state_str += '\\n'\n\n state_arr = [[_ for _ in row_str.strip()] for row_str in state_str.strip().split('\\n')]\n\n return state_arr",
"def state_descriptor(self, state, player):\n\n\t\tboard = np.copy(state.board)\n\n\t\tnp.place(board, board == -1, 0)\n\t\tnp.place(board, board == player, 3)\n\t\tnp.place(board, board == 3 - player, -1)\n\t\tnp.place(board, board == 3, 1)\n\n\t\tmy_cows = state.cows[player - 1]\n\t\ten_cows = state.cows[2 - player]\n\n\t\treturn (self.totuple(board), my_cows, en_cows)",
"def get_state_colors():\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit(\n \"colors\", {\"colors\": state_colors, \"cases\": state_cases, \"active\": state_active}\n )",
"def __init__(self, python_board: list[list[int]] = None, red_active: bool = True) -> None:\n\n game_board = [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n if python_board is not None:\n self.board_array = np.array(python_board)\n else:\n self.board_array = np.array(game_board)\n\n self.move_number = 0\n\n # Creating the kernels to use in a 2d convolution to check the board for a winner later\n across = np.array([[1, 1, 1, 1]])\n vertical = np.transpose(across)\n main_diagonal = np.eye(4, dtype=np.uint8)\n off_diagonal = np.fliplr(main_diagonal)\n self._detection_kernels_red = [across, vertical, main_diagonal, off_diagonal]\n self._detection_kernels_yellow = [kernel * -1 for kernel in self._detection_kernels_red]\n\n self._is_red_active = red_active\n\n # Matches moves to their indices in self._valid_moves, this order is very important\n # for optimising alpha-beta pruning\n self._valid_move_order = {3: 0, 2: 1, 4: 2, 5: 3, 1: 4, 0: 5, 6: 6}\n self._valid_moves = [3, 2, 4, 5, 1, 0, 6]\n self._column_to_row = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n\n self._win_state = None\n\n # This code reads in the hash keys for use in Zobrist hashing, for more information, see\n # opening_book_gen.py\n red_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_red_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n red_hash_keys.append([int(r) for r in row])\n self._red_hash_keys = np.array(red_hash_keys)\n\n yellow_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_yellow_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n yellow_hash_keys.append([int(r) for r in row])\n self._yellow_hash_keys = np.array(yellow_hash_keys)\n\n self.hash = 0",
"def get_state(self):\n return {\n \"board\": self.board,\n \"player\": self.player,\n \"winner\": self.winner\n }",
"def get_pixel_obs(self):\n delta = self.side / (self.pixel_side - 1)\n bd1 = -self.side / 2\n bd2 = self.side / 2 + delta\n x, y = np.meshgrid(np.arange(bd1, bd2, delta), np.arange(bd2, bd1, -delta))\n if self.robot.sensor.lower() == \"rgb\":\n obs1, obs2, obs3 = np.zeros(x.shape), np.zeros(x.shape), np.zeros(x.shape)\n # Color humans:\n for human in self.humans:\n robot_distance = np.sqrt(\n (human.px - self.robot.px) ** 2 + (human.py - self.robot.py) ** 2\n )\n if robot_distance < self.robot.horizon:\n obs1[\n np.nonzero((x - human.px) ** 2 + (y - human.py) ** 2 <= human.radius ** 2)\n ] = 1\n # Color goal:\n obs2[\n np.nonzero(\n (x - self.robot.gx) ** 2 + (y - self.robot.gy) ** 2 <= self.goal_radius ** 2\n )\n ] = 1\n # Color robot:\n obs3[\n np.nonzero(\n (x - self.robot.px) ** 2 + (y - self.robot.py) ** 2 <= self.robot.radius ** 2\n )\n ] = 1\n obs = np.concatenate(\n (np.expand_dims(obs1, 0), np.expand_dims(obs2, 0), np.expand_dims(obs3, 0)), axis=0\n )\n return np.float32(np.expand_dims(obs, 0))\n elif self.robot.sensor.lower() == \"gray\":\n obs = np.zeros(x.shape)\n # Color humans:\n for human in self.humans:\n robot_distance = np.sqrt(\n (human.px - self.robot.px) ** 2 + (human.py - self.robot.py) ** 2\n )\n if robot_distance < self.robot.horizon:\n obs[\n np.nonzero((x - human.px) ** 2 + (y - human.py) ** 2 <= human.radius ** 2)\n ] = (1.0 / 3)\n # Color goal:\n obs[\n np.nonzero(\n (x - self.robot.gx) ** 2 + (y - self.robot.gy) ** 2 <= self.goal_radius ** 2\n )\n ] = (2.0 / 3)\n # Color robot:\n obs[\n np.nonzero(\n (x - self.robot.px) ** 2 + (y - self.robot.py) ** 2 <= self.robot.radius ** 2\n )\n ] = 1.0\n return np.float32(np.expand_dims(np.expand_dims(obs, 0), 0))\n else:\n raise ValueError(\"Robot sensor incompatible with pixel observation.\")",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def setColors(self):\n #productive\n profprint()\n self.color= [[0,0,0] for i in range(205)]\n self.color255= self.setColors255()\n for i in range(205):\n for j in range(3):\n self.color[i][j] = self.color255[i][j]/float(255)\n\n return self.color",
"def get_colour(self):\n \n distorted = []\n if piCameraFound:\n # Use piCamera\n \n #frame = self.capture_generator.next()\n #distorted = frame.array\n self.cam.capture(self.rawCapture, format=\"bgr\", use_video_port=True)\n distorted = self.rawCapture.array\n \n # clear the stream in preparation for the next frame\n self.rawCapture.truncate(0)\n \n else: # Use OpenCV\n retval, distorted = self.cam.read() # Read frame\n\n if not retval: # Error\n print \"Camera.get_colour: Could not read next frame\";\n exit(-1);\n \n \n #colour = cv2.remap(distorted, self.mapx, self.mapy, cv2.CV_INTER_LINEAR)\n #colour = cv2.remap(distorted, self.mapx, self.mapy, cv2.INTER_LINEAR)\n \n # Skip this part because it is slow\n #colour = cv2.undistort(distorted, self.intrinsic_matrix, self.distortion_coeffs)\n colour = distorted\n return colour, distorted",
"def setColors(self):\r\n # productive\r\n profprint()\r\n self.color = [[0, 0, 0] for i in range(MAXCOL)]\r\n self.color255 = self.setColors255()\r\n for i in range(MAXCOL):\r\n for j in range(3):\r\n self.color[i][j] = self.color255[i][j] / float(255)\r\n\r\n return self.color",
"def getBoardInfo(self):\n return self.my_pos, self.opp_pos",
"def serialize(self):\n gstate = np.zeros((10,10, 3), dtype=np.uint8)\n\n gstate[:,:,1] = self.possible_moves_mask\n for move in self.board.lines:\n x, y = self.rotate_move(move)\n gstate[x,y,0] = 1\n gstate[x,y,1] = 0\n if self.board.player == 1:\n gstate[:,:,2] = 1\n\n return gstate",
"def get_opponents(self, game_state):\n if self.red:\n return game_state.get_blue_team_indices()\n else:\n return game_state.get_red_team_indices()"
] | [
"0.6973377",
"0.6708354",
"0.6522196",
"0.651381",
"0.651381",
"0.6499365",
"0.6218236",
"0.6212448",
"0.6204406",
"0.61157256",
"0.6032963",
"0.60149604",
"0.6009506",
"0.59559596",
"0.5954081",
"0.5951532",
"0.58624077",
"0.58483106",
"0.58110374",
"0.57483196",
"0.5746821",
"0.5740826",
"0.5715649",
"0.5702549",
"0.5689226",
"0.56694186",
"0.5664316",
"0.56568176",
"0.56558615",
"0.5642277"
] | 0.8583118 | 0 |
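
A toy illustration of the flattened-state convention used by get_state above (a sketch only; the 3x3 board size and the integer color codes are assumptions, not values from the game records).

import numpy as np

board = np.zeros((3, 3), dtype=int)  # hypothetical empty 3x3 game board
p1_color, p2_color = 1, 2            # hypothetical integer color codes
state = np.append(board.flatten(), [p1_color, p2_color])[None, :]
print(state.shape)                   # (1, 11): 9 board cells followed by the two colors
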
Returns the possible color options that can be played. Returns list: a list of the possible color options (as integers) | def get_color_options(self):
mask = (self.all_colors != self.player_1.color) & (self.all_colors != self.player_2.color)
return self.all_colors[mask] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS",
"def supported_color_modes(self) -> set[str] | None:\n color_modes = [COLOR_MODE_ONOFF]\n if self.dp_code_bright in self.tuya_device.status:\n color_modes.append(COLOR_MODE_BRIGHTNESS)\n\n if self.dp_code_temp in self.tuya_device.status:\n color_modes.append(COLOR_MODE_COLOR_TEMP)\n\n if (\n self.dp_code_colour in self.tuya_device.status\n and len(self.tuya_device.status[self.dp_code_colour]) > 0\n ):\n color_modes.append(COLOR_MODE_HS)\n return set(color_modes)",
"def _get_goal_colours() -> List[Tuple[int, int, int]]:\n colour_lst = COLOUR_LIST[:]\n random.shuffle(colour_lst)\n return colour_lst",
"def supported_color_modes(self) -> set[ColorMode]:\n return {self.color_mode}",
"def options(self):\n if self._state == GameState.PLAY_OR_DRAW:\n return [NopAction(), DrawAction()] + self._play_options()\n elif self._state == GameState.PLAY:\n return [NopAction()] + self._play_options()\n elif self._state == GameState.PLAY_DRAWN:\n res = [NopAction()]\n if self._can_play(self._current_hand()[-1]):\n res += [PlayCardAction(len(self._current_hand()) - 1)]\n return res\n elif self._state == GameState.PICK_COLOR or self._state == GameState.PICK_COLOR_INIT:\n return [PickColorAction(c) for c in [Color.RED, Color.ORANGE, Color.GREEN, Color.BLUE]]\n elif self._state == GameState.CHALLENGE_VALID or self._state == GameState.CHALLENGE_INVALID:\n return [NopAction(), ChallengeAction()]\n raise RuntimeError('invalid state')",
"def get_all_color_modes(self):\n return self._all_color_modes",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def getDistinguishableColors(numColors, bgColors = [(1, 1, 1)]):\n\n\t# Start out by generating a sizeable number of RGB triples. This represents our space \n\t# of possible choices. By starting out in the RGB space, we ensure that all of the colors \n\t# can be generated by the monitor.\n\n\t# Number of grid divisions along each axis in RGB space\n\tnumGrid = 30\n\tx = np.linspace(0, 1, numGrid)\n\t[R, G, B] = np.meshgrid(x, x, x)\n\trgb = np.concatenate((R.T.reshape((numGrid*numGrid*numGrid, 1)), \\\n\t\tG.T.reshape((numGrid*numGrid*numGrid, 1)), \\\n\t\tB.T.reshape((numGrid*numGrid*numGrid, 1))), axis = 1)\n\tif numColors > rgb.shape[0] / 3:\n\t\traise ValueError('You cannot really distinguish that many colors! At most 9000 colors')\n\n\t# If the user specified multiple bgColors, compute distance from the candidate colors\n\t# to the background colors.\n\tmindist = np.full(rgb.shape[0], np.inf)\n\tfor c in bgColors:\n\t\tcol = np.full(rgb.shape, 1)\n\t\tcol[:,0] = c[0]\n\t\tcol[:,1] = c[1]\n\t\tcol[:,2] = c[2]\n\t\tdx = np.sum(np.abs(rgb - col), axis = 1)\n\t\tmindist = np.minimum(mindist, dx)\n\n\t# Initialize a list of colors\n\tcolors = []\n\tlastColor = bgColors[-1]\n\tfor i in range(numColors):\n\t\tcol = np.full(rgb.shape, 1)\n\t\tcol[:,0] = lastColor[0]\n\t\tcol[:,1] = lastColor[1]\n\t\tcol[:,2] = lastColor[2]\n\t\tdx = np.sum(np.abs(rgb - lastColor), axis = 1)\n\t\tmindist = np.minimum(mindist, dx)\n\t\tindex = np.argmax(mindist)\n\t\tchosenColor = (rgb[index,0], rgb[index,1], rgb[index,2])\n\t\tcolors.append(chosenColor)\n\t\tlastColor = chosenColor\n\n\treturn colors",
"def get_color_list(self):\n lst = []\n\n _lib.caca_get_dither_color_list.argtypes = [_Dither]\n _lib.caca_get_dither_color_list.restype = ctypes.POINTER(ctypes.c_char_p)\n\n for item in _lib.caca_get_dither_color_list(self):\n if item is not None and item != \"\":\n lst.append(item)\n else:\n #memory occurs otherwise\n break\n\n return lst",
"def getColors():\n return ['#8c99fc', '#cacefd', '#fff1d7', '#feda98', '#fda85a', '#fc6647']",
"def get_available_colors(self):\n\n # Get reportlab available colors\n colors = getAllNamedColors()\n\n # Remove bad colors\n colors.pop('white', None)\n colors.pop('black', None)\n\n # Returns only the colors values (without their names)\n colors = list(colors.values())\n \n # Shuffle colors list\n random.shuffle(colors)\n\n return colors",
"def colors(self):\n return self[\"colors\"]",
"def colors(self):\n return self[\"colors\"]",
"def calculate_image_possibilities():\n\n # Reordering the color ramps in the palette yields 3! combinations\n palette_reorder_possibilities = 6\n\n return len(palettes) * palette_reorder_possibilities * len(grips) * len(pommels) * len(crossguards) * len(blades)",
"def queryNumberOfColors(self):\n self._numColorsInUse = \\\n self._readInt('How many colors are available', 2, len(self._palette))\n return self._numColorsInUse",
"def setColorConf(colors,ngroups)->list:\n if colors == \"hcl\":\n try:\n from colorspace import sequential_hcl\n color_repo = sequential_hcl(h=[15,375],l=65,c=70)\n colors_list = color_repo.colors(ngroups + 1)\n except ImportError:\n print('hcl colorspace package has not being installed.')\n print('please try the following command:')\n print('pip install git+https://github.com/retostauffer/python-colorspace')\n else:\n colors = list(plt.get_cmap(colors).colors)\n colors_list = [to_hex(color) for color in colors]\n colors_list = colors_list[:ngroups]\n\n return colors_list",
"def colorFlags(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tflags = [i for i in dir(cv2) if i.startswith('COLOR_') and filterStr in i]\n\treturn flags",
"def colors(self):\r\n\t\treturn self._colors",
"def _random_color_picker(self, num_of_categories: int) -> List[str]:\n color_list = list()\n color_picker = Tab10()\n\n if num_of_categories > len(color_picker):\n raise IndexError(\"Requested number of colors {} > {}. Please use dataclass that has more colors available.\")\n\n i = 0\n while i < num_of_categories:\n ran_color = color_picker.getrandomcolor()\n if ran_color not in color_list:\n color_list.append(ran_color); i += 1\n\n return color_list",
"def get_options(cls, player, context={}):\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif cls.can_be_played(card, context):\n\t\t\t\toptions.extend(card.actions)\n\t\toptions.append(Action(None, \"DRAW\", [DrawCard]))\n\t\treturn options",
"def getColors():\n colors = ['#d53e4f',\n '#fc8d59',\n '#fee08b',\n '#ffffbf',\n '#e6f598',\n '#99d594',\n '#3288bd',\n ]\n return colors",
"def get_color(self):\n colors = []\n color_specs = [self._red_spec, self._green_spec,\n self._blue_spec, self._white_spec]\n for spec in color_specs:\n driver = DRIVERS[spec.addr]\n colors.append(driver.get_duty_cycle(spec.pin))\n \n return colors",
"def getcolors(self, maxcolors=256):\r\n\r\n if self._mode in (\"1\", \"L\", \"P\"):\r\n h = self._instance.histogram()\r\n out = []\r\n for i in range(256):\r\n if h[i]:\r\n out.append((h[i], i))\r\n if len(out) > maxcolors:\r\n return None\r\n return out\r\n uni, counts = self._getcolors()\r\n if c>maxcolors: return None\r\n colors = []\r\n for l in range(len(counts)):\r\n colors.append((counts[l], l))\r\n return colors",
"def colors(self):\n return self._colors",
"def _random_color(self):\n levels = range(0, 256)\n return tuple(random.choice(levels) for _ in range(3))",
"def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:\n return [random_colour(rng) for _ in range(size)]",
"def getPaletteButtons(self, paletteType): \n if paletteType == \"cloth\":\n return [ [COLOR_YELLOW, COLOR_ORANGE, COLOR_RED], \n [COLOR_PINK, COLOR_BLUE, COLOR_PURPLE], \n [COLOR_GREEN, COLOR_WHITE, COLOR_BLACK] ] \n elif paletteType == \"hair\":\n return [ [COLOR_BLONDE, COLOR_BROWN, COLOR_BLACK] ]\n elif paletteType == \"skin\":\n return [ [SKIN_1, SKIN_2, SKIN_3], \n [SKIN_4, SKIN_5, SKIN_6], \n [SKIN_7, SKIN_8, SKIN_9]]\n else:\n return []",
"def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret",
"def preset_modes(self):\n return list(PRESET_MODE_TO_DPS_MODE.keys())",
"def randcolour():\n colour = [0,0,0]\n while sum(colour)<450:\n for i in range(3):\n colour[i] = int(random.random()*255)\n return(tuple(colour))"
] | [
"0.70465565",
"0.65711427",
"0.6478505",
"0.64456594",
"0.6323879",
"0.62680316",
"0.61799985",
"0.6158194",
"0.61572695",
"0.61044914",
"0.6100955",
"0.60627854",
"0.60627854",
"0.60460067",
"0.60313606",
"0.59857523",
"0.59533656",
"0.5913804",
"0.5863346",
"0.58114314",
"0.5794095",
"0.5790234",
"0.5776632",
"0.5750859",
"0.57339096",
"0.5730778",
"0.570915",
"0.5693706",
"0.5692818",
"0.5670276"
] | 0.75890493 | 0 |
Completes a single turn by showing the gameboard, playing each of the players' turns, and printing the result. The former and latter are only done if the game type is RL. | def play_single_turn(self, action=None):
self.turn_count += 1
if self.save_images_suffix:
self.game_board.graphical_output(save=True, display=False,
image_suffix=f'{self.save_images_suffix}_{self.turn_count}')
if self.game_type == self.game_types['human']:
self.game_board.graphical_output()
self.player_1.play_turn(action if action else self.get_color_options())
self.player_2.play_turn(self.get_color_options())
if self.game_type == self.game_types['vs_ai']:
self.game_board.graphical_output(save=True, image_suffix=self.turn_count)
if self.game_type != self.game_types['r_l']:
print(f"player 1 played {self.player_1.color}: {self.player_1.score}")
print(f"player 2 played {self.player_2.color}: {self.player_2.score}")
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def finishTurn(self):\n print \"go\"\n sys.stdout.flush()",
"def play(self, turn):\n # global black_prompt, white_prompt, res, pi, board\n if turn % 2 == 0:\n prompt, requests_add, responses_add, color_to_play = self.bp, self.bp, self.wp, BLACK\n print(\"pure\")\n res = pure_MCTS.UCTAlg(json=prompt).run(time_limit=1)\n else:\n prompt, requests_add, responses_add, color_to_play = self.wp, self.wp, self.bp, WHITE\n print(\"alpha\")\n res = mcts.uctAlg.UCTAlg(predict_model=player, json=prompt, mode='comp').run(time_limit=1)[0]\n print(res)\n self.board.disc_place(color_to_play, res[0], res[1]) # record steps to board\n\n dct = {'x': res[0], 'y': res[1]}\n requests_add[\"responses\"].append(dct)\n responses_add[\"requests\"].append(dct)",
"def __end_turn(self):\n self._turn_counter += 1\n # päivittää pelaajan\n self._player %=2\n self._player += 1\n if (self._gui):\n self.__status[\"text\"] = self._player_names[self._player-1]\n self.__turn_counter_label[\"text\"]= \"{}. Turns taken\".format\\\n (self._turn_counter)\n self.__turn_counter_label.update()\n\n # This will cause stackoverflow in training.\n if (self._state == PLAY and self.__check_ai_turn()):\n self.__ai_turn()",
"def main() -> None:\n # the current game is initialized with 1, 3, 5, 7 matches on the 4 rows.\n game: List[int] = [1, 3, 5, 7]\n\n print(\"\\nGame of Nim\")\n print( \"===========\")\n display_game(game)\n start = input(\"Do you want to start? (y/n) \")\n print()\n if start==\"y\" or start==\"Y\":\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n while True:\n print(\"My turn\")\n computer_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"I WON\\n\")\n break\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"YOU WON\\n\")\n break",
"def turn(player, rnd, turnNumber, dice, actionTable, gameSize):\n\n if player.type() == 'human':\n decision = interactiveTurn(player, rnd, turnNumber, dice)\n elif player.type() == 'ai-dumb':\n decision = yahtzai_dumb_ai.ai_engine(player, rnd, turnNumber, dice)\n elif player.type() == 'ai-less-dumb':\n decision = yahtzai_less_dumb_ai.ai_engine(player, rnd, turnNumber, dice)\n elif player.type() == 'ai-random':\n decision = yahtzai_random_ai.ai_engine(player, rnd, turnNumber, dice)\n elif player.type() == 'ai-rl':\n decision = yahtzai_rl_ai.ai_engine(player, rnd, turnNumber, dice, actionTable, gameSize)\n\n # LOG GAME PLAY -- comma separated values for each turn, newline separating turns\n # LOG SYNTAX: playerName, type, id, scores, roundNumber, turnNumber, dice, decision, returnVal\n # EXAMPLE: player1, human, 2314123.12341, (-1, -1, 6, ...), 4, 2, (2, 2, 2, 3, 3), n, ScoreEnum.FULL_H\n with open('gamelog', 'a+') as f:\n f.write(f'{player.name()},{player.type()},{player.playerId()},')\n f.write(f'{\"(\" + \",\".join(str(i) for i in player.scores()) + \")\"},')\n f.write(f'{rnd},{turnNumber},{\"(\" + \",\".join(str(i) for i in dice.view()) + \")\"},')\n f.write(f'{decision[0]},{decision[1]}\\n')\n\n if (decision[0] == 'n'):\n applyScore(player, decision[1], dice)\n\n if (player.type() == 'human'):\n clearScreen()\n print(f'\\nRound {rnd}. Player {player.name()} results:\\n')\n printScorecard([player])\n pressEnterToContinue()\n\n return False #Indicating to NOT roll again\n else:\n dice.roll(*decision[1])\n return True #Indicating to roll again",
"def displayTurn(self, guess, result):\n self._currentTurnNum += 1\n print 'On turn', self._currentTurnNum, 'of', self._maxNumberOfTurns,\n print 'guess', self._patternAsString(guess), 'scored',\n print result.getNumBlack(), 'black and', result.getNumWhite(), 'white.'",
"def do_turn(self, player):\n \n # if the player is out, skip the turn\n if player.account.balance == 0:\n pass\n else:\n # look up method based on dreidel spin;\n # pass player as argument to method;\n # print the result message along with the turn number\n print 'Turn #%d: %s' % (self._turn_count,\n self.actions[player.spin()](player),)\n \n # if the player ran out of items, the player is out;\n # decrement the number of active players.\n if player.account.balance == 0:\n self.total_in -= 1\n print OUT_TEMPLATE % (player,)\n \n # show updated status of everyone's balance\n self.show_balances()\n print '-'*SEPARATOR_LENGTH\n \n # increment the number of turns\n self._turn_count += 1",
"def play(the_game):\n\n print('-' * 50)\n print('')\n player = the_game.player_list[the_game.turn]\n print(' Turn {0} as {1}'.format(player.name, player.piece.name))\n print(' Rolling...\\n')\n die1, die2, roll = the_game.roll()\n\n print(' {0} + {1} = {2}!'.format(die1, die2, roll))\n\n if the_game.dice.doubles:\n print('** D O U B L E S ! **\\n')\n if player.in_jail:\n print('*** GET OUT OF JAIL ***')\n player.leave_jail()\n player.doubles = 0\n\n if player.doubles == 2:\n player.doubles = 0\n player.go_to_jail()\n print('*** DOUBLES THIRD TIME. GO TO JAIL! ***\\n')\n the_game.next_turn()\n else:\n player.doubles += 1\n if player.doubles == 1:\n print('Doubles First time')\n elif player.doubles == 2:\n print('Doubles Second time')\n else:\n player.doubles = 0\n\n if player.in_jail:\n player.position = 10\n\n if player.passed_go and not (player.doubles == 2 and the_game.dice.doubles):\n print('\\n $$$ {0} Passed GO! $$$\\n'.format(player.name))\n player.passed_go = False\n player.receive(200)\n\n print(' {0} Landed on {1}.'.format(\n player.name, the_game.board.location(player.position).name))",
"def _do_outputs(self):\n self._puzzle.display_revealed_puzzle()\n hint = self._puzzle.get_hint()\n self._console.write(hint)\n print(\"\")\n self._jumper.draw_jumper()\n print(\"\")\n\n # These ifs end the game\n if self._puzzle.is_solved():\n self._keep_playing = False\n self._puzzle.display_win_screen()\n \n if self._puzzle.incorrect_guesses >= 4:\n self._keep_playing = False\n self._puzzle.display_loss_screen()",
"def progress_game(self):\r\n\r\n if self.actions == len(self.players):\r\n # Reveal the 3 first cards\r\n output_text = \"Dealing the flop...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.flop()\r\n\r\n if self.actions == 2 * len(self.players):\r\n # Reveal a 4th card\r\n output_text = \"Dealing the turn...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.turn()\r\n\r\n if self.actions == 3 * len(self.players):\r\n # Reveal a 5th card\r\n output_text = \"Dealing the river...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.river()\r\n\r\n if self.actions == 4 * len(self.players):\r\n self.showdown()",
"def playGame(self):\n print(\"\\nPlay Game\")\n if (self.EndGame()):\n print(\"EndGame stt: \", self.EndGame())\n\n print(\"The End\")\n return True\n else:\n # Get pieceList from thong\n input_result = self.inputMove() \n # input move return 2 forms: True and input result\n if input_result is not True:\n return input_result\n else:\n # Export time table to csv\n black_timetable = pd.DataFrame(self.timetable_black, columns=['Iteration', 'Time']).to_csv(\n \"Time_black.csv\", index=False)\n return True",
"def run_turn(self):\n\n # Here is where you'll want to code your AI.\n\n # We've provided sample code that:\n # 1) prints the board to the console\n # 2) prints the opponent's last move to the console\n # 3) prints how much time remaining this AI has to calculate moves\n # 4) makes a random (and probably invalid) move.\n\n # 1) print the board to the console\n self.print_current_board()\n\n # 2) print the opponent's last move to the console\n if len(self.game.moves) > 0:\n print(\"Opponent's Last Move: '\" + self.game.moves[-1].san + \"'\")\n\n # 3) print how much time remaining this AI has to calculate moves\n print(\"Time Remaining: \" + str(self.player.time_remaining) + \" ns\")\n\n # 4) make a move\n (piece_index, move_index) = self.tlabiddl_minimax()\n\n # flip board indicies if playing from other side\n if self.player.color == \"Black\":\n piece_index = 119 - piece_index\n move_index = 119 - move_index\n\n # convert indices to SAN\n piece_pos = square_san(piece_index)\n move_pos = square_san(move_index)\n piece = self.get_game_piece(piece_pos.rank, piece_pos.file)\n piece.move(move_pos.file, move_pos.rank, promotionType=\"Queen\")\n\n return True # to signify we are done with our turn.",
"def game():\n print(fill('Greeting players! For your convenience, the system will automatically roll for you if no prior '\n 'decisions is required. Let the Game begins!', TXT_WIDTH()))\n player1_name = 'player1'\n player2_name = 'player2'\n score_sheet1 = create_score_sheet(player1_name)\n score_sheet2 = create_score_sheet(player2_name)\n while not endgame(score_sheet1, score_sheet2):\n if EMPTY_BOX() in score_sheet1:\n print('\\nPlayer1, your turn has begun!\\n')\n turn_manager(score_sheet1)\n else:\n print('\\nPlayer1, your score_sheet is fulled! The system will skip your turn now!\\n')\n\n if EMPTY_BOX() in score_sheet2:\n print('\\nPlayer2, your turn has begun!\\n')\n turn_manager(score_sheet2)\n else:\n print('\\nPlayer2, your score_sheet is fulled! The system will skip your turn now!\\n')\n game_summary(score_sheet1, score_sheet2)",
"def play(self) -> None:\n\n def run() -> Iterator[Tuple[int, Optional[int]]]:\n while not self.game_state.is_game_ended():\n self.game_state.print_game_state()\n legal_actions = get_possible_actions(self.game_state)\n assert legal_actions is not None, \"Game has not ended.\"\n action = self.get_action(legal_actions)\n print(\"executing action:\", gi.ACTION_MAPPING[action])\n t = self.execute_action(action, legal_actions)\n yield action, t\n\n # Print player scores once game ends\n self.game_state.print_player_scores()\n return\n\n if not self.outfile:\n [_ for _ in run()]\n return\n\n with open(self.outfile, \"a+\") as outfile:\n for action, t in run():\n if action == gi.DRAW:\n outfile.write(f\"{gi.DRAW_OPTIONS[0]} {t}\\n\")\n else:\n outfile.write(f\"{action}\\n\")",
"def round_end_screen(self, board, p1, ai):\n if self.number == 0:\n if p1.score > ai.score:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\nYou Won! ', end='')\n elif ai.score > p1.score:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\nYou lost. ', end='')\n else:\n print(f\"Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\nIt's a draw. \", end='')\n elif self.number == 1:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\n1 game left! ', end='')\n else:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\n{self.number} games left! ', end='')",
"def test_turn_ai_and_players(lab):\n print('Test turn')\n lab.update_game()\n test_print(lab)\n print(\"Turn: {}\".format(lab.turn_count))\n print('Test completed')",
"def finish_turn(game_ID):\n state = get_state(game_ID)[\"playerState\"]\n\n if state[\"redPoints\"] == NUM_RED_WORDS:\n return set_winner(game_ID, \"red\")\n elif state[\"bluePoints\"] == NUM_BLUE_WORDS:\n return set_winner(game_ID, \"blue\")\n\n if state[\"attemptsLeft\"] == 0 and state[\"action\"] == \"chooser\":\n r.hset(\n \"state:\" + game_ID,\n mapping={\"turn\": opposite(state[\"turn\"]), \"action\": \"spymaster\"},\n )",
"def go(self):\r\n self.__last_word = ''\r\n result = 'first_turn'\r\n random.seed()\r\n self.__current_player = random.randint(0, 1)\r\n\r\n while True: \r\n if result == 'first_turn':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'first_turn', self.__last_word)\r\n elif result == 'rejected':\r\n self.__current_player = 1 - self.__current_player\r\n self.__last_word = self.__previous_word\r\n answer = self.__notify_user(self.__players[self.__current_player], 'repeat_turn', self.__last_word)\r\n elif result == 'wrong_letter':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'wrong_letter', self.__last_word)\r\n elif result == 'already_used':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'already_used', self.__last_word)\r\n elif result == 'not_exist':\r\n answer = self.__notify_user(self.__players[self.__current_player], 'not_exist', self.__last_word)\r\n elif result == 'validated':\r\n self.__previous_word = self.__last_word\r\n self.__last_word = answer\r\n self._save_state()\r\n self.__current_player = 1 - self.__current_player\r\n answer = self.__notify_user(self.__players[self.__current_player], 'your_turn', self.__last_word)\r\n elif result == 'stop_game':\r\n return\r\n\r\n result = self.__validate_word(answer)",
"def play():\n global done\n done = False\n g = Game()\n turn = random.choice([PLAYER, AI])\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n reward = 20\n else:\n reward = -20\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return",
"def next_round(self):\n if self.finish_game == 3:\n self.restart_game()\n return\n\n atual_color = self.atual_player.color\n if self.board.valid_moves(atual_color).__len__() > 0:\n self.board.play(self.atual_player.play(self.board.get_clone()), atual_color)\n self.view.atualizar_discos()\n self.finish_game = 0\n else:\n self.finish_game += 1\n self.atual_player = self._opponent(self.atual_player)\n\n self.view.atualizar_jogador_atual(self.atual_player.color)\n\n if self.finish_game == 2:\n self._end_game()",
"def do_next_turn(self, button):\n button.set_label(RUNNING_TURN_MSG)\n self.referee.run_turn()\n self.scores.get_buffer().set_text(self.get_current_scores_buffer())\n button.set_label(RUN_NEXT_TURN_MSG)\n self.darea.queue_draw()",
"def take_turn(self):\r\n self._choose_best_option()\r\n self._do_draw()",
"def run(game):\n name = cli.welcome_user(game.DESCRIPTION)\n\n for _try_num in range(0, 3):\n question, right_answer = game.run_round()\n print('Question: {question}'.format(question=question))\n user_answer = prompt.string('Your answer: ')\n\n if user_answer != right_answer:\n print(WRONG_ANSWER_TEMPLATE.format(user_answer, right_answer, name))\n break\n print('Correct!')\n else:\n print('Congratulations, {name}!'.format(name=name))",
"def play(self):\n \n while True:\n self.print_board()\n self.display_board()\n winner = self.is_game_won()\n if winner or self.is_filled():\n break\n \n if self.turn == _PLAYER:\n col = self.human_turn()\n else:\n col = self.ai_turn()\n\n row = self.get_row_for_col(col)\n self.board[7 * row + col] = self.turn\n self.last_play_rc = row, col\n\n if self.debug:\n print(\"position scores:\",\n \"player=\", score_position(self.board, _PLAYER),\n \"ai=\", score_position(self.board, _AI))\n \n self.turn = _AI if self.turn == _PLAYER else _PLAYER\n \n if winner == 0:\n msg = \"Tie!\"\n elif winner == 1:\n msg = \"You win!\"\n else:\n msg = \"I win!\"\n \n oled.text(msg, 64, 30)\n oled.show()\n print(\"\\n\" + msg + \"\\n\")\n \n if winner == 0 or winner == 1:\n if self.plies == 3:\n print(\"\"\"\n(Of course, you did set me to easy mode, which I feel compelled to mention.)\n\"\"\")\n print(\"\"\"\n\nThere are some interesting things to learn about ConnectFour:\n\n {url}\n\nTo move ahead:\n\n >>> import sensors\n >>> sensors.start()\n\n\"\"\".format(url=url(\"connectfour\")))\n\n else:\n print(\"\"\"\nWow. You were beat by a $4 computer--using only one of my processors (!!).\nTo get the code to move ahead, you'll need to at least tie me.\n\nTo play again, make a new instance of the ConnectFour class. You can choose\ndifferent options than the defaults:\n\n connectfour.ConnectFour(plies, start_player, serial_input, debug)\n - plies [5]: moves to look ahead (3-6, where 3 is easy and 6 is slow and hard\n - start_player [0]: 0 for random, 1 for you, 2 for me\n - serial_input [False]: Enter moves w/keyboard in terminal instead of knob\n - debug [False]: Show information about current AI evaluation scores\n\nFor example:\n\n >>> g = ConnectFour(plies=4, start_player=1)\n >>> g.play()\n\n\"\"\")",
"def display_result(self) -> None:\n winner = self.state.winner\n if winner:\n self._display_message(winner + ' wins!')\n else:\n self._display_message('Draw')\n\n self._display_message(\n f'\\n{self.state.player1} has {self.state.player1_score} wins'\n )\n self._display_message(\n f'{self.state.player2} has {self.state.player2_score} wins\\n'\n )",
"def turn(self):\r\n # 1 throw the dice\r\n if (\r\n not self.player_list[self.current_player].is_in_jail()\r\n or self.try_leave_jail()\r\n ):\r\n thr = Throw()\r\n while thr is not None:\r\n # check in where the current player will land\r\n new_position = self.compute_new_position_from_dice(\r\n self.current_player, thr\r\n )\r\n self.move_player_to(self.current_player, new_position, thr=thr)\r\n\r\n if thr.is_double():\r\n thr = Throw()\r\n else:\r\n thr = None\r\n print(\"------------------------------\")\r\n\r\n self.player_observable_variables()\r\n\r\n # move turn to next player\r\n self.current_player += 1\r\n if self.current_player >= len(self.player_list):\r\n self.current_player = 0\r\n self.full_turn_count += 1\r\n print(\"**********************\")\r\n print(\r\n \"Full turn:\",\r\n self.full_turn_count,\r\n \"\\n\",\r\n \"\\n\".join(map(lambda x: x.full(), self.player_list)),\r\n )\r\n print(\"**********************\")",
"def show_results(self):\r\n\r\n if self.player_cards > self.computer_cards: # player wins\r\n print('\\nCongratulations!!')\r\n print('You WIN by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n elif self.player_cards < self.computer_cards: # computer wins\r\n print('\\nToo bad!!')\r\n print('You LOST by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n else: # tied\r\n print('You TIED by {0} / {1}'.format(self.player_cards, self.computer_cards))",
"def print(self):\n for i in range(self.rows):\n print(\"--\" * self.cols + \"-\")\n for j in range(self.cols):\n cell = self.get_game_cell(i, j)\n if cell is None:\n print(f'({i} - {j}): failed')\n return None\n if cell.status == 'EMPTY':\n print(\"| \", end=\"\")\n else:\n print(f\"|{cell.status}\", end=\"\")\n print(\"|\")\n print(\"--\" * self.cols + \"-\")\n print(f\"Completed({self.completed}) - {self.winner}\")",
"def step(self, action):\n self.game.play_single_turn([action])\n next_obs = self.get_state()\n reward = self.game.player_1.score - self.game.turn_count\n done = self.game.check_for_end_of_game() or self.game.turn_count > 25\n\n if done:\n if self.game.player_1.score > self.game.player_2.score:\n reward += 25\n elif self.game.player_2.score > self.game.player_1.score:\n reward -= 25\n\n if self.game.save_images_suffix:\n image_suffix = f'{self.game.save_images_suffix}_{self.game.turn_count+1}'\n self.game.game_board.graphical_output(save=True, display=False, image_suffix=image_suffix)\n\n return next_obs, reward/100, done",
"def play_against_random(self, action, display_game=False):\n state, status, done = self.step(action)\n if display_game: env.render()\n if not done and self.turn == 2:\n state, s2, done = self.random_step()\n if display_game: env.render()\n if done:\n if s2 == self.STATUS_WIN:\n status = self.STATUS_LOSE\n elif s2 == self.STATUS_TIE:\n status = self.STATUS_TIE\n else:\n raise ValueError(\"???\")\n return state, status, done"
] | [
"0.688064",
"0.66935813",
"0.6467623",
"0.64412105",
"0.6440219",
"0.6420127",
"0.63784677",
"0.6366233",
"0.635333",
"0.62903583",
"0.62639993",
"0.62625086",
"0.6251562",
"0.6237911",
"0.6230091",
"0.6221477",
"0.62081194",
"0.6202306",
"0.616118",
"0.6145242",
"0.6123327",
"0.6120511",
"0.6119033",
"0.6118288",
"0.608557",
"0.6077608",
"0.60745776",
"0.60710454",
"0.6065118",
"0.6059971"
] | 0.67183626 | 1 |
Outputs the gameboard as text. | def text_output(self):
print(self.board)
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_board(self):\n print(self.game_board)",
"def render_board(self):\n print \"\"\n for row in self._board:\n print row",
"def draw(self):\n res = ''\n # ANSI code to clear the screen\n #res += chr(27) + \"[2J\"\n for position, value in enumerate(self.board.tttboard):\n if value is None:\n res += str(position)\n #sys.stdout.write(str(position))\n else:\n res += str(value)\n #sys.stdout.write(str(value))\n\n if (position + 1) % 3 != 0:\n res += str('|')\n #sys.stdout.write('|')\n else:\n #print ''\n\n res += str('\\n')\n if position == 2 or position == 5:\n #print '-' * 5\n\n res += '-' * 5\n res += str('\\n')\n return res",
"def print_board(self):\n self.board.print()",
"def print_board(self):\n \n # How to show empty/p1/p2\n VALS = \".XO\"\n\n print(\"\\n a b c d e f g\")\n print(\" /--+-+-+-+-+-+--\\\\\")\n for r in range(_HEIGHT - 1, -1, -1):\n s = \"%s |\" % r\n for c in range(_WIDTH):\n # Print mark next to most recent move\n mark = \">\" if self.last_play_rc == (r, c) else \" \"\n s += mark + VALS[self.board[r * 7 + c]]\n print(s + \" |\")\n print(\" \\\\--+-+-+-+-+-+--/\")\n print(\" a b c d e f g\\n\")",
"def print_board(self):\n print(*self._board, sep=\"\\n\")",
"def print(self):\n board_string = ''\n for y in range(self.size):\n if y == 0:\n board_string += '+ '\n for x in range(self.size):\n board_string += str(x+1) + ' '\n board_string += '\\n'\n board_string += (1+3*self.size)*'-'\n board_string += '\\n'\n board_string += str(y+1)+'|'+y*' '\n \n for x in range(self.size):\n board_string += ' '\n if self.board[y,x] == HexBoard.BLUE:\n board_string += self.char_player1\n elif self.board[y,x] == HexBoard.RED:\n board_string += self.char_player2\n else: \n board_string += self.char_empty\n board_string += '\\n'\n board_string = board_string.strip()\n\n print(board_string)",
"def show_board(self):\n print(self.game_board)",
"def showBoard(self):\n \n brd = \"\\n | | \\n\" + \\\n \" \" + self.squares[0] + \" | \" + self.squares[1] + \" | \" + self.squares[2] + \" \\n\" + \\\n \"___|___|___\\n\" + \\\n \" | | \\n\" + \\\n \" \" + self.squares[3] + \" | \" + self.squares[4] + \" | \" + self.squares[5] + \" \\n\" + \\\n \"___|___|___\\n\" + \\\n \" | | \\n\" + \\\n \" \" + self.squares[6] + \" | \" + self.squares[7] + \" | \" + self.squares[8] + \" \\n\" + \\\n \" | | \\n\"\n\n return brd",
"def print_board(self):\n for i in range(self.size):\n print(\" \".join(self.board[i]))\n print(\"\\n\")",
"def print_board(self):\n print(self.board)",
"def print_board(self):\n print('Board:')\n print('\\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in self.board]))",
"def print_board(self):\n\n print\n\n for row in xrange(8):\n for column in xrange(8):\n if self.squares[row][column]:\n print self.squares[row][column],; sys.stdout.write(u'')\n else:\n if self.dark_square((row, column)):\n print u' __ ',; sys.stdout.write(u'')\n else:\n print u' . ',; sys.stdout.write(u'')\n print\n print",
"def printBoard(self):",
"def print(self):\r\n base = 8 * self.width\r\n print(base * \"-\")\r\n for x in range(self.height):\r\n output = \"\"\r\n for y in range(self.width):\r\n output = output + self.board[x][y] + \"|\"\r\n print(\"|\" + output)\r\n print(base * \"-\")",
"def display(self):\n\n #player UI\n s = \" \"\n for p in range(WIDTH):\n s += str(p)\n s += \" \"\n\n print(s)\n\n for row in range(HEIGHT):\n\n # player UI\n print(row, end=' ')\n\n for col in range(WIDTH):\n\n if self.board[row][col] == 1:\n print(\"X\", end=' ')\n elif self.board[row][col] == 2:\n print(\"O\", end=' ')\n else:\n print(\"-\", end=' ')\n print()",
"def display(self):\n lines = []\n lines.append(\" \".join([str(x + 1) for x in range(self.board.columns)]))\n for row in self.board.board:\n new_row = \"\".join([c for c in row])\n new_row = new_row.replace(\"0\", \" \")\n new_row = new_row.replace(\"1\", colored(f\"{PLAYER1_CHR} \", PLAYER1_COLOR))\n new_row = new_row.replace(\"2\", colored(f\"{PLAYER2_CHR} \", PLAYER2_COLOR))\n lines.append(new_row)\n return \"\\n\".join(lines)",
"def print_board(self):\n\n print(\"Board update:\")\n\n count = 0\n\n for c in self.__game_board:\n i = int(c)\n\n char_to_print = chr(i)\n\n if char_to_print in ('X', 'O'):\n print(chr(i), end='')\n else:\n print(\" \", end='')\n\n printSeparator(count)\n\n count += 1",
"def print_board(self):\n to_join = [\"-\" * self.DIMENSIONS[0]]\n for row in self.grid:\n to_join.append(\"\".join([ch.letter if ch is not None else \" \" for ch in row]))\n\n print(\"\\n\".join(to_join))",
"def print_board(self):\n div = int(math.sqrt(self.BoardSize))\n dash = \"\"\n space = \"\"\n line = \"+\"\n sep = \"|\"\n for i in range(div):\n dash += \"----\"\n space += \" \"\n for i in range(div):\n line += dash + \"+\"\n sep += space + \"|\"\n for i in range(-1, self.BoardSize):\n if i != -1:\n print \"|\",\n for j in range(self.BoardSize):\n if self.CurrentGameBoard[i][j] > 9:\n print self.CurrentGameBoard[i][j],\n elif self.CurrentGameBoard[i][j] > 0:\n print \"\", self.CurrentGameBoard[i][j],\n else:\n print \" \",\n if (j+1 != self.BoardSize):\n if ((j+1)//div != j/div):\n print \"|\",\n else:\n print \"\",\n else:\n print \"|\"\n if ((i+1)//div != i/div):\n print line\n else:\n print sep",
"def render_board(board):\n for line in board:\n print(' '.join(line))",
"def print_board(self):\n print(\" 1 2 3 4 5 6 7\")\n for row in range(self.playable_row_range[0], self.playable_row_range[1]):\n for col in range(self.playable_column_range[0], self.playable_column_range[1]):\n print(\"[{piece}]\".format(piece=self.board[row][col]), end=\" \")\n print('\\n', end=\"\")\n print(\"\\n\")",
"def print_board(self):\n print_sp = functools.partial(print, end=' ')\n print_sp(' ')\n for i in range(BOARD_SIZE):\n print_sp(i)\n print()\n for i in range(BOARD_SIZE):\n print_sp(i)\n for j in range(BOARD_SIZE):\n e = self.board[j][i]\n print_sp('●') if e == BLACK else print_sp('○') if e == WHITE else print_sp('·')\n print()",
"def display_board(self):\n print(\"-\" * 9)\n for i in range(0, len(self.game_board), 3):\n row = self.game_board[i:i + 3]\n print('|', *row, '|', sep=' ')\n print('-' * 9)",
"def print_board(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n \n for i in range(num_rows):\n if i % 3 == 0 and i != 0:\n print(\"- - - - - - - - - - - -\")\n \n for j in range(num_cols):\n if j % 3 == 0 and j != 0:\n print(\" | \", end=\"\")\n \n if j == 8:\n print(self.board[i][j])\n else:\n number = str(self.board[i][j])\n print(\"{} \".format(number), end='')",
"def print_board(self):\n board = \"\"\n for i in range(3):#need to change this in the future\n for j in range(3):#need to change this in the future\n board += self.board[i][j]\n if j != 2:#need to change this in the future\n board += \" | \"\n board += \"\\n\"\n return board",
"def display_board(self):\n print('*' + '*'.join(['**']*len(self.board[0])) + '*')\n for row in self.board:\n print('|' + ' '.join([('%s' % square) for square in row]) + '|')\n print('*' + '*'.join(['**']*len(self.board[0])) + '*')",
"def print_board(self):\n print(\n self.BOARD_TEMPLATE.format(\n *[self.COUNTER_REPRESENTATION[counter] for counter in self.board])\n )",
"def display(self):\n for row in self._board_area:\n print(row, end=\"\\n\")",
"def display_board(self):\r\n board = self._board\r\n col = \"A\"\r\n row = 1\r\n\r\n print(end=\"|\")\r\n for i in range(9): # Prints column labels of board\r\n print(\" \" + col + \" |\", end=\"\")\r\n col = chr(ord(col) + 1)\r\n print()\r\n\r\n for i in range(10): # Prints the board\r\n print(end=\"|\")\r\n for j in range(9):\r\n if board[i][j].get_piece() is not None:\r\n print(board[i][j].get_piece().get_id(), end =\"|\")\r\n else:\r\n print(\" \", end = \"|\")\r\n print(\" \" + str(row))\r\n if i != 4 and i != 9:\r\n print(\"____\" * 9)\r\n if i == 4:\r\n print(\"~~~~~~~~~~~~~~~~RIVER~~~~~~~~~~~~~~~\")\r\n row += 1"
] | [
"0.76990277",
"0.7684276",
"0.7571131",
"0.75571936",
"0.75506896",
"0.75325507",
"0.7497205",
"0.7475444",
"0.74271286",
"0.7372031",
"0.7361939",
"0.73593736",
"0.7353311",
"0.73500687",
"0.7348999",
"0.7339537",
"0.73229283",
"0.73167104",
"0.7309406",
"0.7297968",
"0.7288065",
"0.7235469",
"0.72126937",
"0.7194325",
"0.7193144",
"0.7178924",
"0.71679366",
"0.7165473",
"0.71646",
"0.715733"
] | 0.8650465 | 0 |
Gets the color at the specified coordinates on the gameboard. | def get_color(self, coord):
return self.board[coord[0], coord[1]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_color(self, _pos):\n return self.__framebuffer[_pos]",
"def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr",
"def get_colour(self, x, y):\n if x >= self.width or y >= self.height:\n return (0, 0, 0)\n\n return self.env_img.get_at((int(x), int(y))).normalize()[0:3]",
"def get_rgb(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_rgb()",
"def get_at(\n self,\n pos: Tuple2NumberType,\n ignore_alpha: bool = False\n ) -> Union[Tuple3IntType, Tuple4IntType, 'pygame.Color']:\n assert_vector(pos, 2)\n color = self._surface.get_at(pos)\n if ignore_alpha:\n return color[0], color[1], color[2]\n return color",
"def get_blue(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_blue()",
"def get_red(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3]",
"def get_red(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_red()",
"def FindColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_FindColor(self, *args)",
"def color(self):\n if self._simplecell:\n self.fetch()\n return self._color",
"def getPixel(self,x,y):\n return color_to_rgb(self._image.get(x, y))",
"def get(self, point):\n string = self.__grid.get(point)\n if string is None:\n return None\n return string.color",
"def GetColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_GetColor(self, *args)",
"def GetPixel(*args, **kwargs):\n return _gdi_.Colour_GetPixel(*args, **kwargs)",
"def get_color(self):\n self.view.present(\n \"sheet\",\n orientations=ORIENTATIONS,\n )\n self.view.wait_modal()\n return self.rgb",
"def get_color(self, point):\n \n d = point - self._origin\n dist = int(d.dot(d) ** 0.5) % 2\n if dist == 0:\n return self.c1.dup()\n else:\n return self.c2.dup()",
"def get_blue(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2]",
"def get_colors(self, image: np.ndarray, coordinates: np.ndarray) -> np.ndarray:\r\n x = coordinates.squeeze(1)\r\n return np.flip(image[x[:, 1], x[:, 0]].astype(np.float64) / 255.0, axis=1)",
"def getPixel(self, px, py):\n if not self.inBounds(px,py):\n return IColor()\n idx = py*self.w + px\n return self.data[idx]",
"def get_color(self, point):\n return self._color.dup()",
"def get_pixel_color(self, x, y):\n raise NotImplementedError # remove when we fix it. :)\n\n # do the Window import here because we don't want to import it at the\n # top or else we won't be able to set window properties\n from kivy.core.window import Window\n\n # convert the passed x/y to the actual x/y of the Window since it's\n # possible for the mpf-mc display size to be different than the Window\n # size\n x *= Window.width / Window.children[0].width\n y *= Window.height / Window.children[0].height\n\n return glReadPixels(x, y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE)",
"def get_random_color():\n r=random.randint(0,255)\n g=random.randint(0,255)\n b=random.randint(0,255)\n return(r,g,b)",
"def get_color(self):\n return self.color",
"def getPixelColor(self, n):\n\t\treturn self.leds[n]",
"def get_color(self):\r\n return self._color",
"def get_color(self):\n return COLOR_DICT[self.element]",
"def get_green(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_green()",
"def get_pixel(self, frame: int, x: int, y: int) -> Color:\n return self.get_frame(frame).clone()[x, y]",
"def get_color(self):\n return self._color",
"def get_color(self):\n return self._color"
] | [
"0.6995972",
"0.67755926",
"0.6742886",
"0.6678613",
"0.66424483",
"0.6595865",
"0.65444714",
"0.6500604",
"0.6482022",
"0.64749384",
"0.64731526",
"0.6466952",
"0.6460171",
"0.6415692",
"0.6340766",
"0.6315026",
"0.63021356",
"0.6299288",
"0.62905073",
"0.62475747",
"0.62238955",
"0.6215786",
"0.61972374",
"0.61825556",
"0.6174377",
"0.6158712",
"0.6156585",
"0.6145968",
"0.614485",
"0.614485"
] | 0.81525886 | 0 |