query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Return additional config section for coils.
|
def get_coil_config_section(cls) -> Optional[str]:
return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_coil_config_section(cls):\n return None",
"def get_config(self):\n return self.cat_feats_cfg",
"def _getConfigName(self):\n return \"%s_processCoadd_config\" % (self.config.coaddName,)",
"def get_rec_config(self):\n conf_map = {}\n if len(self.reconstructions.text()) > 0:\n conf_map['reconstructions'] = str(self.reconstructions.text())\n if len(self.device.text()) > 0:\n conf_map['device'] = str(self.device.text()).replace('\\n', '')\n if len(self.alg_seq.text()) > 0:\n conf_map['algorithm_sequence'] = str(self.alg_seq.text()).replace('\\n', '')\n if len(self.beta.text()) > 0:\n conf_map['beta'] = str(self.beta.text())\n if len(self.support_area.text()) > 0:\n conf_map['support_area'] = str(self.support_area.text()).replace('\\n', '')\n if self.cont.isChecked():\n conf_map['cont'] = 'true'\n if len(self.cont_dir_button.text().strip()) > 0:\n conf_map['continue_dir'] = '\"' + str(self.cont_dir_button.text()).strip() + '\"'\n print('cont_dir', conf_map['continue_dir'])\n\n for feat_id in self.features.feature_dir:\n self.features.feature_dir[feat_id].add_config(conf_map)\n\n return conf_map",
"def config(self):\n return \"\\n\".join([ c.config(True) for p, c in self.configs_ ])",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def configure(self, section):",
"def get_coil_overwrite_section(cls):\n return None",
"def get_section(self,name):\n if self.__config.has_section(name):\n data={}\n for opt,val in self.__config.items(name):\n data[opt]=val\n return data\n else:\n raise Exception(_('EVOGTK: Section \"%s\" does not exist in this preferences instance') % name)",
"def validate_coil_section(self, driver, config):\n base_spec = [\"device\"]\n if self.__class__.get_coil_config_section():\n base_spec.append(self.__class__.get_coil_config_section())\n driver.machine.config_validator.validate_config(\n \"coils\", config, driver.name,\n base_spec=base_spec)\n return config",
"def add_conl_config(cfg):\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n cfg.MODEL.CONL = CN()\n\n cfg.MODEL.CONL.STAGES=['res4']\n cfg.MODEL.CONL.BLOCKS=[[-1,],]\n\n cfg.MODEL.CONL.RATIO = 1.0/4.0\n cfg.MODEL.CONL.DOWNSAMPLE=True\n cfg.MODEL.CONL.USE_GN=False\n cfg.MODEL.CONL.LR_MULT=0\n cfg.MODEL.CONL.USE_OUT=False\n cfg.MODEL.CONL.OUT_BN=False\n cfg.MODEL.CONL.WHITEN_TYPE=['channel']\n cfg.MODEL.CONL.TEMP = 1.0\n cfg.MODEL.CONL.WITH_GC=False\n cfg.MODEL.CONL.WITH_2FC=False\n cfg.MODEL.CONL.DOUBLE_CONV=False\n\n cfg.MODEL.CONL.WITH_STATE=False\n cfg.MODEL.CONL.NCLS=32",
"def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config",
"def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}",
"def config_section_data():\n config_data = u\"\"\"[fn_sep]\nsep_base_path=/sepm/api/v1\nsep_auth_path=/sepm/api/v1/identity/authenticate\nsep_host=<SEPM server dns name or ip address>\nsep_port=8446\nsep_username=<username>\nsep_password=<password>\nsep_domain=<SEP domain name>\n# Optional settings for access to SEPM via a proxy.\n#http_proxy=http://proxy:80\n#https_proxy=http://proxy:80\n# Limit result sent to Resilient, add full result as an attachment.\nsep_results_limit=200\n# Period of time (seconds) to wait for all endpoints to return a scan result.\nsep_scan_timeout=1800\n\"\"\"\n return config_data",
"def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))",
"def add_config(self):\n\n config = {\n 'byte_to_integer': ByteToInteger,\n 'integer_to_byte': IntegerToByte,\n 'integer_to_double_integer': IntegerToDoubleInteger,\n 'integer_to_string': IntegerToString,\n 'double_integer_to_integer': DoubleIntegerToInteger,\n 'double_integer_to_real': DoubleIntegerToReal,\n 'double_integer_to_string': DoubleIntegerToString,\n 'binary_coded_decimal_to_integer': BinaryCodedDecimalToInteger,\n 'integer_to_binary_coded_decimal': IntegerToBinaryCodedDecimal,\n 'round': Round,\n 'truncate': Truncate,\n 'real_to_string': RealToString,\n 'integer_to_ascii': IntegerToASCII,\n 'double_integer_to_ascii': DoubleIntegerToASCII,\n 'real_to_ascii': RealToASCII,\n 'ascii_to_hexadecimal': ASCIIToHexadecimal,\n 'hexadecimal_to_ascii': HexadecimalToASCII,\n 'string_to_integer': StringToInteger,\n 'string_to_double_integer': StringToDoubleInteger,\n 'string_to_real': StringToReal,\n 'decode': Decode,\n 'encode': Encode,\n 'segment': Segment\n }\n\n return config",
"def get_config_main_sections(self):\n self.sections_in_config = self.config_handle.sections()",
"def op_config(self) -> Any:\n return self.solid_config",
"def get_config_on_json(self):\n # load section CONFIG from data\n try:\n return self.json_data[\"CONFIG\"]\n except:\n constant.get_error(constant.ERROR_004)",
"def getConfig(self):\n return self.cp",
"def get_config(config):\n section = 'General'\n def add(name, val):\n if not config.has_option(section, name):\n config.set(section, name, val)\n add('input_fofn', 'NA')\n add('target', 'assembly')\n #add('sge_option', 'NA') # Needed for PBS, but not for everything\n add('sge_option_da', 'NA')\n add('sge_option_la', 'NA')\n add('sge_option_pda', 'NA')\n add('sge_option_pla', 'NA')\n add('sge_option_fc', 'NA')\n add('sge_option_cns', 'NA')\n return get_dict_from_old_falcon_cfg(config)",
"def build_confcom_addon_profile(self) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_ACC_SGX_QUOTE_HELPER_ENABLED = addon_consts.get(\n \"CONST_ACC_SGX_QUOTE_HELPER_ENABLED\"\n )\n\n confcom_addon_profile = self.models.ManagedClusterAddonProfile(\n enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: \"false\"})\n if self.context.get_enable_sgxquotehelper():\n confcom_addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = \"true\"\n return confcom_addon_profile",
"def corenlp_coref_props(self):\n coref_props = self.config._sections['corenlp_coref_props']\n return coref_props",
"def get_config(self):\n\n return {section: self.sections[section].get_values() for section in self.sections}",
"def cg_config():\n return {}",
"def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}",
"def config(self, function):\n self.cfgs.append(ConfigScope(function))\n return self.cfgs[-1]",
"def config_section_data():\n config_data = u\"\"\"[feeds]\n# comma separated section names. ex. sqlserver_feed,file_feed\nfeed_names=<your feeds>\nreload=true\n# use reload_types to limit the types of objects when reload=true.\n# Ex: incident,task,note,artifact,attachment,<data_table_api_name>\nreload_types=\n# set to true if ElasticSearch errors occur during reload=true\nreload_query_api_method=false\n\n# feed_data is the default message destination that will be listened to\nqueue=feed_data\n\n# set to true if attachment data should be part of payload send to plugins\ninclude_attachment_data=false\n# if necessary, specify the supported workspace (by label, case sensitive) and the list of feeds associated with it\n# ex: 'Default Workspace': ['sqlserver_feed'], 'workspace A': ['kafka_feed', 'resilient_feed']\nworkspaces=\n\"\"\"\n return config_data",
"def get_config_template(self) -> cconfig.Config:",
"def getZapataConf(self):\n #cProf = briProfiles[self['briconfig']] #Grab the config profile\n #output = self.mergeConfigList(cProf, briConfigList)\n output = []\n for portInd, portLine in enumerate(self.portLines[:-1]):\n if self[portInd]['type'] == 'na':\n continue\n signalling = str.join('_', (self[portInd]['type'], self[portInd]['signalling']))\n output.append(\"group = \"+ str.join(', ', self.pluginEntity.getPortGroup(portLine[1])))\n #Get CallerID\n output.append(\"callerid = \" + self[portInd]['callerid'])\n #Get PickupGroup\n output.append(\"callgroup = \" + self[portInd]['callgroup'])\n output.append(\"pickupgroup = \" + self[portInd]['pickupgroup'])\n #Context Bindings\n output.append(\"context = \"+ self[portInd]['context'])\n output.append(\"signalling = \"+ signalling) \n output.append(\"channel = \"+ str(portLine[0]))\n return output"
] |
[
"0.6866842",
"0.61183715",
"0.58979475",
"0.58378327",
"0.5800856",
"0.573693",
"0.57158256",
"0.5709645",
"0.5694161",
"0.5682659",
"0.56358176",
"0.5625038",
"0.5527064",
"0.5511581",
"0.550774",
"0.5460539",
"0.5455494",
"0.54532677",
"0.54231423",
"0.5389646",
"0.53817236",
"0.53488934",
"0.53459555",
"0.53314257",
"0.5301337",
"0.5281136",
"0.52637583",
"0.52586883",
"0.5247703",
"0.52230763"
] |
0.69864786
|
0
|
Validate coil config for platform.
|
def validate_coil_section(self, driver, config) -> dict:
if self.get_coil_config_section():
spec = self.get_coil_config_section() # pylint: disable-msg=assignment-from-none
config = driver.machine.config_validator.validate_config(spec, config, driver.name)
elif config:
raise AssertionError("No platform_config supported but not empty {} for driver {}".
format(config, driver.name))
return config
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate_coil_section(self, driver, config):\n base_spec = [\"device\"]\n if self.__class__.get_coil_config_section():\n base_spec.append(self.__class__.get_coil_config_section())\n driver.machine.config_validator.validate_config(\n \"coils\", config, driver.name,\n base_spec=base_spec)\n return config",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def _validate_config(self):\n pass",
"def validate_config(self):\n\n # LOCALHOST\n if self.location == 'localhost':\n if 'browserName' not in self.config.keys():\n msg = \"Add the 'browserName' in your local_config: e.g.: 'Firefox', 'Chrome', 'Safari'\" # noqa\n self.runner.critical_log(msg)\n raise BromeBrowserConfigException(msg)\n\n # EC2\n elif self.location == 'ec2':\n self.validate_ec2_browser_config()\n\n # VIRTUALBOX\n elif self.location == 'virtualbox':\n self.validate_virtualbox_config()",
"def check_config(config):\n pass",
"def config_sanity_check(config: dict) -> dict:\n\n # back compatibility support\n config = parse_v011(config)\n\n # check model\n if config[\"train\"][\"method\"] == \"conditional\":\n if config[\"dataset\"][\"train\"][\"labeled\"] is False: # unlabeled\n raise ValueError(\n \"For conditional model, data have to be labeled, got unlabeled data.\"\n )\n\n return config",
"def properties_validation(config_data: Dict = None) -> bool:\n\n if config_data is None:\n config_file = os.path.join(\n os.path.dirname(__file__), 'server-config.json')\n with open(config_file) as config:\n config_data = json.load(config)\n platform_properties, err = PlatformPropertiesSchema().load(config_data)\n\n # Raise error if required property is not provided\n if err:\n raise MissingRequiredParameterError(err)\n\n # Raise error if unsupported protocol or module\n for protocol in platform_properties.supported_transfer_protocols:\n if protocol not in SUPPORTED_PROTOCOLS:\n err = str.format(\"Unsupported protocol {}\", protocol)\n raise ValueError(err)\n for module in platform_properties.supported_modules:\n if module not in SUPPORTED_MODULES:\n err = str.format(\"Unsupported module {}\", module)\n raise ValueError(err)\n\n # Raise error if https not in supported protocols\n if \"https\" not in platform_properties.supported_transfer_protocols:\n raise MissingRequiredParameterError(\n 'CARMIN 0.3 requires https support')\n\n # Raise error if minTimeout is greater than maxTimeout\n if (platform_properties.max_authorized_execution_timeout != 0\n and platform_properties.min_authorized_execution_timeout >\n platform_properties.max_authorized_execution_timeout):\n raise ValueError('maxTimeout must be greater than minTimeout')\n return True",
"def validate_config(self, config: Dict) -> bool:\n raise NotImplementedError",
"def validate_config(self):\n\n ServerHeraldNotifyBase.validate_config(self)\n\n # Prowl requires an API key\n if not self.config_has('prowl'):\n print ('`prowl` notification type requires a Prowl API key to be '\n 'specified in the config file.')\n sys.exit(1)\n\n if not self.config_has('prowl', 'apikey'):\n print 'Prowl requires an API key in the config file'\n sys.exit(1)",
"def _check_config(self):\n self._config[\"dataset_name\"] = MetaDataset(self._config[\"dataset_name\"])\n self._config[\"embedding_crop\"] = EmbeddingCrop(\n self._config[\"embedding_crop\"])\n if self._config[\"dataset_name\"] == MetaDataset.TIERED:\n error_message = \"embedding_crop: {} not supported for {}\".format(\n self._config[\"embedding_crop\"], self._config[\"dataset_name\"])\n assert self._config[\n \"embedding_crop\"] == EmbeddingCrop.CENTER, error_message",
"def _check_config(self):",
"def test_valid_configuration(self):\n\n conf = [\n 'gasoline', '228i', 'model_luxury_line', 'silver', 'rims_384',\n 'tapistry_black', 'steptronic', 'smoker_package', 'tow_hook'\n ]\n\n attr_val_ids = self.get_attr_val_ids(conf)\n validation = self.cfg_tmpl.validate_configuration(attr_val_ids)\n self.assertTrue(validation, \"Valid configuration failed validation\")",
"def check_config(cfg):",
"def validate_config(self):\r\n c = self.config\r\n \r\n # Make sure that we have a database_path, and an image_path...\r\n assert 'database_path' in c\r\n assert 'image_path' in c\r\n # We should probably check if these paths exist and make them as well...\r\n \r\n # Set the default values.\r\n graph_draw_frequency = c['graph_draw_frequency']\r\n for period, interval in self.default_config['graph_draw_frequency'].iteritems():\r\n graph_draw_frequency.setdefault(period, interval)\r\n \r\n # A quick check to make sure that our port is an integer.\r\n c['httpd_port'] = int(c['httpd_port'])\r\n \r\n # Make sure that no duplicate IDs exist, and that the template exists as well.\r\n ids = set()\r\n for graph in c['graphs']:\r\n graph.setdefault('config', {})\r\n graph['config'].setdefault('periods', [])\r\n assert graph['id'] not in ids\r\n ids.add(graph['id'])\r\n assert(template_exists(graph['template']))",
"def valid_configuration(self):\n valid = True\n\n if (not self.__config.suffix()) and (self.__config.output_dir() == self.__config.input_dir()):\n print(\"ERROR: output_dir directory cannot be the same as input_dir with an empty suffix!\")\n valid = False\n if not self.__config.public_key():\n print(\"ERROR: public_key not set! Set it through 'pdfworkshop config public_key <your_key>'. \"\n \"A free API key can be obtained from https://developer.ilovepdf.com/\")\n valid = False\n return valid",
"def validate_config(self):\n config = self.config\n\n # which doc types are enabled\n need_at_least_one = ['GOOGLE_DRIVE_ENABLED','GITHUB_ENABLED','DISQUS_ENABLED']\n found_one = False\n for n in need_at_least_one:\n if n in config.keys():\n found_one = True\n break\n if not found_one:\n raise Exception(\"Error: need at least one of: %s\"%(\", \".join(need_at_least_one)))\n\n if 'GOOGLE_DRIVE_ENABLED' in config.keys():\n if config['GOOGLE_DRIVE_ENABLED']:\n if 'GOOGLE_DRIVE_CREDENTIALS_FILE' in config.keys():\n if os.path.basename(config['GOOGLE_DRIVE_CREDENTIALS_FILE']) != 'credentials.json':\n raise Exception(\"Error: the file specified with GOOGLE_DRIVE_CREDENTIALS_FILE in the config file must have a filename of 'credentials.json'\")",
"def validate(self):\n if not self.hmc_address:\n raise ValueError(\"No HMC address provided\")\n if (not self.credentials['user']\n or not self.credentials['password']):\n raise ValueError(\n \"No CPC credentials set. Please provide 'admin-user' and \"\n \"'admin-password' in hypervisor profile\")\n if not self.boot_options:\n raise ValueError(\n \"No CPC boot method configured. Please set \"\n \"'liveimg-insfile-url' in CPC profile parameters or \"\n \"attach a volume with live image\")",
"def _validate(self, config):\n assert isinstance(config, BaseConfig), \\\n \"Configuration should be instance of `BaseConfig`, but given {}\".format(type(config))",
"def check_configuration(self, configuration):\n super(Pixiv_bot, self).check_configuration(configuration)",
"def check_config(config):\n\n # Check config\n assert config.dataset in [\"conll04\", \"ace05\"]\n assert config.train_mode in [\"train\", \"train+dev\"]\n\n for emb in config.embedder:\n assert emb in [\"word\", \"char\", \"bert-base\", \"bert-large\"], emb\n\n if \"char\" in config.embedder:\n assert config.char_pool in [\"last\", \"avg\", \"max\"]\n\n if config.encoder is not None:\n assert config.encoder == \"bilstm\"\n\n for task in config.tasks:\n assert task in [\"ner\", \"re\"]\n\n assert config.ner_decoder in [\"iobes\", \"span\"]\n\n if \"cuda\" in config.device:\n assert torch.cuda.is_available(), \"CUDA not available\"",
"def validate(self, config_json):\n pass",
"def validate_config(config):\n # check if paths are valid\n check_paths = {\n 'data_path': r'data$',\n 'master_list_path': r'master_list\\.csv$',\n 'duplicate_list_path': r'duplicate_list\\.csv$',\n 'log_path': r'data[\\\\\\/]jobfunnel.log$',\n 'filter_list_path': r'data[\\\\\\/]filter_list\\.json$',\n }\n\n for path, pattern in check_paths.items():\n if not re.search(pattern, config[path]):\n raise ConfigError(path)\n # check if the provider list only consists of supported providers\n if not set(config['providers']).issubset(PROVIDERS):\n raise ConfigError('providers')\n\n # check validity of region settings\n validate_region(config['search_terms']['region'])\n\n # check validity of delay settings\n validate_delay(config['delay_config'])\n\n # check the validity of max_listing_days settings\n if(config['max_listing_days'] is not None and config['max_listing_days'] < 0):\n raise ConfigError('max_listing_days')",
"def _verify_options(config: configuration.Config) -> None:\n\n if not config.config['species']:\n log._logger.error('You must specify a species (-s/--species)')\n exit(1)\n\n if config.config['hpc'] and config.config['local']:\n log._logger.error('You can only use one of the config options (hpc/local)')\n exit(1)\n\n if config.config['hpc'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (hpc/custom)')\n exit(1)\n\n if config.config['local'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (local/custom)')\n exit(1)\n\n if (not config.config['hpc']) and\\\n (not config.config['local']) and\\\n (not config.config['custom']):\n log._logger.error(\n 'You must specify a compute cluster environment (hpc/local/custom)'\n )\n exit(1)\n\n if config.config['custom'] and (not config.config['scheduler']):\n log._logger.error(\n 'The custom compute environment requires a scheduler address to be set'\n )\n exit(1)",
"def validate_config_dict(self):\n config_options = [\"pipeline_name\",\n \"num_processors\",\n \"num_sessions_at_once\",\n \"available_memory\",\n \"cluster_system\",\n \"output_directory\",\n \"working_directory\",\n \"template_head_for_anat\",\n \"exclude_zeros\",\n \"start_idx\",\n \"stop_idx\",\n \"write_report\",\n \"write_graph\",\n \"write_all_outputs\",\n \"upload_to_s3\",\n \"bucket_prefix\",\n \"bucket_out_prefix\",\n \"local_prefix\",\n \"bucket_name\",\n \"creds_path\"]\n invalid = []\n for param in self._config.keys():\n if param not in config_options:\n invalid.append(param)\n if len(invalid) > 0:\n err = \"\\n[!] The following parameters in your configuration \" \\\n \"file are not recognized. Double-check the pipeline \" \\\n \"configuration template.\\n\"\n err += \"\\n\".join([x for x in invalid])\n raise Exception(err)\n else:\n return 0",
"def check_configs(self):\n\n pass",
"def validate_config(self):\n reference = data_file(\"../config/template/minimum_aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._step_name:\n reference = data_file(\"../config/template/minimum_step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._dag_name:\n reference = data_file(\"../config/template/minimum_dag.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)",
"def config_sanity_check(self):\n if 'name' not in self.config:\n raise EventifyConfigError(\n \"\"\"Required configuration parameter missing!\n Please configure \"name\" as a string in your\n configuration.\"\"\")\n\n if 'publish_topic' not in self.config:\n raise EventifyConfigError(\n \"\"\"Required configuration parameter missing!\n Please configure \"public_topic\" as an object\n in your configuration.\"\"\")\n\n if 'topic' not in self.config['publish_topic']:\n raise EventifyConfigError(\n \"\"\"Required configuration parameter missing!\n Please configure \"topic\" as a key in your\n \"public_topic object.\"\"\")",
"def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,\n self.memory,\n self.accelerator_type,\n self.accelerator_count)",
"def config_validate(ctx, **kwargs):\n # Validates pf9-express config file and obtains Auth Token\n #Load Active Config into ctx\n GetConfig(ctx).GetActiveConfig()\n #Get Token\n token = GetToken().get_token_v3(\n ctx.params[\"du_url\"],\n ctx.params[\"du_username\"],\n ctx.params[\"du_password\"],\n ctx.params[\"du_tenant\"] )\n if token is not None:\n click.echo('Config Validated!')\n click.echo('Token: %s' % token)\n else:\n click.echo('Config Validation Failed!')"
] |
[
"0.70886606",
"0.6907376",
"0.6907376",
"0.6905999",
"0.6546647",
"0.64142686",
"0.62825555",
"0.6279739",
"0.62622815",
"0.62202287",
"0.6197608",
"0.6177871",
"0.61492246",
"0.61078686",
"0.608677",
"0.6083871",
"0.6074787",
"0.5983777",
"0.5934141",
"0.58738196",
"0.5852512",
"0.58453107",
"0.5838472",
"0.58332884",
"0.5832556",
"0.58255905",
"0.58018327",
"0.5789037",
"0.5770706",
"0.5770072"
] |
0.7257929
|
0
|
Set pulse on hit rule on driver. Pulses a driver when a switch is hit. When the switch is released the pulse continues. Typically used for autofire coils such as pop bumpers.
|
def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
raise NotImplementedError
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n raise NotImplementedError",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pulley(\"down\")\n time.sleep(2.85)\n self.pulley(\"stop\")",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def setInternalPulser(self,pulserEnable,pulseHeight):\n pass",
"def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")",
"def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)",
"def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)",
"def pulse(self, coil, milliseconds):\n self.log.info(\"RASPDriver.Pulse(%s %s, %d ms)\" %\n (coil.config['label'], coil.hw_driver.number, milliseconds))\n self.platform.communicator.driver_pulse(coil.hw_driver.number, milliseconds)\n return milliseconds",
"def servo_set_target(ch, pulse):\n\n # Pulse number is 4x pulse width (in microseconds)\n p_num = 4 * int(pulse)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_target, ch, p_num)",
"def setPulseDivisor(self, pd, motor=0): \n\t\tcmd = 'SAP'\t # Get axis parameter\n\t\ttype = 154\t\t # Microstep resolution\n\t\tvalue = int(pd)\t\t # Microstep resolution \n\t\tself.sendCommand(cmd, type, motor, value)\n\t\tdata = self.receiveData()\n\t\tif data.status != 100:\n\t\t\tif self.errorDict.has_key(data.status):\n\t\t\t\traise MotorError(self.errorDict[data.status])\n\t\t\telif data.status == None:\n\t\t\t\traise MotorError('Incorrect controller response, trying to reconnect')\n\t\t\telse:\n\t\t\t\traise MotorError(''.join(('Unknown error, ', str(data.status))))",
"def trigger(self):\n GPIO.output(self.trigger_pin, 1)\n time.sleep(10/1000000)\n GPIO.output(self.trigger_pin, 0)",
"def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")"
] |
[
"0.73512125",
"0.72260875",
"0.7224152",
"0.7212513",
"0.71062016",
"0.6979767",
"0.69002014",
"0.6871125",
"0.64783454",
"0.64376444",
"0.63576806",
"0.6283544",
"0.6267186",
"0.6244172",
"0.6027351",
"0.5870278",
"0.5840956",
"0.57337576",
"0.5722434",
"0.5720375",
"0.56807226",
"0.5677533",
"0.5635341",
"0.56084234",
"0.5588931",
"0.55049753",
"0.5331648",
"0.53284687",
"0.5313669",
"0.52608967"
] |
0.7381012
|
0
|
Set pulse on hit and release rule to driver. Pulses a driver when a switch is hit. When the switch is released the pulse is canceled. Typically used on the main coil for dual coil flippers without eos switch.
|
def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
raise NotImplementedError
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pulley(\"down\")\n time.sleep(2.85)\n self.pulley(\"stop\")",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)",
"def setInternalPulser(self,pulserEnable,pulseHeight):\n pass",
"def teleopPeriodic(self):\n\n try:\n if self.debounce(6, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n \n try:\n if self.debounce(2) or self.debounce(1, gamepad=True):\n self.boulder_automation.toggle_intake_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(7):\n self.chassis.toggle_field_oriented()\n except:\n self.onException()\n\n try:\n if self.debounce(8):\n enabled = self.heading_hold_pid.isEnable()\n self.heading_hold_pid.disable()\n self.bno055.resetHeading()\n self.heading_hold_pid.setSetpoint(constrain_angle(self.bno055.getAngle()))\n self.heading_hold_pid.reset()\n if enabled:\n self.heading_hold_pid.enable()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10):\n self.chassis.toggle_range_holding(self.chassis.correct_range)\n except:\n self.onException()\n\n try:\n if self.debounce(1) or self.debounce(8, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(9):\n self.chassis.toggle_heading_hold()\n except:\n self.onException()\n\n try:\n if self.debounce(4):\n self.defeater.up()\n except:\n self.onException()\n\n try:\n if self.debounce(5):\n self.shooter.stop()\n self.intake.stop()\n except:\n self.onException()\n\n try:\n if self.debounce(3):\n #self.chassis.range_setpoint = self.chassis.correct_range\n #self.chassis.distance_pid.enable()\n # self.shooter.start_shoot()\n self.chassis.range_setpoint = 0.0\n self.chassis.track_vision = False\n self.chassis.toggle_range_holding()\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\n\n try:\n if self.debounce(6):\n self.defeater.down()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.shooter.backdrive()\n self.intake.backdrive()\n except:\n self.onException()\"\"\"\n\n try:\n if self.joystick.getPOV() != -1:\n self.chassis.heading_hold = True\n direction = 0.0\n if self.joystick.getPOV() == 0:\n # shooter centre goal\n direction = math.pi\n elif self.joystick.getPOV() == 90:\n # shooter right goal\n direction = math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 270:\n # shooter left goal\n direction = -math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 180:\n direction = 0.0\n self.chassis.set_heading_setpoint(direction)\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(11) or self.gamepad.getRawButton(2):\n self.chassis.field_oriented = False \n else:\n self.chassis.field_oriented = True\n except:\n self.onException()\n\n try:\n if self.gamepad.getRawButton(3):\n self.boulder_automation.engage(\"backdrive_manual\")\n elif self.boulder_automation.current_state == \"backdrive_manual\":\n self.boulder_automation.done()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(1, gamepad=True):\n self.chassis.zero_encoders()\n self.chassis.distance_pid.setSetpoint(1.2)\n self.chassis.distance_pid.enable()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10, gamepad=True):\n self.vision.write_image()\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(12):\n self.joystick_rate = 0.6\n else:\n self.joystick_rate = 0.4\n except:\n self.onException()\n\n self.chassis.inputs = [-rescale_js(self.joystick.getY(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getX(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getZ(), 
deadzone=0.2, exponential=15.0, rate=self.joystick_rate),\n (self.joystick.getThrottle() - 1.0) / -2.0\n ]\n for input in self.chassis.inputs[0:3]:\n if input != 0.0:\n # Break out of auto if we move the stick\n self.chassis.distance_pid.disable()\n self.chassis.range_setpoint = None\n self.chassis.track_vision = False\n # self.chassis.field_oriented = True\n self.putData()",
"def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)",
"def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)",
"def motorswitch(self, bo, pin, t):\n self.app.processEvents()\n if(self.win.getStopped() == True):\n self.win.updatelabel2(\"Jingle button was clicked.\\nClick another!\")\n return\n while self.win.getPaused() == True:\n self.app.processEvents() # Not really too sure if this line is needed. NEEDS TESTING\n self.win.updatelabel2(\"Jingle Song Paused!\\nChoose A new Song or Play to Resume!\")\n time.sleep(.1)\n GPIO.output(pin, bo)\n time.sleep(t)",
"def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")",
"def event_switch_1_off(self, ioname, iovalue):\n if self.main_state:\n self.rpi.io.relay_1.value = False\n self.state_1_on = False\n self.door_count = self.door_count + 1\n # self.door_outside_time_closed = time.time()\n # self.door_outside_delta_time_open = self.door_outside_time_closed - self.door_outside_time_opened\n # self.door_outside_sum_time_open = self.door_outside_sum_time_open + self.door_outside_delta_time_open\n # print(\"outside delta: \", int(self.door_outside_delta_time_open),\n # \"sec outside sum: \", int(self.door_outside_sum_time_open), ' sec')\n self.door_outside_sum_time_open = \\\n self.door_outside_sum_time_open + time.time() - self.door_outside_time_opened\n self.trigger = self.trigger_door_outside_close",
"def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")",
"def servo_set_target(ch, pulse):\n\n # Pulse number is 4x pulse width (in microseconds)\n p_num = 4 * int(pulse)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_target, ch, p_num)",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")"
] |
[
"0.7406781",
"0.7117068",
"0.71164155",
"0.69559455",
"0.6920032",
"0.6804113",
"0.6787851",
"0.67864007",
"0.6751273",
"0.66386086",
"0.66331583",
"0.64953303",
"0.59653443",
"0.5928787",
"0.59168535",
"0.5900597",
"0.5824056",
"0.5651535",
"0.5548163",
"0.5493299",
"0.53989303",
"0.5390261",
"0.5337012",
"0.53300136",
"0.5328815",
"0.5300045",
"0.52968746",
"0.52897394",
"0.52859384",
"0.5234577"
] |
0.7318522
|
1
|
Set pulse on hit and enable and release rule on driver. Pulses a driver when a switch is hit. Then enables the driver (may be with pwm). When the switch is released the pulse is canceled and the driver gets disabled. Typically used for single coil flippers.
|
def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
raise NotImplementedError
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")",
"def enable(self, coil):\n self.log.info(\"RASPDriver.Enable(%s %s)\" % (coil.config['label'], coil.hw_driver.number))\n self.platform.communicator.driver_enable(coil.hw_driver.number)\n pass",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pulley(\"down\")\n time.sleep(2.85)\n self.pulley(\"stop\")",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def enable_charge_pump(enable):\n send_command(0x8D)\n if enable:\n send_command(0x14)\n else:\n send_command(0x10)",
"def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()",
"def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)",
"def setInternalPulser(self,pulserEnable,pulseHeight):\n pass",
"def setOn(self, command):\r\n self.setDriver('ST', 1)",
"def cmndWithDriver(self, commands, time):\n\t\tif self.usesDriver: #don't need to reserve driver..\n\t\t\tcommands.append((hold, self, time))\n\t\t\treturn commands\n\t\telse:\n\t\t\tprio=1\n\t\t\tfor pH in self.plantHeads:\n\t\t\t\tif pH.usesDriver: #set a high priority.\n\t\t\t\t\tprio = 2\n\t\t\tcommands.extend([(request, self, self.driver, prio)])\n\t\t\tself.usesDriver=True #this means that a reservation from the driver has been sent, not that he currently has the attention here.\n\t\t\tif prio==1: #we are \"taking the driver\" from the other device, not from our own heads\n\t\t\t\tswitchTime=self.m.times['switchFocus']\n\t\t\t\tif self.driver.isIdle(): #check for how long he's been idle\n\t\t\t\t\tswitchTime-=self.driver.idleTime()\n\t\t\t\t\tif switchTime<0: switchTime=0\n\t\t\t\tcommands.extend([(hold, self, switchTime)]) #add time to switch focus\n\t\t\t\tself.m.timeConsumption['switchFocus']+=switchTime\n\t\t\tcommands.extend([(hold, self, time)])\n\t\treturn commands",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)",
"def enable(self):\n self.switch.enable()\n self._enabled = True",
"def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")"
] |
[
"0.72877467",
"0.7178663",
"0.71179587",
"0.7099288",
"0.6991583",
"0.6966409",
"0.6957039",
"0.6921401",
"0.6898555",
"0.6874787",
"0.676784",
"0.675397",
"0.6602575",
"0.63033146",
"0.6220044",
"0.6219758",
"0.61118585",
"0.60108113",
"0.59594446",
"0.59371346",
"0.582267",
"0.57970077",
"0.5726565",
"0.5712483",
"0.56325513",
"0.5563083",
"0.5447496",
"0.544143",
"0.5441389",
"0.5438832"
] |
0.7253777
|
1
|
Set pulse on hit and release and disable rule on driver. Pulses a driver when a switch is hit. When the switch is released the pulse is canceled and the driver gets disabled. When the eos_switch is hit the pulse is canceled and the driver becomes disabled. Typically used on the main coil for dualwound coil flippers with eos switch.
|
def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,
eos_switch: SwitchSettings, coil: DriverSettings,
repulse_settings: Optional[RepulseSettings]):
raise NotImplementedError
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")",
"def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")",
"def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle",
"def disable_radio(self):\n self.acquire_response(b'AT*R0')",
"def event_switch_1_off(self, ioname, iovalue):\n if self.main_state:\n self.rpi.io.relay_1.value = False\n self.state_1_on = False\n self.door_count = self.door_count + 1\n # self.door_outside_time_closed = time.time()\n # self.door_outside_delta_time_open = self.door_outside_time_closed - self.door_outside_time_opened\n # self.door_outside_sum_time_open = self.door_outside_sum_time_open + self.door_outside_delta_time_open\n # print(\"outside delta: \", int(self.door_outside_delta_time_open),\n # \"sec outside sum: \", int(self.door_outside_sum_time_open), ' sec')\n self.door_outside_sum_time_open = \\\n self.door_outside_sum_time_open + time.time() - self.door_outside_time_opened\n self.trigger = self.trigger_door_outside_close",
"def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pulley(\"down\")\n time.sleep(2.85)\n self.pulley(\"stop\")",
"def cycle(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self.off()\n time.sleep(2) # Small delay before calling power_on\n self.on()",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def event_switch_2_off(self, ioname, iovalue):\n if self.main_state:\n self.rpi.io.relay_2.value = False\n self.state_2_on = False\n self.door_count = self.door_count + 1\n # self.door_inside_time_closed = time.time()\n # self.door_inside_delta_time_open = self.door_inside_time_closed - self.door_inside_time_opened\n # self.door_inside_sum_time_open = self.door_inside_sum_time_open + self.door_inside_delta_time_open\n # print(\"inside delta: \", int(self.door_inside_delta_time_open),\n # \"sec inside sum: \", int(self.door_inside_sum_time_open), ' sec')\n self.door_inside_sum_time_open = self.door_inside_sum_time_open + time.time() - self.door_inside_time_opened\n self.trigger = self.trigger_door_inside_close",
"def event_main_off(self, ioname, iovalue):\n # Switch on/off output O_1\n self.rpi.io.main_relay.value = False\n self.rpi.io.relay_1.value = False\n self.rpi.io.relay_2.value = False\n self.main_state = False\n self.system_off_time = time.time()\n self.system_delta_time = self.system_off_time - self.system_on_time\n self.system_sum_time = self.system_sum_time + self.system_delta_time\n print(\"system delta: \", int(self.system_delta_time), \"sec system sum: \", int(self.system_sum_time), ' sec')\n\n if self.state_1_on:\n self.door_outside_time_closed = time.time()\n self.door_outside_delta_time_open = self.door_outside_time_closed - self.door_outside_time_opened\n self.door_outside_sum_time_open = self.door_outside_sum_time_open + self.door_outside_delta_time_open\n elif self.state_2_on:\n self.door_inside_time_closed = time.time()\n self.door_inside_delta_time_open = self.door_inside_time_closed - self.door_inside_time_opened\n self.door_inside_sum_time_open = self.door_inside_sum_time_open + self.door_inside_delta_time_open\n self.trigger = self.trigger_system_off_trigger",
"def _led_disable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.HIGH)",
"def enable(self, coil):\n self.log.info(\"RASPDriver.Enable(%s %s)\" % (coil.config['label'], coil.hw_driver.number))\n self.platform.communicator.driver_enable(coil.hw_driver.number)\n pass",
"def setInternalPulser(self,pulserEnable,pulseHeight):\n pass",
"def teleopPeriodic(self):\n\n try:\n if self.debounce(6, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n \n try:\n if self.debounce(2) or self.debounce(1, gamepad=True):\n self.boulder_automation.toggle_intake_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(7):\n self.chassis.toggle_field_oriented()\n except:\n self.onException()\n\n try:\n if self.debounce(8):\n enabled = self.heading_hold_pid.isEnable()\n self.heading_hold_pid.disable()\n self.bno055.resetHeading()\n self.heading_hold_pid.setSetpoint(constrain_angle(self.bno055.getAngle()))\n self.heading_hold_pid.reset()\n if enabled:\n self.heading_hold_pid.enable()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10):\n self.chassis.toggle_range_holding(self.chassis.correct_range)\n except:\n self.onException()\n\n try:\n if self.debounce(1) or self.debounce(8, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(9):\n self.chassis.toggle_heading_hold()\n except:\n self.onException()\n\n try:\n if self.debounce(4):\n self.defeater.up()\n except:\n self.onException()\n\n try:\n if self.debounce(5):\n self.shooter.stop()\n self.intake.stop()\n except:\n self.onException()\n\n try:\n if self.debounce(3):\n #self.chassis.range_setpoint = self.chassis.correct_range\n #self.chassis.distance_pid.enable()\n # self.shooter.start_shoot()\n self.chassis.range_setpoint = 0.0\n self.chassis.track_vision = False\n self.chassis.toggle_range_holding()\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\n\n try:\n if self.debounce(6):\n self.defeater.down()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.shooter.backdrive()\n self.intake.backdrive()\n except:\n self.onException()\"\"\"\n\n try:\n if self.joystick.getPOV() != -1:\n self.chassis.heading_hold = True\n direction = 0.0\n if self.joystick.getPOV() == 0:\n # shooter centre goal\n direction = math.pi\n elif self.joystick.getPOV() == 90:\n # shooter right goal\n direction = math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 270:\n # shooter left goal\n direction = -math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 180:\n direction = 0.0\n self.chassis.set_heading_setpoint(direction)\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(11) or self.gamepad.getRawButton(2):\n self.chassis.field_oriented = False \n else:\n self.chassis.field_oriented = True\n except:\n self.onException()\n\n try:\n if self.gamepad.getRawButton(3):\n self.boulder_automation.engage(\"backdrive_manual\")\n elif self.boulder_automation.current_state == \"backdrive_manual\":\n self.boulder_automation.done()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(1, gamepad=True):\n self.chassis.zero_encoders()\n self.chassis.distance_pid.setSetpoint(1.2)\n self.chassis.distance_pid.enable()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10, gamepad=True):\n self.vision.write_image()\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(12):\n self.joystick_rate = 0.6\n else:\n self.joystick_rate = 0.4\n except:\n self.onException()\n\n self.chassis.inputs = [-rescale_js(self.joystick.getY(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getX(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getZ(), 
deadzone=0.2, exponential=15.0, rate=self.joystick_rate),\n (self.joystick.getThrottle() - 1.0) / -2.0\n ]\n for input in self.chassis.inputs[0:3]:\n if input != 0.0:\n # Break out of auto if we move the stick\n self.chassis.distance_pid.disable()\n self.chassis.range_setpoint = None\n self.chassis.track_vision = False\n # self.chassis.field_oriented = True\n self.putData()",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)"
] |
[
"0.75663674",
"0.7497523",
"0.7221922",
"0.7147664",
"0.71429825",
"0.7118888",
"0.70629704",
"0.6752796",
"0.66929615",
"0.6688346",
"0.6648492",
"0.6215986",
"0.6196026",
"0.5947612",
"0.5741585",
"0.57392573",
"0.57115555",
"0.5514564",
"0.5501918",
"0.5449907",
"0.5414745",
"0.53975683",
"0.5368617",
"0.5358023",
"0.53063273",
"0.53054327",
"0.5296112",
"0.52941275",
"0.5265513",
"0.5258373"
] |
0.7699924
|
0
|
Set pulse on hit and enable and release and disable rule on driver. Pulses a driver when a switch is hit, then enables the driver (possibly with PWM). When the switch is released, the pulse is canceled and the driver is disabled. When the eos_switch is hit, the pulse is canceled and the driver becomes enabled (likely with PWM). Typically used on the coil for single-wound coil flippers with an EOS switch.
|
def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,
eos_switch: SwitchSettings, coil: DriverSettings,
repulse_settings: Optional[RepulseSettings]):
raise NotImplementedError
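For illustration only, a minimal sketch of a platform-specific override written in the style of the communicator.rule_add() implementations listed among this entry's negatives; the rule type number and the eos_sw_id keyword are assumptions, not a documented API.

def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, eos_switch,
                                                             coil, repulse_settings):
    # Hypothetical: pulse and hold on hit, disable on release, drop to hold power on EOS.
    self.communicator.rule_add(5, coil.hw_driver.number, enable_switch.hw_switch.number,
                               eos_sw_id=eos_switch.hw_switch.number,
                               duration=self._get_pulse_ms_value(coil))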
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")",
"def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle",
"def enable(self, coil):\n self.log.info(\"RASPDriver.Enable(%s %s)\" % (coil.config['label'], coil.hw_driver.number))\n self.platform.communicator.driver_enable(coil.hw_driver.number)\n pass",
"def cycle(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self.off()\n time.sleep(2) # Small delay before calling power_on\n self.on()",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def enable_charge_pump(enable):\n send_command(0x8D)\n if enable:\n send_command(0x14)\n else:\n send_command(0x10)",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pulley(\"down\")\n time.sleep(2.85)\n self.pulley(\"stop\")",
"def enable_motor():\n print('Enabling motor')\n start_motor = '{\"id\" : \"Motor1\", \"enabled\" : \"1\"}'\n SERIAL_PARENT.send(start_motor)\n OUTGOING.append(start_motor)",
"def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")",
"def enable_radio(self):\n self.acquire_response(b'AT*R1')",
"def setEnableCondition(*args):",
"def setEnableCondition(*args):",
"def setEnableCondition(*args):",
"def setEnableCondition(*args):",
"def setEnableCondition(*args):"
] |
[
"0.7572105",
"0.7389231",
"0.72445816",
"0.72229266",
"0.71529543",
"0.70727235",
"0.7035159",
"0.680837",
"0.6792148",
"0.6716168",
"0.66067076",
"0.6274268",
"0.62019145",
"0.59817576",
"0.5790895",
"0.57747394",
"0.57251734",
"0.5636799",
"0.55792576",
"0.5506547",
"0.5428595",
"0.5386199",
"0.5376262",
"0.5368671",
"0.5349435",
"0.53207445",
"0.53207445",
"0.53207445",
"0.53207445",
"0.53207445"
] |
0.75235784
|
1
|
Add a callback for when the given future has been resolved.
|
def on_future(self, _future, _callback, *_args, **_kwargs):
callback = functools.partial(self._do_on_future, _callback, _args, _kwargs)
# Create timeout handler and regular handler.
self._future_timeouts[_future] = self.schedule_in(self.future_timeout, callback)
        _future.add_done_callback(callback)
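For illustration only: a usage sketch assuming `loop` is an instance of this scheduler class and `handle_result` is a hypothetical callback.

fut = asyncio.Future()
# handle_result is invoked (via the internal _do_on_future wrapper) once fut resolves,
# or when future_timeout expires first.
loop.on_future(fut, handle_result, "tag")
fut.set_result(42)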
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_done_callback(self, fn):\n if self.done():\n fn(self)\n else:\n self._callbacks.append(fn)",
"def add_done_callback(self, callback):\n with self._done_condition:\n if self._state in [PENDING, RUNNING]:\n self._done_callbacks.append(callback)\n return\n try:\n callback(self)\n except Exception:\n print('exception calling callback')",
"def on_job_done(self, future):\n self.futures.append(future)",
"def add_done_callback(self, fn):\n if self.done():\n # self._loop.call_soon(fn,self)\n call_soon(fn, self)\n else:\n self._callbacks.append(fn)",
"def set_future(self, future):\r\n self._future = future",
"def add(self, future):\n self._scheduled.add(future)",
"def add_callback(self, done_cb: Callable[[], None] = None) -> None:\n\n if done_cb is not None:\n self.callbacks.append(done_cb)",
"def _add_timeout(self, future, timeout):\n def future_timeout():\n if future.done():\n # future already resolved, do nothing\n return\n\n # raise EAGAIN\n future.set_exception(_zmq.Again())\n self._call_later(timeout, future_timeout)",
"def then(self, callback):\n if self.running: # We might want a mutex here...\n return self.future.then(callback)\n else:\n callback(self)\n # return something? (to see when we have a testcase for this...)",
"def _bind_future(function):\n return lambda container: container.bind_future(function)",
"def add_callback(self, fn):\n self._callbacks.append(fn)\n return self",
"def _callbackChooser(self, future):\n assert(self.done())\n try:\n self._resultFuture.result()\n except TimeoutError:\n for c in self._callbackTimeout:\n c()\n except CancelledError:\n for c in self._callbackCancelled:\n c()\n if self._callbackSuccess:\n for c in self._callbackSuccess:\n c()",
"def register_async_callback(self, async_callback):\n self._async_callbacks.append(async_callback)",
"def andThen(self, callback):\n if self.running: # We might want a mutex here...\n return self.future.andThen(callback)\n else:\n callback(self.future.value()) #?\n # return something? (to see when we have a testcase for this...)",
"def add_callback(self, callback):\n if callback is not None:\n self.callbacks.append(callback)",
"def add_callback(self, callback) -> None:\r\n self._callbacks.append(callback)",
"def _done_handler(base_future):\n if not base_future.done():\n # this should never ever be true.\n # having this code here just to avoid infinite timeout\n new_future.cancel()\n return\n\n if base_future.cancelled():\n new_future.cancel()\n return\n\n try:\n result = base_future.result()\n if on_fulfilled:\n result = on_fulfilled(result)\n\n # Per Promise/A+ spec, if return value is a Promise,\n # our promise must adapt the state of the return value Promise\n if isinstance(result, Future):\n # this is the only outcome where we don't\n # set new_future's result in this code and\n # defer resolution of new_future to outcome of return value Promise resolution\n new_future._chain_to_another_future(result)\n else:\n new_future.set_result(result)\n return\n except BaseException:\n # note, that exception may come from self.result()\n # and from on_fulfilled(result) calls.\n ex, trace_back = sys.exc_info()[1:]\n if not on_rejected:\n new_future.set_exception_info(ex, trace_back)\n return\n else:\n try:\n result = on_rejected(ex)\n if isinstance(result, BaseException):\n raise result\n else:\n new_future.set_result(result)\n return\n except BaseException:\n ex, trace_back = sys.exc_info()[1:]\n new_future.set_exception_info(ex, trace_back)\n return",
"def add_callback(self, callback):\n\n self._callbacks.append(callback)",
"def register_callback(self, callback: Callable[[], None]) -> None:\r\n print(\"register callback called\")\r\n self._callbacks.add(callback)",
"def on_add(self, callback):\n self._add_callback = callback if callable(callback) else _void",
"def register_callback(self, callback):\n self.callbacks.add(callback)",
"def register_callback(self, callback):\n self._callbacks.append(callback)",
"def register_execution(in_progress, future, node):\n in_progress[future] = node",
"def add_analysis_callback(self, callback: Callable, **kwargs: Any):\n with self._job_futures.lock and self._analysis_futures.lock:\n # Create callback dataclass\n cid = uuid.uuid4().hex\n self._analysis_callbacks[cid] = AnalysisCallback(\n name=callback.__name__,\n callback_id=cid,\n )\n\n # Futures to wait for\n futs = self._job_futures.values() + self._analysis_futures.values()\n wait_future = self._monitor_executor.submit(\n self._wait_for_futures, futs, name=\"jobs and analysis\"\n )\n\n # Create a future to monitor event for calls to cancel_analysis\n def _monitor_cancel():\n self._analysis_callbacks[cid].event.wait()\n return False\n\n cancel_future = self._monitor_executor.submit(_monitor_cancel)\n\n # Add run analysis future\n self._analysis_futures[cid] = self._analysis_executor.submit(\n self._run_analysis_callback, cid, wait_future, cancel_future, callback, **kwargs\n )",
"def add(self, callback):\n self._callbacks += as_cb_list(callback)",
"def generate_callback_promise(self, _promise):\n def method(result):\n _promise.resolved(result)\n\n return method",
"def set_cb(resolver):\n\n global _resolver\n\n _resolver = resolver",
"def append_thread_callback(self, callback: collections.Callable) -> None:\n self._daemon_execute_callbacks.append(callback)",
"def dispatch_callback(self, callback):\n self.callback_queue.put(lambda: callback.func(*callback.args))",
"def register_callback(self, func):\n self.callback = func"
] |
[
"0.66994846",
"0.659744",
"0.65294266",
"0.6440456",
"0.6353762",
"0.6231799",
"0.61494493",
"0.609444",
"0.6058158",
"0.6058149",
"0.60350883",
"0.5960157",
"0.58498746",
"0.5810766",
"0.5760717",
"0.5746292",
"0.5733729",
"0.57289726",
"0.5723052",
"0.5710217",
"0.5679256",
"0.5675259",
"0.5631868",
"0.5630273",
"0.5596106",
"0.5582972",
"0.5580911",
"0.5539252",
"0.55131733",
"0.5509217"
] |
0.7139849
|
0
|
Schedule a callback to be run as soon as possible in this loop. Will return an opaque handle that can be passed to `unschedule` to unschedule the function.
|
def schedule(self, _callback, *_args, **_kwargs):
@coroutine
@functools.wraps(_callback)
def inner():
_callback(*_args, **_kwargs)
return self.schedule_async(inner())
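A brief usage sketch, assuming `loop` is an instance of this event-loop class:

handle = loop.schedule(print, "runs on the next loop iteration")
# The opaque handle can be handed back to unschedule() before it fires.
loop.unschedule(handle)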
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def schedule_callback(self, deferred, func, *args, **kwargs):\n self.increment_pc()\n saved_pc = self.program_counter[:]\n\n @wrapper(func)\n def callback_wrapper(*args, **kwargs):\n \"\"\"Wrapper for a callback which ensures a correct PC.\"\"\"\n try:\n current_pc = self.program_counter[:]\n self.program_counter[:] = saved_pc\n self.fork_pc()\n return func(*args, **kwargs)\n finally:\n self.program_counter[:] = current_pc\n\n return deferred.addCallback(callback_wrapper, *args, **kwargs)",
"def schedule_in(self, _when, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n _callback(*_args, **_kwargs)\n\n return self.schedule_async(inner())",
"def schedule_async_in(self, _when, _callback):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n yield from _callback\n\n return self.schedule_async(inner())",
"def _run(self):\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception: # pylint: disable=W0703\n logging.error(\"Error in periodic callback\", exc_info=True)\n self._schedule_next()",
"def callback(self, fun: Callable[[Timer], None] | None, /) -> None:",
"def callback(self, fun: Callable[[Timer], None] | None, /) -> None:",
"def schedule(function_pointer: Callable, interval: float):\n pyglet.clock.schedule_interval(function_pointer, interval)",
"async def run_in(self, callback: Callable, delay: int, **kwargs) -> str:\n name = self.name\n self.logger.debug(\"Registering run_in in %s seconds for %s\", delay, name)\n # Support fractional delays\n i, d = divmod(float(delay), 1)\n exec_time = await self.get_now() + timedelta(seconds=int(i), microseconds=d * 1000000)\n handle = await self.AD.sched.insert_schedule(name, exec_time, callback, False, None, **kwargs)\n\n return handle",
"def schedule(self):\r\n n = self.next()\r\n if n is not None:\r\n if self.clock:\r\n self.cl = self.clock.callLater(n, self.run)\r\n else:\r\n self.cl = core.call_later(n, self.run)\r\n else:\r\n self.cl = None",
"def schedule_call_global(self, seconds, cb, *args, **kw):\n t = timer.Timer(seconds, cb, *args, **kw)\n self.add_timer(t)\n return t",
"def schedule_once(function_pointer: Callable, delay: float):\n pyglet.clock.schedule_once(function_pointer, delay)",
"def schedule_async_periodically(self, _interval, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n while True:\n yield from asyncio.sleep(_when)\n ret = yield from _callback(*_args, **_kwargs)\n if ret is False:\n break\n\n return self.schedule_async(inner())",
"def thread_it(self, callback):\n\n def function():\n self.acquire_event_lock()\n callback()\n # threading.Thread(target=callback).start()\n self.release_event_lock()\n\n return function",
"def schedule_later(self, hz: float, coroutine_function, priority, task_id, *args, **kwargs):\n ran_once = False\n\n async def call_later():\n nonlocal ran_once\n if ran_once:\n await coroutine_function(*args, **kwargs)\n else:\n await _yield_once()\n ran_once = True\n\n return self.schedule(hz, call_later, priority, task_id)",
"def loop(self, function, *args, **kwargs):\n loop = functools.partial(function, *args, **kwargs)\n timer = _Timer(self, loop, True)\n self._callbacks.append(timer)\n\n return timer",
"def schedule_async(self, _callback):\n @coroutine\n @functools.wraps(_callback)\n def inner():\n try:\n return (yield from _callback)\n except (GeneratorExit, asyncio.CancelledError):\n raise\n except:\n traceback.print_exc()\n\n task = asyncio.ensure_future(inner())\n self._tasks.append(task)\n return task",
"def schedule_periodically(self, _interval, _callback, *_args, **_kwargs):\n if isinstance(_interval, datetime.timedelta):\n _interval = _interval.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n while True:\n yield from asyncio.sleep(_when)\n ret = _callback(*_args, **_kwargs)\n if ret is False:\n break\n\n return self.schedule_async(inner())",
"def repeat_every(seconds, fn):\n def wrapper(scheduler):\n try:\n fn()\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n except:\n print('Error executing function')\n\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n scheduler.run()",
"def run_with(self, func):\n @coroutine\n @functools.wraps(func)\n def inner():\n yield from func\n self._unschedule_all()\n self.loop.run_until_complete(asyncio.ensure_future(inner()))",
"def _schedule_next(self):\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)",
"def schedule_call_local(self, seconds, cb, *args, **kw):\n t = timer.LocalTimer(seconds, cb, *args, **kw)\n self.add_timer(t)\n return t",
"def schedule_task(self, callback, delay=1.0, repeat=False, execute_now=False):\n task_name = str(uuid.uuid4())\n\n self.xmpp.schedule(task_name, delay, callback, repeat=repeat)\n\n return _generate_cancel_method(task_name, self.xmpp.scheduler)",
"def unschedule(function_pointer: Callable):\n pyglet.clock.unschedule(function_pointer)",
"def scheduleCallback(self, callback, *, atTimestamp=None, delay=None):\n if callback is None:\n self._logger.warning(\"Cannot scheduleCallback(None); discarding.\")\n # This would cause the event loop thread to terminate\n return\n\n if atTimestamp is not None and delay is not None:\n raise ValueError(\"atTimestamp and delay arguments cannot both have values.\")\n\n if delay is None:\n delay = 0.0\n\n if atTimestamp is None:\n atTimestamp = time.time() + (delay or 0.0)\n\n with self._lock:\n self._pendingTimedCallbacks.add((atTimestamp, callback))\n\n # if we put this on the front of the queue, we need to wake\n # the thread loop\n if self._pendingTimedCallbacks[0][0] == atTimestamp:\n written = os.write(self._generalWakePipe[1], b\" \")\n if written != 1:\n raise Exception(\"Internal Error: Failed to write to general wake pipe\")",
"def delay_s(\r\n self,\r\n callable,\r\n timeout = None,\r\n immediately = True,\r\n verify = False,\r\n wakeup = True\r\n ):\r\n\r\n # creates the next element tuple that is going to be scheduled according\r\n # to the definition provided to the method\r\n next = (callable, timeout, immediately, verify)\r\n\r\n # acquires the lock that controls the access to the delayed for next\r\n # tick list and then adds the callable to such list, please note that\r\n # the delayed (next) list is only going to be joined/merged with delay\r\n # operations and list on the next tick (through the merge operation)\r\n self._delayed_l.acquire()\r\n try: self._delayed_n.append(next)\r\n finally: self._delayed_l.release()\r\n\r\n # in case the wakeup flag is set this delay operation should have\r\n # been called from a different thread and the event loop should\r\n # awaken as soon as possible to handle the event\r\n if wakeup: self.wakeup()",
"def run():\n curr = getcurrent()\n _run_calls.append(curr)\n _scheduler_remove(curr)\n try:\n schedule()\n assert not _squeue\n finally:\n _scheduler_append(curr)",
"async def test_scheduled_task(self):\n ctx = MockContext(channel=self.text_channel, invoke=mock.MagicMock())\n\n await self.cog.silence.callback(self.cog, ctx, 5)\n\n args = (300, ctx.channel.id, ctx.invoke.return_value)\n self.cog.scheduler.schedule_later.assert_called_once_with(*args)\n ctx.invoke.assert_called_once_with(self.cog.unsilence, channel=ctx.channel)",
"def schedule(time: Time_t, callback: Callable, *args):\n t = _fillDate(time)\n now = dt.datetime.now(t.tzinfo)\n delay = (t - now).total_seconds()\n loop = getLoop()\n return loop.call_later(delay, callback, *args)",
"def callback(self, function, *args, **kwargs):\n callback = functools.partial(function, *args, **kwargs)\n timer = _Timer(self, callback, False)\n self._callbacks.append(timer)\n\n return timer",
"def async_schedule(self) -> None:\n # We want to reschedule the timer in the future\n # every time this is called.\n self._async_cancel_timer()\n self._timer = self._loop.call_later(self._timeout, self._async_execute)"
] |
[
"0.60958743",
"0.60066116",
"0.5989754",
"0.5935567",
"0.59139556",
"0.59139556",
"0.5760046",
"0.5744529",
"0.57212853",
"0.5630919",
"0.56135505",
"0.5577264",
"0.55720043",
"0.5535715",
"0.55142754",
"0.54844826",
"0.5457211",
"0.5456132",
"0.54463506",
"0.5429597",
"0.537691",
"0.5369958",
"0.5337711",
"0.5275566",
"0.5264581",
"0.52318937",
"0.5177849",
"0.5146005",
"0.51330173",
"0.5029413"
] |
0.70464694
|
0
|
Schedule a callback to be run as soon as possible after `when` seconds have passed. Will return an opaque handle that can be passed to `unschedule` to unschedule the function.
|
def schedule_in(self, _when, _callback, *_args, **_kwargs):
if isinstance(_when, datetime.timedelta):
_when = _when.total_seconds()
@coroutine
@functools.wraps(_callback)
def inner():
yield from asyncio.sleep(_when)
_callback(*_args, **_kwargs)
return self.schedule_async(inner())
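Usage sketch, assuming `loop` is an instance of this class; both plain seconds and datetime.timedelta values are accepted:

handle = loop.schedule_in(5, print, "about five seconds later")
handle = loop.schedule_in(datetime.timedelta(minutes=1), print, "about a minute later")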
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def schedule_async_in(self, _when, _callback):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n yield from _callback\n\n return self.schedule_async(inner())",
"def _transientSchedule(self, when, now):\n if not self.running:\n return\n if self.timer is not None:\n if self.timer.getTime() < when.asPOSIXTimestamp():\n return\n self.timer.cancel()\n delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp()\n\n # reactor.callLater allows only positive delay values. The scheduler\n # may want to have scheduled things in the past and that's OK, since we\n # are dealing with Time() instances it's impossible to predict what\n # they are relative to the current time from user code anyway.\n delay = max(_EPSILON, delay)\n self.timer = self.callLater(delay, self.tick)\n self.nextEventAt = when",
"def delay_s(\r\n self,\r\n callable,\r\n timeout = None,\r\n immediately = True,\r\n verify = False,\r\n wakeup = True\r\n ):\r\n\r\n # creates the next element tuple that is going to be scheduled according\r\n # to the definition provided to the method\r\n next = (callable, timeout, immediately, verify)\r\n\r\n # acquires the lock that controls the access to the delayed for next\r\n # tick list and then adds the callable to such list, please note that\r\n # the delayed (next) list is only going to be joined/merged with delay\r\n # operations and list on the next tick (through the merge operation)\r\n self._delayed_l.acquire()\r\n try: self._delayed_n.append(next)\r\n finally: self._delayed_l.release()\r\n\r\n # in case the wakeup flag is set this delay operation should have\r\n # been called from a different thread and the event loop should\r\n # awaken as soon as possible to handle the event\r\n if wakeup: self.wakeup()",
"def schedule_async_periodically(self, _interval, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n while True:\n yield from asyncio.sleep(_when)\n ret = yield from _callback(*_args, **_kwargs)\n if ret is False:\n break\n\n return self.schedule_async(inner())",
"async def run_in(self, callback: Callable, delay: int, **kwargs) -> str:\n name = self.name\n self.logger.debug(\"Registering run_in in %s seconds for %s\", delay, name)\n # Support fractional delays\n i, d = divmod(float(delay), 1)\n exec_time = await self.get_now() + timedelta(seconds=int(i), microseconds=d * 1000000)\n handle = await self.AD.sched.insert_schedule(name, exec_time, callback, False, None, **kwargs)\n\n return handle",
"async def test_scheduled_task(self):\n ctx = MockContext(channel=self.text_channel, invoke=mock.MagicMock())\n\n await self.cog.silence.callback(self.cog, ctx, 5)\n\n args = (300, ctx.channel.id, ctx.invoke.return_value)\n self.cog.scheduler.schedule_later.assert_called_once_with(*args)\n ctx.invoke.assert_called_once_with(self.cog.unsilence, channel=ctx.channel)",
"def scheduleCallback(self, callback, *, atTimestamp=None, delay=None):\n if callback is None:\n self._logger.warning(\"Cannot scheduleCallback(None); discarding.\")\n # This would cause the event loop thread to terminate\n return\n\n if atTimestamp is not None and delay is not None:\n raise ValueError(\"atTimestamp and delay arguments cannot both have values.\")\n\n if delay is None:\n delay = 0.0\n\n if atTimestamp is None:\n atTimestamp = time.time() + (delay or 0.0)\n\n with self._lock:\n self._pendingTimedCallbacks.add((atTimestamp, callback))\n\n # if we put this on the front of the queue, we need to wake\n # the thread loop\n if self._pendingTimedCallbacks[0][0] == atTimestamp:\n written = os.write(self._generalWakePipe[1], b\" \")\n if written != 1:\n raise Exception(\"Internal Error: Failed to write to general wake pipe\")",
"def scheduleAfter(self, dt, handler):\n self.schedule(self.currentTime + dt, handler)",
"def _schedule(self, when):\n sched = IScheduler(self.store)\n for scheduledAt in sched.scheduledTimes(self):\n # https://github.com/twisted/epsilon/issues/38\n if when._time < scheduledAt._time:\n sched.reschedule(self, scheduledAt, when)\n break\n else:\n sched.schedule(self, when)",
"def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)",
"def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)",
"def schedule(self, _callback, *_args, **_kwargs):\n @coroutine\n @functools.wraps(_callback)\n def inner():\n _callback(*_args, **_kwargs)\n\n return self.schedule_async(inner())",
"def defer(self, delay, function, *args, **kwargs):\n if delay <= 0:\n raise ValueError(\"Delay must be greater than 0 seconds.\")\n\n deferred = functools.partial(function, *args, **kwargs)\n timer = _Timer(self, deferred, False, delay, self.latest_poll_time + delay)\n bisect.insort(self._deferreds, timer)\n\n return timer",
"def later(cls, timeout, f, **kwargs):\n def x(*args, **kwargs):\n cls.sleep(timeout)\n return f(*args, **kwargs)\n return cls.new(x, **kwargs)",
"def sleep(duration):\n f = Future()\n IOLoop.current().call_later(duration, lambda: f.set_result(None))\n return f",
"def run_after_delay(delay_ms: float, callback: Callable[[], None]):\n heapq.heappush(\n _sorted_scheduled_events,\n _ScheduledEvent(\n time=pygame.time.get_ticks() + delay_ms, callback=callback\n ),\n )",
"def schedule_once(function_pointer: Callable, delay: float):\n pyglet.clock.schedule_once(function_pointer, delay)",
"def delay(delay=0.):\n def wrap(f):\n @wraps(f)\n def delayed(*args, **kwargs):\n timer = threading.Timer(delay, f, args=args, kwargs=kwargs)\n timer.start()\n return delayed\n return wrap",
"def schedule_periodically(self, _interval, _callback, *_args, **_kwargs):\n if isinstance(_interval, datetime.timedelta):\n _interval = _interval.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n while True:\n yield from asyncio.sleep(_when)\n ret = _callback(*_args, **_kwargs)\n if ret is False:\n break\n\n return self.schedule_async(inner())",
"def call_later(self, fn, timeout_millis):\n runnable = self.get_or_create_runnable(fn)\n self.handler.removeCallbacks(runnable)\n self.handler.postDelayed(runnable, int(timeout_millis))",
"def delayed_call(self, delay, function):\n main_loop = self\n handler = []\n class DelayedCallHandler(TimeoutHandler):\n \"\"\"Wrapper timeout handler class for the delayed call.\"\"\"\n # pylint: disable=R0903\n @timeout_handler(delay, False)\n def callback(self):\n \"\"\"Wrapper timeout handler method for the delayed call.\"\"\"\n try:\n function()\n finally:\n main_loop.remove_handler(handler[0])\n handler.append(DelayedCallHandler())\n self.add_handler(handler[0])",
"def schedule(function_pointer: Callable, interval: float):\n pyglet.clock.schedule_interval(function_pointer, interval)",
"def simulate_waiting(self, exit_when=None):\n self._time_condition.acquire()\n\n # Helper function to reduce code copy\n def wait_block():\n self._increment_waiting_count(1)\n self._time_condition.wait()\n self._increment_waiting_count(-1)\n\n try:\n if exit_when is None:\n wait_block()\n else:\n while not exit_when(self._time):\n wait_block()\n finally:\n self._time_condition.release()",
"def schedule_call_global(self, seconds, cb, *args, **kw):\n t = timer.Timer(seconds, cb, *args, **kw)\n self.add_timer(t)\n return t",
"def schedule_callback(self, deferred, func, *args, **kwargs):\n self.increment_pc()\n saved_pc = self.program_counter[:]\n\n @wrapper(func)\n def callback_wrapper(*args, **kwargs):\n \"\"\"Wrapper for a callback which ensures a correct PC.\"\"\"\n try:\n current_pc = self.program_counter[:]\n self.program_counter[:] = saved_pc\n self.fork_pc()\n return func(*args, **kwargs)\n finally:\n self.program_counter[:] = current_pc\n\n return deferred.addCallback(callback_wrapper, *args, **kwargs)",
"def delay(wait):\n class Decorator:\n def __init__(self):\n self.is_async = False\n def delayed(self, *args, **kwargs):\n if self.is_async:\n self.timer = AsyncTimer(wait, self.fn, args, kwargs)\n else:\n self.timer = Timer(wait, self.fn, args, kwargs)\n self.timer.start()\n def __call__(self, fn):\n self.fn = fn\n return self.delayed\n return Decorator()",
"def sleep(self,secs):\r\n d = Deferred()\r\n self.reactor.callLater(secs,d.callback,'Sleeping')\r\n return d",
"def sleep(self,secs):\r\n d = Deferred()\r\n self.reactor.callLater(secs,d.callback,'Sleeping')\r\n return d",
"def with_execute_after(f):\n @wraps(f)\n def _inner(*args, **kwargs):\n task = args[0]\n if api.has_cancel_request():\n return task.async_result\n if task.execute_after and task.execute_after > time.time():\n t = threading.Timer(\n task.execute_after - time.time(),\n _inner, args=args, kwargs=kwargs\n )\n t.daemon = True\n t.start()\n return task.async_result\n with current_workflow_ctx.push(task.workflow_context):\n return f(*args, **kwargs)\n return _inner",
"def repeat_every(seconds, fn):\n def wrapper(scheduler):\n try:\n fn()\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n except:\n print('Error executing function')\n\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n scheduler.run()"
] |
[
"0.71576685",
"0.5960168",
"0.5606114",
"0.5550319",
"0.53337073",
"0.5309692",
"0.52763116",
"0.5262237",
"0.5249111",
"0.52373374",
"0.52373374",
"0.5199952",
"0.5189032",
"0.5167308",
"0.51216036",
"0.50948286",
"0.5093543",
"0.5088659",
"0.50886077",
"0.5082117",
"0.5052937",
"0.5045523",
"0.50259745",
"0.4962044",
"0.4959467",
"0.49579298",
"0.49361128",
"0.49361128",
"0.49271935",
"0.49086514"
] |
0.71113753
|
1
|
Schedule a coroutine to be run as soon as possible after `when` seconds have passed. Will return an opaque handle that can be passed to `unschedule` to unschedule the function.
|
def schedule_async_in(self, _when, _callback):
if isinstance(_when, datetime.timedelta):
_when = _when.total_seconds()
@coroutine
@functools.wraps(_callback)
def inner():
yield from asyncio.sleep(_when)
yield from _callback
return self.schedule_async(inner())
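Usage sketch, assuming `loop` is an instance of this class and `greet` is a hypothetical coroutine function:

async def greet():
    print("hello, a little later")

handle = loop.schedule_async_in(2, greet())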
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def schedule_in(self, _when, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n _callback(*_args, **_kwargs)\n\n return self.schedule_async(inner())",
"def _transientSchedule(self, when, now):\n if not self.running:\n return\n if self.timer is not None:\n if self.timer.getTime() < when.asPOSIXTimestamp():\n return\n self.timer.cancel()\n delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp()\n\n # reactor.callLater allows only positive delay values. The scheduler\n # may want to have scheduled things in the past and that's OK, since we\n # are dealing with Time() instances it's impossible to predict what\n # they are relative to the current time from user code anyway.\n delay = max(_EPSILON, delay)\n self.timer = self.callLater(delay, self.tick)\n self.nextEventAt = when",
"def _schedule(self, when):\n sched = IScheduler(self.store)\n for scheduledAt in sched.scheduledTimes(self):\n # https://github.com/twisted/epsilon/issues/38\n if when._time < scheduledAt._time:\n sched.reschedule(self, scheduledAt, when)\n break\n else:\n sched.schedule(self, when)",
"def sleep(duration):\n f = Future()\n IOLoop.current().call_later(duration, lambda: f.set_result(None))\n return f",
"def simulate_waiting(self, exit_when=None):\n self._time_condition.acquire()\n\n # Helper function to reduce code copy\n def wait_block():\n self._increment_waiting_count(1)\n self._time_condition.wait()\n self._increment_waiting_count(-1)\n\n try:\n if exit_when is None:\n wait_block()\n else:\n while not exit_when(self._time):\n wait_block()\n finally:\n self._time_condition.release()",
"def sleep(secs: float) -> Coroutine[None, None, None]:\n return time_sleep_coro(secs)",
"def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)",
"def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)",
"def sleep(secs: float) -> Coroutine[None, None, None]:\n\n # Subtract a millisecond to account for overhead\n sleep_for = max(0, secs - 0.001)\n if sleep_for < 0.0005:\n # Less than 0.5ms and its not worth doing the sleep\n return no_sleep_coro()\n\n timer = kernel32.CreateWaitableTimerExW(\n None,\n None,\n CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,\n TIMER_ALL_ACCESS,\n )\n if not timer:\n return time_sleep_coro(sleep_for)\n\n if not kernel32.SetWaitableTimer(\n timer,\n ctypes.byref(LARGE_INTEGER(int(sleep_for * -10_000_000))),\n 0,\n None,\n None,\n 0,\n ):\n kernel32.CloseHandle(timer)\n return time_sleep_coro(sleep_for)\n\n cancel_event = kernel32.CreateEventExW(None, None, 0, TIMER_ALL_ACCESS)\n if not cancel_event:\n kernel32.CloseHandle(timer)\n return time_sleep_coro(sleep_for)\n\n def cancel_inner():\n \"\"\"Sets the cancel event so we know we can stop waiting for the timer.\"\"\"\n kernel32.SetEvent(cancel_event)\n\n async def cancel():\n \"\"\"Cancels the timer by setting the cancel event.\"\"\"\n await asyncio.get_running_loop().run_in_executor(None, cancel_inner)\n\n def wait_inner():\n \"\"\"Function responsible for waiting for the timer or the cancel event.\"\"\"\n if (\n kernel32.WaitForMultipleObjects(\n 2,\n ctypes.pointer((HANDLE * 2)(cancel_event, timer)),\n False,\n INFINITE,\n )\n == WAIT_FAILED\n ):\n time_sleep(sleep_for)\n\n async def wait():\n \"\"\"Wraps the actual sleeping so we can detect if the thread was cancelled.\"\"\"\n try:\n await asyncio.get_running_loop().run_in_executor(None, wait_inner)\n except asyncio.CancelledError:\n await cancel()\n raise\n finally:\n kernel32.CloseHandle(timer)\n kernel32.CloseHandle(cancel_event)\n\n return wait()",
"def schedule_async_periodically(self, _interval, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n while True:\n yield from asyncio.sleep(_when)\n ret = yield from _callback(*_args, **_kwargs)\n if ret is False:\n break\n\n return self.schedule_async(inner())",
"def timeout_function(seconds=5):\n\n def signal_handler(signum, frame):\n raise TimeoutError(\"Timed out!\")\n\n signal.signal(signal.SIGALRM, signal_handler)\n signal.alarm(seconds)\n\n try:\n yield\n finally:\n signal.alarm(0)",
"def tic():\n then = datetime.datetime.now()\n return lambda: delay(datetime.datetime.now() - then)",
"def fake_spawn(time_from_now_in_seconds, func, *args, **kw):\n def thread_start():\n # fake_sleep(time_from_now_in_seconds)\n return func(*args, **kw)\n\n cr = Coroutine(thread_start)\n fake_threads.append({'sleep': time_from_now_in_seconds,\n 'greenlet': cr,\n 'name': str(func)})",
"async def time_sleep_coro(secs: float):\n await asyncio.sleep(secs)",
"def later(cls, timeout, f, **kwargs):\n def x(*args, **kwargs):\n cls.sleep(timeout)\n return f(*args, **kwargs)\n return cls.new(x, **kwargs)",
"def scheduleAfter(self, dt, handler):\n self.schedule(self.currentTime + dt, handler)",
"def timeout_syscall(seconds):\n def timeout_handler(signum, frame):\n raise InterruptedError\n\n original_handler = signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(seconds)\n try:\n yield\n finally:\n signal.alarm(0)\n signal.signal(signal.SIGALRM, original_handler)",
"def repeat_every(seconds, fn):\n def wrapper(scheduler):\n try:\n fn()\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n except:\n print('Error executing function')\n\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n scheduler.run()",
"async def test_trigger_await_gives_self(dut):\n t = Timer(1)\n t2 = await t\n assert t2 is t",
"async def with_timeout(self, duration=-1):\n if duration == -1:\n duration = self.extra.timeout\n if duration > 0:\n with anyio.move_on_after(duration) as sc:\n yield sc\n else:\n yield None",
"def schedule_later(self, hz: float, coroutine_function, priority, task_id, *args, **kwargs):\n ran_once = False\n\n async def call_later():\n nonlocal ran_once\n if ran_once:\n await coroutine_function(*args, **kwargs)\n else:\n await _yield_once()\n ran_once = True\n\n return self.schedule(hz, call_later, priority, task_id)",
"def TimeDelay (self, delay, cancel = None):\n if self.Disposed:\n return RaisedFuture (FutureCanceled ('Core is stopped'))\n\n return self.timer.Await (time () + delay, cancel)",
"async def test_scheduled_task(self):\n ctx = MockContext(channel=self.text_channel, invoke=mock.MagicMock())\n\n await self.cog.silence.callback(self.cog, ctx, 5)\n\n args = (300, ctx.channel.id, ctx.invoke.return_value)\n self.cog.scheduler.schedule_later.assert_called_once_with(*args)\n ctx.invoke.assert_called_once_with(self.cog.unsilence, channel=ctx.channel)",
"def schedule(function_pointer: Callable, interval: float):\n pyglet.clock.schedule_interval(function_pointer, interval)",
"def schedule_once(function_pointer: Callable, delay: float):\n pyglet.clock.schedule_once(function_pointer, delay)",
"def schedule_coroutine(target, loop=None):\n if asyncio.iscoroutine(target):\n return asyncio.ensure_future(target, loop=loop)\n raise TypeError(\"target must be a coroutine, \"\n \"not {!r}\".format(type(target)))",
"def awaitable(obj):\n yield from asyncio.sleep(0)\n return obj",
"def delay(wait):\n class Decorator:\n def __init__(self):\n self.is_async = False\n def delayed(self, *args, **kwargs):\n if self.is_async:\n self.timer = AsyncTimer(wait, self.fn, args, kwargs)\n else:\n self.timer = Timer(wait, self.fn, args, kwargs)\n self.timer.start()\n def __call__(self, fn):\n self.fn = fn\n return self.delayed\n return Decorator()",
"def delay_s(\r\n self,\r\n callable,\r\n timeout = None,\r\n immediately = True,\r\n verify = False,\r\n wakeup = True\r\n ):\r\n\r\n # creates the next element tuple that is going to be scheduled according\r\n # to the definition provided to the method\r\n next = (callable, timeout, immediately, verify)\r\n\r\n # acquires the lock that controls the access to the delayed for next\r\n # tick list and then adds the callable to such list, please note that\r\n # the delayed (next) list is only going to be joined/merged with delay\r\n # operations and list on the next tick (through the merge operation)\r\n self._delayed_l.acquire()\r\n try: self._delayed_n.append(next)\r\n finally: self._delayed_l.release()\r\n\r\n # in case the wakeup flag is set this delay operation should have\r\n # been called from a different thread and the event loop should\r\n # awaken as soon as possible to handle the event\r\n if wakeup: self.wakeup()",
"def sleep(self, seconds):\n\n # We schedule an alarm signal for x=seconds out in the future.\n # noinspection PyUnusedLocal\n def handle_alarm(signal_num, frame):\n pass\n\n signal.signal(signal.SIGALRM, handle_alarm)\n signal.alarm(seconds)\n\n # Wait for either the alarm to go off or for us to receive a SIGINT.\n signal.pause()\n\n # Remove the alarm if it is still pending.\n signal.alarm(0)"
] |
[
"0.6846653",
"0.5696773",
"0.5445648",
"0.52606493",
"0.5258912",
"0.5141864",
"0.51307636",
"0.51307636",
"0.5117959",
"0.51097524",
"0.50966895",
"0.49681464",
"0.49460042",
"0.48996404",
"0.4890596",
"0.48783326",
"0.4865467",
"0.48385656",
"0.48357123",
"0.48107603",
"0.47895348",
"0.478018",
"0.47705",
"0.47672042",
"0.47616538",
"0.4724338",
"0.47137833",
"0.47122395",
"0.46916422",
"0.46769965"
] |
0.6915283
|
0
|
Schedule a callback to be run every `interval` seconds. Will return an opaque handle that can be passed to unschedule() to unschedule the interval function. The function will also stop being scheduled if it returns False or raises an exception.
|
def schedule_periodically(self, _interval, _callback, *_args, **_kwargs):
if isinstance(_interval, datetime.timedelta):
_interval = _interval.total_seconds()
@coroutine
@functools.wraps(_callback)
def inner():
while True:
                yield from asyncio.sleep(_interval)
ret = _callback(*_args, **_kwargs)
if ret is False:
break
return self.schedule_async(inner())
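Usage sketch, assuming `loop` is an instance of this class and `poll_once` is a hypothetical function that returns False when polling should stop:

# Poll every 10 seconds until poll_once() returns False or raises.
handle = loop.schedule_periodically(10, poll_once)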
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def schedule_async_periodically(self, _interval, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n while True:\n yield from asyncio.sleep(_when)\n ret = yield from _callback(*_args, **_kwargs)\n if ret is False:\n break\n\n return self.schedule_async(inner())",
"def on_interval(interval=0.0):\n\n def decorator(func):\n @wraps(func)\n async def wrapper(*args, **kwargs):\n while True:\n start_time = time.time()\n await func(*args, **kwargs)\n elapsed = time.time() - start_time\n await asyncio.sleep(max(0, interval - elapsed))\n\n wrapper._is_interval_task = True\n return wrapper\n\n return decorator",
"def schedule(function_pointer: Callable, interval: float):\n pyglet.clock.schedule_interval(function_pointer, interval)",
"def run_continuously(self, interval: int = 1):\n cease_continuous_run = threading.Event()\n\n class ScheduleThread(threading.Thread):\n @classmethod\n def run(cls):\n while not cease_continuous_run.is_set():\n schedule.run_pending()\n time.sleep(interval)\n\n continuous_thread = ScheduleThread()\n continuous_thread.start()\n return cease_continuous_run",
"def Schedule(interval=3600):\n # TODO:: if func need to return something\n def schedule(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n while True:\n start_time = time.perf_counter()\n func(*args, **kwargs)\n running_time = time.perf_counter()-start_time\n latency = interval-running_time\n logger.debug(\n f'Next program:{func.__name__} will start at {latency} sec later.')\n time.sleep(latency)\n return wrapper(*args, **kwargs)\n return wrapper\n return schedule",
"def respect_interval(interval, function):\n def wrapper():\n last_called = clock()\n\n while True:\n now = clock()\n dt = now - last_called\n\n if dt >= interval:\n function()\n last_called = now\n\n yield\n\n return wrapper().__next__",
"def cycle(self, interval, function, *args, **kwargs):\n if interval <= 0:\n raise ValueError(\"Interval must be greater than 0 seconds.\")\n\n cycle = functools.partial(function, *args, **kwargs)\n timer = _Timer(self, cycle, True, interval, self.latest_poll_time + interval)\n bisect.insort(self._deferreds, timer)\n\n return timer",
"async def run_every(self, callback: Callable, start: dt.datetime, interval: int, **kwargs) -> str:\n name = self.name\n now = await self.get_now()\n\n if isinstance(start, str) and \"now\" in start: # meaning immediate time required\n now_offset = 0\n if \"+\" in start: # meaning time to be added\n now_offset = int(re.findall(r\"\\d+\", start)[0])\n\n aware_start = await self.get_now()\n aware_start = aware_start + dt.timedelta(seconds=now_offset)\n\n else:\n aware_start = self.AD.sched.convert_naive(start)\n\n if aware_start < now:\n aware_start = now + dt.timedelta(seconds=interval)\n\n self.logger.debug(\n \"Registering run_every starting %s in %ss intervals for %s\",\n aware_start,\n interval,\n name,\n )\n\n handle = await self.AD.sched.insert_schedule(\n name, aware_start, callback, True, None, interval=interval, **kwargs\n )\n return handle",
"def interval(cls, timeout, f, immediate = False, **kwargs):\n def _interval(*args, **kwargs):\n if immediate: f(*args, **kwargs)\n while True:\n cls.sleep(timeout)\n try:\n f(*args, **kwargs)\n except TaskletExit:\n break\n except:\n logging.exception(\"unhandled exception in Tasklet.interval\")\n return cls.new(_interval, **kwargs)",
"def call_repeatedly(interval, function, args):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(**args)\n\n threading.Thread(target=loop).start()\n\n # return the thread closing handle\n return stopped.set",
"def _start_timer(self, interval, callback):\n \n timer = Timer(interval, callback)\n timer.daemon = True\n timer.start()\n\n return timer",
"def start_periodic_timer(self, interval, fun, *args, **kwargs):\n entry = timer(interval, fun, *args, **kwargs)\n self._timers.append(entry)\n return entry",
"def setInterval(interval, times = -1):\n\n # Validate the parameters.\n if isinstance(interval, int):\n interval = float(interval)\n elif not isinstance(interval, float):\n raise TypeError(\"Expected int or float, got %r instead\" % type(interval))\n if not isinstance(times, int):\n raise TypeError(\"Expected int, got %r instead\" % type(times))\n\n # Code adapted from: http://stackoverflow.com/q/5179467\n\n # This will be the actual decorator,\n # with fixed interval and times parameter\n def outer_wrap(function):\n if not callable(function):\n raise TypeError(\"Expected function, got %r instead\" % type(function))\n\n # This will be the function to be\n # called\n def wrap(*args, **kwargs):\n\n stop = Event()\n\n # This is another function to be executed\n # in a different thread to simulate setInterval\n def inner_wrap():\n i = 0\n while i != times and not stop.isSet():\n stop.wait(interval)\n function(*args, **kwargs)\n i += 1\n\n t = Timer(0, inner_wrap)\n t.daemon = True\n t.start()\n\n return stop\n\n return wrap\n\n return outer_wrap",
"def schedule_interval(self, func, interval, *args, **kwargs):\n self.unschedule(func)\n item = _IntervalItem(func, interval, self._get_ticks(), args, kwargs)\n self.interval_schedules.append(item)\n self._need_sort = True",
"def repeat_every(seconds, fn):\n def wrapper(scheduler):\n try:\n fn()\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n except:\n print('Error executing function')\n\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n scheduler.run()",
"def __set_interval(self, func, sec):\n\n def func_wrapper():\n self.__set_interval(func, sec)\n func()\n\n t = threading.Timer(sec, func_wrapper)\n t.start()\n return t",
"def schedule(self, _callback, *_args, **_kwargs):\n @coroutine\n @functools.wraps(_callback)\n def inner():\n _callback(*_args, **_kwargs)\n\n return self.schedule_async(inner())",
"def every(\n interval: timedelta, start_at: Optional[datetime] = None\n) -> Callable[..., Schedule]:\n start = datetime.now()\n if start_at is not None:\n start = start_at\n\n def get_next_due_date() -> ScheduleGenerator:\n \"\"\"\n Get the datetime where this schedule should be run next\n \"\"\"\n while True:\n time_since_last_schedule = (datetime.now() - start) % interval\n yield datetime.now() + (interval - time_since_last_schedule)\n\n def schedule_decorator(fun: Callable[..., Any]) -> Schedule:\n return Schedule(fun, get_next_due_date())\n\n return schedule_decorator",
"def schedule_call_global(self, seconds, cb, *args, **kw):\n t = timer.Timer(seconds, cb, *args, **kw)\n self.add_timer(t)\n return t",
"def schedule_in(self, _when, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n _callback(*_args, **_kwargs)\n\n return self.schedule_async(inner())",
"def process(self, intervalFunc, handler):\n self.scheduleAfter(intervalFunc(), \n _(self, intervalFunc, handler)._process_impl)",
"def schedule_every(time_interval: float, to_repeat: 'function to call repeatedly'):\n # TODO - use module sched ?\n # https://stackoverflow.com/questions/474528/what-is-the-best-way-to-repeatedly-execute-a-function-every-x-seconds-in-python\n\n time.sleep(time_interval)\n while True:\n start_time = time.time()\n end_time = time.time()\n to_repeat()\n processing_time = end_time - start_time\n print(processing_time)\n time.sleep(time_interval - processing_time)\n\n # TODO - find a way to avoid the drift better? Like we could take into account the date in the logs",
"def timeout_handler(interval, recurring = None):\n def decorator(func):\n \"\"\"The decorator\"\"\"\n func._pyxmpp_timeout = interval\n func._pyxmpp_recurring = recurring\n return func\n return decorator",
"def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.function = function\n self.interval = interval\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()",
"def loop_coro(\n coro: Coroutine[Any, Any, Any] | Callable[[], Any],\n interval: float = 1.0,\n) -> asyncio.Task:\n\n async def _task_body():\n while True:\n if asyncio.iscoroutine(coro):\n await coro\n elif callable(coro):\n if asyncio.iscoroutinefunction(coro):\n await coro()\n else:\n coro()\n\n await asyncio.sleep(interval)\n\n return asyncio.create_task(_task_body())",
"def Retry_timer(interval=3, retry_times=3):\n def retry_timer(func):\n @wraps(func)\n def wrapper(count=1, interval=interval, retry_times=retry_times, *args, **kwargs):\n try:\n logger.debug(f'Try func:{func.__name__} {count} times.')\n return func(*args, **kwargs)\n except Exception as e:\n logger.warning(f'There have some error: {e}')\n count += 1\n if count <= retry_times:\n logger.debug(f'Will retry in {interval} sec.')\n time.sleep(interval)\n return wrapper(count=count, interval=interval, retry_times=retry_times, *args, **kwargs)\n else:\n logger.critical(f'Failed to execute func:{func.__name__}')\n return wrapper\n return retry_timer",
"def schedule_async_in(self, _when, _callback):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n yield from _callback\n\n return self.schedule_async(inner())",
"async def run_in(self, callback: Callable, delay: int, **kwargs) -> str:\n name = self.name\n self.logger.debug(\"Registering run_in in %s seconds for %s\", delay, name)\n # Support fractional delays\n i, d = divmod(float(delay), 1)\n exec_time = await self.get_now() + timedelta(seconds=int(i), microseconds=d * 1000000)\n handle = await self.AD.sched.insert_schedule(name, exec_time, callback, False, None, **kwargs)\n\n return handle",
"def loop(self, function, *args, **kwargs):\n loop = functools.partial(function, *args, **kwargs)\n timer = _Timer(self, loop, True)\n self._callbacks.append(timer)\n\n return timer",
"def _run(self):\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception: # pylint: disable=W0703\n logging.error(\"Error in periodic callback\", exc_info=True)\n self._schedule_next()"
] |
[
"0.7611171",
"0.72958636",
"0.7202084",
"0.7022559",
"0.7002948",
"0.66986394",
"0.6679216",
"0.66536695",
"0.6628146",
"0.6450473",
"0.6406166",
"0.63929296",
"0.6314753",
"0.62664723",
"0.6221932",
"0.61743444",
"0.6099458",
"0.60735184",
"0.5877573",
"0.58319503",
"0.5827353",
"0.57665426",
"0.56652105",
"0.5552169",
"0.54572344",
"0.54403293",
"0.5430357",
"0.5414117",
"0.5396438",
"0.53845245"
] |
0.79773027
|
0
|
Return whether or not the given handle is still scheduled.
|
def is_scheduled(self, handle):
return not handle.cancelled()
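Usage sketch, assuming `loop` and `handle` come from one of the schedule_* calls above:

if loop.is_scheduled(handle):
    loop.unschedule(handle)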
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_scheduled(self) -> bool:\n return not self.terminated and self.__state != Process.IDLE",
"async def timer_running(self, handle: str) -> bool:\n name = self.name\n self.logger.debug(\"Checking timer with handle %s for %s\", handle, self.name)\n return self.AD.sched.timer_running(name, handle)",
"def is_timer_on(self):\n return self.status == 'ON'",
"def is_locked(self):\n now = get_current_time()\n if self.end <= now:\n return True\n return False",
"def is_locked(self):\n return self._unit_got == False",
"def isActive(self):\n return self._timerID is not None",
"def isScheduleRunning(self):\n if DPxIsDinSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running",
"def is_pending(self):\n return self.is_disarming() or self.is_arming()",
"def isWaiting(self):\r\n return self.scheduler.isWaiting()",
"def _iswaiting(self):\n return self._ison() or self._isstandby()",
"def is_scheduled_for_deletion(self, instance_id):\n item = self.get_low_use_instance(instance_id)\n if item is not None:\n return item.get('Scheduled For Deletion', False)",
"def is_locked(self) -> bool | None:\n return self.instrument.is_locked",
"def check(self):\n if GPIO.input(self.number) == self.closed_state:\n current_time = now_in_ms()\n if (current_time - self.last_check_time) > self.delay:\n self.last_check_time = current_time\n return True\n return False",
"def is_task_in_schedule(self, tid: str) -> bool:\n return tid in self.__tasks",
"def schedule_required(self) -> bool:\n return self._local.idle",
"def is_delayed(self) -> bool:\n if self.periodic and self.is_attached():\n return self.runtime.cost > self.runtime.tasklet.tick\n\n return False",
"async def reset_timer(self, handle: str) -> bool:\n name = self.name\n self.logger.debug(\"Resetting timer with handle %s for %s\", handle, self.name)\n return await self.AD.sched.reset_timer(name, handle)",
"def DueToRun(self):\n if self.Get(self.Schema.DISABLED):\n return False\n\n cron_args = self.Get(self.Schema.CRON_ARGS)\n last_run_time = self.Get(self.Schema.LAST_RUN_TIME)\n now = rdfvalue.RDFDatetime().Now()\n\n # Its time to run.\n if (last_run_time is None or\n now > cron_args.periodicity.Expiry(last_run_time)):\n\n # Do we allow overruns?\n if cron_args.allow_overruns:\n return True\n\n # No currently executing job - lets go.\n if self.Get(self.Schema.CURRENT_FLOW_URN) is None:\n return True\n\n return False",
"def _has_thread(self) -> bool:\n with self._condition:\n return not self._is_disposed and self._thread is not None",
"def is_alarm():\n return _alarm",
"def should_poll(self):\n return self._command_state is not None",
"def is_locked(self):\n return self.lock_obj.is_locked()",
"def processPollByHandle(hProcess):\n try:\n dwWait = win32event.WaitForSingleObject(hProcess, 0); # pylint: disable=no-member\n except:\n reporter.logXcpt('hProcess=%s %#x' % (hProcess, hProcess,));\n return True;\n return dwWait != win32con.WAIT_TIMEOUT; #0x102; #",
"def unschedule(self, handle):\n if self.is_scheduled(handle):\n self.schedule(handle.cancel)",
"def locked(self) -> bool:\n return self._owner_task is not None",
"def is_overdue(self):\n\n if self.target_date and not self.closed:\n return self.target_date < timezone.now().date()\n return False",
"def pending_work(self) -> bool:\n return len(self.ongoing) > 0",
"def running(self):\n return (\n self.enabled and (self.elapsed < self.timeout)\n and not math.isclose(self.elapsed, self.timeout)\n )",
"def is_holding(self):\n return self.holding",
"def __bool__(self):\n return self.wait(0)"
] |
[
"0.7325243",
"0.72793305",
"0.6534397",
"0.64810956",
"0.6426111",
"0.63666165",
"0.63640743",
"0.6350375",
"0.6290454",
"0.627354",
"0.6244751",
"0.6230844",
"0.62235695",
"0.6197638",
"0.61975336",
"0.61951977",
"0.61813676",
"0.615372",
"0.614175",
"0.61378837",
"0.6070022",
"0.6050064",
"0.6034378",
"0.60341007",
"0.6006645",
"0.5982436",
"0.5968031",
"0.5959555",
"0.59520805",
"0.5935136"
] |
0.8905771
|
0
|
Run until the future is resolved.
|
def run_until(self, future):
@coroutine
def inner():
yield from future
self._unschedule_all()
self.loop.run_until_complete(asyncio.ensure_future(inner()))
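
A standalone sketch of the same "run until a future resolves" idea, modernized to async/await and plain asyncio; the original scheduler's _unschedule_all() bookkeeping is assumed and omitted here.

import asyncio

def run_until(loop, future):
    async def inner():
        # Block the loop run until the given future has a result.
        await future
    loop.run_until_complete(inner())

loop = asyncio.new_event_loop()
fut = loop.create_future()
loop.call_later(0.1, fut.set_result, 42)  # resolve the future shortly after start
run_until(loop, fut)
print(fut.result())  # 42
loop.close()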
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def wait_until_done(self) -> None:\n ...",
"async def wait_async(self):\n await self._future",
"def run_until_complete(self, future, timeout=None): # NEW!\r\n assert isinstance(future, Future)\r\n\r\n signal = threading.Condition()\r\n if timeout is None:\r\n while not future.done():\r\n self.run_once()\r\n else:\r\n raise Exception('Not implemented: timeouts until complete')\r\n\r\n return future.result()",
"def wait(self) -> None:\n if self.futures:\n wait(self.futures, return_when='FIRST_COMPLETED').done",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilSuccess():",
"def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()",
"async def async_run_forever(self):\n raise NotImplementedError",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\n self.tasks.join()",
"def wait_completion(self):\r\n self.tasks.join()",
"def wait(self):\n with self.__lock:\n while not self.__complete:\n self.__lock.wait()",
"def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)",
"def _future_work_():\n pass",
"async def wait_for_flow_control(self, stream_id):\n f = asyncio.Future()\n self.flow_control_futures[stream_id] = f\n await f",
"def wait_complete(self):\n self.join()",
"def done(self):\n future = self.future\n\n if future:\n result = future.done()\n return result",
"def __await__(self):\n return self.waiter.__await__()",
"def __await__(self):\n return self.waiter.__await__()",
"async def _run(self):\n # Use self as context manager so an escaping exception doesn't break\n # the event runner instance permanently (i.e. we clean up the future)\n with self:\n # Run until no more events or lingering futures\n while len(self.events) + len(self.futures) > 0:\n # Synchronously run event handler and collect new futures\n new_futures = self._run_events()\n self.futures |= new_futures\n # Don't bother waiting if no futures to wait on\n if len(self.futures) == 0:\n continue\n\n # Run until one or more futures complete (or new events are added)\n new_events = self.loop.create_task(self.new_events.wait())\n LOG.debug('waiting on %s futures', len(self.futures))\n done, pending = await asyncio.wait(self.futures | {new_events}, return_when=asyncio.FIRST_COMPLETED)\n # Remove done futures from the set of futures being waited on\n done_futures = done - {new_events}\n LOG.debug('%s of %s futures done', len(done_futures), len(self.futures))\n self.futures -= done_futures\n if new_events.done():\n LOG.debug('new events to process')\n else:\n # If no new events, cancel the waiter, because we'll create a new one next iteration\n new_events.cancel()"
] |
[
"0.7302693",
"0.7098596",
"0.69729954",
"0.68908244",
"0.6339196",
"0.6339196",
"0.6339196",
"0.6339196",
"0.63155216",
"0.6311712",
"0.6283957",
"0.6271067",
"0.6271067",
"0.6271067",
"0.6271067",
"0.6271067",
"0.6271067",
"0.6271067",
"0.6271067",
"0.6271067",
"0.6238508",
"0.62248415",
"0.6158209",
"0.61369896",
"0.60728425",
"0.60345453",
"0.6014091",
"0.59763014",
"0.59763014",
"0.5959377"
] |
0.7688761
|
0
|
Take a Nuke node, such as a read node or a Cryptomatte gizmo, reformat its metadata into a dictionary, and collect channel information.
|
def __init__(self, node_in):
self.cryptomattes = {}
self.nuke_node = node_in
self.selection = None
if not node_in:
return
exr_metadata_dict = node_in.metadata() or {}
prefix = "exr/cryptomatte/"
default_selection = None
for key, value in exr_metadata_dict.iteritems():
if not key.startswith(prefix):
continue
numbered_key = key[len(prefix):] # ex: "exr/cryptomatte/ae93ba3/name" --> "ae93ba3/name"
metadata_id = numbered_key.split("/")[0] # ex: "ae93ba3/name" --> ae93ba3
partial_key = numbered_key.split("/")[1] # ex: "ae93ba3/name" --> "name"
if metadata_id not in self.cryptomattes:
self.cryptomattes[metadata_id] = {}
self.cryptomattes[metadata_id][partial_key] = value
if default_selection is None:
default_selection = metadata_id
for metadata_id, value in self.cryptomattes.iteritems():
name = value.get("name", "")
channels = self._identify_channels(name)
self.cryptomattes[metadata_id]["channels"] = channels
self.selection = default_selection
if self.nuke_node.Class() == "Cryptomatte":
selection_name = node_in.knob("cryptoLayer").getValue()
if not selection_name:
return
valid_selection = self.set_selection(selection_name)
if not valid_selection and not self.nuke_node.knob("cryptoLayerLock").getValue():
self.selection = default_selection
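
A standalone sketch of the metadata-parsing step above, without Nuke and in Python 3 (dict.items() instead of iteritems()); the "exr/cryptomatte/<id>/<field>" key shape comes from the original, while the sample values are made up for illustration.

def parse_cryptomatte_metadata(exr_metadata_dict):
    prefix = "exr/cryptomatte/"
    cryptomattes = {}
    for key, value in exr_metadata_dict.items():
        if not key.startswith(prefix):
            continue
        # "exr/cryptomatte/ae93ba3/name" -> id "ae93ba3", field "name"
        metadata_id, _, partial_key = key[len(prefix):].partition("/")
        cryptomattes.setdefault(metadata_id, {})[partial_key] = value
    return cryptomattes

sample = {
    "exr/cryptomatte/ae93ba3/name": "cryptoObject",
    "exr/cryptomatte/ae93ba3/hash": "MurmurHash3_32",
    "input/width": "1920",  # unrelated metadata is ignored
}
print(parse_cryptomatte_metadata(sample))
# {'ae93ba3': {'name': 'cryptoObject', 'hash': 'MurmurHash3_32'}}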
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def nodes(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].nodes.values()])",
"async def info(ctx):\n server = ctx.message.guild\n name = server.name\n owner = server.owner\n creation_date = server.created_at\n emoji_count = len(server.emojis)\n icon = server.icon_url_as(format=None, static_format='jpeg')\n animated_icon = server.is_icon_animated()\n iden = server.id\n banner = server.banner_url\n desc = server.description\n mfa_level = server.mfa_level\n verification_level = server.verification_level\n content_filter = server.explicit_content_filter\n default_notifs = server.default_notifications\n features = server.features\n splash = server.splash_url\n premium_level = server.premium_tier\n boosts = server.premium_subscription_count\n channel_count = len(server.channels)\n text_channel_count = len(server.text_channels)\n voice_channel_count = len(server.voice_channels)\n category_count = len(server.categories)\n system_channel = server.system_channel\n if type(system_channel) == discord.TextChannel: system_channel = system_channel.mention\n rules_channel = server.rules_channel\n if type(rules_channel) == discord.TextChannel: rules_channel = rules_channel.mention\n public_updates_channel = server.public_updates_channel\n if type(public_updates_channel) == discord.TextChannel: public_updates_channel = public_updates_channel.mention\n emoji_limit = server.emoji_limit\n bitrate_limit = server.bitrate_limit\n filesize_limit = round(server.filesize_limit/1000000, 3)\n boosters = server.premium_subscribers\n for i, b in enumerate(boosters):\n # convert user objects to mentions\n boosters[i] = b.mention\n boosters = \", \".join(boosters)\n print(boosters)\n role_count = len(server.roles)\n member_count = len(server.members)\n max_members = server.max_members\n discovery_splash_url = server.discovery_splash_url\n member_percentage = round(member_count/max_members * 100, 3)\n emoji_percentage = round(emoji_count/emoji_limit * 100, 3)\n channel_percentage = round(channel_count/500 * 100, 3)\n role_percenatege = round(role_count/250 * 100, 3)\n\n staff_member = await is_staff(ctx)\n fields = [\n {\n \"name\": \"Basic Information\",\n \"value\": (\n f\"**Creation Date:** {creation_date}\\n\" +\n f\"**ID:** {iden}\\n\" +\n f\"**Animated Icon:** {animated_icon}\\n\" +\n f\"**Banner URL:** {banner}\\n\" +\n f\"**Splash URL:** {splash}\\n\" +\n f\"**Discovery Splash URL:** {discovery_splash_url}\"\n ),\n \"inline\": False\n },\n {\n \"name\": \"Nitro Information\",\n \"value\": (\n f\"**Nitro Level:** {premium_level} ({boosts} individual boosts)\\n\" +\n f\"**Boosters:** {boosters}\"\n ),\n \"inline\": False\n }\n ]\n if staff_member and ctx.channel.category.name == CATEGORY_STAFF:\n fields.extend(\n [{\n \"name\": \"Staff Information\",\n \"value\": (\n f\"**Owner:** {owner}\\n\" +\n f\"**MFA Level:** {mfa_level}\\n\" +\n f\"**Verification Level:** {verification_level}\\n\" +\n f\"**Content Filter:** {content_filter}\\n\" +\n f\"**Default Notifications:** {default_notifs}\\n\" +\n f\"**Features:** {features}\\n\" +\n f\"**Bitrate Limit:** {bitrate_limit}\\n\" +\n f\"**Filesize Limit:** {filesize_limit} MB\"\n ),\n \"inline\": False\n },\n {\n \"name\": \"Channels\",\n \"value\": (\n f\"**Public Updates Channel:** {public_updates_channel}\\n\" +\n f\"**System Channel:** {system_channel}\\n\" +\n f\"**Rules Channel:** {rules_channel}\\n\" +\n f\"**Text Channel Count:** {text_channel_count}\\n\" +\n f\"**Voice Channel Count:** {voice_channel_count}\\n\" +\n f\"**Category Count:** {category_count}\\n\"\n ),\n \"inline\": False\n },\n {\n \"name\": \"Limits\",\n \"value\": (\n 
f\"**Channels:** *{channel_percentage}%* ({channel_count}/500 channels)\\n\" +\n f\"**Members:** *{member_percentage}%* ({member_count}/{max_members} members)\\n\" +\n f\"**Emoji:** *{emoji_percentage}%* ({emoji_count}/{emoji_limit} emojis)\\n\" +\n f\"**Roles:** *{role_percenatege}%* ({role_count}/250 roles)\"\n ),\n \"inline\": False\n }\n ])\n embed = assemble_embed(\n title=f\"Information for `{name}`\",\n desc=f\"**Description:** {desc}\",\n thumbnailUrl=icon,\n fields=fields\n )\n await ctx.send(embed=embed)",
"def metadata_get(node):\n\n metadata = dict()\n\n # get parameters common to all hosting providers or platforms\n params = ['hostname', 'domain', 'provider', 'role', 'repo']\n for item in params:\n metadata[item] = hiera_get('metadata:{0}'.format(item), 'fqdn={0}'.format(node))\n # logging.debug('metadata_get {0:<10} {1}'.format(item, metadata[item]))\n\n # build fqdn from hieradata\n metadata['fqdn'] = '{0}.{1}'.format(metadata['hostname'], metadata['domain'])\n\n # get parameters unique to a particular provider or platform\n if metadata['provider'] == 'aws':\n params = ['subnet', 'secgroup', 'keypair', 'ami', 'type', 'region']\n for item in params:\n metadata[item] = hiera_get('metadata:aws:{0}'.format(item), 'fqdn={0}'.format(node))\n # logging.debug('metadata_get {0:<10} {1}'.format(item, metadata[item]))\n\n return metadata",
"def nodeReader(node):\n processSiteInfo = {\n 'event-data': processEventData(),\n 'local-stage-out': processLocalStageOut(),\n 'calib-data': processCalibData(),\n 'fallback-stage-out': processFallbackStageOut()\n }\n\n report = {}\n sProcess = processSite(processSiteInfo)\n processor = processNode(sProcess)\n processor.send((report, node))\n\n return report",
"def _identify_channels(self, name):\n\n channel_list = []\n if self.nuke_node.Class() == \"Cryptomatte\":\n # nuke_node is a keyer gizmo\n channel_list = self.nuke_node.node('Input1').channels()\n else:\n # nuke_node might a read node\n channel_list = self.nuke_node.channels()\n\n relevant_channels = [x for x in channel_list if x.startswith(name)]\n pure_channels = []\n for channel in relevant_channels:\n suffix = \".red\"\n if not channel.endswith(suffix):\n continue\n # to do: validate this somewhere else\n pure_channel = channel[:-len(suffix)]\n pure_channels.append(pure_channel)\n\n return sorted(pure_channels)",
"def get_kegg_info(kegg_info_file):\n kegg_info_fh = open(kegg_info_file, 'r')\n kegg_info_dict = {}\n\n title_line = kegg_info_fh.next()\n kegg_info_dict['title'] = title_line.strip().split(' ')[1]\n\n release_line = kegg_info_fh.next()\n release_info = release_line.strip().split(' ')[1]\n if release_info.startswith('Release'):\n kegg_info_dict['release'] = release_info\n else:\n kegg_info_dict['release'] = None\n\n kegg_info_dict['lab_info'] = kegg_info_fh.next().strip()\n\n for line in kegg_info_fh:\n toks = line.strip().split()\n kegg_info_dict[toks[0]] = ' '.join(toks[1:])\n\n kegg_info_fh.close()\n\n return kegg_info_dict",
"def main(connection, info, args, conf) :\r\n connection.rawsend(\"KICK %s %s :%s\\n\" % (info[\"channel\"], args[1], \" \".join(args[2:])))",
"def get_node(self, node: str) -> Dict:\n n = {}\n if self.graph.has_node(node):\n n = self.graph.nodes[node]\n return n",
"def fix_montage(raw, timestamp):\n # These channels are not recorded during an EEG experiment or are not included in standard 10/20 montage.\n \n non_eeg = ['SaO2 SpO2', 'HR HR','Pulse Plet', 'ExG1', 'ExG2', 'EEG A1', 'EEG A2']\n \n #Check if EOG was recorded. If so, save it so it can later be added to the data.\n EOG_CHANNEL_FOUND = False\n if('ExG1' in raw.ch_names): \n eog_data = raw.copy().pick_channels(['ExG1']).get_data()\n EOG_CHANNEL_FOUND = True\n \n exclude = list(set(non_eeg).intersection(raw.ch_names))\n raw.drop_channels(exclude)\n \n raw.info['ch_names'] = [name.split(' ')[-1] for name in raw.info['ch_names']]\n\n orig_names = raw.ch_names\n montage = mne.channels.read_montage(kind = 'standard_1020', ch_names=raw.info['ch_names'])\n \n data = raw.get_data()\n \n channels_dict = {}\n \n for channel_name, channel_data in zip(orig_names, data):\n channels_dict[channel_name] = channel_data\n \n reordered_data = np.zeros(shape = data.shape) \n \n for idx, channel_name in enumerate(montage.ch_names):\n reordered_data[idx, :] = channels_dict[channel_name]\n \n new_info = mne.create_info(\n ch_names= list(montage.ch_names),\n sfreq = raw.info['sfreq'],\n ch_types = ['eeg'] * len(list(montage.ch_names)),\n #meas_date = [timestamp[0], 0] # Time of the first sample and something else. Not well documented.\n )\n \n # Create new dataset with reordered channels\n new_raw = mne.io.RawArray(reordered_data, new_info)\n # Set electrode localizations using standard 1020 montage\n new_raw.set_montage(montage)\n \n if(EOG_CHANNEL_FOUND): # Add it to other channels\n eog_channel = mne.io.RawArray(eog_data, mne.create_info( ch_names= ['ExG1'], sfreq = raw.info['sfreq'], ch_types = ['eog']))\n new_raw = new_raw.add_channels([eog_channel])\n \n return new_raw",
"def _analyze(node: dict, depth=0, info=defaultdict(int)):\n info[\"depth\"] = max(info[\"depth\"], depth)\n for key in node.keys():\n if key == ITEMSKEY:\n info[\"georecord_containers\"] += 1\n info[\"georecord_items\"] += len(node[key])\n elif key == SUFFIXKEY:\n info[\"suffix_containers\"] += 1\n info[\"suffix_items\"] += len(node[key])\n else:\n info[\"prefix_nodes\"] += 1\n _analyze(node[key], depth + 1, info)\n return info",
"def convert(data):\n NODES, EDGES = {}, {}\n\n for duid, entity in data['entity'].iteritems():\n if entity['entityType'] == 'Trigger':\n continue\n if Node.add(duid, NODES):\n NODES[duid].Label = entity['entityText']\n NODES[duid].source = entity['source']\n NODES[duid].json = json.dumps(entity)\n\n for duid, relation in data['relation'].iteritems():\n duid = relation['duid']\n sn, tn = None, None\n if relation['source'] == 'miRTex':\n sn, tn = tm.mirtex.cytoscape_relation_args(relation)\n # elif relation['source'] == 'RLIMS-P':\n # sn, tn = tm.rlims.cytoscape_relation_args(relation)\n if sn not in NODES or tn not in NODES:\n continue\n if Edge.add(duid, sn, tn, relation['relationType'], EDGES):\n EDGES[duid].json = json.dumps(relation)\n\n return dump_network(NODES, EDGES)",
"def read_channel(self, channel: int, /) -> int:",
"def test__ChannelMetadataGuildForumBase__from_keyword_parameters__0():\n parent_id = 202304110007\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202304110008, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n available_tags = [\n ForumTag.precreate(\n 202304110009,\n emoji = BUILTIN_EMOJIS['heart'],\n name = 'Yup',\n moderated = False,\n )\n ]\n default_thread_auto_archive_after = 86400\n default_thread_reaction = BUILTIN_EMOJIS['monkey']\n default_thread_slowmode = 60\n flags = ChannelFlag(1)\n topic = 'Dearest'\n default_sort_order = SortOrder.creation_date\n default_forum_layout = ForumLayout.list\n \n keyword_parameters = {\n 'parent_id': parent_id,\n 'name': name,\n 'permission_overwrites': permission_overwrites,\n 'position': position,\n 'available_tags': available_tags,\n 'default_thread_auto_archive_after': default_thread_auto_archive_after,\n 'default_thread_reaction': default_thread_reaction,\n 'default_thread_slowmode': default_thread_slowmode,\n 'flags': flags,\n 'topic': topic,\n 'default_sort_order': default_sort_order,\n 'default_forum_layout': default_forum_layout,\n }\n \n channel_metadata = ChannelMetadataGuildForumBase.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(channel_metadata.parent_id, parent_id)\n vampytest.assert_eq(channel_metadata.name, name)\n vampytest.assert_eq(\n channel_metadata.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in permission_overwrites},\n )\n vampytest.assert_eq(channel_metadata.position, position)\n vampytest.assert_eq(channel_metadata.available_tags, tuple(available_tags))\n vampytest.assert_eq(channel_metadata.default_thread_auto_archive_after, default_thread_auto_archive_after)\n vampytest.assert_eq(channel_metadata.default_thread_reaction, default_thread_reaction)\n vampytest.assert_eq(channel_metadata.default_thread_slowmode, default_thread_slowmode)\n vampytest.assert_eq(channel_metadata.flags, flags)\n vampytest.assert_eq(channel_metadata.topic, topic)\n vampytest.assert_is(channel_metadata.default_sort_order, default_sort_order)\n vampytest.assert_is(channel_metadata.default_forum_layout, default_forum_layout)",
"def test_api_read_channel(api):\n response = api.read_channel()\n assert \"name='request().json()'\" in repr(response)\n req_call = requests.request\n assert req_call.call_count == 1\n req_args = req_call.call_args[0]\n req_kw = req_call.call_args[1]\n assert req_args[0] == 'GET'\n assert req_args[1] == 'https://news-api.apple.com/channels/FAKE_CHANNEL'\n assert 'Authorization' in req_kw['headers']\n assert 'HHMAC; key=FAKE_ID; signature=' in req_kw['headers']['Authorization']\n assert req_kw['data'] is None",
"def get_channel_info(self):\n items = [('channel_number', int),\n ('range', float),\n ('sampling_rate', float),\n ('digitisation', float),\n ('offset', float),\n ]\n\n attrs = self['/UniqueGlobalKey/channel_id'].attrs\n info = {key: converter(attrs[key]) for key, converter in items}\n new_names = [('range','channel_range'),\n ('sampling_rate', 'channel_sampling_rate'),\n ('digitisation', 'channel_digitisation'),\n ('offset', 'channel_offset'),\n ]\n for old, new in new_names:\n info[new] = info[old]\n del info[old]\n return info",
"def _build_node(self, node_name):\n logger.info('Building node: {0}'.format(node_name))\n nx_graph = self.abstract_graph.nx_graph\n\n for selected_node in nuke.selectedNodes():\n selected_node.setSelected(False)\n node = self.abstract_graph.nx_graph.nodes()[node_name]['node']\n if nuke.toNode(node_name) is None:\n parent = node.build()\n else:\n parent = nuke.toNode(node_name)\n assert parent\n inputs = [edge for edge in nx_graph.edges(data=True)\n if edge[0] == node_name]\n for input in inputs:\n child = self._build_node(input[1])\n logger.info('{0} >> {1} >> {2}'.format(\n input[1],\n input[2]['input'],\n node_name))\n parent.setInput(input[2]['input'], child)\n return parent",
"def get_relevant(self, msg):\n nick = msg.GetNick()\n net = msg.GetNetwork()\n data = {\"body\": msg.GetText(),\n \"network\": net.GetName(),\n \"away\": net.IsIRCAway(),\n \"client_count\": len(net.GetClients()),\n \"nick\": nick.GetNick(),\n \"ident\": nick.GetIdent(),\n \"host\": nick.GetHost(),\n \"hostmask\": nick.GetHostMask()}\n chan = msg.GetChan()\n if chan:\n data[\"context\"] = data[\"channel\"] = chan.GetName()\n data[\"detached\"] = chan.IsDetached()\n else:\n data[\"context\"] = data[\"nick\"]\n return data",
"async def channel_info(bot, message):\n if isinstance(CHANNELS, (int, str)):\n channels = [CHANNELS]\n elif isinstance(CHANNELS, list):\n channels = CHANNELS\n else:\n raise ValueError(\"Unexpected type of CHANNELS\")\n\n text = '📑 **Indexed channels/groups**\\n'\n for channel in channels:\n chat = await bot.get_chat(channel)\n if chat.username:\n text += '\\n@' + chat.username\n else:\n text += '\\n' + chat.title or chat.first_name\n\n text += f'\\n\\n**Total:** {len(CHANNELS)}'\n\n if len(text) < 4096:\n await message.reply(text)\n else:\n file = 'Indexed channels.txt'\n with open(file, 'w') as f:\n f.write(text)\n await message.reply_document(file)\n os.remove(file)",
"async def feed_on(match, channel):\n global items\n chan_hash = str(hash(channel))\n\n item = {\"name\" : match, \"time\" : datetime.utcnow().isoformat() }\n if chan_hash in items:\n items[chan_hash].append(item)\n else:\n items[chan_hash] = [item]\n\n with open(os.path.join(BASEPATH, 'hell.json'), 'w') as cucumber:\n json.dump( items, cucumber )\n\n action = f\"_sneaks out a scaly hand and grabs {match}!_\"\n await channel.send(action)",
"def data_collection():\n global PAUSED\n print(\"Detecting nodes\")\n while True:\n data = SOCK.recvfrom(1024)[0] # buffer size is 1024 bytes\n message = data.decode()\n try:\n message_function = message[0]\n message = message[1:]\n \n if message_function == \"t\":\n loc, temp, hum = message.split(\", \")\n temp = (float(temp) * 1.8) + 32 # convert from C to F\n\n # Checks if location is alreay in the rolling_X dictionarys. If not, it creates an entry\n # in the dictionary and populates it with the defaults\n if loc not in ROLLING_TEMPS:\n ROLLING_TEMPS[loc] = copy(TEMPDEQUEDEFAULT)\n print(loc, \"has connected\")\n if loc not in ROLLING_HUMS:\n ROLLING_HUMS[loc] = copy(HUMDEQUEDEFAULT)\n\n # Append new temp and humidity to appropriate deque in dictionaries\n ROLLING_TEMPS[loc].appendleft(temp)\n ROLLING_HUMS[loc].appendleft(hum)\n LAST_RECEIVED[loc] = datetime.datetime.utcnow()\n \n elif message_function == \"c\":\n if message == \"pause\":\n PAUSED = True\n print(\"pausing\")\n elif message == \"unpause\":\n PAUSED = False\n print(\"unpausing\")\n else:\n print(\"unknown command function\")\n elif message_function == \"i\":\n if message == \"status\":\n print(\"Paused:\", PAUSED)\n else:\n print(\"unknown info function\")\n except:\n print(\"malformed data\")",
"def parse_discord_context_object(context_obj):\n metadata = dict() # TODO: all context_obj.message.{children}.name values\n metadata['user_name'] = context_obj.message.author.name\n metadata['team_name'] = context_obj.message.server.name\n try:\n metadata['channel_name'] = context_obj.message.channel.name\n except Exception:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(context_obj.message.author.name)\n\n return metadata",
"def _meta_dict(self, node):\n meta = {n: self._text(node, n) for n in ('source', 'date', 'key')}\n meta.update(self.infon_dict(node))\n return meta",
"def _read_channels(self, info):\n channels = []\n if info.desc().child(\"channels\").empty():\n return channels\n\n channel = info.desc().child(\"channels\").child(\"channel\")\n for _ in range(info.channel_count()):\n channel_name = channel.child_value(\"label\")\n # If the data stream has a TRG channel, rename it so it doesn't\n # conflict with the marker channel.\n if channel_name == 'TRG' and self._marker_inlets:\n channel_name = \"TRG_device_stream\"\n channels.append(channel_name)\n channel = channel.next_sibling()\n\n for appended_channel in self._appended_channels:\n channels.append(appended_channel)\n\n trg_marker_index = self._trigger_inlet_index()\n for i, inlet in enumerate(self._marker_inlets):\n col = inlet_name(inlet)\n if i == trg_marker_index:\n col = 'TRG'\n channels.append(col)\n\n return channels",
"def test_channel_definition(self):\n TopoObj('topo', data, channels=channels)",
"def DAQchannels(tree, DAQnum, CHnum):\n tree.addNode('.NI_6133.DAQ_' + str(DAQnum) + '.CHANNEL_' + str(CHnum))\n chanpath = ('.NI_6133.DAQ_' + str(DAQnum) + '.CHANNEL_' + str(CHnum)\n + '.CHAN_SETTING')\n tree.addNode(chanpath)\n AddNodeWithTag(tree, chanpath + ':ACTIVE', 'NUMERIC', 'DAQTIVE_DCARD' +\n str(DAQnum) + 'CH' + str(CHnum))\n AddNodeWithTag(tree, chanpath + ':CHANNEL_NAME', 'TEXT', 'USERNAME_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum))\n AddNumericWithUnit(tree, chanpath + ':VOLT_RANGE', 'VOLTRANGE_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum), 'V')\n AddNodeWithTag(tree, chanpath + ':NI_NAME', 'TEXT', 'NINAME_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum))",
"def channelinfo(self):\n\n return ChannelInfo(\n self._filetextbox.text(),\n self._idtextbox.text(),\n self._datafilebox.text()\n )",
"def get_channel_number(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn int(self.keyinfo['channel_id'].attrs['channel_number'])\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\treturn int(self.keyinfo['read_id'].attrs['channel_number'])\n\t\texcept:\n\t\t\treturn None",
"def get_metadata_for_node(self, node):\n return self.manager.get_metadata(self, node=node)",
"def test__ChannelMetadataGuildForumBase__new__0():\n parent_id = 202209170072\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202209170073, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n available_tags = [\n ForumTag.precreate(\n 202209170074,\n emoji = BUILTIN_EMOJIS['heart'],\n name = 'Yup',\n moderated = False,\n )\n ]\n default_thread_auto_archive_after = 86400\n default_thread_reaction = BUILTIN_EMOJIS['monkey']\n default_thread_slowmode = 60\n flags = ChannelFlag(1)\n topic = 'Dearest'\n default_sort_order = SortOrder.creation_date\n default_forum_layout = ForumLayout.list\n \n channel_metadata = ChannelMetadataGuildForumBase(\n parent_id = parent_id,\n name = name,\n permission_overwrites = permission_overwrites,\n position = position,\n available_tags = available_tags,\n default_thread_auto_archive_after = default_thread_auto_archive_after,\n default_thread_reaction = default_thread_reaction,\n default_thread_slowmode = default_thread_slowmode,\n flags = flags,\n topic = topic,\n default_sort_order = default_sort_order,\n default_forum_layout = default_forum_layout,\n )\n _assert_fields_set(channel_metadata)\n \n vampytest.assert_eq(channel_metadata.parent_id, parent_id)\n vampytest.assert_eq(channel_metadata.name, name)\n vampytest.assert_eq(\n channel_metadata.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in permission_overwrites},\n )\n vampytest.assert_eq(channel_metadata.position, position)\n vampytest.assert_eq(channel_metadata.available_tags, tuple(available_tags))\n vampytest.assert_eq(channel_metadata.default_thread_auto_archive_after, default_thread_auto_archive_after)\n vampytest.assert_eq(channel_metadata.default_thread_reaction, default_thread_reaction)\n vampytest.assert_eq(channel_metadata.default_thread_slowmode, default_thread_slowmode)\n vampytest.assert_eq(channel_metadata.flags, flags)\n vampytest.assert_eq(channel_metadata.topic, topic)\n vampytest.assert_is(channel_metadata.default_sort_order, default_sort_order)\n vampytest.assert_is(channel_metadata.default_forum_layout, default_forum_layout)",
"async def _na_channel(self, ctx: Context, *, channel: discord.TextChannel):\n\n await self.config.guild(ctx.guild).na_channel_id.set(channel.id)\n\n await ctx.message.add_reaction(CHECK_MARK)"
] |
[
"0.49682045",
"0.4965133",
"0.48950648",
"0.48330873",
"0.47763747",
"0.47557113",
"0.4672016",
"0.46068865",
"0.45688218",
"0.4524178",
"0.4506004",
"0.44435894",
"0.44433522",
"0.442099",
"0.43905738",
"0.43887165",
"0.43723145",
"0.43646377",
"0.4338512",
"0.43365788",
"0.4333967",
"0.43289587",
"0.43205157",
"0.43071967",
"0.43058193",
"0.42876935",
"0.4281083",
"0.42702162",
"0.42529723",
"0.42505237"
] |
0.5142253
|
0
|
Checks that the selection is valid.
|
def is_valid(self):
if self.selection is None:
return False
if self.selection not in self.cryptomattes:
return False
if "channels" not in self.cryptomattes[self.selection]:
return False
if len(self.cryptomattes[self.selection]["channels"]) < 2:
return False
return True
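
An illustrative check of the validity rules above on a hand-built state (hypothetical data, not from the dataset): a selection is only valid when it exists, has a "channels" entry, and that entry holds at least two channel layers.

cryptomattes = {
    "ae93ba3": {"name": "cryptoObject",
                "channels": ["cryptoObject", "cryptoObject00"]},
    "b12f9c1": {"name": "cryptoMaterial", "channels": ["cryptoMaterial"]},
}

def selection_is_valid(selection):
    info = cryptomattes.get(selection)
    return bool(info and len(info.get("channels", [])) >= 2)

print(selection_is_valid("ae93ba3"))  # True
print(selection_is_valid("b12f9c1"))  # False: only one channel layer
print(selection_is_valid(None))       # False: nothing selected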
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def requires_selection(self) -> bool:\n return True",
"def validateSelection(self, exportItems):\n\n invalidItems = []\n # Look for selected items which arent of the correct type\n for item in exportItems:\n if not item.sequence() and not item.trackItem():\n invalidItems.append(item)\n\n return len(invalidItems) < len(exportItems)",
"def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]",
"def validate_input(value):\n try:\n values = ['1', '2', '3', '4']\n if value not in values:\n raise ValueError\n except ValueError:\n print(f\"Invalid selection, you typed '{value}'. Please try again.\\n\")\n print('-' * 80)\n return False\n else:\n return True",
"def validateSelection(self):\n if self.cameraMoving == 0:\n return 1\n else:\n return 0",
"def valid(self):\n pass",
"def isRangeValid(self) -> bool:\n ...",
"def check_validity(self):",
"def _check_validity(self):\n pass",
"def validate_choice(value):\n try:\n values = ['y', 'n', 'Y', 'N']\n if value not in values:\n raise ValueError\n except ValueError:\n print(f\"Invalid selection, you typed '{value}'. Please try again.\\n\")\n return False\n else:\n return True",
"def valid(self) -> bool:\n pass",
"def _validate_select_where(self):",
"def _is_valid_index(self, index):\n row = index.row()\n column = index.column()\n return not (row < 0 or column < 0 or\n row >= len(self.view_list) or column > 4 or\n index == QModelIndex())",
"def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()",
"def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False",
"def valid(self) -> bool:\n return True",
"def verify_option_unselected_in_the_section(self):\n if self.verify_radio_button_status(\"home\", checked=False) and self.verify_radio_button_status(\"business\", checked=False):\n return True\n if self.verify_radio_button_status(\"home\", checked=True) and self.driver.wait_for_object(\"please_specify_text_in_home_dropdown\", timeout=3, raise_e=False):\n return True\n if self.verify_radio_button_status(\"business\", checked=True) and self.driver.wait_for_object(\"please_specify_text_in_business_dropdown\", timeout=3, raise_e=False):\n return True\n raise OptionsNotUnselected(\"Home or business section have options selected\")",
"def test_validate_available_choice_3(self):\n self.assertIsNone(validate_available_choice(BeerStyle, BeerStyle.LAGER))",
"def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True",
"def check_box_vaild(self, width, height):\n return (self.box.box_valid() and\n (self.box.rb.x <= width) and\n (self.box.rb.y <= height))",
"def validate(self):\n try:\n self.values.clear()\n self.values.append(int(self.e1.get()))\n except ValueError:\n messagebox.showwarning(\n \"Bad input\",\n \"Illegal values, please try again.\")\n return False\n\n return True",
"def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos",
"def test_validate_available_choice_1(self):\n self.assertRaises(\n InvalidStatusOperationError,\n validate_available_choice,\n *(BeerStyle, \"Not an int\")\n )",
"def is_valid(self):\n for lineedit in self.lineedits:\n if lineedit in self.validate_data and lineedit.isEnabled():\n validator, invalid_msg = self.validate_data[lineedit]\n text = to_text_string(lineedit.text())\n if not validator(text):\n QMessageBox.critical(self, self.get_name(),\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\n QMessageBox.Ok)\n return False\n return True",
"def check_dataframe_valid(self, df, option):\n # display(df)\n if df[option].isna().sum() > df.shape[0]/2:\n print(\"invalid data\")\n return False\n else:\n print(\"valid data\")\n return True",
"def _validate(self, model_instance, value):\r\n if self.empty(value) and self.is_required:\r\n raise ValidationError(\"Field '%s' is required.\", self.name)\r\n\r\n if self._selection and value not in self._selection_list:\r\n raise ValidationError(\r\n _(\"Field '%(name)s' is '%(value)s'; must be one of %(selection)s\",\r\n name=self.name, value=value, selection=self._selection_list))\r\n\r\n if self._validator:\r\n self._validator(model_instance, value)\r\n\r\n if value is None:\r\n return value\r\n\r\n return self.validate(value)",
"def is_valid(values, dataset):\r\n # Only includes negative screens.\r\n if values[SCREEN_TYPE_COL] != \"negative selection\":\r\n STATS[NOT_NEG_SCREEN] += 1\r\n return False\r\n # Targets must have the correct length.\r\n if int(values[dataset.end_idx]) - int(values[dataset.start_idx]) !=\\\r\n consts.TARGET_LEN:\r\n STATS[WRONG_END_MINUS_START] += 1\r\n return False\r\n\r\n target = dataset.get_target(values)\r\n # Targets must have an NGG PAM sequence.\r\n if not target.endswith(\"GG\"):\r\n STATS[BAD_PAM] += 1\r\n return False\r\n # Another safety measure against targets with the wrong length.\r\n if len(target) != consts.TARGET_LEN:\r\n STATS[TARGET_BAD_LEN] += 1\r\n return False\r\n return True",
"def is_valid(self):\r\n raise NotImplementedError",
"def is_valid_option(cls, id_):\n return id_ in cls.CHOICES",
"def is_valid(self):\n raise NotImplementedError"
] |
[
"0.6651532",
"0.6511238",
"0.6496035",
"0.6281323",
"0.6092379",
"0.6071178",
"0.60368043",
"0.60075337",
"0.5985491",
"0.5956918",
"0.5955037",
"0.5923048",
"0.591732",
"0.5899168",
"0.5867308",
"0.5866485",
"0.58013046",
"0.57818985",
"0.57534087",
"0.5744871",
"0.5736666",
"0.5717598",
"0.5716249",
"0.57107943",
"0.57068056",
"0.5683285",
"0.5675087",
"0.5653598",
"0.56413305",
"0.5620204"
] |
0.6579121
|
1
|
Sets the selection (e.g. cryptoObject) based on the name. Returns True if successful.
|
def set_selection(self, selection):
for num in self.cryptomattes:
if self.cryptomattes[num]["name"] == selection:
self.selection = num
return True
self.selection = None
return False
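
A hypothetical standalone version of the name-based lookup above, outside of Nuke: walk the id-to-info mapping, return the matching id, and signal a miss with None so the caller can clear the selection.

cryptomattes = {
    "ae93ba3": {"name": "cryptoObject"},
    "b12f9c1": {"name": "cryptoMaterial"},
}

def find_selection_by_name(name):
    for metadata_id, info in cryptomattes.items():
        if info["name"] == name:
            return metadata_id
    return None

print(find_selection_by_name("cryptoMaterial"))  # b12f9c1
print(find_selection_by_name("cryptoAsset"))     # None -> selection cleared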
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_active(self, name):\n try:\n obj = self.get_by_name(name)\n item = obj.item\n group = self.group_items[obj.kind]\n\n group_index = self.index(group.row(), 0, QtCore.QModelIndex())\n item_index = self.index(item.row(), 0, group_index)\n\n self.view.selectionModel().select(item_index, QtCore.QItemSelectionModel.Select)\n except Exception as e:\n log.error(\"[ERROR] Cause: %s\" % str(e))\n raise",
"def select(self, collection_name):\n\n if self.__db.lookup_collection(collection_name):\n\n self._collection = collection_name\n return True\n\n else:\n return False",
"def select(self, aid: bytes) -> bool:\n ...",
"def set_active_perspective(self, name):\n self.perspective_cbox.SetStringSelection(name)\n self.enable_import()",
"def test_selection_name(self):\n skill = create_skill()\n skill.speak = mock.Mock()\n skill.get_response = mock.Mock()\n\n skill.get_response.return_value = 'octopus'\n\n options = ['a balloon', 'an octopus', 'a piano']\n response = skill.ask_selection(options, 'which is better')\n self.assertEqual(options[1], response)\n\n # Assert that the spoken sentence contains all options.\n spoken_sentence = skill.speak.call_args[0][0]\n for opt in options:\n self.assertTrue(opt in spoken_sentence)",
"def setSelectedGroup(self, name):\n index = self.groupCombo.findText(name)\n self.groupCombo.setCurrentIndex(index)",
"def get_selection(self, name):\n print 'hi being selected in plotdata'\n return self.selections.get(name, None)",
"def __eq__(self, name):\n return self.name == name",
"def selection(self, name):\n try:\n return self._selections[name]\n except KeyError:\n raise Pype9NameError(\n \"No selection named '{}' (possible '{}')\"\n .format(name, \"', '\".join(self.selection_names)))",
"def _add_selection ( self , nick , sel ) :\n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n if self.__selections_[ self.name()].has_key( nick ) :\n raise AttributeError , \"Selection '%s'already exists \" % nick\n \n self.__selections_[ self.name() ][ nick ] = sel\n \n return sel",
"def _confirm_object(cls, name):\n if name in bpy.data.objects:\n return\n cls._confirm_group()\n #Backup current selection\n selection = ObjectSelection()\n #Create cube\n bpy.ops.mesh.primitive_cube_add(radius=0.024)\n new_cube = bpy.context.selected_objects[0]\n new_cube.parent = bpy.data.objects[cls.GROUP_NAME]\n new_cube.name = name\n cls._hash_color(new_cube)\n #Restore selection\n selection.restore()",
"def set(self, name: str):\n self.dict[name] = True",
"def select(self, location, name):\n if self._transfer is not None:\n return\n\n self._client.proxy.Select(location, name)",
"def setSpherePickable(self, obj, dictName):\n obj.sim.reparentTo(self.selectable)\n obj.sim.find('**/pSphere1').node().setIntoCollideMask(BitMask32.bit(1))\n obj.sim.find('**/pSphere1').node().setTag(dictName, obj.id)",
"def setAccessibleName(self, name: Union[AnyStr, QString]):\n ...",
"def get_selection(self, selection_name, format=None):",
"def is_selected(self):\n return NSCSpecIO().read()[\"profile\"] == self.path.stem",
"def select_new_collection_name(self, name):\n select_new_collection_name_sitem = self.locator_finder_by_id(self.select_new_collection_name_id)\n select_new_collection_name_sitem.click()\n select_new_collection_name_sitem.send_keys(name)\n time.sleep(1)",
"def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False",
"def _syncName (self, extraArgs=None):\n self._classSelectionMenu.syncInfo(cName=self.getName())",
"def SetStringSelection(self, val):\n \n for c in self.choices:\n if val == c.GetLabel():\n c.SetValue(True)\n break",
"def setSelected(*args):",
"def setSelected(*args):",
"def setName(self, name):\n return self",
"def select(self):\r\n pass",
"def select_wallet(rpc_user, rpc_pwd, name='spawned'):\n print('\\nLoading...')\n # Tries to select the wallet until no more 'no wallet selected'\n # RpcError.\n # This should be improved.\n while True:\n data = ('{\"jsonrpc\":\"2.0\",\"method\":\"selectwallet\",' +\n '\"params\" : [\"WalletName\"]}'.replace('WalletName', name))\n call_rpc(rpc_user, rpc_pwd, data)\n try:\n selected_wallet = get_wallet_info(rpc_user,\n rpc_pwd,\n )\n except MyExceptions.RpcError:\n sleep(1)\n continue\n else:\n # Checks the correct wallet has indeed been selected.\n if selected_wallet['walletName'] != name:\n raise MyExceptions.WrongSelection(\n '{}.json wallet has not been ' +\n 'selected, try again'.format(name)\n )\n break\n return",
"def options_by_name(self):\n pass",
"def setName(self, name):\n # type: (str)->None\n self._validator.validate_one('name', VALID_OPTS['name'], name)\n self._ifAttributes['name'] = str(name)",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def _selection ( self, nick ) :\n \n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n return self.__selections_[ self.name() ].get( nick , None )"
] |
[
"0.6081402",
"0.5837094",
"0.5716215",
"0.5606648",
"0.5603318",
"0.5579458",
"0.5501297",
"0.54874736",
"0.5427308",
"0.5410648",
"0.540119",
"0.538927",
"0.53702074",
"0.5359982",
"0.53305274",
"0.5328184",
"0.53180003",
"0.5280301",
"0.5278149",
"0.5265013",
"0.5257278",
"0.52409524",
"0.52409524",
"0.5240504",
"0.5218896",
"0.5207654",
"0.5200552",
"0.5189821",
"0.51792324",
"0.51770115"
] |
0.77932
|
0
|
Gets the names of the cryptomattes contained in the file, which are the possible selections or cryptomatte channels.
|
def get_cryptomatte_names(self):
return [self.cryptomattes[x]["name"] for x in self.cryptomattes]
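
An illustrative call on the same hypothetical mapping used above: the returned names are what a UI would offer as selectable cryptomatte layers.

cryptomattes = {
    "ae93ba3": {"name": "cryptoObject"},
    "b12f9c1": {"name": "cryptoMaterial"},
}
names = [cryptomattes[x]["name"] for x in cryptomattes]
print(sorted(names))  # ['cryptoMaterial', 'cryptoObject']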
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _identify_channels(self, name):\n\n channel_list = []\n if self.nuke_node.Class() == \"Cryptomatte\":\n # nuke_node is a keyer gizmo\n channel_list = self.nuke_node.node('Input1').channels()\n else:\n # nuke_node might a read node\n channel_list = self.nuke_node.channels()\n\n relevant_channels = [x for x in channel_list if x.startswith(name)]\n pure_channels = []\n for channel in relevant_channels:\n suffix = \".red\"\n if not channel.endswith(suffix):\n continue\n # to do: validate this somewhere else\n pure_channel = channel[:-len(suffix)]\n pure_channels.append(pure_channel)\n\n return sorted(pure_channels)",
"def get_all_cipher():\n return OpenSSL.cipher_algo.keys()",
"def get_channels(self):\n return [self.afos, \"%s...\" % (self.afos[:3], )]",
"def get_file_type_options():\n curr_type = global_settings.settings[menu_option]\n msg = \"\"\n for i in range(len(file_types)):\n if curr_type == file_types[i]:\n msg += str(i) + \" \" + file_types[i] + \" SELECTED\\n\"\n else:\n msg += str(i) + \" \" + file_types[i] + \"\\n\"\n return msg",
"def GetNamesOfRecoveryPieces(self,file):\n assert self.recoveryDatabase.has_key(file), (\n 'recoveryDatabase has no record for ' + file + '.\\n'\n + 'Keys = ' + `self.recoveryDatabase.keys()` + '\\n')\n return self.recoveryDatabase[file].GetNamesOfPieces()",
"def get_convos():\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(b' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(b', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos",
"def compose_files(self) -> List[str]:\n return self.data[\"compose_files\"]",
"def getNames(self):\r\n ListFiles = os.listdir(\"Save\")\r\n centering = \" \"\r\n stringFiles = centering + \"List of {} files in your Save folder : \\n \\n\".format(\r\n \"PVP\" if self.PVP else \"AI\"\r\n )\r\n if self.PVP:\r\n for k in ListFiles:\r\n if self.PVP and \"PVP_mode\" == k[:8]:\r\n realName = k[9:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n else:\r\n stringFiles += \" Files where AI is playing white : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"B\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n stringFiles += \"\\n Files where AI is playing black : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"W\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n self.existingFiles.setText(stringFiles)",
"def get_convos():\n convos = []\n convos_file_path = os.path.join(DATA_PATH, MOVIE_CONVOS_FILE)\n\n with open(convos_file_path, 'r', errors='ignore') as f:\n # +++$+++ is used to split the section in a single line\n # A correct formed line includes four sections\n # The last section is list of lineIDs in each conversation\n\n for line in f:\n line_sections = line.split(' +++$+++ ')\n assert len(line_sections) == 4\n convos.append(line_sections[3][1:-2].replace('\\'', '').split(', '))\n\n return convos",
"def get_codecs_list():\n for codec in CODECS_IN_FILE.iterkeys():\n print codec",
"def get_control_files(name = ''):\n # get all control folder path\n control_dir_list = system.get_control_curve_path()\n # collect all control files and flatten it\n control_files = [control_dir.glob('*.json') for control_dir in control_dir_list]\n control_files = util.flatten(control_files)\n if name:\n control_files = [o for o in control_files if o.stem == name]\n\n return control_files",
"def _possible_names(self, filename):\n names = [filename]\n if not self._iszip(filename):\n for zipext in _file_openers.keys():\n if zipext:\n names.append(filename+zipext)\n return names",
"def select_files():\n\n if not Settings.is_prompt(): return [File.get_random_file()]\n category = Settings.select_category()\n if not category: return File.select_file_upload_method()\n # if not Settings.confirm(category): return File.select_files()\n Settings.print(\"Select Files or a Folder\")\n files = []\n while True:\n file = File.select_file(category)\n if not file: break\n ##\n if \"performer\" in str(category):\n cat = Settings.select_category([cat for cat in Settings.get_categories() if \"performer\" not in cat])\n performerName = file.get_title()\n file = File.select_file(cat, performer=performerName)\n if not file: break\n setattr(file, \"performer\", performerName)\n files.append(file)\n if \"galler\" in str(cat) or \"video\" in str(cat): break\n ##\n files.append(file)\n if \"galler\" in str(category) or \"video\" in str(category): break\n if str(files[0]) == \"unset\": return files\n if not Settings.confirm([file.get_title() for file in files]): return File.select_files()\n return files",
"def get_list():\n\n print(f\"Корневой каталог: {config_tools.NAME_PATH}\")\n for dirpath, dirnames, filenames in os.walk(config_tools.NAME_PATH):\n # перебрать каталоги\n for dirname in dirnames:\n print(\"Каталог:\", os.path.join(dirpath, dirname))\n # перебрать файлы\n for filename in filenames:\n print(\"Файл:\", os.path.join(dirpath, filename))",
"def GetNamesOfPieces(self):\n assert self.RecoveredEnoughPieces()\n result = []\n base = self.fileName + dibs_constants.fileSeparator \n for p in self.piecesRecovered.keys():\n result.append(base + p)\n return result",
"def get_conversations(filename):\n with open(filename, 'r') as fp:\n data = json.load(fp)\n convos = data['conversation_state']\n all_names = []\n for conv in convos:\n conv = conv['conversation_state']['conversation']\n # does if have a name?\n if 'name' in conv:\n name = conv['name']\n else:\n # get all the people in the conv\n people_names = [person['fallback_name']\n for person in conv['participant_data']]\n name = ','.join(people_names)\n all_names.append(name)\n return all_names",
"def get_files(file_list, mode):\n\tfile_set = set(file_list)\n\tif \"band\" in mode:\n\t\tband_all_set = set([\"bands.dat.gnu\", \"freq.plot\"])\n\t\tband_file_set = file_set & band_all_set ; remain_file_set = file_set - band_all_set\n\t\tfiles_str = \", \".join([f\"\\033[32m{b}\\033[0m\" for b in band_file_set]) + \"; \" + \", \".join(remain_file_set)\n\tif mode == \"dos\":\n\t\tdos_all_set = set([\"S.dos\"])\n\t\tdos_file_set = file_set & dos_all_set ; remain_file_set = file_set - dos_all_set\n\t\tfiles_str = \", \".join([f\"\\033[32m{d}\\033[0m\" for d in dos_file_set]) + \"; \" + \", \".join(remain_file_set)\n\tprint(f\"Files in the directory: {files_str}\"); file = input(\"Please choose your file (type one only): \")\n\treturn file",
"def ordered_channel_names(self):\n channel_list = []\n for k in self.__dict__.keys():\n if k.startswith('channel_'):\n channel_list.append(\n [int(k.split('channel_')[1]), self.__dict__[k]]\n )\n channel_list.sort()\n if len(channel_list) == 0:\n print('********* warning!! empty channel list - are there ay channel_N attributes? ')\n return [i[1] for i in channel_list]",
"def getChannelsByName(self, unit, channels): \n\t\treturn self.selectChannelsByName(unit, channels, dontSelect = 1)",
"def get_file_ids(self):\n\n if self.attribute_file_ids:\n file_ids = \"\"\n for id_ in self.attribute_file_ids:\n file_ids += \",\" + str(id_)\n return _(\"Attributes: \") + self.attributes_msg + \" \", f\" in ({file_ids[1:]})\"\n\n file_name = self.ui.comboBox_file.currentText()\n case_name = self.ui.comboBox_case.currentText()\n if file_name == \"\" and case_name == \"\":\n return \"\", \"\"\n if file_name != \"\":\n for f in self.files:\n if f['name'] == file_name:\n return _(\"File: \") + file_name + \" \", f\"={f['id']}\"\n case_id = -1\n for c in self.cases:\n if c['name'] == case_name:\n case_id = c['id']\n break\n cur = self.app.conn.cursor()\n sql = \"select distinct fid from case_text where caseid=?\"\n cur.execute(sql, [case_id, ])\n res = cur.fetchall()\n file_ids = \"\"\n for r in res:\n file_ids += \",\" + str(r[0])\n if file_ids == \"\":\n return \"\", \"\"\n return _(\"Case: \") + case_name + \" \", f\" in ({file_ids[1:]})\"",
"def getComicsListFromFile(filename):\n h = open(filename)\n contents = \"\\n\".join(h.readlines())\n expr = re.compile(\"([a-z0-9]+)\")\n return expr.findall(contents)",
"def GetFileNames(self):\n return self.files",
"def get_keys(self, file_name):\n\n nc = Dataset(file_name)\n keylist = []\n for key in nc.variables.keys():\n if ((not key == \"time\") and (not key == \"grid\")):\n keylist.append(key)\n\n nc.close()\n return keylist",
"def selectChannelsByName(self, unit, channels, dontSelect = 0):\n\t\tret = []\n\t\tself.programmatic = 1 \n\t\tfor item in self.dataUnitItems:\n\t\t\tobj = self.tree.GetPyData(item)\n\t\t\tif obj.getName() in channels and unit == self.dataUnitToPath[obj]:\n\t\t\t\tif not dontSelect and not self.tree.IsSelected(item):\n\t\t\t\t\tself.tree.ToggleItemSelection(item)\n\t\t\t\tret.append(obj)\n\t\tself.programmatic = 0 \n\t\treturn ret",
"def get_convos():\n # returns array of arrays with line data from movie_conversations.txt\n # ex. convos = [['L194', 'L195', 'L196'], ['L198', L'199']]\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos",
"def list_files(self):\n if self.remote:\n return self.remote.list_files()\n\n M.mset('DUZ',self.DUZ)\n M.mset('U', \"^\")\n if self.isProgrammer:\n M.mset('DUZ(0)', \"@\")\n rv = []\n s0 = \"0\"\n while s0 != \"\":\n s0, name = M.mexec(\n '''set s0=$order(^DIC(s0)) Q:s0'=+s0 I $D(^DIC(s0,0))&$D(^DIC(s0,0,\"GL\"))&$$VFILE^DILFD(s0) S s1=$P(^DIC(s0,0),U,1)''',\n M.INOUT(s0), M.INOUT(\"\"))\n if name:\n rv.append((name, s0))\n return rv",
"def select_files(self):\n pass",
"def names(self):\n return list(item.name for item in self.mechanisms)",
"def get_channel_names(self, datapath):\n self.logger.debug(\"get_channel_names: for %s\", datapath)\n names = glob.glob(datapath+\".*\")\n self.logger.debug(\"get_channel_names: from %s\", names)\n channel_names = []\n for name in names:\n channel_names.append(\"%02d\" % int(os.path.splitext(name)[1][1:]))\n return channel_names",
"def select_files():\n root = Tk()\n root.withdraw()\n root.wm_attributes('-topmost', 1)\n files = askopenfilenames(parent=root,\n title=\"Select file\",\n filetypes=((\"Image files\", '*' + ';*'.join(supported_extensions)), (\"all files\", \"*.*\"))\n )\n return root.tk.splitlist(files)"
] |
[
"0.5787666",
"0.5672296",
"0.55718595",
"0.55461186",
"0.54703754",
"0.5463261",
"0.5390272",
"0.53667766",
"0.5364245",
"0.53621876",
"0.5335746",
"0.5329547",
"0.5319464",
"0.52820706",
"0.5278067",
"0.5262054",
"0.52222556",
"0.5201887",
"0.5199541",
"0.5195934",
"0.5158009",
"0.5154139",
"0.5126311",
"0.5088757",
"0.5070625",
"0.50497293",
"0.50460815",
"0.5036591",
"0.5029201",
"0.50126296"
] |
0.6824994
|
0
|
From a name like "cryptoObject", gets sorted channels, such as cryptoObject, cryptoObject00, cryptoObject01.
|
def _identify_channels(self, name):
channel_list = []
if self.nuke_node.Class() == "Cryptomatte":
# nuke_node is a keyer gizmo
channel_list = self.nuke_node.node('Input1').channels()
else:
# nuke_node might a read node
channel_list = self.nuke_node.channels()
relevant_channels = [x for x in channel_list if x.startswith(name)]
pure_channels = []
for channel in relevant_channels:
suffix = ".red"
if not channel.endswith(suffix):
continue
# to do: validate this somewhere else
pure_channel = channel[:-len(suffix)]
pure_channels.append(pure_channel)
return sorted(pure_channels)
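
A standalone sketch of the channel filtering above (no Nuke, channel names invented for illustration): given a layer's full channel list, keep only the ".red" sub-channels, strip that suffix, and return the ranked cryptomatte layers in sorted order.

channel_list = [
    "cryptoObject.red", "cryptoObject.green", "cryptoObject.blue",
    "cryptoObject00.red", "cryptoObject00.green",
    "cryptoObject01.red", "depth.Z",
]

def identify_channels(channels, name):
    relevant = [c for c in channels if c.startswith(name)]
    return sorted(c[:-len(".red")] for c in relevant if c.endswith(".red"))

print(identify_channels(channel_list, "cryptoObject"))
# ['cryptoObject', 'cryptoObject00', 'cryptoObject01']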
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ordered_channel_names(self):\n channel_list = []\n for k in self.__dict__.keys():\n if k.startswith('channel_'):\n channel_list.append(\n [int(k.split('channel_')[1]), self.__dict__[k]]\n )\n channel_list.sort()\n if len(channel_list) == 0:\n print('********* warning!! empty channel list - are there ay channel_N attributes? ')\n return [i[1] for i in channel_list]",
"def find_elements(channel_names):\n\n elements = []\n for i in range(1, 110, 1): \n elements.append(str(ELEMENTS[i].symbol))\n\n elements = sorted(set(channel_names) & set(elements), key = channel_names.index)\n\n return elements",
"def channel_move_sort_key(channel_key):\n return channel_key[0]",
"def getChannelsByName(self, unit, channels): \n\t\treturn self.selectChannelsByName(unit, channels, dontSelect = 1)",
"def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n if asset[0]==\"X\" or asset[0]==\"Z\":\n asset = asset[1:]\n holder = []\n for pair in self.allPairs:\n if asset in pair:\n holder.append(pair)\n if asset == \"XBT\":\n asset = \"BTC\"\n pairsByTickers[asset] = holder\n return pairsByTickers",
"def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n if asset[0]==\"X\" or asset[0]==\"Z\":\n asset = asset[1:]\n holder = []\n for pair in self.allPairs:\n if asset in pair:\n holder.append(pair)\n if asset == \"XBT\":\n asset = \"BTC\"\n pairsByTickers[asset] = holder\n return pairsByTickers",
"def get_channel_list(self, keyfind, origin=None):\n\n if origin:\n\n if keyfind == \"number\":\n return [self.list[origin][x].number for x in [x[\"id\"] for x in self.get_channels(origin)]]\n\n else:\n return [self.list[origin][x].dict[keyfind] for x in [x[\"id\"] for x in self.get_channels(origin)]]\n\n else:\n\n matches = []\n for origin in list(self.list.keys()):\n\n if keyfind == \"number\":\n next_match = [self.list[origin][x].number for x in [x[\"id\"] for x in self.get_channels(origin)]]\n\n else:\n next_match = [self.list[origin][x].dict[keyfind] for x in [x[\"id\"] for x in self.get_channels(origin)]]\n\n if len(next_match):\n matches.append(next_match)\n\n return matches[0]",
"def get_channel_names(self, datapath):\n self.logger.debug(\"get_channel_names: for %s\", datapath)\n names = glob.glob(datapath+\".*\")\n self.logger.debug(\"get_channel_names: from %s\", names)\n channel_names = []\n for name in names:\n channel_names.append(\"%02d\" % int(os.path.splitext(name)[1][1:]))\n return channel_names",
"def _device_sort_key(iface):\n dev = (iface.get(\"device\") or \"\").lower()\n if dev.startswith(\"eth\") or dev.startswith(\"en\"):\n return \"0\" + dev\n if dev.startswith(\"wl\"):\n return \"1\" + dev\n if dev.startswith(\"e\") or dev.startswith(\"w\"):\n return \"2\" + dev\n else:\n return dev",
"def channel_list(self):\n return_str = self.scpi.query_channel_catalog().split(',')\n channel_dct = {}\n for i in range(int(len(return_str)/2)):\n channel_dct[int(return_str[2 * i])] = return_str[2 * i + 1]\n return channel_dct",
"def channels_listall(token):\n channels_results = channels.list()\n channels_list = []\n for channel in channels_results:\n channels_list.append(\n {\"channel_id\": channel[\"channel_id\"], \"name\": channel[\"name\"]}\n )\n return {\"channels\": channels_list}",
"def get_channels(self):\n return [self.afos, \"%s...\" % (self.afos[:3], )]",
"def get_swap_pairs(channels):\n swap_pairs = []\n if ('EEG FP1' in channels) and ('EEG FP2' in channels):\n swap_pairs.append((channels.index('EEG FP1'), channels.index('EEG FP2')))\n if ('EEG Fp1' in channels) and ('EEG Fp2' in channels):\n swap_pairs.append((channels.index('EEG Fp1'), channels.index('EEG Fp2'))) \n if ('EEG F3' in channels) and ('EEG F4' in channels):\n swap_pairs.append((channels.index('EEG F3'), channels.index('EEG F4'))) \n if ('EEG F7' in channels) and ('EEG F8' in channels):\n swap_pairs.append((channels.index('EEG F7'), channels.index('EEG F8'))) \n if ('EEG C3' in channels) and ('EEG C4' in channels):\n swap_pairs.append((channels.index('EEG C3'), channels.index('EEG C4')))\n if ('EEG T3' in channels) and ('EEG T4' in channels):\n swap_pairs.append((channels.index('EEG T3'), channels.index('EEG T4')))\n if ('EEG T5' in channels) and ('EEG T6' in channels):\n swap_pairs.append((channels.index('EEG T5'), channels.index('EEG T6')))\n if ('EEG O1' in channels) and ('EEG O2' in channels):\n swap_pairs.append((channels.index('EEG O1'), channels.index('EEG O2')))\n \n return swap_pairs",
"def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset in pair:\n holder.append(pair)\n if asset == \"XBT\":\n asset = \"BTC\"\n pairsByTickers[asset] = holder\n return pairsByTickers",
"def get_channels(cj): \n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n channels = opener.open(\"http://www.douban.com/j/app/radio/channels\")\n channel_list = json.loads(channels.read())\n return channel_list[\"channels\"]\n # print channel_list",
"def sorted_gnames():\n return sorted(group_names.keys())",
"def dmc_order(self):\n return sorted(self.lookup_table, key=lambda clr: int(clr.id) if clr.id.isdigit() else 0)",
"def refresh_chanlist(self):\n self._chanlist.delete(0, Tix.END)\n for name in sorted(self._channel_frames.keys(), _k.cmp_channels):\n self._chanlist.insert(Tix.END, name)",
"def order_channels_by_distance(reference, channels, geom):\n coord_main = geom[reference]\n coord_others = geom[channels]\n idx = np.argsort(np.sum(np.square(coord_others - coord_main), axis=1))\n\n return channels[idx], idx",
"def test_channel_hash(self):\n acq_channel_1 = AcquireChannel(123)\n acq_channel_2 = AcquireChannel(123)\n\n hash_1 = hash(acq_channel_1)\n hash_2 = hash(acq_channel_2)\n\n self.assertEqual(hash_1, hash_2)",
"def _get_by_name(self, name: str, default: default_var=None) \\\n -> 'typing.Union[channel.Channel, default_var]':\n s = sorted(self._channels.values(), key=lambda c: c.position)\n try:\n return next(filter(lambda ch: ch.name == name, s))\n except StopIteration:\n return default",
"def test_get_ch_names(dummy_streamers, stream_inlets):\n for _, device, source_id, subscriptions in dummy_streamers:\n inlets = stream_inlets[source_id]\n for stream_type in subscriptions:\n ch_names = acquire.get_ch_names(inlets[source_id][stream_type]\n .info())\n assert (device.PARAMS['streams']['ch_names'][stream_type]\n == tuple(ch_names))",
"def get_channel_pairs(cls):\n channel_pairs = []\n for name, creator in CommonBase.get_channels(cls):\n for pair in creator.pairs:\n channel_pairs.append(pair)\n return channel_pairs",
"def get_sorted_suit_list(self):\n return [x[0] for x in sorted(self.suit_dict.items(), key=lambda x: x[1], reverse=True)]",
"def channel_names(self):\n header_names = [s.strip() for s in\n self.header['Bias Spectroscopy>Channels'].split(';')]\n\n # 'Bias calc (V)' is in file but not in the header.\n return ['Bias calc (V)', ] + header_names",
"def channel_shuffle(input, groups):\n return FunctionLib.apply(\n 'ChannelShuffle', input.device, [input], axis=1, group=groups)",
"def get_activechannels(self,):\n\n channels_nibble = self._read('CSR')[0] >> 4\n channels = []\n\n for i in reversed (range (4)):\n if channels_nibble >> i > 0:\n channels.append(i)\n channels_nibble -= 2**i\n\n channels.reverse()\n\n return channels",
"def names(self, channel, *args, **kwargs):\n pass",
"def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset.lower() in pair:\n holder.append(pair.upper())\n pairsByTickers[asset] = holder\n return pairsByTickers",
"def list(sw, args):\n parser = argparse.ArgumentParser(\n prog='space channel list',\n description='List channels in spacewalk.'\n )\n parser.add_argument(\n 'type',\n choices=[\n 'all',\n 'user',\n 'popular',\n 'retired',\n 'shared',\n 'software',\n 'vendor'\n ],\n default='popular',\n help=\"Type of search you would like to perform\"\n )\n parser.add_argument(\n '--format',\n choices=[\n 'raw',\n 'json',\n 'pretty'\n ],\n default='pretty',\n required=False\n )\n parser.add_argument(\n '--popcount',\n default=None,\n help=('channels with at least this many systems ' +\n 'subscribed will be returned')\n )\n\n api_calls = {\n 'all': 'channel.listAllChannels',\n 'user': 'channel.listMyChannels',\n 'popular': 'channel.listPopularChannels',\n 'retired': 'channel.listRetiredChannels',\n 'shared': 'channel.listSharedChannels',\n 'software': 'channel.listSoftwareChannels',\n 'vendor': 'channel.listVendorChannels'\n }\n\n p = parser.parse_args(args)\n\n if p.type == 'popular' and not p.popcount:\n print(\"Popular requires popcount arg.\")\n parser.print_help()\n return False\n\n if p.popcount:\n popcount = int(p.popcount)\n results = sw.call(\n api_calls[p.type],\n popcount\n )\n else:\n results = sw.call(\n api_calls[p.type]\n )\n if results == []:\n print(\"Empty result set.\")\n\n channels = []\n for result in results:\n channels.append(result)\n\n if p.format == 'pretty':\n \"\"\"\n int \"id\"\n string \"label\"\n string \"name\"\n string \"provider_name\"\n int \"packages\"\n int \"systems\"\n string \"arch_name\"\n \"\"\"\n if p.type == \"software\":\n t = prettytable.PrettyTable([\n \"Label\",\n \"Name\",\n \"Parent Label\",\n \"End Of Life\",\n \"Arch\"\n ])\n t.align[\"Label\"] = \"l\"\n t.align[\"Name\"] = \"l\"\n t.align[\"Parent Label\"] = \"l\"\n t.padding_width = 1\n for c in results:\n\n t.add_row([\n c['label'],\n c['name'],\n c['parent_label'],\n c['end_of_life'],\n c['arch']\n ])\n else:\n t = prettytable.PrettyTable([\n \"Label\",\n \"Name\",\n \"Provider Name\",\n \"Packages\",\n \"Systems\",\n \"Arch Name\"\n ])\n t.align[\"Label\"] = \"l\"\n t.align[\"Name\"] = \"l\"\n t.align[\"Packages\"] = \"r\"\n t.align[\"Systems\"] = \"r\"\n t.align[\"Provider Name\"] = \"l\"\n t.padding_width = 1\n for c in results:\n\n t.add_row([\n c['label'],\n c['name'],\n c['provider_name'],\n c['packages'],\n c['systems'],\n c['arch_name']\n ])\n print(t)\n\n elif p.format == 'json':\n output = json.dumps(dict(channels=channels))\n print(output)\n else:\n for result in results:\n print(result)\n return results"
] |
[
"0.70834565",
"0.6133864",
"0.56112415",
"0.5569978",
"0.52664435",
"0.52664435",
"0.52457845",
"0.5231811",
"0.5229068",
"0.5212763",
"0.5126728",
"0.5124537",
"0.5118637",
"0.5082058",
"0.50327605",
"0.5009194",
"0.49724874",
"0.49594697",
"0.495632",
"0.49303198",
"0.49223077",
"0.49053547",
"0.4896156",
"0.48636928",
"0.4849427",
"0.48460367",
"0.48324287",
"0.48208624",
"0.48158893",
"0.47945514"
] |
0.615348
|
1
|
Loads the JSON manifest, unpacks its hex strings into floats, and converts it to two dictionaries that map IDs to names and vice versa. Also caches the last manifest in a global variable so that a session of selecting things does not constantly require reloading the manifest (~0.13 seconds for a 32,000-name manifest).
|
def parse_manifest(self):
import json
import struct
num = self.selection
try:
manifest = json.loads(self.cryptomattes[num].get("manifest", "{}"))
except:
manifest = {}
from_names = {}
from_ids = {}
unpacker = struct.Struct('=f')
packer = struct.Struct("=I")
for name, value in manifest.iteritems():
packed = packer.pack(int(value,16))
packed = packed = '\0' * (4 - len(packed)) + packed
id_float = unpacker.unpack( packed )[0]
name_str = str(name)
from_names[name_str] = id_float
from_ids[id_float] = name_str
self.cryptomattes[num]["names_to_IDs"] = from_names
self.cryptomattes[num]["ids_to_names"] = from_ids
global g_cryptomatte_manf_from_names
global g_cryptomatte_manf_from_IDs
g_cryptomatte_manf_from_names = from_names
g_cryptomatte_manf_from_IDs = from_ids
return from_names
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}",
"def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:\n filename = filename or self.manifest_filename\n if not os.path.isfile(filename):\n self.log.debug(f\"Manifest file '{filename}' doesn't exist and will be created.\")\n return {}\n with open(filename, \"r\") as f:\n manifest: Dict[str, str] = json.load(f)\n self.log.debug(f\"Reading manifest '{manifest}' from file '{filename}'\")\n return manifest",
"def _read_manifest_json(self):\n with open(os.path.join(self._crx_dir, \"manifest.json\")) as manifest:\n return json.load(manifest)",
"def read_file_manifest(in_stream):\n count = struct.unpack(COUNT_FMT, checked_read(in_stream, COUNT_LEN))[0]\n name_map = {}\n for dummy in range(0, count):\n length, file_sha, history_sha = \\\n struct.unpack(MANIFEST_ENTRY_HDR_FMT,\n checked_read(in_stream,\n MANIFEST_ENTRY_HDR_LEN))\n\n length -= MANIFEST_ENTRY_HDR_LEN\n name = checked_read(in_stream, length)\n\n assert not name in name_map\n name_map[name] = (file_sha, history_sha)\n return name_map",
"def manufacturing_manifest_json(self):\n\n file_name = os.path.join(self.cur_8digit_dir, \"manufacturing.manifest.json\")\n\n with open(file_name) as f_in:\n man_manifest = json.load(f_in)\n\n for component in man_manifest[\"ComponentManufactureList\"]:\n # Get ManufacturingModel (if exists)\n if \"ManufacturingModel\" in component:\n rel_path_from_results = os.path.join(self.cur_8digit_dir, component[\"ManufacturingModel\"])\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results', rel_path_from_results))\n\n # Get STEPModel (if exists)\n if \"STEPModel\" in component:\n rel_path_from_results = os.path.join(self.cur_8digit_dir, component[\"STEPModel\"])\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results', rel_path_from_results))\n\n # Since this is a PrepareForIFAB folder, gather all *.stp files in ./AP203_E2_SEPERATE_PART_FILES/\n path_to_stp_files = os.path.join(self.cur_8digit_dir,'AP203_E2_SEPERATE_PART_FILES')\n filter_for_stp_files = os.path.join(path_to_stp_files,'*.stp')\n stp_files = glob(filter_for_stp_files)\n for stp_file in stp_files:\n self.files_for_export.append(os.path.join('results',stp_file))\n\n # If there's a TDP tag, \n # add the referred file,\n\t# then crawl that file and gather its file references\n if \"TDP\" in man_manifest:\n tdp_json_path = os.path.join(self.cur_8digit_dir,man_manifest[\"TDP\"])\n self.tdp_json(tdp_json_path)\n self.files_for_export.append(os.path.join('results', tdp_json_path))",
"def load_manifest(path: Path):\n with open(path, \"rt\") as fin:\n data = json_load(fin)\n return Manifest.schema().load(data, many=True)",
"def update_manifest(self, dst):\n # Read the current manifest into memory\n mpath = os.path.join(os.path.dirname(dst), \"manifest.json\")\n try:\n with open(mpath, 'r') as f:\n manifest = json.load(f)\n except IOError:\n manifest = {}\n\n name, _ = os.path.splitext(os.path.basename(dst))\n # Update the manifest record\n manifest[name] = {\n \"url\": os.path.basename(dst),\n \"signature\": sha256sum(dst),\n }\n\n # Write the manifest back to disk\n with open(mpath, 'w') as f:\n json.dump(manifest, f, indent=2)",
"def test_manifest(self):\n self.parse_manifest()\n\n ids = {}\n errors = []\n collisions = []\n manifest = self.cryptomattes[self.selection][\"names_to_IDs\"]\n for name, idvalue in manifest.iteritems():\n if mm3hash_float(name) != idvalue:\n errors.append(\"computed ID doesn't match manifest ID: (%s, %s)\" % (idvalue, mm3hash_float(name)))\n else:\n if idvalue in ids:\n collisions.append(\"colliding: %s %s\" % (ids[idvalue], name))\n ids[idvalue] = name\n\n print \"Tested %s, %s names\" % (self.nuke_node.name(), len(manifest))\n print \" \", len(errors), \"non-matching IDs between python and c++.\"\n print \" \", len(collisions), \"hash collisions in manifest.\"\n\n return errors, collisions",
"def _ComputeLayerMapping(self):\n raw_manifest = self._v2_image.manifest()\n manifest = json.loads(raw_manifest)\n\n v2_ancestry = [fs_layer['blobSum'] for fs_layer in manifest['fsLayers']]\n v1_jsons = [v1_layer['v1Compatibility'] for v1_layer in manifest['history']]\n\n def ExtractId(v1_json):\n v1_metadata = json.loads(v1_json)\n return v1_metadata['id']\n\n # Iterate once using the maps to deduplicate.\n self._v1_to_v2 = {}\n self._v1_json = {}\n self._v1_ancestry = []\n for (v1_json, v2_digest) in zip(v1_jsons, v2_ancestry):\n v1_id = ExtractId(v1_json)\n if v1_id in self._v1_to_v2:\n assert self._v1_to_v2[v1_id] == v2_digest\n assert self._v1_json[v1_id] == v1_json\n continue\n self._v1_to_v2[v1_id] = v2_digest\n self._v1_json[v1_id] = v1_json\n self._v1_ancestry.append(v1_id)",
"def parse_manifest(manifest_contents):\n manifest = {}\n for line in manifest_contents.split('\\n'):\n line_unpacked = line.split()\n try:\n # Check that the line isn't empty or a comment\n if not line_unpacked or line.strip().startswith('#'):\n continue\n\n target, repo_hash, url, sha256_hash = line_unpacked\n manifest[target] = {\"repo_hash\": repo_hash,\n \"url\": url,\n \"sha256_hash\": sha256_hash,\n }\n except ValueError:\n log(\"WARN\", \"Warning: Invalid line in manifest file:\\n\"\n \" {}\".format(line))\n continue\n return manifest",
"def fact():\n manifests = [x for x in os.walk(manifests_dir)]\n\n return { 'manifests': manifests }",
"def manifest_dict(self):\n return self._parsed",
"def manifest_dict(self):\n return self._parsed",
"def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:\n filename = filename or self.manifest_filename\n manifest = manifest or {}\n self.log.debug(f\"Updating manifest '{manifest}' to file '{filename}'\")\n with open(filename, \"w\") as f:\n json.dump(manifest, f, indent=2)",
"def load_model_manifest(rel_path=\"model_manifest.json\"):\n manifest = []\n manifest_path = \"{}/{}\".format(Path(__file__).parents[1], rel_path)\n if path.exists(manifest_path):\n with open(manifest_path) as json_file:\n manifest = json.load(json_file)\n return manifest",
"def __init__(self, settings, load = True):\n\t\tself.version = 2.0\n\t\tself.data = {'@meta':{'version': 0}}# Default to no version, which will be converted.\n\t\tself.file = stringutil.normalize_file(settings.save_base() + '/Manifest.json.gz')\n\t\tself._completed = []\n\t\tself._failed = []\n\t\tif load and os.path.isfile(self.file): #!cover\n\t\t\ttry:\n\t\t\t\twith gzip.GzipFile(self.file, 'rb') as data_file:\n\t\t\t\t\tself.data = json.loads(data_file.read().decode('utf8'))\n\t\t\texcept:\n\t\t\t\tstringutil.error('Failed to load Manifest at [%s]. Probably corrupt. Try removing the file.' % self.file)\n\t\t\t\traise\n\t\tchange, self.data = self._adapt(self.data)\n\t\twhile change:\n\t\t\tchange, self.data = self._adapt(self.data)\n\t\t#\n\t\tassert 'elements' in self.data\n\t\tassert 'completed' in self.data['elements']\n\t\tassert 'failed' in self.data['elements']\n\t\tself.og_count = len(self.data['elements']['completed']+ self.data['elements']['failed'])",
"def get_manifest(self):\r\n if os.path.exists(self.manifestfile):\r\n return Manifest(json.loads(file(self.manifestfile).read()))\r\n return Manifest({})",
"def process_metadata_file(file):\r\n data = file.read()\r\n\r\n # Find manifest + '=' '{'\r\n manifest_begin = data.split().index(\"manifest_data[]\") + 3\r\n manifest_end = data.split().index(\"};\")\r\n manifest_data = data.split()[manifest_begin:manifest_end]\r\n\r\n for num, elem in enumerate(manifest_data):\r\n manifest_data[num] = int(elem[:-1], base=16)\r\n\r\n _manifest = bytearray(manifest_data)\r\n\r\n _fragments = []\r\n for i in range(1, 100):\r\n try:\r\n fragment_num = \"fragment_0{0}[]\".format(i)\r\n fragment_begin = data.split().index(fragment_num) + 3\r\n fragment_end = data.split().index(\"};\", fragment_begin)\r\n fragment_data = data.split()[fragment_begin:fragment_end]\r\n\r\n for num, elem in enumerate(fragment_data):\r\n fragment_data[num] = int(elem[:-1], base=16)\r\n\r\n _fragments.append(bytearray(fragment_data))\r\n\r\n except ValueError:\r\n return _manifest, _fragments\r\n\r\n return _manifest, _fragments",
"def load_manifest(self, *, user: str, identity_file: str):\n if not self.master_ip:\n return\n\n master_ssh_client = get_ssh_client(\n user=user,\n host=self.master_ip,\n identity_file=identity_file,\n wait=True,\n print_status=False)\n\n with master_ssh_client:\n manifest_raw = ssh_check_output(\n client=master_ssh_client,\n command=\"\"\"\n cat \"$HOME/.flintrock-manifest.json\"\n \"\"\")\n # TODO: Would it be better if storage (ephemeral and otherwise) was\n # implemented as a Flintrock service and tracked in the manifest?\n ephemeral_dirs_raw = ssh_check_output(\n client=master_ssh_client,\n # It's generally safer to avoid using ls:\n # http://mywiki.wooledge.org/ParsingLs\n command=\"\"\"\n shopt -s nullglob\n for f in /media/ephemeral*; do\n echo \"$f\"\n done\n \"\"\")\n\n manifest = json.loads(manifest_raw)\n\n self.ssh_key_pair = SSHKeyPair(\n public=manifest['ssh_key_pair']['public'],\n private=manifest['ssh_key_pair']['private'])\n\n services = []\n for [service_name, manifest] in manifest['services']:\n # TODO: Expose the classes being used here.\n service = globals()[service_name](**manifest)\n services.append(service)\n self.services = services\n\n storage_dirs = StorageDirs(\n root='/media/root',\n ephemeral=sorted(ephemeral_dirs_raw.splitlines()),\n persistent=None)\n self.storage_dirs = storage_dirs",
"def load_app_manifests(self):\n self.app_manifests = []\n apps_lib_path = os.path.join(self.apps_dir_path, \"lib\")\n for app_dir in os.listdir(apps_lib_path):\n if app_dir not in (\"__init__.py\", \"__init__.pyc\"):\n if app_dir.find(\"_v\") > 1:\n app_name = app_dir[:app_dir.find(\"_v\")]\n self.app_manifests.append(json.load(file(os.path.join(self.apps_dir_path, 'lib', app_dir, \"manifest.json\"))))\n log.info(\"Manifest for %s app was loaded\" % (app_dir))\n else:\n log.info(\"Directory %s will be skipped from app loader . Doesn't match naming convention .\" % app_dir)",
"def parse_data(self):\n\n try:\n if self.is_bytes:\n self.data = etree.XML(self.manifest)\n else:\n with open(self.manifest) as fh:\n self.data = etree.XML(fh.read().encode())\n except etree.XMLSyntaxError:\n raise InvalidManifest('Not an XML file')\n\n self.tree = etree.ElementTree(self.data)\n\n self.find_remotes()\n self.find_defaults()\n self.find_projects()\n\n return self.generate_manifest_dict()",
"def parse_manifest(manfile):\n mxd = ''\n service = ''\n url = ''\n databases = {}\n with open(manfile, 'r') as f:\n manifest = json.load(f)\n mxd = manifest['resources'][0]['onPremisePath']\n pathbits = manifest['resources'][0]['serverPath'].split('\\\\')\n servicename = [x for x in pathbits if 'MapServer' in x]\n if len(servicename) == 1:\n service = servicename[0].split('.')[0]\n for src in manifest['databases']:\n dbconfig = {x.split('=')[0]: x.split('=')[1] for x in src['onPremiseConnectionString'].split(';')}\n if 'INSTANCE' in dbconfig:\n dbname = dbconfig['DATABASE'] + ':' + dbconfig['INSTANCE']\n else:\n dbname = dbconfig['DATABASE']\n databases[dbname] = [x['onServerName'] for x in src['datasets']]\n pathpieces = pathbits[pathbits.index('arcgisinput')+1: pathbits.index(servicename[0])]\n pathpieces.append(service)\n #url = '/' + '/'.join(pathpieces)\n url = '/'.join(pathpieces)\n else:\n #some other service type, skip it\n mxd = ''\n service = ''\n url = ''\n databases = {}\n return mxd, service, url, databases",
"def read_manifest(self): # -> None:\n ...",
"def get_normalized_package_data(manifest_main_section):\n if not manifest_main_section or len(manifest_main_section) == 1:\n # only a manifest version\n return\n\n def dget(s):\n v = manifest_main_section.get(s)\n if v and v.startswith(('%', '$', '{')):\n v = None\n return v\n\n built_with_gradle = bool(dget('Gradle-Version'))\n\n # Name, namespace, version\n #########################\n # from Eclipse OSGi\n # Bundle-SymbolicName: org.eclipse.ui.workbench.compatibility\n # Bundle-SymbolicName: org.eclipse.ui.intro.universal;singleton:=true\n b_sym_name = dget('Bundle-SymbolicName')\n if b_sym_name and ';' in b_sym_name:\n b_sym_name, _, _ = b_sym_name.partition(';')\n is_osgi_bundle = bool(b_sym_name)\n\n # Implementation-Title: org.apache.xerces.impl.Version\n # Implementation-Title: Apache Commons IO\n i_title = dget('Implementation-Title')\n i_title_is_id = is_id(i_title)\n\n # if present this is typically gid.aid (but with no clear split)\n # Extension-Name: org.apache.commons.logging\n ext_nm = dget('Extension-Name')\n if ext_nm == b_sym_name:\n ext_nm = None\n ext_nm_is_id = is_id(ext_nm)\n\n # Automatic-Module-Name: org.apache.commons.io\n am_nm = dget('Automatic-Module-Name')\n if am_nm == b_sym_name:\n am_nm = None\n am_nm_is_id = is_id(am_nm)\n\n # Name: Datalogic SDK\n nm = dget('Name')\n nm_is_id = is_id(nm)\n\n # this a namespace\n # Implementation-Vendor-Id: org.apache\n # Implementation-Vendor-Id: commons-io\n # Implementation-Vendor-Id: ${project.groupId}\n i_vendid = dget('Implementation-Vendor-Id')\n\n # Bundle-Version: 3.2.200.v20080610\n # Implementation-Version: 2.6.2\n # ImplementationVersion\n b_version = dget('Bundle-Version')\n i_version = dget('Implementation-Version') or dget('ImplementationVersion')\n\n # Descriptions\n #########################\n # the Bundle-Name is always a short description\n # Bundle-Name: DejaCode Toolkit\n # Bundle-Name: %pluginName\n # Bundle-Name: %fragmentName\n b_name = dget('Bundle-Name')\n\n # Bundle-Description: Apache Log4j 1.2\n b_desc = dget('Bundle-Description')\n\n s_title = dget('Specification-Title')\n if s_title in (i_title, b_name, b_desc,):\n s_title = None\n\n # Implementation-Title structured by Gradle if Gradle-Version: is present\n # Implementation-Title: com.netflix.hystrix#hystrix-rx-netty-metrics-stream;1.5.12\n it_namespace = it_name = it_version = None\n it_split = re.split('[#;]', i_title or '')\n if len(it_split) == 3:\n it_namespace, it_name, it_version = it_split\n has_gradle_structured_i_title = i_title_is_id and it_namespace and it_name and it_version\n\n # Set ns, name and version\n ##############################\n package_type = namespace = name = version = None\n descriptions = []\n\n # FIXME: may be we should then return each \"personality\"\n # we have several cases for names:\n # this is built with gradle and we have good id data\n if has_gradle_structured_i_title:\n package_type = 'maven'\n namespace = it_namespace\n name = it_name\n version = it_version\n descriptions = [nm, s_title, b_name, b_desc]\n\n # we have been created by maven archiver\n elif i_title and i_vendid and i_version:\n # TODO: improve name and namespace if ns is in name\n namespace = i_vendid\n name = i_title\n package_type = 'maven' if (i_title_is_id and not name.startswith(namespace)) else 'jar'\n version = i_version\n descriptions = [b_name, b_desc]\n\n # TODO: add case with only title + version that can still be handled if title is dotted\n\n # this is an OSGi bundle and we have enough to build a bundle\n elif 
is_osgi_bundle:\n # no namespace\n name = b_sym_name\n version = b_version\n descriptions = [b_name, b_desc]\n package_type = 'osgi'\n\n # we have not much data\n else:\n package_type = 'jar'\n # no namespace\n version = i_version\n\n if i_title_is_id:\n name = i_title\n descriptions = [s_title, nm]\n elif am_nm_is_id:\n name = am_nm\n descriptions = [s_title, i_title, nm]\n elif ext_nm_is_id:\n name = ext_nm\n descriptions = [s_title, i_title, nm]\n elif nm_is_id:\n name = nm\n descriptions = [s_title, i_title]\n else:\n name = i_title or am_nm or ext_nm or nm\n descriptions = [s_title, i_title, nm]\n\n descriptions = unique(descriptions)\n descriptions = [d for d in descriptions if d and d.strip() and d != name]\n description = '\\n'.join(descriptions)\n if description == name:\n description = None\n\n # create the mapping we will return\n package = {}\n package['type'] = package_type\n package['namespace'] = namespace\n package['name'] = name\n package['version'] = version\n package['description'] = description\n\n # licensing\n #########################\n # Bundle-License: http://www.apache.org/licenses/LICENSE-2.0.txt\n package['declared_license'] = dget('Bundle-License')\n # Bundle-Copyright: Apache 2.0\n package['copyright'] = dget('Bundle-Copyright')\n\n # URLs\n #########################\n # typically homepage or DOC\n # Implementation-Url\n # Implementation-URL: http://xml.apache.org/xerces2-j/\n package['homepage_url'] = dget('Implementation-URL') or dget('Implementation-Url')\n\n # Bundle-DocURL: http://logging.apache.org/log4j/1.2\n package['documentation_url'] = dget('Bundle-DocURL')\n\n # vendor/owner/contact\n #########################\n package['parties'] = parties = []\n # Implementation-Vendor: Apache Software Foundation\n # Implementation-Vendor: The Apache Software Foundation\n i_vend = dget('Implementation-Vendor')\n if i_vend:\n parties.append(dict(role='vendor', name=i_vend))\n\n # Specification-Vendor: Sun Microsystems, Inc.\n s_vend = dget('Specification-Vendor')\n if s_vend == i_vend:\n s_vend = None\n if s_vend:\n parties.append(dict(role='spec-vendor', name=s_vend))\n\n # Bundle-Vendor: %providerName\n # Bundle-Vendor: %provider_name\n # Bundle-Vendor: Apache Software Foundation\n # Bundle-Vendor: http://supercsv.sourceforge.net/ and http://spiffyframe\n b_vend = dget('Bundle-Vendor') or dget('BundleVendor')\n if b_vend:\n v = dict(role='vendor', name=b_vend)\n if v not in parties:\n parties.append(v)\n\n # Module-Email: [email protected]\n # Module-Owner: [email protected]\n m_email = dget('Module-Email')\n m_owner = dget('Module-Owner')\n if m_owner:\n o = dict(role='owner', name=m_owner)\n if m_email and m_email != m_owner:\n o['email'] = m_email\n parties.append(o)\n\n # VCS\n # the model is <vcs_tool>+<transport>://<host_name>[/<path_to_repository>][@<revision_tag_or_branch>][#<sub_path>]\n #########################\n vcs_url = None\n code_view_url = None\n\n\n m_vcs_url = dget('Module-Origin') or ''\n if m_vcs_url.strip():\n # this block comes from Gradle?\n # Module-Origin: [email protected]:Netflix/Hystrix.git\n # Module-Source: /hystrix-contrib/hystrix-rx-netty-metrics-stream\n # Branch: master\n # Change: a7b66ca\n m_vcs_url = normalize_vcs_url(m_vcs_url)\n m_vcs_rev = dget('Change') or dget('Branch') or ''\n m_vcs_rev = m_vcs_rev.strip()\n m_vcs_rev = m_vcs_rev and ('@' + m_vcs_rev)\n m_vcs_subpath = dget('Module-Source') or ''\n m_vcs_subpath = m_vcs_subpath.strip('/').strip()\n m_vcs_subpath = m_vcs_subpath and ('#' + m_vcs_subpath.strip('/'))\n 
vcs_url = '{m_vcs_url}{m_vcs_rev}{m_vcs_subpath}'.format(**locals())\n else:\n # this block comes from Maven?\n # Scm-Url: http://github.com/fabric8io/kubernetes-model/kubernetes-model/\n # Scm-Connection: scm:git:https://github.com/fabric8io/zjsonpatch.git\n # Scm-Revision: ${buildNumber}\n # Scm-Revision: 4ec4abe2e7ac9e1a5e4be88e6dd09403592f9512\n s_vcs_url = dget('Scm-Url') or ''\n s_scm_connection = dget('Scm-Connection') or ''\n\n s_vcs_rev = dget('Scm-Revision') or ''\n s_vcs_rev = s_vcs_rev.strip()\n if s_vcs_rev:\n s_vcs_rev = '@' + s_vcs_rev\n\n if s_vcs_url.strip():\n code_view_url = s_vcs_url\n s_vcs_url = normalize_vcs_url(s_vcs_url)\n vcs_url = '{s_vcs_url}{s_vcs_rev}'.format(**locals())\n elif s_scm_connection.strip():\n vcs_url = parse_scm_connection(s_scm_connection)\n vcs_url = '{s_vcs_url}{s_vcs_rev}'.format(**locals())\n\n package['vcs_url'] = vcs_url\n package['code_view_url'] = code_view_url\n\n # Misc, unused for now\n #########################\n # Source:\n # Eclipse-SourceBundle: org.eclipse.jetty.websocket.api;version=\"9.4.12.v20180830\";roots:=\".\"\n # Deps:\n # Require-Bundle\n\n package['notes'] = dget('Comment')\n return package",
"def json2register(self):\n try:\n with open('registered.json', 'r') as file:\n self.final_dicc = json.load(file)\n except (FileNotFoundError, ValueError, json.decoder.JSONDecodeError):\n pass",
"def build_manifest_json(\n name: str,\n version: str,\n description: str\n) -> Dict[str, Union[str, List[str]]]:\n return ({\n \"name\": name,\n \"version_number\": version,\n \"website_url\": GITHUB_URL,\n \"description\": description,\n \"dependencies\": []\n })",
"def read_manifest(manifest_fn):\n with open(manifest_fn, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n dicts = list(reader)\n return dicts",
"def read_manifest(manifest_path, max_duration=float('inf'), min_duration=0.0):\n manifest = []\n for json_line in codecs.open(manifest_path, 'r', 'utf-8'):\n try:\n json_data = json.loads(json_line)\n except Exception as e:\n raise IOError(\"Error reading manifest: %s\" % str(e))\n if (json_data[\"duration\"] <= max_duration and\n json_data[\"duration\"] >= min_duration):\n manifest.append(json_data)\n return manifest",
"def clean_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n host = manifest_json[\"host\"]\n host = host.strip(\"/\").lstrip(\"http://\").lstrip(\"https://\")\n manifest_json[\"host\"] = host\n return manifest_json",
"def load_prev_val_cache():\n try:\n f = open(Filenames.VALUE_CACHE, 'r')\n j = json_load_as_ascii(f)\n except:\n print \"The previous value cache (%s) doesn't exist, or its JSON is corrupt. Creating empty.\" % (\n Filenames.VALUE_CACHE\n )\n f = open(Filenames.VALUE_CACHE, 'w')\n j = {}\n json.dump(j, f)\n f.close()\n return j"
] |
[
"0.59855336",
"0.5957822",
"0.58551866",
"0.55946296",
"0.55939585",
"0.5585546",
"0.5535755",
"0.5527503",
"0.5448784",
"0.5435914",
"0.5383605",
"0.537182",
"0.537182",
"0.5306866",
"0.5273999",
"0.524797",
"0.5237274",
"0.5165407",
"0.5143346",
"0.51091653",
"0.51021844",
"0.5097596",
"0.5081327",
"0.5068699",
"0.5056215",
"0.5049335",
"0.50201863",
"0.49739122",
"0.4963781",
"0.49607113"
] |
0.72977996
|
0
|
Testing function to check for implementation errors and hash collisions. Checks all names and values in the manifest by rehashing them, to ensure that the entire process is sound. Also finds collisions. Returns a tuple of errors and collisions.
|
def test_manifest(self):
self.parse_manifest()
ids = {}
errors = []
collisions = []
manifest = self.cryptomattes[self.selection]["names_to_IDs"]
for name, idvalue in manifest.iteritems():
if mm3hash_float(name) != idvalue:
errors.append("computed ID doesn't match manifest ID: (%s, %s)" % (idvalue, mm3hash_float(name)))
else:
if idvalue in ids:
collisions.append("colliding: %s %s" % (ids[idvalue], name))
ids[idvalue] = name
print "Tested %s, %s names" % (self.nuke_node.name(), len(manifest))
print " ", len(errors), "non-matching IDs between python and c++."
print " ", len(collisions), "hash collisions in manifest."
return errors, collisions
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs,\n fail_mismatched_oki_ori, fail_bad_oki, fail_bad_ori,\n fail_bad_nki, fail_bad_nri, fail_old_kernel_fs_size,\n fail_old_rootfs_fs_size, fail_new_kernel_fs_size,\n fail_new_rootfs_fs_size):\n # Generate a test payload. For this test, we only care about the manifest\n # and don't need any data blobs, hence we can use a plain paylaod generator\n # (which also gives us more control on things that can be screwed up).\n payload_gen = test_utils.PayloadGenerator()\n\n # Tamper with block size, if required.\n if fail_mismatched_block_size:\n payload_gen.SetBlockSize(test_utils.KiB(1))\n else:\n payload_gen.SetBlockSize(test_utils.KiB(4))\n\n # Add some operations.\n payload_gen.AddOperation(False, common.OpType.MOVE,\n src_extents=[(0, 16), (16, 497)],\n dst_extents=[(16, 496), (0, 16)])\n payload_gen.AddOperation(True, common.OpType.MOVE,\n src_extents=[(0, 8), (8, 8)],\n dst_extents=[(8, 8), (0, 8)])\n\n # Set an invalid signatures block (offset but no size), if required.\n if fail_bad_sigs:\n payload_gen.SetSignatures(32, None)\n\n # Set partition / filesystem sizes.\n rootfs_part_size = test_utils.MiB(8)\n kernel_part_size = test_utils.KiB(512)\n old_rootfs_fs_size = new_rootfs_fs_size = rootfs_part_size\n old_kernel_fs_size = new_kernel_fs_size = kernel_part_size\n if fail_old_kernel_fs_size:\n old_kernel_fs_size += 100\n if fail_old_rootfs_fs_size:\n old_rootfs_fs_size += 100\n if fail_new_kernel_fs_size:\n new_kernel_fs_size += 100\n if fail_new_rootfs_fs_size:\n new_rootfs_fs_size += 100\n\n # Add old kernel/rootfs partition info, as required.\n if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:\n oki_hash = (None if fail_bad_oki\n else hashlib.sha256('fake-oki-content').digest())\n payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)\n if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or\n fail_bad_ori):\n ori_hash = (None if fail_bad_ori\n else hashlib.sha256('fake-ori-content').digest())\n payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)\n\n # Add new kernel/rootfs partition info.\n payload_gen.SetPartInfo(\n True, True, new_kernel_fs_size,\n None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())\n payload_gen.SetPartInfo(\n False, True, new_rootfs_fs_size,\n None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())\n\n # Set the minor version.\n payload_gen.SetMinorVersion(0)\n\n # Create the test object.\n payload_checker = _GetPayloadChecker(payload_gen.WriteToFile)\n report = checker._PayloadReport()\n\n should_fail = (fail_mismatched_block_size or fail_bad_sigs or\n fail_mismatched_oki_ori or fail_bad_oki or fail_bad_ori or\n fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or\n fail_old_rootfs_fs_size or fail_new_kernel_fs_size or\n fail_new_rootfs_fs_size)\n part_sizes = {\n common.ROOTFS: rootfs_part_size,\n common.KERNEL: kernel_part_size\n }\n\n if should_fail:\n self.assertRaises(PayloadError, payload_checker._CheckManifest, report,\n part_sizes)\n else:\n self.assertIsNone(payload_checker._CheckManifest(report, part_sizes))",
"def hash_check_files(self):\n temp_error = 0\n if not self.hash_log_curr:\n self.hash_log_curr = self.hash_curr_files\n else:\n for key, value in self.hash_curr_files.iteritems():\n if key in self.hash_log_curr:\n #test for valid hash\n if self.valid is not None:\n #test any valid hahses are given\n if key in self.valid:\n # a hash code that is ok to duplicate\n self.print_to_log('Valid Duplicate HashCode, skipping: ' + value[5])\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n continue\n # not valid duplicate hash\n # a dupulicate hash found which is a failure and should abort import\n self.hash_log_curr[key][0] = 'Fail'\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n self.hash_log_curr[key][5] += ', ' + value[5]\n self.print_to_log('Duplicate hash found for file: ' + value[5])\n temp_error = 1\n else:\n #a new hash, no issues\n self.hash_log_curr[key] = value\n self.print_to_log('New Hash for file: ' + value[5])\n self.error = temp_error",
"def verify(self, verbose_failures=False):\n modified = set()\n removed = set()\n count = len(self.entries)\n # TODO: Track number of bytes hashed instead of number of files\n # This will act as a more meaningful progress indicator\n i = 0\n for i, entry in enumerate(self.entries.values(), 1):\n if entry.exists():\n if entry.verify():\n entry.update_attrs()\n else:\n if verbose_failures:\n stderr.write('\\r{} failed hash verification\\n'.format(entry.filename))\n modified.add(entry.filename)\n else:\n removed.add(entry.filename)\n if verbose_failures:\n stderr.write('\\r{} is missing\\n'.format(entry.filename))\n stderr.write('\\rChecked {} of {} files'.format(i, count))\n if i:\n stderr.write('\\n')\n return modified, removed",
"def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def _verify_hashes(hashes):\n\n for item in hashes:\n try:\n hashlib.new(item)\n VALID_HASH.append(item)\n except Exception:\n pass",
"def check_fast(self, reproduce=False, **args):\n hashvals = {}\n\n fast_check = self.check_file(\n filepaths=self.data.keys(),\n hashvals=hashvals,\n hashfn=self.fast_hashes,\n shortcircuit=True,\n **args\n )\n\n if not fast_check:\n\n # Save all the fast hashes for failed files that we've already\n # calculated\n for filepath in hashvals:\n for hash, val in hashvals[filepath].items():\n self.data[filepath]['hashes'][hash] = val\n\n if reproduce:\n for filepath in hashvals:\n print('Check failed for {0} {1}'\n ''.format(filepath, hashvals[filepath]))\n tmphash = {}\n full_check = self.check_file(\n filepaths=filepath,\n hashfn=self.full_hashes,\n hashvals=tmphash,\n shortcircuit=False,\n **args\n )\n\n if full_check:\n # File is still ok, so replace fast hashes\n print('Full hashes ({0}) checked ok'\n ''.format(self.full_hashes))\n print('Updating fast hashes for {0} in {1}'\n ''.format(filepath, self.path))\n self.add_fast(filepath, force=True)\n print('Saving updated manifest')\n self.needsync = True\n else:\n sys.stderr.write(\n 'Run cannot reproduce: manifest {0} is not '\n 'correct\\n'.format(self.path)\n )\n for path, hashdict in tmphash.items():\n print(' {0}:'.format(path))\n for hash, val in hashdict.items():\n hash_table = self.data[path]['hashes']\n hash_table_val = hash_table.get(hash, None)\n print(' {0}: {1} != {2}'\n ''.format(hash, val, hash_table_val))\n sys.exit(1)\n else:\n # Not relevant if full hashes are correct. Regenerate full\n # hashes for all filepaths that failed fast check.\n print('Updating full hashes for {0} files in {1}'\n ''.format(len(hashvals), self.path))\n\n # Add all full hashes at once -- much faster. Definitely want\n # to force the full hash to be updated. In the specific case of\n # an empty hash the value will be None, without force it will\n # be written as null.\n self.add(\n filepaths=list(hashvals.keys()),\n hashfn=self.full_hashes,\n force=True,\n fullpaths=[self.fullpath(fpath) for fpath\n in list(hashvals.keys())]\n )\n\n # Flag need to update version on disk\n self.needsync = True",
"def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def CheckHashes(self, hashes, unused_external=True):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha1\"):\n digest = hsh.sha1\n hash_urn = self.PATH.Add(str(digest))\n logging.info(\"Checking URN %s\", str(hash_urn))\n hash_map[hash_urn] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def verify_checksums(self):\n file_checksums = {}\n for method, get_checksum in self.get_apk_checksums:\n checksum = utils.clean_hexdigitstr(\n self._get_checksum(get_checksum)\n )\n if method not in file_checksums.keys():\n file_checksums[method] = verify.get_file_checksum(\n file_name=self.apk_path,\n method=method,\n )\n\n if checksum != file_checksums[method]:\n raise verify.ChecksumMissmatch(\n file_name=self.apk_path,\n method=method,\n checksum_expected=file_checksums[method],\n checksum_was=checksum,\n )\n\n print(' - %s checksum matches' % method)",
"def validate_data_integrity(namespace, path=\"/mnt/test/hashfile\", timeout=600):\n all_pods = get_all_pods(namespace=namespace)\n for pod_obj in all_pods:\n log.info(\"Verify the md5sum values are OK\")\n cmd = f\"md5sum -c {path}\"\n try:\n pod_obj.exec_cmd_on_pod(command=cmd, out_yaml_format=False, timeout=timeout)\n log.info(f\"Pod {pod_obj.name}: All files checksums value matches\")\n except CommandFailed as ex:\n if \"computed checksums did NOT match\" in str(ex):\n log.error(\n f\"Pod {pod_obj.name}: One or more files or datas are modified\"\n )\n raise ex",
"def test_collect(self) -> None:\n for algorithm, expected in {\n 'md5': ('698d51a19d8a121ce581499d7b701668',\n '8980c988edc2c78cc43ccb718c06efd5',\n '53fd88c84ff8a285eb6e0a687e55b8c7'),\n 'sha1': ('6216f8a75fd5bb3d5f22b6f9958cdede3fc086c2',\n '42eda1b5dcb3586bccfb1c69f22f923145271d97',\n '2eb2f7be4e883ebe52034281d818c91e1cf16256'),\n 'sha256': ('f6e0a1e2ac41945a9aa7ff8a8aaa0cebc12a3bcc981a929ad5cf810a090e11ae',\n '25235f0fcab8767b7b5ac6568786fbc4f7d5d83468f0626bf07c3dbeed391a7a',\n 'f8d3d0729bf2427e2e81007588356332e7e8c4133fae4bceb173b93f33411d17'),\n }.items():\n # if the current platform does not support the algorithm we're looking at,\n # skip the test steps for that algorithm, but display a warning to the user\n if algorithm not in ALLOWED_HASH_FORMATS:\n warnings.warn(\"Missing hash algorithm {} on this platform, cannot test with it\".format(algorithm), ResourceWarning)\n else:\n hs = functools.partial(hash_signature, hash_format=algorithm)\n s = list(map(hs, ('111', '222', '333')))\n\n assert expected[0] == hash_collect(s[0:1], hash_format=algorithm)\n assert expected[1] == hash_collect(s[0:2], hash_format=algorithm)\n assert expected[2] == hash_collect(s, hash_format=algorithm)",
"def verify(self):\n self.verify_checksums()\n self.verify_apk_signature()\n self.verify_apk_signature_fprs()",
"def manifest_with_invalid_md5_values_helper(error_log):\n valid_md5 = '\"1596f493ba9ec53023fca640fb69bd3b\"' # pragma: allowlist secret\n assert valid_md5 not in error_log\n\n short_md5 = '\"1596f493ba9ec53023fca640fb69bd3\"' # pragma: allowlist secret\n long_md5 = '\"d9a68f3d5d9ce03f8a08f509242472234\"' # pragma: allowlist secret\n md5_with_non_hexadecimal = (\n '\"5J1bf75c48761b2e755adc1340e5a9259\"' # pragma: allowlist secret\n )\n short_base64_encoded_md5 = '\"aGVsbG8=\"'\n assert short_md5 in error_log\n assert long_md5 in error_log\n assert md5_with_non_hexadecimal in error_log\n assert short_base64_encoded_md5 in error_log",
"def testHashability(self) -> None:\n r = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id')\n test_set = set([r])\n test_set.add(r)\n self.assertEqual(1, len(test_set))\n\n r = data_types.Result('test_2', ('win', 'x86'), (2, 30), 'id')\n test_set.add(r)\n self.assertEqual(2, len(test_set))",
"def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))",
"def test_not_equal(self):\n p1 = make_package(filename=\"foobar\")\n p2 = make_package(filename=\"foo\")\n self.assertNotEqual(hash(p1), hash(p2))\n self.assertNotEqual(p1, p2)",
"def test_verify_manifest(mock_index):\n mock_index.return_value.async_get_record.side_effect = _async_mock_get_guid\n\n loop = get_or_create_event_loop_for_thread()\n loop.run_until_complete(\n async_verify_object_manifest(\n \"http://localhost\",\n manifest_file=CURRENT_DIR + \"/test_data/test_manifest.csv\",\n max_concurrent_requests=3,\n output_filename=\"test.log\",\n )\n )\n\n logs = {}\n try:\n with open(\"test.log\") as file:\n for line in file:\n guid, error, expected, actual = line.strip(\"\\n\").split(\"|\")\n logs.setdefault(guid, {})[error] = {\n \"expected\": expected.split(\"expected \")[1],\n \"actual\": actual.split(\"actual \")[1],\n }\n except Exception as exc:\n # unexpected file format, fail test\n assert False\n\n # everything in indexd is mocked to be correct for this one\n assert \"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\" not in logs\n\n assert \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\" in logs\n assert \"dg.TEST/9c205cd7-c399-4503-9f49-5647188bde66\" in logs\n\n # ensure logs exist for fields that are mocked to be incorrect in indexd\n assert \"/programs/DEV/projects/test2\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"authz\", {}).get(\"expected\")\n assert \"DEV\" in logs[\"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"].get(\n \"acl\", {}\n ).get(\"expected\")\n assert \"235\" in logs[\"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"].get(\n \"file_size\", {}\n ).get(\"expected\")\n assert \"c1234567891234567890123456789012\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"md5\", {}).get(\"expected\")\n assert \"gs://test/test 3.txt\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"urls\", {}).get(\"expected\")\n assert \"s3://testaws/file space.txt\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"urls\", {}).get(\"expected\")\n assert \"s3://testaws/aws/file,with,comma.txt\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"urls\", {}).get(\"expected\")\n\n # make sure error exists when record doesnt exist in indexd\n assert \"no_record\" in logs[\"dg.TEST/9c205cd7-c399-4503-9f49-5647188bde66\"]",
"def test_hash(self):\n ffs = get_available_force_fields()\n\n for ff1, ff2 in itertools.combinations(ffs, 2):\n assert hash(ff1) != hash(ff2)",
"def test_equality(self):\n p1 = make_package(filename=\"foo\")\n p2 = make_package(filename=\"foo\")\n self.assertEqual(hash(p1), hash(p2))\n self.assertEqual(p1, p2)",
"def test_equality(self):\n tools.eq_(self.old_manifest, load_manifest(StringIO(old_manifest)))",
"def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)",
"def weak_collision_breaking():\n # Weak Collision Resistance: Given an arbitrary x there exists no x' with x' != x so that h(x) = h(x')\n\n number_trials = 0 # variable to hold number of trials\n\n # Generate 1 random string of length 20 which is fixed for matching\n fixed_rand_str = generate_random_string(20)\n\n while True:\n # Generate a 2nd random strings of length 20\n non_fixed_rand_str = generate_random_string(20)\n\n # Make sure that the strings are not equal to each other\n if fixed_rand_str == non_fixed_rand_str:\n continue\n\n # Otherwise create hashes and see if the hash's match\n else:\n\n hash_string_1, hash_string_2 = generate_hash(fixed_rand_str, non_fixed_rand_str)\n\n # Keep adding to the number of trials\n number_trials += 1\n\n # If the 1st 24 bits of both has values are\n # the same, then break out of the while loop\n # as the hashes match\n if (hash_string_1[0:6] == hash_string_2[0:6]):\n break\n\n return number_trials",
"def check_pack_checksums():\n conn = sqlite3.connect(DBNAME)\n c = conn.cursor()\n for row in c.execute(\"SELECT lower(hex(sum)) FROM packs\"):\n checksum = row[0]\n res = s3.get_object(Bucket=BUCKET, Key=f\"{checksum}.pack\")\n body = res[\"Body\"]\n h = blake3.blake3()\n for chunk in iter(lambda: body.read(4096), b\"\"):\n h.update(chunk)\n\n c = h.hexdigest()\n if c != checksum:\n raise ValueError(\"pack {checksum}: checksum {c} does not match\")",
"def check_hash(self, fname, args):\n fobj = self._open_file(fname)\n\n rc = 0\n format_errors = 0\n hash_errors = 0\n read_errors = 0\n for idx, line in enumerate(fobj):\n # remove any newline characters\n m = self.CHECK_RE.match(line.strip())\n if not m:\n if args.warn:\n self.app.stderr.write(\n 'hasher {0}: {1}: {2}: improperly formatted {3}'\n ' checksum line\\n'.format(self.name, fname, idx + 1,\n self.name.upper()))\n format_errors += 1\n rc = 1\n continue\n hash_value, binary, check_file = m.groups()\n\n try:\n check_f = open(check_file, 'rb' if binary == '*' else 'r')\n except IOError:\n self.app.stderr.write(\n 'hasher {0}: {1}: No such file or directory\\n'.format(\n self.name, check_file))\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, READ_ERROR))\n read_errors += 1\n rc = 1\n continue\n\n if self._calculate_hash(check_f) == hash_value:\n if not (args.quiet or args.status):\n self.app.stdout.write(\n STATUS_MSG.format(check_file, SUCCESS))\n else:\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, HASH_ERROR))\n hash_errors += 1\n rc = 1\n\n if format_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} line{2} {3} improperly'\n ' formatted\\n'.format(\n self.name,\n format_errors,\n 's' if format_errors > 1 else '',\n 'are' if format_errors > 1 else 'is',\n ))\n if read_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} listed file{2}'\n ' could not be read\\n'.format(\n self.name,\n read_errors,\n 's' if read_errors > 1 else '',\n ))\n if hash_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} computed checksum{2}'\n ' did NOT match\\n'.format(\n self.name,\n hash_errors,\n 's' if hash_errors > 1 else '',\n ))\n return rc",
"def CheckHashes(self, hashes):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha256\"):\n # The canonical name of the file is where we store the file hash.\n digest = hsh.sha256\n hash_map[aff4.ROOT_URN.Add(\"files/hash/generic/sha256\").Add(\n str(digest))] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def test_hash(self):\n self.assertEqual(hash(self._version1), hash(self._version1))\n self.assertNotEqual(hash(self._version2), hash(self._version1))\n self.assertEqual(hash(\"0.1\"), hash(self._version1))",
"def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")",
"def test_hash_utils(tempf, tempsym, tempdirwithfiles):\n\n # copy file and check hash\n tempfcopy = tempf.name\n shutil.copy2(tempf, tempfcopy)\n h1 = utils.hash_cp_stat(tempf)\n h2 = utils.hash_cp_stat(tempfcopy)\n assert h1 == h2\n os.unlink(tempfcopy)\n\n # check hash walk for random dir\n assert utils.hash_walk(\"randomxx\") is None\n # check hash works for directories with files\n h1 = utils.hash_walk(tempdirwithfiles)\n tempdcopy = Path(\"tmpdircopy\")\n\n shutil.rmtree(tempdcopy) if tempdcopy.exists() else None\n shutil.copytree(tempdirwithfiles, tempdcopy, symlinks=True)\n\n h2 = utils.hash_walk(tempdcopy)\n print(h1, h2, sep=' : ') \n #assert [x[1] for x in h1] == [x[1] for x in h2]\n #assert h1 == h2\n shutil.rmtree(tempdcopy)",
"def test_good(self):\n expected = {\n '0.1.0': rpm_version('0.1.0', '1'),\n '0.1.0-99-g3d644b1': rpm_version('0.1.0', '1.99.g3d644b1'),\n '0.1.1pre1': rpm_version('0.1.1', '0.pre.1'),\n '0.1.1': rpm_version('0.1.1', '1'),\n '0.2.0dev1': rpm_version('0.2.0', '0.dev.1'),\n '0.2.0dev2-99-g3d644b1':\n rpm_version('0.2.0', '0.dev.2.99.g3d644b1'),\n '0.2.0dev3-100-g3d644b2-dirty': rpm_version(\n '0.2.0', '0.dev.3.100.g3d644b2.dirty'),\n }\n unexpected_results = []\n for supplied_version, expected_rpm_version in expected.items():\n actual_rpm_version = make_rpm_version(supplied_version)\n if actual_rpm_version != expected_rpm_version:\n unexpected_results.append((\n supplied_version,\n actual_rpm_version,\n expected_rpm_version\n ))\n\n if unexpected_results:\n self.fail(unexpected_results)",
"def test_handle_collisions_with_same_rpms(mock_grft, mock_get_session):\n mmd = load_mmd(read_staged_data(\"formatted_testmodule.yaml\"))\n xmd = mmd.get_xmd()\n xmd[\"mbs\"][\"buildrequires\"][\"platform\"][\"koji_tag\"] = \"module-el-build\"\n xmd[\"mbs\"][\"buildrequires\"][\"python\"] = {\"koji_tag\": \"module-python27\"}\n xmd[\"mbs\"][\"buildrequires\"][\"bash\"] = {\"koji_tag\": \"module-bash\"}\n mmd.set_xmd(xmd)\n\n bm_rpms = {\n \"bash-completion-1:2.7-5.el8.noarch\",\n \"bash-0:4.4.19-7.el8.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64\",\n }\n non_bm_rpms = {\n \"bash-0:4.4.20-1.el8.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64\",\n }\n mock_grft.side_effect = [bm_rpms, non_bm_rpms]\n\n default_modules.handle_collisions_with_base_module_rpms(mmd, [\"aarch64\", \"x86_64\"])\n\n mock_get_session.assert_called_once()\n xmd_mbs = mmd.get_xmd()[\"mbs\"]\n assert set(xmd_mbs[\"ursine_rpms\"]) == {\n \"bash-0:4.4.19-7.el8.aarch64\",\n }\n assert mock_grft.call_count == 2\n # We can't check the calls directly because the second argument is a set converted to a list,\n # so the order can't be determined ahead of time.\n first_call = mock_grft.mock_calls[0][1]\n assert first_call[0] == mock_get_session.return_value\n assert first_call[1] == [\"module-el-build\"]\n assert first_call[2] == [\"aarch64\", \"x86_64\"]\n\n second_call = mock_grft.mock_calls[1][1]\n assert second_call[0] == mock_get_session.return_value\n assert set(second_call[1]) == {\"module-bash\", \"module-python27\"}\n assert second_call[2] == [\"aarch64\", \"x86_64\"]"
] |
[
"0.6299262",
"0.60119635",
"0.5949086",
"0.58008045",
"0.57777554",
"0.57768065",
"0.568363",
"0.56668353",
"0.56551546",
"0.5612662",
"0.55293757",
"0.551588",
"0.55115753",
"0.55038774",
"0.54876107",
"0.54841405",
"0.5482515",
"0.54822046",
"0.5422499",
"0.54187906",
"0.5403439",
"0.5400648",
"0.5386662",
"0.53672755",
"0.534884",
"0.5346022",
"0.5335421",
"0.53203106",
"0.52973074",
"0.5282718"
] |
0.7577914
|
0
|
Make a connection to another endpoint and return a ConnectionId for it. You can send messages on this ConnectionId immediately.
|
def connect(self, endpoint: Endpoint) -> ConnectionId:
if not self.started:
raise Exception(f"Bus {self.busIdentity} is not active")
endpoint = Endpoint(endpoint)
with self._lock:
connId = self._newConnectionId()
self._connIdToOutgoingEndpoint[connId] = endpoint
self._connIdPendingOutgoingConnection.add(connId)
# TriggerConnect must go on the sendQueue and not the EventQueue
# in order for the auth_token to be sent (if necessary) before
# any subsequent sendMessage calls schedule messages on the connection.
# self._scheduleEvent((connId, TriggerConnect))
self._putOnSendQueue(connId, TriggerConnect)
return connId
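
The comment above captures the key ordering constraint: the connect marker must share a queue with later outgoing messages so nothing can overtake it. Below is a minimal standalone sketch of that pattern; TinySender and TRIGGER_CONNECT are illustrative names, not part of this bus's API.

import queue
import threading

TRIGGER_CONNECT = object()  # stand-in for the bus's TriggerConnect marker

class TinySender:
    """Toy sketch: connect markers and payloads share one FIFO, so the
    connect (and any auth token sent with it) always precedes later sends."""

    def __init__(self):
        self._sendQueue = queue.Queue()
        self._lock = threading.Lock()
        self._nextConnId = 0

    def connect(self):
        with self._lock:
            connId = self._nextConnId
            self._nextConnId += 1
        self._sendQueue.put((connId, TRIGGER_CONNECT))
        return connId

    def sendMessage(self, connId, payload):
        # enqueued after the marker, so it can never overtake the connect
        self._sendQueue.put((connId, payload))

sender = TinySender()
cid = sender.connect()
sender.sendMessage(cid, b"hello")
while not sender._sendQueue.empty():
    print(sender._sendQueue.get())   # marker first, then the payload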
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create(self, connectionParams) :\n conn = RemoteConnection(connectionParams)\n\n # Generate a unique id by which to refer to this connection in the future\n id = str(uuid.uuid1())\n\n self.remoteConnections[id] = conn\n return id",
"def create_connection(\n self,\n from_id: str,\n to_id: str\n ):\n raise NotImplementedError",
"def _connect(self, addr, connID):\r\n return self._clientEndpoint.connect(connID, (addr.host, addr.port))",
"def _connection_maker(\n self,\n first_device,\n first_port,\n second_device,\n second_port):\n if first_port is None:\n return self.network.make_connection(\n first_device.id, None,\n second_device.id, second_port.id)\n else:\n return self.network.make_connection(\n first_device.id, first_port.id,\n second_device.id, second_port.id)",
"def _connectTo(self, connId: ConnectionId):\n try:\n endpoint = self._connIdToOutgoingEndpoint[connId]\n\n naked_socket = socket.create_connection((endpoint.host, endpoint.port))\n\n if self._wantsSSL:\n sock = self._outboundSslContext.wrap_socket(naked_socket)\n else:\n sock = naked_socket\n\n sock.setblocking(False)\n\n with self._lock:\n self._socketToOutgoingConnId[sock] = connId\n self._connIdToOutgoingSocket[connId] = sock\n self._incomingSocketBuffers[sock] = MessageBuffer(self.extraMessageSizeCheck)\n\n if connId in self._messagesForUnconnectedOutgoingConnection:\n messages = self._messagesForUnconnectedOutgoingConnection.pop(connId)\n\n for m in messages:\n self._scheduleBytesForWrite(connId, m)\n\n self._connIdPendingOutgoingConnection.discard(connId)\n\n # this message notifies the socket loop that it needs to pay attention to this\n # connection.\n self._scheduleEvent(self.eventType.OutgoingConnectionEstablished(connId))\n\n return True\n\n except Exception as e:\n self._logger.debug(f\"Failed to Connect to {endpoint}: {str(e)}\")\n # we failed to connect. cleanup after ourselves.\n with self._lock:\n if connId in self._connIdToOutgoingEndpoint:\n del self._connIdToOutgoingEndpoint[connId]\n\n self._connIdPendingOutgoingConnection.discard(connId)\n\n if connId in self._messagesForUnconnectedOutgoingConnection:\n del self._messagesForUnconnectedOutgoingConnection[connId]\n\n if connId in self._connIdToOutgoingSocket:\n sock = self._connIdToOutgoingSocket.pop(connId)\n del self._socketToOutgoingConnId[sock]\n del self._connIdToOutgoingSocket[connId]\n del self._incomingSocketBuffers[sock]\n if sock in self._socketToBytesNeedingWrite:\n del self._socketToBytesNeedingWrite[sock]\n\n self._scheduleEvent(self.eventType.OutgoingConnectionFailed(connectionId=connId))\n\n return False",
"def make_connection(self):\n if self._created_connections() >= self.max_connections:\n raise ConnPoolException(\"Too many connections.\")\n connection = self.connection_class(**self.connection_kwargs)\n connection.connect()\n return connection",
"def createConnection(self, interfaceA, interfaceB):\r\n return self._network.createConnection(interfaceA, interfaceB)",
"def __make_connection(self):\n return self.euca.make_connection()",
"def create_connection(location=None, bandwidth=None, connectionName=None):\n pass",
"def makeConnection(self, transport):\n pass",
"async def connect(self, peer_ip, peer_port):\n peer_name = f\"{peer_ip}:{peer_port}\"\n try:\n reader, writer = await open_connection(peer_ip, peer_port)\n self.peers[peer_name] = {\n \"reader\": reader,\n \"writer\": writer,\n \"buffer\": ProtocolBuffer()\n }\n client_coro = create_task(self.connection_handler(peer_name))\n await client_coro\n except CancelledError:\n print(f\"Warning: Task handling connection to {peer_name} canceled.\")\n except NodeDisconnectException:\n print(f\"Warning: Peer {peer_name} disconnected\")\n await self.close_connection(peer_name)\n except ConnectionError:\n print(f\"Error: connection error for peer {peer_name}\")",
"def _create_connection(self):\n if not self.connection:\n log.info(\"{}: Creating connection with address={}\".format(\n self.container_id, self.address.geturl()))\n self.connection = Connection(\n self.address.hostname,\n self.auth,\n container_id=self.container_id,\n properties=self._create_properties(),\n debug=self.debug)",
"def create_connection(your_group, other_group=0, other_IP='127.0.0.1', verbose=False):\r\n \r\n # init verbose display\r\n if verbose:\r\n print('\\n[--- starts connection -----------------------------------------------------\\n')\r\n \r\n # check whether there is a referee\r\n if other_group == 0:\r\n if verbose:\r\n print('** group %d connecting to referee on %s **\\n' % (your_group, other_IP))\r\n \r\n # create one socket (client only)\r\n socket_out = create_client_socket(other_IP, 42000+your_group, verbose)\r\n \r\n connection = {'in':socket_out, 'out':socket_out}\r\n \r\n if verbose:\r\n print('** group %d successfully connected to referee on %s **\\n' % (your_group, other_IP))\r\n else:\r\n if verbose:\r\n print('** group %d connecting to group %d on %s **\\n' % (your_group, other_group, other_IP))\r\n\r\n # create two sockets (server and client)\r\n socket_in = create_server_socket(42000+your_group, verbose)\r\n socket_out = create_client_socket(other_IP, 42000+other_group, verbose)\r\n \r\n socket_in = wait_for_connection(socket_in, verbose)\r\n \r\n connection = {'in':socket_in, 'out':socket_out}\r\n\r\n if verbose:\r\n print('** group %d successfully connected to group %d on %s **\\n' % (your_group, other_group, other_IP))\r\n \r\n # end verbose display\r\n if verbose:\r\n print('----------------------------------------------------- connection started ---]\\n')\r\n\r\n return connection",
"def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();",
"def connection(self):\n ctx = stack.top\n if ctx is not None:\n if not hasattr(ctx, 'simple_connection'):\n ctx.simple_connection = connect_to_region(\n self.app.config['AWS_REGION'],\n aws_access_key_id = self.app.config['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key = self.app.config['AWS_SECRET_ACCESS_KEY'],\n )\n\n return ctx.simple_connection",
"def connect(self, *args, **kw):\n\n return self.get_pool(*args, **kw).connect()",
"def connect(self, connID, addr):\r\n return self.callRemote('connect', connID, addr)",
"async def connection_factory(*args, **kwargs):\n return (transport, protocol)",
"async def connection_factory(*args, **kwargs):\n return (transport, protocol)",
"def connect(self) -> str:\n\n # Generate a new client id.\n sid = str(uuid.uuid4())\n\n # Add the new id to the list of connected users.\n self.clients.append(sid)\n return sid",
"def create_connection(self, user_id):\n logging.debug('ConnectionsClient/create_connection()')\n url = '/pod/v1/connection/create'\n data = {'userId': user_id}\n return self.bot_client.execute_rest_call('POST', url, json=data)",
"def createConnection(self, interfaceA, interfaceB):\r\n assert interfaceA != interfaceB\r\n\r\n epA = interfaceA.endpoint\r\n epB = interfaceB.endpoint\r\n\r\n epA_epB = self._getEndpointConnection(epA, epB)\r\n\r\n pA_iA = epA.getInterfaceConnection(interfaceA,\r\n epA_epB.getProtocol(epA))\r\n pB_iB = epB.getInterfaceConnection(interfaceB,\r\n epA_epB.getProtocol(epB))\r\n\r\n return Connection(pA_iA, pB_iB)",
"def connect(self) -> ContextManager[Connection]:",
"def establish_connection(self):\n conninfo = self.client\n for name, default_value in items(self.default_connection_params):\n if not getattr(conninfo, name, None):\n setattr(conninfo, name, default_value)\n if conninfo.hostname == 'localhost':\n conninfo.hostname = '127.0.0.1'\n conn = self.Connection(host=conninfo.host,\n userid=conninfo.userid,\n password=conninfo.password,\n login_method=conninfo.login_method,\n virtual_host=conninfo.virtual_host,\n insist=conninfo.insist,\n ssl=conninfo.ssl,\n connect_timeout=conninfo.connect_timeout)\n conn.client = self.client\n return conn",
"async def connect(self):\n connect = asyncio.gather(*[conn.connect_to_server() for name, conn in self._exchange_connections.items()])\n wait_for = asyncio.gather(*[self.on_connection(name) for name, conn in self._exchange_connections.items()])\n await asyncio.gather(connect, wait_for)",
"def create_outbound(self, addr, use_new_connection=False):",
"def _socket_connect(endpoint: urllib.parse.ParseResult) -> typing.Union[ssl.SSLSocket, socket.socket]:\n address = endpoint.netloc.split(':')\n if endpoint.scheme == 'https':\n if len(address) == 1:\n address.append(443)\n context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n sock = socket.socket()\n connection = context.wrap_socket(sock, server_hostname=address[0])\n else:\n if len(address) == 1:\n address.append(80)\n connection = socket.socket()\n if isinstance(address[1], str):\n address[1] = int(address[1])\n connection.connect((address[0], address[1]))\n return connection",
"def connect(self,ip,port):\n return self.network.connect(ip,port)",
"def get_connection(connection_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n view: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:\n __args__ = dict()\n __args__['connectionId'] = connection_id\n __args__['location'] = location\n __args__['project'] = project\n __args__['view'] = view\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:connectors/v1:getConnection', __args__, opts=opts, typ=GetConnectionResult).value\n\n return AwaitableGetConnectionResult(\n auth_config=pulumi.get(__ret__, 'auth_config'),\n config_variables=pulumi.get(__ret__, 'config_variables'),\n connector_version=pulumi.get(__ret__, 'connector_version'),\n create_time=pulumi.get(__ret__, 'create_time'),\n description=pulumi.get(__ret__, 'description'),\n destination_configs=pulumi.get(__ret__, 'destination_configs'),\n envoy_image_location=pulumi.get(__ret__, 'envoy_image_location'),\n image_location=pulumi.get(__ret__, 'image_location'),\n labels=pulumi.get(__ret__, 'labels'),\n lock_config=pulumi.get(__ret__, 'lock_config'),\n log_config=pulumi.get(__ret__, 'log_config'),\n name=pulumi.get(__ret__, 'name'),\n node_config=pulumi.get(__ret__, 'node_config'),\n service_account=pulumi.get(__ret__, 'service_account'),\n service_directory=pulumi.get(__ret__, 'service_directory'),\n ssl_config=pulumi.get(__ret__, 'ssl_config'),\n status=pulumi.get(__ret__, 'status'),\n subscription_type=pulumi.get(__ret__, 'subscription_type'),\n suspended=pulumi.get(__ret__, 'suspended'),\n update_time=pulumi.get(__ret__, 'update_time'))",
"def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)"
] |
[
"0.6547448",
"0.64600074",
"0.63099647",
"0.6295127",
"0.62183815",
"0.61354405",
"0.59780264",
"0.59573513",
"0.5940008",
"0.59051204",
"0.58524",
"0.58322716",
"0.58006173",
"0.57918835",
"0.5767221",
"0.57654315",
"0.57045084",
"0.57021475",
"0.57021475",
"0.5698933",
"0.56783485",
"0.5656552",
"0.56537044",
"0.5606066",
"0.55930656",
"0.55826986",
"0.55515265",
"0.55177146",
"0.55056065",
"0.5498424"
] |
0.7308499
|
0
|
Schedule a callback to fire on the message read thread. Use 'delay' or 'atTimestamp' to decide when the callback runs, or use neither to mean 'immediately'. You can't use both.
|
def scheduleCallback(self, callback, *, atTimestamp=None, delay=None):
if callback is None:
self._logger.warning("Cannot scheduleCallback(None); discarding.")
# This would cause the event loop thread to terminate
return
if atTimestamp is not None and delay is not None:
raise ValueError("atTimestamp and delay arguments cannot both have values.")
if delay is None:
delay = 0.0
if atTimestamp is None:
atTimestamp = time.time() + (delay or 0.0)
with self._lock:
self._pendingTimedCallbacks.add((atTimestamp, callback))
# if we put this on the front of the queue, we need to wake
# the thread loop
if self._pendingTimedCallbacks[0][0] == atTimestamp:
written = os.write(self._generalWakePipe[1], b" ")
if written != 1:
raise Exception("Internal Error: Failed to write to general wake pipe")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def call_later(self, delay, callback):\n reactor.callFromThread(reactor.callLater, delay, callback)",
"def call_later(self, delay, callback):\n reactor.callLater(delay, callback)",
"def _call_later(self, delay, callback):\n self.io_loop.call_later(delay, callback)",
"def run_after_delay(delay_ms: float, callback: Callable[[], None]):\n heapq.heappush(\n _sorted_scheduled_events,\n _ScheduledEvent(\n time=pygame.time.get_ticks() + delay_ms, callback=callback\n ),\n )",
"def poll(self):\n msgs = self._read()\n\n if msgs and self.callback:\n for msg in msgs:\n self.callback(msg)",
"def call_later(self, delay, callback):\n self.factory.manager.call_later(delay, callback)",
"def call_later(delay, callback, *args, **kwargs):\n if not _current_server:\n _pending_call_laters.append((delay, callback, args, kwargs))\n return\n server = current_server()\n server.call_later(delay, callback, *args, **kwargs)",
"async def run_in(self, callback: Callable, delay: int, **kwargs) -> str:\n name = self.name\n self.logger.debug(\"Registering run_in in %s seconds for %s\", delay, name)\n # Support fractional delays\n i, d = divmod(float(delay), 1)\n exec_time = await self.get_now() + timedelta(seconds=int(i), microseconds=d * 1000000)\n handle = await self.AD.sched.insert_schedule(name, exec_time, callback, False, None, **kwargs)\n\n return handle",
"async def test_scheduled_task(self):\n ctx = MockContext(channel=self.text_channel, invoke=mock.MagicMock())\n\n await self.cog.silence.callback(self.cog, ctx, 5)\n\n args = (300, ctx.channel.id, ctx.invoke.return_value)\n self.cog.scheduler.schedule_later.assert_called_once_with(*args)\n ctx.invoke.assert_called_once_with(self.cog.unsilence, channel=ctx.channel)",
"def scheduleIn(self,delay,cb,uniqueTag=None,priority=0,exceptCurrentASN=True):\n\n with self.dataLock:\n asn = int(self.asn+(float(delay)/float(self.settings.slotDuration)))\n\n self.scheduleAtAsn(asn,cb,uniqueTag,priority,exceptCurrentASN)",
"async def twisted_sleep(delay, twisted_reactor):\n deferred: Deferred[None] = Deferred()\n twisted_reactor.callLater(delay, deferred.callback, None)\n await deferred",
"def on_message(self, message):\n\n # Start an infinite loop when this is called\n if message == \"read_camera\":\n self.camera_loop = PeriodicCallback(self.loop, 10)\n self.camera_loop.start()\n\n # Extensibility for other methods\n else:\n print(\"Unsupported function: \" + message)",
"def testCallLaterDelayed(self):\n events = []\n c = task.Clock()\n call = c.callLater(1, lambda a, b: events.append((a, b)), 1, b=2)\n call.delay(1)\n self.assertEquals(call.getTime(), 2)\n c.advance(1.5)\n self.assertEquals(events, [])\n c.advance(1.0)\n self.assertEquals(events, [(1, 2)])",
"def testCallLaterDelayed(self):\n events = []\n c = task.Clock()\n call = c.callLater(1, lambda a, b: events.append((a, b)), 1, b=2)\n call.delay(1)\n self.assertEqual(call.getTime(), 2)\n c.advance(1.5)\n self.assertEqual(events, [])\n c.advance(1.0)\n self.assertEqual(events, [(1, 2)])",
"def delay_s(\r\n self,\r\n callable,\r\n timeout = None,\r\n immediately = True,\r\n verify = False,\r\n wakeup = True\r\n ):\r\n\r\n # creates the next element tuple that is going to be scheduled according\r\n # to the definition provided to the method\r\n next = (callable, timeout, immediately, verify)\r\n\r\n # acquires the lock that controls the access to the delayed for next\r\n # tick list and then adds the callable to such list, please note that\r\n # the delayed (next) list is only going to be joined/merged with delay\r\n # operations and list on the next tick (through the merge operation)\r\n self._delayed_l.acquire()\r\n try: self._delayed_n.append(next)\r\n finally: self._delayed_l.release()\r\n\r\n # in case the wakeup flag is set this delay operation should have\r\n # been called from a different thread and the event loop should\r\n # awaken as soon as possible to handle the event\r\n if wakeup: self.wakeup()",
"def doRead(self):\n if self.read_scheduled is not None:\n if not self.read_scheduled.called:\n self.read_scheduled.cancel()\n self.read_scheduled = None\n\n while True:\n if self.factory is None: # disconnected\n return\n\n events = self.socket_get(constants.EVENTS)\n\n if (events & constants.POLLIN) != constants.POLLIN:\n return\n\n try:\n message = self._readMultipart()\n except error.ZMQError as e:\n if e.errno == constants.EAGAIN:\n continue\n\n raise e\n\n log.callWithLogger(self, self.messageReceived, message)",
"def _rostopic_delay(node, topic, window_size=DEFAULT_WINDOW_SIZE):\n # pause hz until topic is published\n msg_class = get_msg_class(node, topic, blocking=True, include_hidden_topics=True)\n\n if msg_class is None:\n node.destroy_node()\n return\n\n rt = ROSTopicDelay(node, window_size)\n node.create_subscription(\n msg_class,\n topic,\n rt.callback_delay,\n qos_profile_sensor_data)\n\n timer = node.create_timer(1, rt.print_delay)\n while rclpy.ok():\n rclpy.spin_once(node)\n\n node.destroy_timer(timer)\n node.destroy_node()\n rclpy.shutdown()",
"def trigger_callback(self, fd):\n name = self.callbacks[fd][0]\n callback = self.callbacks[fd][1]\n on_error = self.callbacks[fd][3]\n try:\n if callback:\n LOGGER.debug('Monitoring callback fd %d (%s) start', fd, name)\n callback()\n LOGGER.debug('Monitoring callback fd %d (%s) done', fd, name)\n else:\n LOGGER.debug('Monitoring callback flush fd %d (%s)', fd, name)\n os.read(fd, 1024)\n except Exception as e:\n LOGGER.error('Monitoring callback exception (%s): %s', name, str(e))\n self.error_handler(e, name, on_error)",
"async def read(self) -> None:\n make_non_blocking(self.stream)\n\n while not self.stream.closed:\n message = None\n try:\n message = await self.read_one()\n\n if not message:\n await self.sleep()\n continue\n else:\n self.wake()\n\n IOLoop.current().add_callback(self.queue.put_nowait, message)\n except Exception as e: # pragma: no cover\n self.log.exception(\n \"%s couldn't enqueue message: %s (%s)\", self, message, e\n )\n await self.sleep()",
"def schedule_in(self, _when, _callback, *_args, **_kwargs):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n _callback(*_args, **_kwargs)\n\n return self.schedule_async(inner())",
"def ratelimited_callback_caller(self, callback_function):\n if callback_function is None:\n return\n now = datetime.datetime.now()\n if self.time_of_last_callback is None:\n self.time_of_last_callback = now\n callback_function(self)\n else:\n time_delta = (now-self.time_of_last_callback).seconds\n if time_delta >= self.callback_delay:\n callback_function(self)\n self.time_of_last_callback = now",
"async def twisted_sleep(delay: float, twisted_reactor: \"SygnalReactor\") -> None:\n deferred: Deferred[None] = Deferred()\n twisted_reactor.callLater(delay, deferred.callback, None)\n await deferred",
"def pollster_callback(self, _active_socket, readable, writable):\n \n # assume we are readable, because we are only registered for read\n assert readable\n message = self._receive_message() \n\n # if we get None, that means the socket would have blocked\n # go back and wait for more\n if message is None:\n return\n\n # we handle our own message traffic (i.e. resilient client handshakes\n # and signoffs).\n # otherwise, feed message into the receive queue to be handled\n # elsewhere\n if message.control[\"message-type\"] in self._dispatch_table:\n self._dispatch_table[message.control[\"message-type\"]](\n message.control, message.body\n )\n self._send_ack(\n message.control[\"message-type\"],\n message.ident, \n message.control[\"message-id\"])\n elif not \"client-tag\" in message.control:\n self._log.error(\"receive: invalid message '%s'\" % (\n message.control, \n ))\n else:\n if message.control[\"client-tag\"] in self._active_clients:\n self._receive_queue.append((message.control, message.body, ))\n self._send_ack(\n message.control[\"message-type\"],\n message.ident, \n message.control[\"message-id\"]\n )\n else:\n self._log.error(\n \"receive: No active client %s message discarded\" % (\n message.control[\"client-tag\"]\n )\n )",
"def schedule_task(self, callback, delay=1.0, repeat=False, execute_now=False):\n task_name = str(uuid.uuid4())\n\n self.xmpp.schedule(task_name, delay, callback, repeat=repeat)\n\n return _generate_cancel_method(task_name, self.xmpp.scheduler)",
"def schedule_async_in(self, _when, _callback):\n if isinstance(_when, datetime.timedelta):\n _when = _when.total_seconds()\n\n @coroutine\n @functools.wraps(_callback)\n def inner():\n yield from asyncio.sleep(_when)\n yield from _callback\n\n return self.schedule_async(inner())",
"def setReadCallback(self, callFunc=None):\n self._readCallback = callFunc or nullCallback",
"def schedule(self, sleep_time, delay):\n self.sleep_time = sleep_time\n self.delay = delay\n self.thread = Thread(target=self.run)\n self.thread.start()",
"def getsleepcallback(cls, arg):\n return cls.getcallback('sleep', arg)",
"def _transientSchedule(self, when, now):\n if not self.running:\n return\n if self.timer is not None:\n if self.timer.getTime() < when.asPOSIXTimestamp():\n return\n self.timer.cancel()\n delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp()\n\n # reactor.callLater allows only positive delay values. The scheduler\n # may want to have scheduled things in the past and that's OK, since we\n # are dealing with Time() instances it's impossible to predict what\n # they are relative to the current time from user code anyway.\n delay = max(_EPSILON, delay)\n self.timer = self.callLater(delay, self.tick)\n self.nextEventAt = when",
"def _delay(self, n=None):"
] |
[
"0.61164474",
"0.60866094",
"0.60417444",
"0.53039217",
"0.5299968",
"0.5269826",
"0.51456463",
"0.5127129",
"0.508833",
"0.50769556",
"0.5021864",
"0.5014233",
"0.49564296",
"0.49563497",
"0.49330074",
"0.49227577",
"0.49190187",
"0.4907367",
"0.48957562",
"0.48859304",
"0.4832781",
"0.48302293",
"0.4825909",
"0.4803157",
"0.4780591",
"0.47508144",
"0.4740216",
"0.4738065",
"0.47214597",
"0.4717307"
] |
0.61563295
|
0
|
Schedule an event to get sent to the onEvent callback on the input loop
|
def _scheduleEvent(self, event):
self._eventsToFireQueue.put(event)
assert os.write(self._eventToFireWakePipe[1], b" ") == 1
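
The queue-plus-wake-pipe pattern used here can be shown standalone; the names below are illustrative, the sketch omits the real bus's locking and loop structure, and it assumes a POSIX-style pipe that select can watch.

import os
import queue
import select

eventsToFire = queue.Queue()
wakeReadFd, wakeWriteFd = os.pipe()

def scheduleEvent(event):
    eventsToFire.put(event)
    os.write(wakeWriteFd, b" ")        # one wake byte per scheduled event

def pollOnce(timeout=1.0):
    ready, _, _ = select.select([wakeReadFd], [], [], timeout)
    if wakeReadFd in ready:
        os.read(wakeReadFd, 1)         # consume the wake byte...
        return eventsToFire.get()      # ...then deliver the matching event
    return None

scheduleEvent("NewIncomingConnection")
print(pollOnce())                      # -> "NewIncomingConnection"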
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle_input(self, event):\n self.update_timeval()\n self.events = []\n code = self._get_event_key_code(event)\n\n if code in self.codes:\n new_code = self.codes[code]\n else:\n new_code = 0\n event_type = self._get_event_type(event)\n value = self._get_key_value(event, event_type)\n scan_event, key_event = self.emulate_press(\n new_code, code, value, self.timeval)\n\n self.events.append(scan_event)\n self.events.append(key_event)\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n # We are done\n self.write_to_pipe(self.events)",
"def event_queue_proc(self,event):\r\n event()",
"def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)",
"def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)",
"def tick(self):\n if self.time.update:\n if self.time.latestCode in self.events:\n log(\"[Scheduler] Timed event triggered at [time]: %s\"\n % self.events[self.time.latestCode][1])\n try:\n event = self.events[self.time.latestCode]\n event[0](**event[2])\n except TypeError:\n log.error(\"[Scheduler] Timed event could not be triggered\")",
"def on(self): # pylint: disable=invalid-name\n self._make_event(1)",
"def doEvent(self, source):\n pass",
"def pre_loop(self, event):\n self.do_sync()",
"def schedule_next_event(self):\n if self.events:\n self.event = self.events.pop()\n self.timeout_counter = self.event.get(\"timeout\", 0)\n self.label.set_text(\n self.event[\"template\"].format(time=self.timeout_counter)\n )\n glib.timeout_add_seconds(1, self.on_timeout_cb)\n else:\n # Return Accept response\n # if there are no other events scheduled\n self.response(gtk.RESPONSE_ACCEPT)",
"def send_event(self, event):\n cmd = \"event \" + event\n self.mgen_pipe.Send(cmd)",
"def dispatch(self, event):\n self.queue.put(event)",
"def run(self):\n tick_duration = 1 / self.config.tick_rate\n last_tick_time = time.time()\n\n while True:\n input_ = self.input_source.get_input()\n self.__update(input_)\n\n if self.state.exit:\n break\n\n current_time = time.time()\n sleep_time = tick_duration - (current_time - last_tick_time)\n if sleep_time > 0:\n time.sleep(sleep_time)\n last_tick_time = current_time",
"def listen(self):\n self.processor_thread = Thread(target = self.event_loop, name=\"InputThread-\"+str(self.thread_index), args=(self.thread_index, ))\n self.thread_index += 1\n self.processor_thread.daemon = True\n self.processor_thread.start()",
"def handle_input(self, event):\n self.update_timeval()\n self.events = []\n code = self._get_event_type(event)\n\n # Deal with buttons\n self.handle_button(event, code)\n\n # Mouse wheel\n if code == 22:\n self.handle_scrollwheel(event)\n # Other relative mouse movements\n else:\n self.handle_relative(event)\n\n # Add in the absolute position of the mouse cursor\n self.handle_absolute(event)\n\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n\n # We are done\n self.write_to_pipe(self.events)",
"def handle(self):\n if self.locked:\n self.timer.start(0.01)\n return\n if not self.queue:\n return\n self.locked = True\n event = self.queue[0]\n self.queue = self.queue[1:]\n\n try:\n for handler in copy.copy(self.handler):\n handler(event)\n except Exception, e:\n log.exception('event callback')\n self.locked = False\n if self.queue and not self.timer.active:\n self.timer.start(0)",
"def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()",
"def start(self):\n self.has_event = False\n self.running = True\n self._condition.acquire()\n self._thread = threading.Thread(target=read_input, args=(self,))\n self._thread.start()",
"def initiate_event(self):\n\n # Determine and log status change\n status = \"ALARM; initiating alarm event.\" \\\n if self.get_input_status() else \"RECOVERY.\"\n \n self.get_board_logger().info(\"Pin input status changed to %s\", \\\n status)\n\n # Create thread to handle event alert\n event_thread = Thread(target=self.alert, args=(), \\\n name=\"EventThread\")\n \n event_thread.start()",
"def run(self):\n while True:\n self._event.clear()\n self._queue.get().run(self._event)",
"def run(self, event):\n pass",
"def schedule_handler(userdata, *args):\n\t\tfor event in database.devschedule(userdata[\"cursor\"], args[0]):\n\t\t\tprint(str(event))\n\t\t\n\t\tprint(\"\")",
"def _eventloop(self):\n logging.debug(\"%s - eventloop started\" % self.name)\n while not self.stopped:\n event = self.inqueue.get()\n if not event: break\n self.doevent(event)\n logging.debug(\"%s - eventloop stopped\" % self.name)",
"def event_in_cb(self, msg):\n self.event = msg.data",
"def ProcessEvents(self):\n self.work_queue.put(self.__ProcessEventsAsync)",
"def simulateCore(self):\n while len(self.event_q) > 0:\n evts = self.nextEvents()\n self.handleEvents(evts)\n self.gatherSystemStatistics(self.scheduler.system)\n self.dumpEventQueue()",
"def fake_event_source():\r\n while 1:\r\n if len(fake_event_listeners):\r\n now = int(time.time())\r\n if now % 10 == 0:\r\n for f in fake_event_listeners:\r\n f(now)\r\n sleep(1)",
"def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False",
"def _start_io_event_loop(self):\r\n self._event_loop_started = threading.Lock()\r\n self._event_loop_started.acquire()\r\n threading.Thread(None, self._io_event_loop_thread).start()\r\n self._event_loop_started.acquire()",
"def listen(device_input, callback):\n while True:\n time.sleep(0.01)\n event = readControlDataRaw(device_input)\n (control_id, control_type, event_type, value) = parseControlEvent(event)\n if control_id != -1:\n callback(control_id, control_type, event_type, value)",
"def put(self, event):\n if self.isgae:\n from jsb.lib.gae.tasks import start_botevent\n start_botevent(self, event, event.speed)\n else: self.inqueue.put_nowait(event)"
] |
[
"0.6275396",
"0.62038547",
"0.60986376",
"0.60479087",
"0.5983007",
"0.5967989",
"0.5935694",
"0.5927004",
"0.5906138",
"0.58961374",
"0.585495",
"0.5826788",
"0.581383",
"0.5796444",
"0.5790609",
"0.57666177",
"0.5715084",
"0.56671846",
"0.56589913",
"0.5647515",
"0.5647162",
"0.5644079",
"0.5643406",
"0.56369007",
"0.562761",
"0.561538",
"0.55938756",
"0.5582238",
"0.5575851",
"0.5575204"
] |
0.63556486
|
0
|
Our select loop indicated 'socketWithData' has data pending.
|
def _handleReadReadySocket(self, socketWithData):
if socketWithData is self._acceptSocket:
try:
newSocket, newSocketSource = socketWithData.accept()
except OSError as exc:
# e.g., OSError: [Errno 24] Too many open files
self._logger.info(f"Failed to accept incoming socket: {exc}")
return False
else:
newSocket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
newSocket.setblocking(False)
with self._lock:
connId = self._newConnectionId()
if self._authToken is not None:
self._unauthenticatedConnections.add(connId)
self._connIdToIncomingSocket[connId] = newSocket
self._socketToIncomingConnId[newSocket] = connId
self._connIdToIncomingEndpoint[connId] = newSocketSource
self._incomingSocketBuffers[newSocket] = MessageBuffer(
self.extraMessageSizeCheck
)
self._allSockets.addForRead(newSocket)
self._fireEvent(
self.eventType.NewIncomingConnection(
source=Endpoint(newSocketSource), connectionId=connId
)
)
return True
elif socketWithData in self._allSockets:
try:
bytesReceived = socketWithData.recv(MSG_BUF_SIZE)
except ssl.SSLWantReadError:
bytesReceived = None
        except ssl.SSLWantWriteError:
            self._socketsWithSslWantWrite.add(socketWithData)
            bytesReceived = None  # nothing read this pass; retry once the socket is writeable
except ConnectionResetError:
bytesReceived = b""
except Exception:
self._logger.exception("MessageBus read socket shutting down")
bytesReceived = b""
if bytesReceived is None:
# do nothing
pass
elif bytesReceived == b"":
self._markSocketClosed(socketWithData)
return True
else:
self.totalBytesRead += len(bytesReceived)
messageBuffer = self._incomingSocketBuffers[socketWithData]
oldBytecount = messageBuffer.pendingBytecount()
try:
newMessages = messageBuffer.write(bytesReceived)
except CorruptMessageStream:
connId = self._getConnectionIdFromSocket(socketWithData)
if connId is not None:
self._logger.error(
f"Closing connection {connId} due to corrupted message stream."
)
self._markSocketClosed(socketWithData)
return True
self.totalBytesPendingInInputLoop += (
self._incomingSocketBuffers[socketWithData].pendingBytecount()
- oldBytecount
)
self.totalBytesPendingInInputLoopHighWatermark = max(
self.totalBytesPendingInInputLoop,
self.totalBytesPendingInInputLoopHighWatermark,
)
for m in newMessages:
if not self._handleIncomingMessage(m, socketWithData):
self._markSocketClosed(socketWithData)
break
return True
else:
self._logger.warning(
"MessageBus got data on a socket it didn't know about: %s", socketWithData
)
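
For orientation, the dispatch implied by the docstring (a select loop hands each read-ready socket to this handler) looks roughly like the standalone sketch below; the toy handler only covers the plain recv path, with none of the SSL, buffering, or connection-id bookkeeping above.

import select
import socket

def handleReadReadySocket(sock):
    data = sock.recv(4096)
    if data == b"":
        sock.close()                   # empty read: peer closed the connection
    else:
        print("received", data)

a, b = socket.socketpair()
b.sendall(b"ping")

readable, _, _ = select.select([a], [], [], 1.0)
for sock in readable:
    handleReadReadySocket(sock)        # prints: received b'ping'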
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def poll_data(self):\n with s.socket(s.AF_INET, s.SOCK_DGRAM) as sock:\n sock.bind(('', self.__port))\n while True:\n message, address = sock.recvfrom(1024)\n self.__address = address\n logging.debug('Received: {}'.format(message))\n self.process_data(message)",
"def await_data(self):\n self.data.append(self.socket.recv(1))",
"def _read_data(self):\n while True:\n try:\n data = yield from asyncio.wait_for(self._socket.recv(), 1)\n except asyncio.TimeoutError:\n continue\n except asyncio.CancelledError:\n break\n except ConnectionClosed:\n break\n\n self._push_packet(data)\n\n self._loop.call_soon(self.close)",
"def _read_from_socket(self):\n data = \"\"\n try:\n data = self.__socket.recv(SOCKET_BUFFER_SIZE)\n except socket.timeout:\n self.state[\"Errors\"] = True\n raise socket.timeout(\"Error! Socket did not get info, when expected\")\n if not data:\n s = \"Empty\"\n else:\n s = data.decode('utf-8')\n print(\"\\n === Read from socket === \\n%s\\n\" % s)\n self._load_to_queue(s)",
"def __listener(self, sock):\n self.__serving = True\n try:\n pr = [sock]\n pw = []\n pe = [sock]\n while self.__thread is not None:\n rd, rw, re = select.select(pr, pw, pe, 0.5)\n if len(re) != 0:\n raise Exception(\"Error on select was detected.\")\n if len(rd) == 0:\n continue\n while 1: # Slurp up waiting packets, return to select if EAGAIN\n try:\n data = sock.recv(8192, socket.MSG_DONTWAIT)\n self.checkMsg(data)\n except Exception:\n break # Go back to select so we don't busy-wait\n finally:\n sock.close()\n self.__serving = False",
"def sock_avail(self):\n return self.channel.recv_ready()",
"def _clear_buffer(self, data_socket):\n\n # attempt to read a 1 byte length messages without blocking.\n # recv throws an exception as it fails to receive data from the cleared buffer\n data_socket.setblocking(False)\n while True:\n try:\n data_socket.recv(1)\n except IOError:\n break\n data_socket.setblocking(True)",
"def _clear_buffer(self, data_socket):\n\n # attempt to read a 1 byte length messages without blocking.\n # recv throws an exception as it fails to receive data from the cleared buffer\n data_socket.setblocking(False)\n while True:\n try:\n data_socket.recv(1)\n except IOError:\n break\n data_socket.setblocking(True)",
"def run(self):\n macId, sensorId = getRequest(self.socket)\n try:\n while True:\n self.condition.acquire()\n while True:\n data = self.lastmessage.readData(macId, sensorId)\n self.socket.send(str(data) + '\\n')\n #self.socket.send(\"done\\n\")\n self.condition.wait(5)\n except socket.error, e:\n print \"Catching broken pipe\"\n self.condition.release()\n self.socket.close()\n\n\n \"\"\"\n macId, sensorId = getRequest(self.socket)\n print macId + \"|\" +sensorId\n self.condition.acquire()\n self.condition.wait()\n data = self.lastmessage.readData(macId, sensorId)\n self.condition.release()\n print str(data)\n self.socket.send(str(data) + '\\n')\n self.socket.send(\"done\\n\")\n self.socket.close()\n\"\"\"",
"def new_messages(self):\n ready, _, _ = select([self.socket], [], [], 0.0)\n return self.socket in ready",
"def on_data(self, event):\n if not self.quitting:\n self.the_server.process_ready_socks([event.socket_ID])",
"def _receive(self):\n # initialize sockets map\n r, w, x = [self.socket], [], []\n r, w, x = select.select(r, w, x, self.sessiondata.timeout)\n if r:\n return self.socket.recv(4096)\n # return nothing on timeout\n return None",
"def nonblocking_send(self, data):\n try:\n if len(data) == 0:\n return None\n self.amount_so_far += self.socket.send(data[self.amount_so_far:])\n except Exception as exc:\n active_sockets_dict.pop(self.socket, None)\n self.socket.close()\n print(\"An error occurred: %s\\n\" % exc)\n return -1\n ret = self.is_send_done()\n return ret",
"def dataReceived(self, data):\n if not self.disconnected:\n self.protocol.dataReceived(data)",
"def recieve_data(self):\r\n try:\r\n while True:\r\n try:\r\n data, self.addr = self.sock.recvfrom(1024)\r\n return data\r\n except socket.timeout:\r\n print(\"There is no packet at all!\")\r\n break\r\n except Exception:\r\n print(\"Can't recieve a package\")",
"def _read_socket(self):\n data = ''\n while True:\n try:\n input_buffer = self._connection_socket.recv(self._buffer_size)\n if not input_buffer:\n raise ConnectionClosedException()\n else:\n data += input_buffer.strip()\n if re.search(self.REQUEST_END, data):\n break\n except socket.timeout:\n continue\n return data",
"def empty_socket(sock):\n input = [sock]\n while 1:\n inputready, o, e = select.select(input,[],[], 0.0)\n if len(inputready)==0: break\n for s in inputready: s.recv(1)",
"def _handle_data(self):\n\n # Once connected, keep receiving and sending the data, raise exception in case of errors\n try:\n\n # Send the frame\n self._client_socket.sendall(self._frame)\n\n # Mark that the frame was sent\n self._client_socket.sendall(self._end_payload)\n\n # Wait for the acknowledgement\n self._client_socket.recv(128)\n\n except (ConnectionResetError, ConnectionAbortedError, timeout):\n raise self.DataError",
"def data_received(self, data):\n self.buf += data\n if b'\\n' in self.buf:\n lines = self.buf.split(b'\\n')\n self.buf = lines[-1] # whatever was left over\n for line in lines[:-1]:\n asyncio.ensure_future(self.q.put(line))\n self.msgs_recvd += 1\n if self.msgs_recvd == 4:\n self.transport.close()",
"def _socket_ready_handle(self, s):\n\n if s and s == self.switch_socket:\n for idx in range(3): # debug: try a couple of times\n try:\n pkt = self.switch_socket.recvfrom(self.rcv_size)\n except:\n self.logger.warning(\"Error on switch read\")\n return -1\n\n if not self.active:\n return 0\n\n if len(pkt) == 0:\n self.logger.warning(\"Zero-length switch read, %d\" % idx)\n else:\n break\n\n if len(pkt) == 0: # Still no packet\n self.logger.warning(\"Zero-length switch read; closing cxn\")\n self.logger.info(str(self))\n return -1\n\n self._pkt_handle(pkt)\n elif s and s == self.waker:\n self.waker.wait()\n else:\n self.logger.error(\"Unknown socket ready: \" + str(s))\n return -1\n\n return 0",
"def dataReceived (self, data) :\r\n \r\n buf = buffer.Buffer(self.recvbuffer + data)\r\n \r\n # process packets until there are no more of them\r\n\r\n try :\r\n buf.processWith(self.processPacket)\r\n except BaseClientAbort, e :\r\n self.do_abort(e.errorCode())\r\n \r\n self.log(\"closing connection\")\r\n self.transport.loseConnection()\r\n \r\n except BaseClientError, e :\r\n self.do_error(e.errorCode())\r\n \r\n except Exception, e :\r\n self.log(\"unknown exception %s: %s\" % (type(e), e))\r\n \r\n self.log(\"closing connection\")\r\n self.transport.loseConnection()\r\n \r\n raise\r\n \r\n # stuff remaining data back into recvbuf\r\n self.recvbuffer = buf.read()",
"def run(self):\n self._create_data_socket()\n\n self._is_running = True\n\n # self._clear_buffer(data_socket)\n\n # prevent recv from block indefinitely\n self._socket.settimeout(DataThread.TIMEOUT)\n\n while self._is_running:\n try:\n data = self._socket.recv(SIZE_BUFFER)\n if len(data):\n self._adapter.process_message(data)\n except (KeyboardInterrupt, SystemExit, OSError):\n print('Exiting data socket')\n\n except socket.timeout:\n print('NatNetClient data socket timeout!')\n continue\n\n self._close_socket()",
"def receive_data(self):\n self.new_socket.listen(5)\n while True:\n channel, address = self.new_socket.accept()\n if self.clients_count < 5:\n Thread(target=self.listen_clients, args=(channel, address)).start()\n self.clients_count += 1\n print(\"No. of clients connected:\" + str(self.clients_count))\n else:\n print(\"No new threads allowed\")\n break",
"def data_received(self, data):\n # This may seem strange; feeding all bytes received to the **writer**,\n # and, only if they test positive, duplicating to the **reader**.\n #\n # The writer receives a copy of all raw bytes because, as an IAC\n # interpreter, it may likely **write** a responding reply.\n self._last_received = datetime.datetime.now()\n\n cmd_received = False\n for byte in data:\n try:\n recv_inband = self.writer.feed_byte(bytes([byte]))\n except:\n self._log_exception(logger.warning, *sys.exc_info())\n else:\n if recv_inband:\n # forward to reader (shell).\n self.reader.feed_data(bytes([byte]))\n\n # becomes True if any out of band data is received.\n cmd_received = cmd_received or not recv_inband\n\n # until negotiation is complete, re-check negotiation aggressively\n # upon receipt of any command byte.\n if not self._waiter_connected.done() and cmd_received:\n self._check_negotiation_timer()",
"def receive_from(self, socket):\n socket.settimeout(self.socket_timeout)\n try:\n data_buffer = b\"\"\n while True:\n received_data = socket.recv(self.buffer_size)\n if not received_data:\n break\n logging.debug(f'Received data: {received_data}')\n data_buffer += received_data\n except Exception:\n # logging.error(f\"[!] Socket Error {e}\")\n pass\n return data_buffer",
"def socket_send_loop(sock, byte_source, timeout=5):\n rlist = [sock]\n wlist = [sock]\n buffered_bytes = b''\n while True:\n rlist, wlist, _ = select.select([sock], [sock], [], timeout)\n if rlist:\n read_data = sock.recv(8192)\n if not read_data:\n # Socket closed.\n return\n yield read_data\n elif wlist:\n if buffered_bytes:\n data_to_send = buffered_bytes\n else:\n try:\n data_to_send = next(byte_source)\n except StopIteration:\n # Sending is done. We should stop checking if the socket is\n # writeable.\n wlist = []\n continue\n\n sent_bytes = sock.send(data_to_send)\n buffered_bytes = data_to_send[sent_bytes:]",
"def ready(self):\n if self.socket is None or self._is_connected is False:\n return False\n\n try:\n # Use a timeout of 0 so we get an \"instant\" result\n ready, _, _ = select.select([self.socket], [], [], 0)\n except (socket.error, socket.timeout, ValueError):\n # Evt17: Transport connection closed\n self.event_queue.put('Evt17')\n return False\n\n return bool(ready)",
"def read_socket( self ):\n incoming = self.conn.recv( 4096 )\n other = \"source\" if self.sink else \"sink\"\n self.test.info( \"received payload from %s <<<%s>>>\" % ( other,\n incoming ) )\n self.received_data += incoming",
"def data_received(self, data):\n self.buffered += data\n while True:\n if self.have_length:\n if len(self.buffered) < self.message_length:\n break\n self._decode_message(self.buffered[:self.message_length])\n self.have_length = False\n self.buffered = self.buffered[self.message_length:]\n self.message_length = 0\n else:\n if len(self.buffered) < 4:\n break\n (self.message_length,) = struct.unpack_from(\">I\", self.buffered)\n self.buffered = self.buffered[4:]\n self.have_length = True",
"def dataReceived( self, data ):\n # if self.log.isEnabledFor(logging.DEBUG):\n # self.log.debug(\"Received data [%s]\" % _safelylogOutPdu(data))\n \n self.recvBuffer = self.recvBuffer + data\n \n while True:\n if self.connectionCorrupted:\n return\n msg = self.readMessage()\n if msg is None:\n break\n self.endPDURead()\n self.rawMessageReceived(msg)\n \n if len(self.recvBuffer) > 0:\n self.incompletePDURead()"
] |
[
"0.64880186",
"0.6481812",
"0.64238644",
"0.62901354",
"0.61899686",
"0.6123691",
"0.6109674",
"0.6109674",
"0.60701644",
"0.606652",
"0.60222965",
"0.6005148",
"0.59990996",
"0.5927602",
"0.5922998",
"0.59135413",
"0.58800566",
"0.58775216",
"0.58682287",
"0.58518314",
"0.58516216",
"0.58440506",
"0.5837359",
"0.5809053",
"0.580418",
"0.5788048",
"0.5780153",
"0.5775531",
"0.5773768",
"0.5770105"
] |
0.6618068
|
0
|
Socket 'writeable' can accept more bytes.
|
def _handleWriteReadySocket(self, writeable):
if writeable not in self._socketToBytesNeedingWrite:
return
try:
bytesWritten = writeable.send(self._socketToBytesNeedingWrite[writeable])
except ssl.SSLWantReadError:
bytesWritten = -1
except ssl.SSLWantWriteError:
self._socketsWithSslWantWrite.add(writeable)
bytesWritten = -1
except (OSError, BrokenPipeError):
bytesWritten = 0
except Exception:
self._logger.exception(
"MessageBus write socket shutting down because of exception"
)
bytesWritten = 0
if bytesWritten == 0:
# the primary socket close pathway is in the socket handler.
self._allSockets.discardForWrite(writeable)
with self._lock:
del self._socketToBytesNeedingWrite[writeable]
return True
elif bytesWritten == -1:
# do nothing
return False
elif bytesWritten > 0:
with self._lock:
self.totalBytesPendingInOutputLoop -= bytesWritten
self.totalBytesWritten += bytesWritten
self._socketToBytesNeedingWrite[writeable][:bytesWritten] = b""
if not self._socketToBytesNeedingWrite[writeable]:
# we have no bytes to flush
self._allSockets.discardForWrite(writeable)
del self._socketToBytesNeedingWrite[writeable]
return True
else:
self._logger.error(f"Internal Error: bytesWritten = {bytesWritten}")
return False
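
The buffer bookkeeping above, shown in isolation: slice the sent prefix off a bytearray and drop the socket from the write set once nothing remains. The dict key "sock-1" is a stand-in for the real socket objects and state.

pendingWrites = {"sock-1": bytearray(b"abcdef")}

def onWritable(key, bytesWritten):
    # discard the prefix that send() reported as written
    pendingWrites[key][:bytesWritten] = b""
    if not pendingWrites[key]:
        del pendingWrites[key]         # fully flushed: stop watching for write

onWritable("sock-1", 4)
print(pendingWrites)                   # {'sock-1': bytearray(b'ef')}
onWritable("sock-1", 2)
print(pendingWrites)                   # {}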
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def on_write_needed(self, nbytes, underflow):",
"def ReceiveBufferSize(self) -> int:",
"def ReceiveBufferSize(self) -> int:",
"def SendBufferSize(self) -> int:",
"def SendBufferSize(self) -> int:",
"def writable(self):\n return self.append_connect or (self.append_send_buffer)",
"def writable(self):\n return self.append_connect or (self.append_send_buffer)",
"def retryable_writes_supported(self):\n ...",
"def socket_write(self, sock_num: int, buffer: bytes, timeout: int = 120000) -> bool:\n self._read_line()\n assert (\n sock_num < FONA_MAX_SOCKETS\n ), \"Provided socket exceeds the maximum number of \\\n sockets for the FONA module.\"\n\n self._uart.reset_input_buffer()\n\n self._uart_write(\n b\"AT+CIPSEND=\"\n + str(sock_num).encode()\n + b\",\"\n + str(len(buffer)).encode()\n + b\"\\r\\n\"\n )\n self._read_line()\n if self._buf[0] != 62:\n # promoting mark ('>') not found\n return False\n\n self._uart_write(buffer + b\"\\r\\n\")\n self._read_line() # eat 'OK'\n\n self._read_line(3000) # expect +CIPSEND: rx,tx\n if not self._parse_reply(b\"+CIPSEND:\", idx=1):\n return False\n if not self._buf == len(buffer): # assert data sent == buffer size\n return False\n\n self._read_line(timeout)\n if \"Send ok\" not in self._buf.decode():\n return False\n return True",
"def test_send_too_many_bytes(self):\n self.client_socket.sendto(b'12345678901234567890', CONTROL_SOCKET_FILE)\n (bytes, address) = self.client_socket.recvfrom(6)\n #self.assertEquals(address, CONTROL_SOCKET_FILE)\n self.assertEqual(len(bytes), 1)\n self.assertEqual(bytes[0], 7)",
"def test_wantWriteError(self):\n client_socket, server_socket = socket_pair()\n # Fill up the client's send buffer so Connection won't be able to write\n # anything. Start by sending larger chunks (Windows Socket I/O is slow)\n # and continue by writing a single byte at a time so we can be sure we\n # completely fill the buffer. Even though the socket API is allowed to\n # signal a short write via its return value it seems this doesn't\n # always happen on all platforms (FreeBSD and OS X particular) for the\n # very last bit of available buffer space.\n for msg in [b\"x\" * 65536, b\"x\"]:\n for i in range(1024 * 1024 * 64):\n try:\n client_socket.send(msg)\n except error as e:\n if e.errno == EWOULDBLOCK:\n break\n raise # pragma: no cover\n else: # pragma: no cover\n pytest.fail(\n \"Failed to fill socket buffer, cannot test BIO want write\"\n )\n\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, client_socket)\n # Client's speak first, so make it an SSL client\n conn.set_connect_state()\n with pytest.raises(WantWriteError):\n conn.do_handshake()",
"def AcceptSocket(self) -> Socket:",
"def test_overlongWrite(self):\n buf = imap4.WriteBuffer(self.transport)\n data = b'x' * (buf.bufferSize + 1)\n\n buf.write(data)\n\n self.assertEqual(self.transport.value(), data)",
"def can_write_eof(self):\n return True",
"def bufsized (sock, size = 1):\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, size)\n return sock",
"def _write(self, data):\n if not self.connected:\n raise IOError(\"Not connected.\")\n\n if len(data + b'\\r\\n') > self.MAX_MESSAGE_SIZE:\n logger.error(\n \"A message wasn't sent to %r because it was larger than %d \"\n \"bytes (that is MAX_MESSAGE_SIZE). Consider raising that \"\n \"value if the message seemed legit.\", self._repr_remote(),\n self.MAX_MESSAGE_SIZE)\n # No need to call finalize.\n raise IOError(\"Message too long.\")\n\n try:\n with self._write_lock:\n if not self.connected:\n raise IOError(\"Not connected.\")\n # Does the same as self._socket.sendall.\n self._writer.write(data + b'\\r\\n')\n self._writer.flush()\n except socket.error as error:\n self.finalize(\"Write failed.\")\n logger.warning(\"Failed writing to socket: %s.\", error)\n raise error",
"def test_partialWrite(self):\n buf = imap4.WriteBuffer(self.transport)\n data = b'x' * buf.bufferSize\n\n buf.write(data)\n\n self.assertFalse(self.transport.value())",
"def min_buf_size ():\n #test_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #test_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)\n #return test_sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n return 65535",
"def test_buffer_size(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n conn.set_connect_state()\n try:\n conn.do_handshake()\n except WantReadError:\n pass\n data = conn.bio_read(2)\n assert 2 == len(data)",
"def test_outgoing_overflow(self):\n server = self._server(None)\n client = self._client(None)\n\n interact_in_memory(client, server)\n\n size = 2**15\n sent = client.send(b\"x\" * size)\n # Sanity check. We're trying to test what happens when the entire\n # input can't be sent. If the entire input was sent, this test is\n # meaningless.\n assert sent < size\n\n receiver, received = interact_in_memory(client, server)\n assert receiver is server\n\n # We can rely on all of these bytes being received at once because\n # loopback passes 2 ** 16 to recv - more than 2 ** 15.\n assert len(received) == sent",
"def _write(self, s):\n try:\n self._sock.sendall(s)\n except socket.error, e:\n if e.args[0] == 32:\n # broken pipe\n self.disconnect()\n raise ConnectionError(\"Error %s while writing to socket. %s.\" % tuple(e.args))",
"async def write(self, data: bytes):\n while data:\n await self.wait_for_write()\n try:\n sent = self.socket.send(data)\n except OSError as e:\n self.logger.debug(\"Failed to write: %s\", e)\n raise asyncio.TimeoutError()\n data = data[sent:]",
"def write(self, buf: AnyReadableBuf, /) -> int:",
"def writable(self):\n ping_buffer = MOLO_CLIENT_APP.get_ping_buffer()\n if ping_buffer:\n self.append_send_buffer += ping_buffer\n\n return self.append_connect or (self.append_send_buffer)",
"def write(self, data):\n if self.closed:\n raise ConnectionResetError(\n 'Transport closed - cannot write on %s' % self\n )\n else:\n t = self.transport\n if self._paused or self._buffer:\n self._buffer.appendleft(data)\n self._buffer_size += len(data)\n self._write_from_buffer()\n if self._buffer_size > 2 * self._b_limit:\n if self._waiter and not self._waiter.cancelled():\n self.logger.warning(\n '%s buffer size is %d: limit is %d ',\n self._buffer_size, self._b_limit\n )\n else:\n t.pause_reading()\n self._waiter = self._loop.create_future()\n else:\n t.write(data)\n self.changed()\n return self._waiter",
"def recv(self, timeout=None):\n raise NotImplementedError(\"Trying to read from a write only bus?\")",
"def _read_write(self):\n sockets = [self.client, self.target]\n try:\n while 1:\n (receive, _, error) = select.select(sockets, [], sockets, 10)\n if error:\n break\n elif receive:\n for source in receive:\n data = source.recv(BUFFER_LENGTH)\n if not data:\n return\n if source is self.client:\n destination = self.target\n else:\n destination = self.client\n destination.sendall(data)\n finally:\n self.client.close()\n self.target.close()",
"def send(self, packet: Packet) -> None:\n packet_size_data = packet.SIZE.to_bytes(2, 'little')\n write_count = self._stream.write(packet_size_data + packet)\n if write_count != packet.SIZE + 2:\n raise racetools.errors.StreamWriteError(packet.SIZE + 2, write_count)",
"def sendall_with_size(sock, message):\n sock.sendall(str(len(message)))\n\n OK = sock.recv(1024)\n\n if(OK == \"OK\"):\n sock.sendall(message)\n\n else:\n print \"sendall_with_size had a problem with %s.\" % message",
"def writable(self):\n return bool(self.buffer)"
] |
[
"0.6269928",
"0.62377745",
"0.62377745",
"0.6100103",
"0.6100103",
"0.59965086",
"0.59965086",
"0.5949343",
"0.59197575",
"0.59163684",
"0.58904225",
"0.5877699",
"0.5872574",
"0.5820368",
"0.5811556",
"0.5802184",
"0.58004016",
"0.5764541",
"0.57305753",
"0.5728187",
"0.5725836",
"0.5705458",
"0.5697911",
"0.56920505",
"0.56478304",
"0.5638004",
"0.56280285",
"0.5624505",
"0.5613777",
"0.56108046"
] |
0.6564836
|
0
|
Iterate over the items in the wishlist and get the products from the database.
|
def __iter__(self):
product_ids = self.wishlist.keys()
products = Product.objects.filter(id__in=product_ids)
for product in products:
self.wishlist[str(product.id)]['product'] = product
for item in self.wishlist.values():
yield item
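
The same annotate-then-yield pattern with plain dicts in place of the Django ORM, so it can run without a database; FakeProduct and PRODUCTS are illustrative stand-ins.

class FakeProduct:
    def __init__(self, id, name):
        self.id, self.name = id, name

PRODUCTS = {1: FakeProduct(1, "mug"), 2: FakeProduct(2, "shirt")}

class TinyWishlist:
    def __init__(self, session_data):
        self.wishlist = session_data           # e.g. {"1": {"qty": 2}}

    def __iter__(self):
        for pid in self.wishlist.keys():
            # attach the product object to each stored entry, as above
            self.wishlist[pid]["product"] = PRODUCTS[int(pid)]
        yield from self.wishlist.values()

for item in TinyWishlist({"1": {"qty": 2}, "2": {"qty": 1}}):
    print(item["product"].name, item["qty"])   # mug 2 / shirt 1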
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __iter__(self):\n products_ids = self.wishlist.keys()\n # get the products objects and add them to the wishlist\n products = Product.objects.filter(id__in=products_ids)\n\n wishlist_session = self.wishlist.copy()\n wishlist = {}\n for product in products:\n wishlist[str(product.id)] = {'product': product}\n\n for item, item_s in zip(wishlist.values(), wishlist_session.values()):\n item['per_now'] = 0\n item['price'] = Decimal(item_s['price'])\n if product.price != item['price']:\n new_price = Decimal(item['product'].price)\n old_price = Decimal(item['price'])\n per_now = (new_price - old_price) / old_price * Decimal('100')\n item['per_now'] = int(per_now)\n yield item",
"def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products",
"def wishlist(request):\n items = []\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n existingWishlist = WishlistItem.objects.filter(\n wishlist=wishlist_user).exists()\n\n if existingWishlist:\n user_wishlist = get_list_or_404(WishlistItem, wishlist=wishlist_user)\n for obj in user_wishlist:\n product = get_object_or_404(Product, name=obj)\n items.append(product)\n context = {\n 'wishlist': True,\n 'products': items\n }\n return render(request, 'wishlist/wishlist.html', context)\n\n else:\n context = {\n 'wishlist': False,\n }\n return render(request, 'wishlist/wishlist.html', context)",
"def get_all_products(self):\n\t\tpass",
"def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})",
"def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list",
"def generateProducts(self):\r\n\r\n # Creates items in each category\r\n for i in range(self.num_of_items):\r\n self.ID_DICT[i+self.num_of_items] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*2] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*3] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*4] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*5] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*6] = random.randint(1, 10)\r\n\r\n\r\n # Sort for easy selection\r\n sorted(self.ID_DICT)\r\n\r\n for product in self.ID_DICT.keys():\r\n temp_int = self.ID_DICT[product]\r\n self.c.execute(\"INSERT INTO Products (ProductID, Price) VALUES (?, ?)\", (product, self.ID_DICT[product]))\r\n self.conn.commit()\r\n\r\n if self.print_items:\r\n print(\"\\nAll items in store:\")\r\n print(self.ID_DICT)\r\n print()",
"def products(self):\r\n return self._products",
"def get_products(self):\n page = 1\n out = []\n while True:\n resp = self.get_session().Product.find(limit=10,page=page)\n if not len(resp):\n return\n yield resp\n page += 1",
"def get_some_by_product(self, product, how_many):\n stores = self.db.query(f\"\"\"\n SELECT store.id, store.name from store\n JOIN product_store ON product_store.store_id = store.id\n JOIN product ON product_store.product_id = product.id\n WHERE product.id = :id\n LIMIT :how_many\n \"\"\", id=product.id, how_many=how_many).all(as_dict=True)\n return [self.model(**store) for store in stores]",
"def __iter__(self):\n product_ids = self.basket.keys()\n products = Product.products.filter(id__in=product_ids)\n basket = self.basket.copy()\n\n for product in products:\n basket[str(product.id)]['product'] = product\n\n for item in basket.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['qty']\n yield item",
"def get_products(self) -> dict:\n\t\tproducts = dict()\n\n\t\tdb = Database()\n\t\tdb.create_connection(self._file_path)\n\t\trows = db.get_products()\n\t\tdb.close_connection()\n\n\t\tfor row in rows:\n\t\t\tif row[0] not in products:\n\t\t\t\ttry:\n\t\t\t\t\tproducts[row[0]] = Product(row[0], row[1], row[2], row[3]) # code, price, lastupdate, currency\n\t\t\t\texcept Exception as e: \n\t\t\t\t\t# IF the database was not correct parsed, the item will be discarted, \n\t\t\t\t\t# the event will be logged in the log file and the program will continue\n\t\t\t\t\tlogging.error(str(datetime.now())+': ' + e)\n\t\t\t\t\tcontinue\n\n\t\treturn products",
"def see_products_for_rent_handler():\n\n products = ShowProductsAndCustomers()\n my_list = products.see_products_for_rent()\n my_result_list = []\n for product in my_list:\n my_result_list.append(product)\n print(product)\n return my_result_list",
"def get_products(self):\n\n lst = []\n for product in self.products.findall('product'):\n id = product.find('id').text\n name = product.find('name').text\n dispensary_id = product.find('dispensary_id').text\n dispensary_name = product.find('dispensary_name').text\n canabis_brand = product.find('canabis_brand').text\n canabis_strain = product.find('canabis_strain').text\n category = product.find('category').text\n subcategory = product.find('subcategory').text\n thc_level = product.find('thc_level').text\n cbd_level = product.find('cbd_level').text\n cbn_level = product.find('cbn_level').text\n thc_level_type = product.find('thc_level_type').text\n cbd_level_type = product.find('cbd_level_type').text\n cbn_level_type = product.find('cbn_level_type').text\n\n description = product.find('description').text\n created_at = product.find('created_at').text\n updated_at = product.find('updated_at').text\n\n prices = []\n urls = []\n images = []\n\n for child in product:\n if child.tag == 'prices':\n for cost in child.findall('cost'):\n prices.append(Price(cost.attrib['unit'], cost.text))\n\n if child.tag == 'urls':\n admin = child.find('admin').text\n public = child.find('public').text\n urls.append(UrlInfo(admin, public))\n\n if child.tag == 'images':\n for image in child.findall('image'):\n images.append(Image(image.attrib['main'], image.text,))\n\n lst.append(Product(id, name, dispensary_id, dispensary_name,\n canabis_brand, canabis_strain,\n category, subcategory, thc_level, cbd_level,\n cbn_level, thc_level_type, cbd_level_type,\n cbn_level_type, prices, urls, images,\n description, created_at, updated_at))\n\n return lst",
"def return_products():\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_product, product_name, product_price, in_stock, description\n FROM Products\n \"\"\")\n return cursor.fetchall()",
"def fetch_all_products():\n products = []\n client = ProductsClient()\n for product in client.get_products():\n products.append(Product(\n base_currency=product[0],\n quote_currency=product[1],\n ))\n return products",
"def product_list(id):\r\n\r\n db = get_db()\r\n product_list = db.execute(\r\n \"SELECT product_id, product_name, quantity FROM product WHERE for_business = ? AND quantity > 0\",\r\n (id,),\r\n ).fetchall()\r\n return product_list",
"def process_products(scrape_delay_seconds, *, chrome_exec_path, chrome_webdriver_path):\n for prod in Product.objects.all():\n\n # Get the Xpath List for Price\n xpath_tup_list = get_price_xpaths(prod)\n\n # Scrape the Price\n result = scraper.scrape_page(\n chrome=chrome_exec_path,\n chrome_webdriver=chrome_webdriver_path,\n url=prod.full_url,\n xpath_tup_list=xpath_tup_list\n )\n\n # Process Price Data\n process_price_scrape_result(prod, result)\n\n # Sleep\n time.sleep(scrape_delay_seconds)",
"def get_favorite_by_product(self, product):\n products = self.db.query(f\"\"\"\n SELECT product.id, product.name from store\n JOIN product_store ON product_store.store_id = store.id\n JOIN product ON product_store.product_id = product.id\n WHERE store.id = :id\n \"\"\", id=product.id).all(as_dict=True)\n return [self.model(**product) for product in products]",
"def show_available_products(*args):\n logger.info(f\"Preparing dict of available prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products",
"def __iter__(self):\n #gets product data keys e.g price, quantity\n product_ids = self.cart.keys()\n\n #checks if the product exist in the database by filtering by product_ids\n products = Product.objects.filter(id__in=product_ids)\n cart = self.cart.copy()\n\n #loop through the products 1 by 1 and re-assigns them to the product.id in the cart\n for product in products:\n cart[str(product.id)][\"product\"] = product\n\n # get price and quatity of items and mutiplies price by quantity to get total price of items\n for item in cart.values():\n item[\"price\"] = Decimal(item[\"price\"])\n item[\"total_price\"] = item[\"price\"] * item[\"qty\"]\n yield item",
"def get_product_ingredients(self, driver):\n pass",
"def list_products(self):\n return self._make_get_request(self._urls['products'])",
"def __iter__(self):\n return self._products.__iter__()",
"def shopify_list_all_products(self, result):\n sum_product_list = []\n catch = \"\"\n while result:\n page_info = \"\"\n sum_product_list += result\n link = shopify.ShopifyResource.connection.response.headers.get('Link')\n if not link or not isinstance(link, str):\n return sum_product_list\n for page_link in link.split(','):\n if page_link.find('next') > 0:\n page_info = page_link.split(';')[0].strip('<>').split('page_info=')[1]\n try:\n result = shopify.Product().find(page_info=page_info, limit=250)\n except Exception as e:\n if e.response.code == 429 and e.response.msg == \"Too Many Requests\":\n time.sleep(int(float(e.response.headers.get('Retry-After', 5))))\n result = shopify.Product().find(page_info=page_info, limit=250)\n else:\n raise Warning(e)\n if catch == page_info:\n break\n return sum_product_list",
"def fill_products(self):\n cursor = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n categories = dict()\n for page in range(1, 2):\n result = requests.get(\n 'https://fr.openfoodfacts.org/cgi/search.pl?page_size=1000&page={}&action=process&json=1'.format(\n page)).json()\n for element in result['products']:\n try:\n cursor.execute(\n \"INSERT INTO product (name, store, nutrition_grade, url) VALUES (%s, %s, %s, %s) RETURNING \"\n \"id, name\",\n (element[\"product_name\"], element[\"stores\"], element[\"nutrition_grade_fr\"], element[\"url\"]))\n # un except pour éviter les erreurs de clés\n query_result = cursor.fetchone()\n for category in element[\"categories_tags\"]:\n try:\n cursor.execute(\"INSERT INTO product_category(product_id, category_id) VALUES (%s, %s)\",\n (query_result[0], self.categories[category]))\n except KeyError:\n print(\"Categorie insertion failed\")\n\n print(element[\"product_name\"])\n except KeyError:\n print(f'product insertion failed:')\n\n self.conn.commit()\n cursor.close()",
"def _product_generator(self):\n categories = ColesCategoryIterator(self._base_url)\n self.total = len(categories)\n for category in categories:\n # print(\"Searching Category: {}\".format(category))\n # print self._get_url(category)\n self._params['beginIndex'] = \"0\"\n while self._has_next_page():\n self._get_data(category)\n self._update_search_info()\n for product in self._product_data:\n # record the product in the database\n yield product\n self.current += 1",
"def products(self):\n return list(Product.select())",
"def get_products(self, query_args={}):\n endpoint = '/v3/educator/products'\n result = self.request(endpoint, query_args)\n\n products = []\n for data in result.response:\n # Dynamically load product instance.\n class_name = data.type.capitalize()\n product = Product.instance(class_name, data)\n products.append(product)\n\n return products",
"def products(self):\n return self._products"
] |
[
"0.73702574",
"0.6923004",
"0.6526089",
"0.6503565",
"0.64888847",
"0.6464952",
"0.6314977",
"0.62624204",
"0.62511605",
"0.6215542",
"0.6153764",
"0.61509013",
"0.6129298",
"0.6076732",
"0.604918",
"0.60109943",
"0.59867036",
"0.59783274",
"0.595691",
"0.59493494",
"0.5939454",
"0.59263813",
"0.59241205",
"0.591146",
"0.5899213",
"0.58918065",
"0.5876344",
"0.5854451",
"0.5849912",
"0.5831588"
] |
0.74418205
|
0
|
Add or remove a product to the wishlist or update its quantity.
|
def add_remove(self, product):
    product_id = str(product.id)
    if product_id not in self.wishlist:
        # Not in the wishlist yet: add it with a snapshot of its price.
        self.wishlist[product_id] = {'price': str(product.price_in_dollars)}
    else:
        # Already in the wishlist: remove it (toggle off).
        del self.wishlist[product_id]
    self.save()
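
A short usage sketch of the toggle behaviour under the same assumption of a session-backed dict; WishlistToggleSketch and the SimpleNamespace product are hypothetical, and save() is a no-op standing in for marking the session as modified.

from types import SimpleNamespace

class WishlistToggleSketch:
    def __init__(self):
        self.wishlist = {}

    def save(self):
        # In the real class this would mark request.session as modified.
        pass

    def add_remove(self, product):
        product_id = str(product.id)
        if product_id not in self.wishlist:
            self.wishlist[product_id] = {'price': str(product.price_in_dollars)}
        else:
            del self.wishlist[product_id]
        self.save()

mug = SimpleNamespace(id=7, price_in_dollars='12.00')
wishlist = WishlistToggleSketch()
wishlist.add_remove(mug)
print(wishlist.wishlist)  # {'7': {'price': '12.00'}} -- added
wishlist.add_remove(mug)
print(wishlist.wishlist)  # {} -- removed on the second call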
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add(self, product):\n product_id = str(product.id)\n self.wishlist[product_id] = {'price': str(product.price)}\n self.save()",
"def add_to_wishlist(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n wishlist = get_object_or_404(Wishlist, user=request.user)\n\n if product not in wishlist.products.all():\n wishlist.products.add(product)\n messages.info(request,\n f\"{product.name} has been added to your wishlist.\")\n else:\n messages.error(request,\n \"Error, you already have this item in your wishlist!\")\n return redirect(reverse(\"product_detail\", args=[product_id]))",
"def update(self, product, qty):\n product_id = str(product)\n if product_id in self.basket:\n self.basket[product_id]['qty'] = qty\n self.save()",
"def remove(self, product):\n product_id = str(product.id)\n if product_id in self.wishlist:\n del self.wishlist[product_id]\n self.save()",
"def update(self, product, qty):\n product_id = str(product)\n if product_id in self.cart:\n self.cart[product_id]['qty'] = qty\n self.save()",
"def add(self, product, qty):\n product_id = str(product.id)\n\n if product_id in self.basket:\n self.basket[product_id]['qty'] = qty\n else:\n self.basket[product_id] = {'price': str(product.price), 'qty': qty}\n\n self.save()",
"def add_to_wishlist(request, product_id):\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n product = Product.objects.get(pk=product_id)\n if request.POST:\n existingWishlistItem = WishlistItem.objects.filter(\n wishlist=wishlist_user, product=product).exists()\n if existingWishlistItem:\n messages.error(request, \"Item already in your wishlist\")\n return redirect(redirect_url)\n\n else:\n added_item = WishlistItem(\n wishlist=wishlist_user, product=product, date_added=timezone.now())\n added_item.save()\n messages.success(request, \"Product added to your wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, \"Click 'Add to wishlist' to add a item \")\n return render(request, 'home/index.html')",
"def update(self, user, product, quantity):\n\n cart_product = CartProduct.update(user, product, quantity)\n CartProductsView.update(cart_product)",
"def add_to_bag(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n bag = request.session.get('bag', {})\n\n if item_id in list(bag.keys()):\n bag[item_id] += quantity\n messages.success(request,\n f'Updated {product.name} quantity to {bag[item_id]}!')\n else:\n bag[item_id] = quantity\n messages.success(request, f'Added {product.name} to your bag!')\n\n # remove product from wishlist if added from it\n if redirect_url == '/wishlist/':\n profile = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get(user_profile=profile)\n wishitems = WishLineItem.objects.get(\n product=product, wishlist=wishlist.id)\n wishitems.delete()\n messages.success(\n request, f'Removed {product.name} from your wishlist!')\n\n request.session['bag'] = bag\n return redirect(redirect_url)",
"def add(self, product, product_qty):\n product_id = str(product.id)\n if product_id in self.cart:\n self.cart[product_id][\"qty\"] = product_qty\n else:\n self.cart[product_id] = {'price': str(product.price), 'qty':int(product_qty)}\n self.save()",
"def add_product(cust_id,wishlist_id,pid):\n # \"\"\" Add product ID to a wishlist \"\"\"\n # TODO add products changes as well, for now just asses the wishlists\n if Customer.check_custid(cust_id):\n message = Customer.find_by_id(cust_id,wishlist_id)\n if message:\n result = Customer.addProduct(cust_id,wishlist_id,pid)\n res = Customer.find_by_id(cust_id,wishlist_id)\n return make_response(jsonify(res), status.HTTP_200_OK)\n else:\n message = {'Error': 'Wishlist with given ID not found'}\n return make_response(jsonify(message), status.HTTP_404_NOT_FOUND)\n else:\n message = {'Invalid' : 'Invalid customer ID'}\n return make_response(jsonify(message), status.HTTP_404_NOT_FOUND)",
"def adjust_basket(request, product_id):\n\n try:\n product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n product = None\n messages.error(\n request,\n f'Product ({product_id}) not found.',\n 'from__adjust_basket'\n )\n\n basket = request.session.get('basket', {})\n\n if product is not None:\n quantity = request.POST.get('quantity')\n if quantity is not None:\n quantity = int(quantity)\n\n if is_product_hidden(product) is True:\n product = set_product_instance_unavailable(product)\n max_per_purchase = product.max_per_purchase\n if max_per_purchase > product.stock:\n max_per_purchase = product.stock\n\n if quantity > 0 and max_per_purchase > 0:\n if quantity == basket[product_id]:\n messages.info(\n request,\n f'Your basket already contains {quantity}x ' +\n f'{product.name}.',\n f'from__adjust_basket,id__{product_id}'\n )\n else:\n if quantity > max_per_purchase:\n quantity = max_per_purchase\n messages.info(\n request,\n f'No more than {max_per_purchase} of {product.name} ' +\n ' may be added to an order. ' +\n f'Adjusted {product.name} quantity to ' +\n f'{quantity}.',\n f'from__adjust_basket,id__{product_id}'\n )\n else:\n adjustment = 'Increased'\n if quantity < basket[product_id]:\n adjustment = 'Reduced'\n messages.success(\n request,\n f'{adjustment} {product.name} quantity to ' +\n f'{quantity}.',\n f'from__adjust_basket,id__{product_id}'\n )\n basket[product_id] = quantity\n elif quantity is None:\n messages.error(\n request,\n 'Unable to update basket. No quantity supplied for ' +\n f'{product.name}.',\n 'from__adjust_basket'\n )\n elif quantity < 1:\n qty = basket[product_id]\n basket.pop(product_id)\n messages.success(\n request,\n f'Removed {qty}x {product.name} from your ' +\n 'basket.',\n f'from__adjust_basket,id__{product_id}'\n )\n elif max_per_purchase < 1:\n basket.pop(product_id)\n messages.info(\n request,\n f'Unable to update quantity of {product.name} due to ' +\n 'insufficient stock.',\n f'from__adjust_basket,id__{product_id}'\n )\n else:\n if product_id in basket:\n basket.pop(product_id)\n messages.info(\n request,\n f'Removed non-existant product ({product_id}) from your ' +\n 'basket.',\n 'from__adjust_basket'\n )\n else:\n messages.info(\n request,\n f'Non-existant product ({product_id}) was already removed ' +\n 'from your basket.',\n 'from__adjust_basket')\n\n request.session['basket'] = basket\n return redirect(reverse('view_basket'))",
"def add_to_basket(request, product_id):\n\n redirect_url = request.POST.get('redirect_url')\n try:\n product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n product = None\n messages.error(\n request,\n f'Unable to update basket. Product ({product_id}) not found.',\n 'from__add_to_basket'\n )\n\n if product is not None:\n if is_product_hidden(product) is True:\n product = set_product_instance_unavailable(product)\n if product.stock > 0:\n quantity = request.POST.get('quantity')\n if quantity is not None:\n quantity = int(quantity)\n if quantity > 0:\n basket = request.session.get('basket', {})\n max_per_purchase = product.max_per_purchase\n if max_per_purchase > product.stock:\n max_per_purchase = product.stock\n if product_id in basket:\n if basket[product_id] >= max_per_purchase:\n messages.error(\n request,\n f'No more than {max_per_purchase} of ' +\n f'{product.name} may be added to an order.',\n f'from__add_to_basket,id__{product_id}'\n )\n else:\n basket[product_id] += quantity\n if basket[product_id] > max_per_purchase:\n basket[product_id] = max_per_purchase\n messages.info(\n request,\n f'No more than {max_per_purchase} of ' +\n f'{product.name} may be added to an order. ' +\n f'Increased {product.name} quantity to ' +\n f'{basket[product_id]}.',\n f'from__add_to_basket,id__{product_id}'\n )\n else:\n messages.success(\n request,\n f'Increased {product.name} quantity to ' +\n f'{basket[product_id]}.',\n f'from__add_to_basket,id__{product_id}'\n )\n else:\n if quantity > max_per_purchase:\n quantity = max_per_purchase\n messages.info(\n request,\n f'No more than {max_per_purchase} of ' +\n f'{product.name} may be added to an order. ' +\n f'Added {quantity}x {product.name} to your ' +\n 'basket.',\n f'from__add_to_basket,id__{product_id}'\n )\n else:\n messages.success(\n request,\n f'Added {quantity}x {product.name} to your ' +\n 'basket.',\n f'from__add_to_basket,id__{product_id}'\n )\n basket[product_id] = quantity\n request.session['basket'] = basket\n else:\n msg = f'You may not add {product.name} with a quantity of '\n msg += 'less than 1.'\n if quantity is None:\n msg = f'No quantity supplied for {product.name}.'\n messages.error(\n request,\n 'Unable to update basket. ' + msg,\n 'from__add_to_basket'\n )\n else:\n messages.error(\n request,\n f'Unable to add {product.name} to basket. Insufficient ' +\n 'stock.',\n f'from__add_to_basket,id__{product_id}'\n )\n if redirect_url is None:\n return redirect(reverse('view_basket'))\n return redirect(redirect_url)",
"def add_product_to_basket(request, item_id):\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n basket = request.session.get('basket', {})\n\n if item_id in list(basket.keys()):\n basket[item_id] += quantity\n messages.success(request, f'{product.name} quantity updated to \\\n {basket[item_id]}')\n else:\n basket[item_id] = quantity\n messages.success(request, f'{product.name} added to basket')\n\n request.session['basket'] = basket\n\n return redirect(redirect_url)",
"def remove_from_wishlist(request, product_id):\n\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n if request.POST:\n product = Product.objects.get(pk=product_id)\n\n # look for item in the user's wishlistItem - returns true if it exists\n existingWishlistItem = WishlistItem.objects.filter(\n product=product).exists()\n\n if existingWishlistItem:\n product = WishlistItem.objects.get(product=product)\n product.delete()\n messages.success(request, \"Item removed from wishlist\")\n return redirect(redirect_url)\n\n if existingWishlistItem is None:\n messages.error(\n request, \"You can not delete a item thats not in the wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, 'Item can not be deleted from your wishlist')\n return render(request, 'home/index.html')",
"def remove_wishlist_item(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n wishlist = get_object_or_404(Wishlist, user=request.user)\n origin = request.GET.get('origin')\n\n if product in wishlist.products.all():\n wishlist.products.remove(product)\n messages.info(\n request,\n f\"Success! {product.name} has been removed from your wishlist!\")\n else:\n messages.error(request, \"Error! Please try again\")\n\n if origin == 'wishlist':\n return redirect(reverse(\"view_wishlist\"))\n else:\n return redirect(reverse(\"product_detail\", args=[product_id]))",
"def modify_product(self, product_id,product_name,price,quantity):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products WHERE product_id=%(product_id)s\",\\\n {\"product_id\":product_id})\n found_id = cur.fetchall()\n if found_id:\n cur.execute(\"UPDATE products SET product_name=%s, price=%s, \\\n quantity= %s WHERE product_id=%s\",\\\n (product_name, price, quantity, product_id))\n con.commit()\n return make_response(jsonify({'message': 'Product modified'}), 200)\n return jsonify({\"message\":\"Couldn't find product ID\"})",
"def increase_product_quantity(product_id, quantity_change) -> int:\n with db_session() as session:\n product = get_product_with_id(product_id)\n new_quantity = product.quantity + quantity_change\n product.quantity = new_quantity\n session.commit()\n return product.quantity",
"def add_product(cls, product_name, price, quantity):\n Product.insert(product_name=product_name,\n product_price=price,\n product_quantity=quantity,\n date_updated=date.today()).on_conflict(\n conflict_target=[Product.product_name],\n preserve=[Product.product_price,\n Product.product_quantity,\n Product.date_updated]).execute()\n print(f'\\nProduct added successfully!')\n print(f'Product: {product_name} ' +\n f'Price: ${int(price) / 100:.2f} ' +\n f'Quantity: {quantity}\\n')",
"def add_product_to_cart(user_name, product_id, quantity, store_name):\n\n user_name = auth.get_username_from_hash(user_name)\n store_handler.check_product_exists_in_store(product_id, store_name)\n user_handler.add_product(user_name, store_name, product_id, quantity)\n users.add_to_cart(store_name=store_name, user_name=user_name, quantity=quantity, product_name=product_id)",
"def add_to_cart(update, context):\n query = update.callback_query\n bot = context.bot\n # loads json received from callback_data into dictionary\n ids = json.loads(query.data)\n category_id = ids['category_id']\n product_id = ids['product_id']\n\n chat_id = update.effective_chat.id\n user = update.effective_user\n # checks if chat already made an order\n if chat_id in cart:\n # checks if user already made an order\n if user.id in cart[chat_id]:\n # checks user already ordered from category\n if category_id in cart[chat_id][user.id]:\n # checks if user already ordered product\n if product_id in cart[chat_id][user.id][category_id]:\n # increase count how often product was ordered\n cart[chat_id][user.id][category_id][product_id] += 1\n else:\n cart[chat_id][user.id][category_id][product_id] = 1\n else:\n cart[chat_id][user.id][category_id] = {product_id: 1}\n else:\n cart[chat_id][user.id] = {category_id: {product_id: 1}}\n else:\n cart[chat_id] = {user.id: {category_id: {product_id: 1}}}\n\n # option to order more or go back to start menu\n keyboard = [[InlineKeyboardButton(\"order more\", callback_data=str(TWO))],\n [InlineKeyboardButton(\"back to menu\", callback_data=str(ONE))]]\n\n # add last message text to product ordered\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text='Added ' + menu[category_id]['products'][product_id][\n 'name'] + ' to your order!',\n reply_markup=InlineKeyboardMarkup(keyboard))\n\n return SEVENTH",
"def add(self, product):\n pass",
"def add(self, item, quantity=1, update_quantity=False): \n item_id = str(item_id)\n if item_id not in self.cart:\n self.cart[item_id] = {'quantity': 0, 'price': str(item.price)}\n if update_quantity:\n self.cart[item_id]['quantity'] = quantity\n else:\n self.cart[item_id]['quantity'] += quantity\n self.save()",
"def wish_item_update(request):\n result = {}\n\n u = request.user\n\n try:\n w = Wishlist.objects.get(party=u, id=int(request.POST['wish_id']))\n except Wishlist.DoesNotExist:\n result[\"result\"] = '-1'\n return JSONHttpResponse(result)\n\n comment = request.POST.get('comment', None)\n if comment:\n w.comment = comment\n max_price = request.POST.get('max_price', None)\n if max_price:\n w.max_price = max_price\n w.save()\n\n result[\"result\"] = str(w.id)\n\n return JSONHttpResponse(result)",
"def wishlist_add(request):\n\n result = {}\n\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n\n if p is None:\n result[\"result\"] = '0'\n else:\n w, created = Wishlist.objects.get_or_create(party=u, product=p)\n if created:\n w.comment=request.POST['comment']\n w.max_price=float(request.POST['max_price'])\n w.save() \n result[\"result\"] = str(w.id)\n else:\n result[\"result\"] = '-1'\n \n # add a feed\n f = Feed(actor=u, action=Feed.WISHLIST, product=p) \n f.save()\n \n return JSONHttpResponse(result)",
"def updateItem(request):\n # Getting the data when you add to cart. Body of JSON\n data = json.loads(request.body)\n # Getting values we sent to body as JSON. prodID and Action\n productId = data['prodId']\n action = data['action']\n\n # Get curr customer\n customer = request.user.customer\n product = BobaProduct.objects.get(id=productId)\n\n # get order associated with customer\n order, created = CustomerOrder.objects.get_or_create(customer=customer)\n\n # Get value of curr order. If it exist, want to just change it\n orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save() #saving this order item\n\n # If the quantity of the order goes below 1, delete the orderItem\n\n if orderItem.quantity < 1:\n orderItem.delete()\n return JsonResponse('Item was added', safe=False)",
"def add_item(self, quantity: int, weight: float, item: Item):\n if self.item_list:\n if item in self.item_list:\n for i in range(0, len(self.item_list)):\n if item.product_id == self.item_list[i].product_id:\n item.quantity = int(item.quantity)\n item.quantity += 0 if not quantity else quantity\n item.weight += 0 if not weight else weight\n else:\n self.item_list.append(item)\n else:\n self.item_list.append(item)",
"def add_to_cart(db, itemid, quantity):",
"def adjust_basket(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n basket = request.session.get('basket', {})\n basket[item_id] = quantity\n messages.success(request, f'{product.name} quantity updated to \\\n {basket[item_id]}')\n request.session['basket'] = basket\n\n return redirect(reverse('view_basket'))",
"def add_favorite(self, product_id: str, substitute_id: str) -> None:\n add_favorite_request = \"INSERT INTO substituted_product VALUES (%s, %s)\"\n self.insert(add_favorite_request, (substitute_id, product_id))"
] |
[
"0.7479784",
"0.7347317",
"0.7177983",
"0.7005374",
"0.68797404",
"0.681493",
"0.6800347",
"0.6763431",
"0.6707623",
"0.66252685",
"0.66090685",
"0.6464602",
"0.644463",
"0.64385396",
"0.63563824",
"0.6327167",
"0.6306546",
"0.6236816",
"0.6137776",
"0.6130651",
"0.6107709",
"0.60792357",
"0.60603845",
"0.60552174",
"0.6049958",
"0.60298985",
"0.602171",
"0.60211456",
"0.59839636",
"0.5983044"
] |
0.7711961
|
0
|
Permute elements of a tensor along a dimension `dim`. If permutation is None do nothing.
|
def apply_permutation(tensor: Tensor, dim: int, permutation: Optional[Tensor]):
if permutation is None:
return tensor
return tensor.index_select(dim, permutation)
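
A brief usage example, assuming PyTorch is installed and that the snippet above has `Tensor` and `Optional` imported from torch and typing respectively; `index_select` gathers slices along `dim` in the order given by the index tensor.

import torch

x = torch.tensor([[10, 11], [20, 21], [30, 31]])
perm = torch.tensor([2, 0, 1])

print(apply_permutation(x, 0, perm))
# tensor([[30, 31],
#         [10, 11],
#         [20, 21]])
print(apply_permutation(x, 0, None))  # permutation is None, tensor returned unchanged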
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def permute(p, dims, perm):\n if issparse(p):\n return _permute_sparse(p, dims, perm)\n return _permute_dense(p, dims, perm)",
"def permute(x, order=None):\n ndims = len(x.shape)\n if order is None:\n raise ValueError(\"Permute: Order must be specified.\")\n\n if order.__class__ == list or order.__class__ == tuple:\n order = np.array(order)\n\n if ndims != len(order):\n raise ValueError(\"Permute: Invalid permutation order.\")\n\n if not (sorted(order) == np.arange(ndims)).all():\n raise ValueError(\"Permute: Invalid permutation order.\")\n\n x = x.transpose(order)\n\n return x",
"def permTensor(rock, dim, rowcols=False):\n if rock is None or not hasattr(rock, \"perm\"):\n raise ValueError(\"Rock structure must have attribute `perm`.\")\n\n nc, nk = rock.perm.shape\n if dim == 1:\n print(\"TODO: Add tests for permTensor dim==1\")\n assert nk == 1, \\\n \"A one dimensional domain does not support multi-component tensors.\"\n K = rock.perm\n r, c = 0, 0\n\n elif dim == 2:\n print(\"TODO: Add tests for permTensor dim==2\")\n\n if nk == 1:\n # Isotropic\n K = np.dot(rock.perm, np.array([[1,0,1]]))\n\n elif nk == 2:\n # Diagonal\n K = np.column_stack((rock.perm[:,0], np.zeros(nc), rock.perm[:,1]))\n\n elif nk == 3:\n # Full, symmetric 2-by-2\n K = rock.perm\n\n else:\n raise ValueError(dim+\"-component permeability value is not \"+\\\n \"supported in two space dimensions\")\n K = K[:, [0,1,1,2]]\n r = np.array([0,0,1,1])\n c = np.array([0,1,0,1])\n\n elif dim == 3:\n if nk == 1:\n # Isotropic\n K = np.dot(rock.perm, np.array([[1,0,0,1,0,1]]))\n\n elif nk == 3:\n # Diagonal\n K = np.column_stack((\n rock.perm[:,0],\n np.zeros(nc),\n np.zeros(nc),\n rock.perm[:,1],\n np.zeros(nc),\n rock.perm[:,2]\n ))\n elif nk == 6:\n # Full, symmetric, 3-by-3\n K = rock.perm\n\n else:\n raise ValueError(dim+\"-component permeability value is not \"+\\\n \"supported in three space dimensions\")\n\n K = K[:, [0,1,2,1,3,4,2,4,5]]\n r = np.array([0,0,0,1,1,1,2,2,2])\n c = np.array([0,1,2,0,1,2,0,1,2])\n\n if rowcols:\n return K, r, c\n else:\n return K",
"def _permute_dense(p, dims, perm):\n p, perm = np.asarray(p), np.asarray(perm)\n d = prod(dims)\n\n if isop(p):\n return (\n p.reshape([*dims, *dims])\n .transpose([*perm, *(perm + len(dims))])\n .reshape([d, d])\n )\n\n return p.reshape(dims).transpose(perm).reshape([d, 1])",
"def permute(ts: Tensor, axes) -> Tensor:\n permute_op = PermuteOp(axes)\n return permute_op(ts, None)",
"def permute(self, ordering: np.ndarray, *, axis: int) -> None:\n\t\tif axis == 0:\n\t\t\tself.values = self.values[ordering, :]\n\t\telif axis == 1:\n\t\t\tself.values = self.values[:, ordering]\n\t\telse:\n\t\t\traise ValueError(\"axis must be 0 or 1\")",
"def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size)\r\n #print \"NDIM\", ndim, size\r\n op = RandomFunction(permutation_helper,\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast + (False,)),\r\n ndim_added=1)\r\n return op(random_state, size, n)",
"def permutation(self, n: Union[int, Tensor], *, dtype: str = \"int32\"):\n _seed = self._seed() if callable(self._seed) else self._seed\n if isinstance(n, int):\n return _permutation(\n n=n, seed=_seed, device=self._device, handle=self._handle, dtype=dtype\n )\n assert isinstance(n, Tensor)\n return _shuffle(inp=n, seed=_seed, handle=self._handle)",
"def _permute_sparse(a, dims, perm):\n perm, dims = np.asarray(perm), np.asarray(dims)\n\n # New dimensions & stride (i.e. product of preceding dimensions)\n new_dims = dims[perm]\n odim_stride = np.multiply.accumulate(dims[::-1])[::-1] // dims\n ndim_stride = np.multiply.accumulate(new_dims[::-1])[::-1] // new_dims\n\n # Range of possible coordinates for each subsys\n coos = (tuple(range(dim)) for dim in dims)\n\n # Complete basis using coordinates for current and new dimensions\n basis = np.asarray(tuple(itertools.product(*coos, repeat=1)))\n oinds = np.sum(odim_stride * basis, axis=1)\n ninds = np.sum(ndim_stride * basis[:, perm], axis=1)\n\n # Construct permutation matrix and apply it to state\n perm_mat = sp.coo_matrix((np.ones(a.shape[0]), (ninds, oinds))).tocsr()\n if isop(a):\n return dot(dot(perm_mat, a), dag(perm_mat))\n return dot(perm_mat, a)",
"def permute(seq, permutation):\n return [seq[i] for i in permutation]",
"def permute(self, arr):\n\n return arr[self.permutation_idxs]",
"def permutation(self, size=None, n=1, ndim=None, dtype='int64'):\r\n return self.gen(permutation, size, n, ndim=ndim, dtype=dtype)",
"def permute_to_N_HWA_K(tensor, K):\n assert tensor.dim() == 4, tensor.shape\n N, _, H, W = tensor.shape\n tensor = tensor.view(N, -1, K, H, W)\n tensor = tensor.permute(0, 3, 4, 1, 2)\n tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)\n return tensor",
"def permutation_helper(random_state, n, shape):\r\n # n should be a 0-dimension array\r\n assert n.shape == ()\r\n # Note that it is important to convert `n` into an integer, because if it\r\n # is a long, the numpy permutation function will crash on Windows.\r\n n = int(n.item())\r\n\r\n if shape is None:\r\n # Draw only one permutation, equivalent to shape = ()\r\n shape = ()\r\n out_shape = list(shape)\r\n out_shape.append(n)\r\n out = numpy.empty(out_shape, int)\r\n for i in numpy.ndindex(*shape):\r\n out[i] = random_state.permutation(n)\r\n\r\n #print 'RETURNING', out.shape\r\n return out",
"def inverse_permutation(perm):\r\n return permute_row_elements(\r\n arange(perm.shape[-1], dtype=perm.dtype),\r\n perm,\r\n inverse=True)",
"def apply_permutation(hyper, pol, perm):\n pass",
"def permute_2d(m, p):\r\n return m[p][:, p]\r\n # unused below\r\n m_t = transpose(m)\r\n r_t = take(m_t, p, axis=0)\r\n return take(transpose(r_t), p, axis=0)",
"def inverse_permutation(permutation):\n arange = torch.arange(permutation.size(-1), device=permutation.device)\n res = torch.zeros_like(permutation).scatter_(-1, permutation, arange.expand_as(permutation))\n return res",
"def permute_sentence(sentence, permutation_set):\n pass",
"def flat_shuffle(tensor):\n shape_ = tensor.size()\n flat_tensor = tensor.view(-1)\n shuffled_flat_tensor = shuffle(flat_tensor)\n return shuffled_flat_tensor.view(shape_)",
"def repeat_2D_tensor(tensor, k):\n if len(tensor.size()) > 2:\n raise ValueError(\"Cannot repeat a non-2D tensor with this method.\")\n return tensor.repeat(k, 1, 1)",
"def transpose(x: torch.Tensor, dims):\n _dims = list(dims)\n for i in range(len(_dims)):\n if _dims[i] != i:\n x = x.transpose(i, _dims[i])\n j = _dims.index(i)\n _dims[i], _dims[j] = i, _dims[i]\n return x",
"def repeat_2D_tensor(tensor, k):\n if len(tensor.size()) > 2:\n raise ValueError('Cannot repeat a non-2D tensor with this method.')\n return tensor.repeat(k, 1, 1)",
"def apply_dct_permutation(self, data, permutation):\n n = data.shape[0]\n\n for i in range(n):\n for c in range(self.n_channel):\n xdct = dct(dct(data[i, :, :, c]).T)\n xdct = self.apply_sign_permutation(xdct, permutation)\n data[i, :, :, c] = idct(idct(xdct).T)\n nrm = np.sqrt(np.sum(data[i, :, :, c]**2))\n data[i, :, :, c] /= nrm\n\n return data",
"def Mat_CorrectPerm(X0,X):\n\n Xout = dp(X)\n\n nX = np.shape(X)\n\n for rx in range(nX[2]):\n for ry in range(nX[3]):\n Xt = X[:,:,rx,ry]\n xx,p=CorrectPerm(X0,Xt)\n Xout[:,:,rx,ry]=xx\n\n return Xout",
"def scramble(self, permutation=None, return_permutation=False):\n if permutation is None:\n permutation = np.random.permutation(self.nb_rows)\n for i, col in enumerate(self.list_col):\n if col is not None:\n self.list_col[i] = col[permutation]\n\n if return_permutation:\n return permutation",
"def permute(self):\n raise NotImplementedError()",
"def permute(n, r):\n\n product = 1\n for i in range(n - r + 1, n + 1):\n product *= i\n return product",
"def prod(tensor, axis=None):\n raise NotImplementedError",
"def permute(variable, output_order=('t', 'z', 'zb', 'y', 'x')):\n input_dimensions = variable.dimensions\n\n # filter out irrelevant dimensions\n dimensions = [x for x in output_order if x in input_dimensions]\n\n # create the mapping\n mapping = [dimensions.index(x) for x in input_dimensions]\n\n if mapping:\n return np.transpose(variable[:], mapping)\n else:\n return variable[:] # so that it does not break processing \"mapping\""
] |
[
"0.67399627",
"0.6282171",
"0.6174553",
"0.6107188",
"0.6091974",
"0.604093",
"0.5971631",
"0.5741166",
"0.5669643",
"0.56401783",
"0.5604093",
"0.5555404",
"0.5337477",
"0.5284809",
"0.5278606",
"0.52729297",
"0.5257068",
"0.5256239",
"0.5248782",
"0.5200968",
"0.5109756",
"0.50979805",
"0.50976765",
"0.5097298",
"0.5087068",
"0.50658125",
"0.502454",
"0.5014947",
"0.5012015",
"0.49806857"
] |
0.7508003
|
0
|
Iterate through all the layers and through all directions within each layer. Each argument should be list-like of length ``num_layers * num_directions``, where each element corresponds to a (layer, direction) pair. The corresponding elements of each of these lists will be iterated over together.
|
def iterate_layers(self, *args):
for layer in range(self.num_layers):
yield layer, (
(
direction,
tuple(arg[self.num_directions * layer + direction] for arg in args),
)
for direction in range(self.num_directions)
)
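
A minimal consumption sketch, with a hypothetical class standing in for the RNN wrapper (2 layers, 2 directions, so each flat argument list has length num_layers * num_directions = 4):

class LayerIterSketch:
    num_layers = 2
    num_directions = 2

    def iterate_layers(self, *args):
        for layer in range(self.num_layers):
            yield layer, (
                (
                    direction,
                    tuple(arg[self.num_directions * layer + direction] for arg in args),
                )
                for direction in range(self.num_directions)
            )

weights = ['w_l0_fwd', 'w_l0_bwd', 'w_l1_fwd', 'w_l1_bwd']
biases = ['b_l0_fwd', 'b_l0_bwd', 'b_l1_fwd', 'b_l1_bwd']

for layer, directions in LayerIterSketch().iterate_layers(weights, biases):
    for direction, (w, b) in directions:
        print(layer, direction, w, b)
# 0 0 w_l0_fwd b_l0_fwd
# 0 1 w_l0_bwd b_l0_bwd
# 1 0 w_l1_fwd b_l1_fwd
# 1 1 w_l1_bwd b_l1_bwd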
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __iter__(self) -> Iterable[\"AbstractLane\"]:\n for origin in self.graph:\n for destination in self.graph[origin]:\n for index, lane in self.graph[origin][destination].items():\n yield lane",
"def step(self):\n for layer in self.layers:\n layer.step()",
"def iterate_edges(\n self, verbs=None, directions=None, nodes=None\n ) -> Iterable[Edge]:",
"def iteration_layers(model, speedup, session, indepth_layer=None):\n if speedup is True:\n layer_names_reduced = ['conv2d1',\n 'conv2d2',\n 'mixed3b',\n 'mixed4b',\n 'mixed5b']\n layer_tensors = [session.graph.get_tensor_by_name(name + \":0\") for name in layer_names_reduced]\n else:\n layer_tensors = model.layer_tensors\n\n return layer_tensors",
"def multilayer(self, n_layers):\n for i in range(1, n_layers+1):\n print(\"Layer nr: \", i)\n # For the first layer, take the input image\n if i == 1:\n # Convolution\n self.convolution(i, self.img)\n # Use the output of the convolution as input of the pooling layer\n img = Image.open(str(\"conv\" + str(i) + \".png\"))\n # Pooling\n self.pool(img.convert('RGB'), i)\n else:\n # Use the output of the pooling as input of the convolution layer\n img = Image.open(str(\"conv_pool\" + str(i-1) + \".png\"))\n # Convolution\n self.convolution(i, img.convert(\"RGB\"))\n # Use the output of the convolution as input of the pooling layer\n img = Image.open(str(\"conv\"+ str(i) + \".png\"))\n # Pooling\n self.pool(img.convert('RGB'), i)",
"def test_delta_layer_iterator(self):\n\n\t\tdelta_iter = self.watcher.make_delta_layer_iterator(base_model=self.model, model=self.model)\n\t\n\t\tfor ww_layer in delta_iter:\n\t\t\t\n\t\t\tprint(ww_layer.layer_id, ww_layer.name)\n\t\t\tself.assertEquals(1, len(ww_layer.Wmats))\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\t\n\t\t\tlayer_norm = np.linalg.norm(W)\n\t\t\tlayer_sum = np.sum(W)\n\n\t\t\tself.assertAlmostEqual(0.0, layer_norm)\n\t\t\tself.assertAlmostEqual(0.0, layer_sum)\n\n\t\treturn",
"def neighbour_directions(self, index, grid_size):\n neighbours = []\n for direction in DIRECTIONS:\n neighbour = self.index_in_direction(index, grid_size, direction)\n if neighbour is not None:\n neighbours.append(neighbour)\n\n return neighbours",
"def run_stepwise(self, inputs, stop=None):\n # type: (dict, str) -> (int, str, dict, numpy.ndarray, numpy.ndarray)\n fancy_logger.banner(\"RUN NET STEPWISE\")\n\n inputs.update(self.params)\n\n for layer_idx, layer in enumerate(self.net):\n\n # logger.info(\"-----------------------\")\n # logger.info(\"Run layer idx: {}, op_name: {}\"\n # .format(layer_idx, layer.name))\n\n inpts = [inputs[name] for name in layer.inputs]\n\n outpt = layer.forward_exec(inpts)\n\n # TODO: can we make this more elegant?\n if layer.type in ['Convolution']:\n quant_outpt = layer.get_output_for_quantization(inpts)\n else:\n quant_outpt = outpt\n\n # TODO: remove unnecessary data as we keep track of\n # the outputs of all layers\n inputs[layer.name] = outpt\n\n yield (\n layer_idx,\n layer,\n inpts,\n outpt,\n quant_outpt\n )\n\n if stop is not None and layer.name == stop:\n break",
"def run(self, inputs, outputs=[], stop=None, force_stepwise=True):\n # (Dict[str,numpy.ndarray], List[str], str, bool)\n # -> (List[numpy.ndarray]/numpy.ndarray)\n fancy_logger.banner(\"RUN NET\")\n\n inputs.update(self.params)\n res = {}\n for layer_idx, layer in enumerate(self.net):\n\n logger.info(\"-----------------------\")\n logger.info(\"Run layer idx: {}, op_name: {}\"\n .format(layer_idx, layer.name))\n logger.info(\"Inputs: {}\".format(layer.inputs))\n inpts = [inputs[name] for name in layer.inputs]\n\n outpt = layer.forward_exec(inpts)\n\n # TODO: remove unnecessary data as we keep track of\n # the outputs of all layers\n inputs[layer.name] = outpt\n\n if layer.name in outputs:\n res[layer.name] = outpt\n\n if stop is not None and layer.name == stop:\n break\n\n if len(outputs) == 0:\n res['output'] = outpt\n\n return [res[outpt] for outpt in outputs] \\\n if len(outputs) > 0 else [res['output']]",
"def walk_forward_from_iter(self, initial_layers: Set[str]) -> Iterator[Tuple[str, bool, bool]]:\n self._check_input_layers(initial_layers)\n return self._create_iter(\n initial_layers,\n self._sinks,\n self._forward_mapping,\n self._backward_mapping,\n )",
"def layers(self, layers):\n self._layers = layers\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer",
"def layers(self, layers):\n self._layers = layers\n self.thetas = []\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer\n self.thetas.extend(layer.thetas())",
"def all_layers(parent=None):\n if parent is None:\n parent = QgsProject.instance().layerTreeRoot()\n result = []\n def do_a_group(grp, level=0):\n for child in grp.children():\n if isinstance(child, QgsLayerTreeGroup):\n do_a_group(child, level=level + 1)\n elif isinstance(child, QgsLayerTreeLayer):\n result.append(child)\n\n do_a_group(parent)\n return result",
"def iter_chunks(self, chunk_size, depths=True, step_size=None):\n step_size = step_size or chunk_size\n\n i = 0\n while i < self.height:\n if depths:\n yield self.img[i:i+chunk_size], self.depths[i:i+chunk_size]\n else:\n yield self.img[i:i+chunk_size]\n i += step_size",
"def feedforward(self, inputs: np.matrix):\n outs = inputs\n for layer in self.__layers:\n outs = layer.propagate(outs)\n return outs",
"def traverse_grid(self, start_cell, direction, num_steps):\n elements = []\n\n for step in range(num_steps):\n row = start_cell[0] + step * direction[0]\n col = start_cell[1] + step * direction[1]\n elements.append(self._grid[row][col])\n\n return elements",
"def iter_all_chains(self):\n for model in self.model_list:\n for chain in model.chain_list:\n yield chain",
"def iter_all(self):\n for i in range(self.num_nodes):\n self.iter_node(i)",
"def steiner3D(imOut, n, directions, grid=DEFAULT_GRID3D):\n imOut.reset()\n (w,h) = imOut.getSize()\n l = imOut.getLength()\n \n v = computeMaxRange(imOut[0])[1]\n imOut.setPixel(v, (w/2,h/2,l/2))\n \n ses = []\n for d in directions:\n ses.append(structuringElement3D([0,d],grid))\n \n for i in range(n):\n for se in ses:\n dilate3D(imOut, imOut, 1, se=se)\n imOut.updateDisplay()",
"def iter_dfs(self, depth=0):\n yield self, depth\n yield from self.left.iter_dfs(depth=depth + 1)\n yield from self.right.iter_dfs(depth=depth + 1)",
"def test_delta_layer_iterator_1(self):\n\t\t\n\t\t# create a Fake model, set weight matrix to all ones\n\t\tbase_model = self.create_fake_model()\n\t\tbase_model.layer.weight.data.fill_(0.0) \n\t\t\n\t\tmodel = self.create_fake_model()\n\t\tmodel.layer.weight.data.fill_(1.0) \n\n\t\texpected_layer_matrix = np.ones((100, 200))\n\t\texpected_norm = np.linalg.norm(expected_layer_matrix, 'fro')\n\t\texpected_sum = np.sum(expected_layer_matrix)\n\t\t \n\t\tdelta_iter = self.watcher.make_delta_layer_iterator(base_model=base_model, model=model)\n\t\t \n\t\tfor ww_layer in delta_iter:\n\t\t \n\t\t\tprint(ww_layer.layer_id, ww_layer.name)\n\t\t\tself.assertEquals(1, len(ww_layer.Wmats))\n\t\t\tW = ww_layer.Wmats[0]\n\t\t \n\t\t\tlayer_norm = np.linalg.norm(W)\n\t\t\tlayer_sum = np.sum(W)\n\t\t \n\t\t\tprint(layer_norm, layer_sum)\n\t\t \n\t\t\tself.assertAlmostEqual(expected_norm, layer_norm)\n\t\t\tself.assertAlmostEqual(expected_sum, layer_sum)\n\n\t\treturn",
"def _compile_networks(self):\n\n _header_ = self._header_ + '_compile_networks(): '\n\n if self.verbose:\n print(_header_ + 'Compiling all networks ...')\n\n networks = []\n\n all_nidx = set(self.nidx2lidx.keys())\n\n while all_nidx:\n\n nidx0 = [all_nidx.pop()]\n network = set(nidx0)\n\n while nidx0 and all_nidx:\n\n nidx = set()\n\n for l in nidx0:\n lidx = self.nidx2lidx[l]\n for n in lidx:\n nidx |= self.lidx2nidx[n]\n\n nidx -= network\n network |= nidx\n all_nidx -= nidx\n nidx0 = nidx.copy()\n\n networks.append(network)\n\n if self.verbose:\n print(_header_ + 'Found %d networks' % len(networks))\n for i, network in enumerate(networks):\n print(' Network %d - %s' % (i, ','.join([str(j) for j in network])))\n\n return networks",
"def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)",
"def get_directions(board_ndim):\n directions = [\n [[0 for _ in range(board_ndim)] for _ in range(2)]\n for _ in range(board_ndim)\n ]\n for ind in range(board_ndim):\n directions[ind][0][ind] = 1\n directions[ind][1][ind] = -1\n return directions",
"def run_all_layers(self, img): # noqa\n s1_outputs = [s1(img) for s1 in self.s1_units]\n\n # Each C1 layer pools across two S1 layers\n c1_outputs = []\n for c1, i in zip(self.c1_units, range(0, len(self.s1_units), 2)):\n c1_outputs.append(c1(s1_outputs[i:i+2]))\n\n s2_outputs = [s2(c1_outputs) for s2 in self.s2_units]\n c2_outputs = [c2(s2) for c2, s2 in zip(self.c2_units, s2_outputs)]\n\n return s1_outputs, c1_outputs, s2_outputs, c2_outputs",
"def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)",
"def directions(self):\n return []",
"def _generate_iterator(self) -> Iterable:\n params: List[Tensor] = []\n for angle_range in self._ranges:\n lin_space: Tensor = linspace(angle_range[0], angle_range[1], steps=self._num_steps)\n params.append(lin_space)\n power: int\n dims: int\n for i in range(0, self._num_params):\n power = len(self._ranges) - 1 - i\n dims = i\n params[i] = params[i].repeat_interleave(self._num_steps ** power)\n params[i] = params[i].broadcast_to((self._num_steps ** dims, self._num_steps ** (power + 1))).flatten()\n return zip(*params)",
"def run(layers):",
"def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target"
] |
[
"0.56702995",
"0.5390838",
"0.51307994",
"0.51242316",
"0.5109984",
"0.5058537",
"0.5057159",
"0.4929793",
"0.49037892",
"0.49035177",
"0.48847336",
"0.48763973",
"0.4873756",
"0.4857415",
"0.48254174",
"0.48151118",
"0.4796563",
"0.47938234",
"0.4784105",
"0.477571",
"0.4763999",
"0.4760416",
"0.4699146",
"0.46894678",
"0.46860808",
"0.46807",
"0.4672112",
"0.46645483",
"0.4648299",
"0.46266943"
] |
0.7397223
|
0
|
Scale a Flickr url_s image to fit a Folium popup. The popup box is hardcoded as `self.popup_width` in both width and height; the function rescales the image so that its long axis exactly fits within this box.
|
def scale_image_to_frame(self, width, height):
aspect = width / height
# Image is landscape.
if aspect >= 1:
scale = self.popup_width / width
return (self.popup_width, int(scale * height))
# Otherwise image is portrait.
scale = self.popup_width / height
return (int(scale * width), self.popup_width)
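
A quick check of the scaling logic, wrapping the method in a hypothetical class with a popup box of 300 pixels (the 300 value is illustrative, not from the original code):

class PopupScaleSketch:
    popup_width = 300  # hypothetical popup box size in pixels

    def scale_image_to_frame(self, width, height):
        aspect = width / height
        if aspect >= 1:  # landscape: width is the long axis
            scale = self.popup_width / width
            return (self.popup_width, int(scale * height))
        scale = self.popup_width / height  # portrait: height is the long axis
        return (int(scale * width), self.popup_width)

sketch = PopupScaleSketch()
print(sketch.scale_image_to_frame(640, 480))  # landscape -> (300, 225)
print(sketch.scale_image_to_frame(480, 640))  # portrait  -> (225, 300)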
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def scale_widget_to_image_size(self):\n if self._image is not None:\n im = self._image.make_image()\n self.width = im.shape[1]\n self.height = im.shape[0]",
"def zoom_to_size(self, *p):\n\t\tif self.image is None or self.allocation is None:\n\t\t\treturn\n\t\tif __debug__: print self.allocation.width, self.image.get_width()\n\t\tif __debug__: print self.allocation.width, self.image.get_width(), self.allocation.width/self.image.get_width()\n\t\tz = min(\n\t\t\tself.allocation.width/self.image.get_width(),\n\t\t\tself.allocation.height/self.image.get_height()\n\t\t\t)\n\t\tif __debug__: print \"zoom_to_size\", \"z=\", z\n\t\tself.zoom = z",
"def get_url_for_min_resolution(self, min_height, min_width, image):",
"def scale_image_button(src):\n return pygame.transform.scale(pygame.image.load(src), (30, 30))",
"def get_thumbnail_size(self, thumbnail_name, forced=False):",
"def resizePreview(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n width = 300\n height = int(float(width) / ratio)\n else:\n height = 170\n width = int(float(height) / ratio)\n if 'prodManager' in os.path.basename(self._ima):\n width = 300\n height = 170\n self.lPreview.setMinimumSize(width, height)\n self.lPreview.setMaximumSize(width, height)",
"def bigThumbnail(self):\n\t\tfileCount = len(self.fileList)\n\t\tthumbSize = (200, 200)\n\t\timgHoriz = int(self.get_screen().get_width() / (thumbSize[1] + 20))\n\t\timgSize = (self.get_screen().get_width(), (thumbSize[1] + 20) * (int(fileCount / imgHoriz) + 2))\n\n\t\tpixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, imgSize[0], imgSize[1])\n\t\tfor file in range(len(self.fileList)):\n\t\t\ttry:\n\t\t\t\ttimg = gtk.gdk.pixbuf_new_from_file(self.fileList[file])\n\t\t\texcept:\n\t\t\t\tprint >> sys.stderr, \"Failed to load image %s\" % self.fileList[file]\n\t\t\t\tcontinue\n\t\t\ttimgSize = [timg.get_width(), timg.get_height()]\n\t\t\tif timgSize[0] > thumbSize[0] or timgSize[1] > thumbSize[1]:\n\t\t\t\tscaleFactor = 1.0 * thumbSize[0] / timgSize[0]\n\t\t\t\tif timgSize[1] * scaleFactor > thumbSize[1]:\n\t\t\t\t\tscaleFactor = 1.0 * thumbSize[1] / timgSize[1]\n\t\t\t\tself.scaleFactor = scaleFactor\n\t\t\t\ttimgSize[0] = int(timgSize[0] * scaleFactor)\n\t\t\t\ttimgSize[1] = int(timgSize[1] * scaleFactor)\n\t\t\t\ttimg = timg.scale_simple(timgSize[0], timgSize[1], gtk.gdk.INTERP_BILINEAR)\n\t\t\tpos = ( (file % imgHoriz) * (thumbSize[0] + 20) + 10 + (thumbSize[0] - timgSize[0]) / 2,\n\t\t\t\tint(file / imgHoriz) * (thumbSize[1] + 20) + 10)\n\n\t\t\tprint \" Rendering thumbnails; %d of %d\\r\" % (file, len(self.fileList)),\n\t\t\tsys.stdout.flush()\n\n\t\t\ttimg.copy_area(0, 0, timgSize[0], timgSize[1], pixbuf, pos[0], pos[1])\n\t\t\tdel timg\n\t\t\tgc.collect()\n\t\tprint\n\t\tself.currentPixbuf = pixbuf\n\t\tself.fileList = [ \"#\" ]\n\t\tself.fileName = \"#\"\n\t\tself.autoScale()\n\t\tself.display()",
"def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r",
"def autoResize(self):\n\t\t#self.infoLabelBox.set_size_request(1,1)\n\t\timgSize = [self.currentPixbuf.get_width() * self.scaleFactor, self.currentPixbuf.get_height() * self.scaleFactor]\n\t\timgSize = map(lambda x: max(int(x), 1), imgSize)\n\t\tif not self.fullscreenToggle:\n\t\t\tself.resize(imgSize[0], imgSize[1])\n\t\t\tposition = ( int(0.5 * (self.get_screen().get_width() - imgSize[0])),\n\t\t\t\tint(0.5 * (self.get_screen().get_height() - imgSize[1])))\n\t\t\tself.move(position[0], position[1])\n\t\t\tself.fixed.move(self.imgDisplay, 0, 0)\n\t\t\tif not self.hideTransparent and self.imgTrans.bgOn:\n\t\t\t\tself.imgTrans.set_size_request(imgSize[0], imgSize[1])\n\t\t\t# make eventbox the same size as image\n\t\t\t# this will not be correct when infoLabelBox is visible\n\t\t\tself.eventBox.set_size_request(imgSize[0], imgSize[1])\n\t\telse:\n\t\t\tself.fixed.move(self.imgDisplay, max(0, int((self.get_size()[0] - imgSize[0]) / 2)),\n\t\t\t\tmax(0, int((self.get_size()[1] - imgSize[1]) / 2)))\n\t\t\tif not self.hideTransparent and self.imgTrans.bgOn:\n\t\t\t\tself.imgTrans.set_size_request(int(self.get_size()[0]), int(self.get_size()[1]))\n\t\t\t# make eventbox the same size as screen\n\t\t\tself.eventBox.set_size_request(self.get_size()[0],self.get_size()[1])",
"def resize_profile_pic(sender, instance, **kwargs):\n profile_pic = instance.profile_picture\n if profile_pic.name != \"default.png\":\n img = Image.open(profile_pic.path)\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(profile_pic.path)",
"def _resize_image(self, event):\n self.window_width = event.width\n self.window_height = event.height",
"def zoom_augmentation():\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)",
"def autoScale(self):\n\t\tif self.autoscaleToggle:\n\t\t\tif not self.fullscreenToggle:\n\t\t\t\tmaxSize = (self.get_screen().get_width() - 100, self.get_screen().get_height() - 100)\n\t\t\telse:\n\t\t\t\tmaxSize = (self.get_screen().get_width(), self.get_screen().get_height())\n\t\t\timgSize = [self.currentPixbuf.get_width(), self.currentPixbuf.get_height()]\n\n\t\t\tif imgSize[0] > maxSize[0] or imgSize[1] > maxSize[1]:\n\t\t\t\tscaleFactor = 1.0 * maxSize[0] / imgSize[0]\n\t\t\t\tif imgSize[1] * scaleFactor > maxSize[1]:\n\t\t\t\t\tscaleFactor = 1.0 * maxSize[1] / imgSize[1]\n\t\t\t\tself.scaleFactor = scaleFactor\n\t\t\t\timgSize[0] = int(imgSize[0] * scaleFactor)\n\t\t\t\timgSize[1] = int(imgSize[1] * scaleFactor)",
"def setImageDimensions(*args):",
"def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()",
"def resize(img):\n size = (500, 500)\n img.thumbnail(size)\n return img",
"def img_scale(self):\n return min(400, abs(self.size))",
"def FlyResize( image, log_mess, nimages, method = Image.ANTIALIAS ):\n oldw, oldh = image.size\n resl = [8, 10, 14, 16, 20, 22, 24, 32, 40, 48, 64, 96, 128, 256]\n \n if oldw > 256 or oldh > 256:\n newsiz = min(resl, key = lambda x:abs(x - max(oldw, oldh)))\n image.thumbnail((newsiz, newsiz), method)\n neww, newh = image.size\n log_mess += ' and new size scaled = %s x %s' %(neww, newh)\n elif nimages > 1:\n log_mess += ' and size = %s x %s' %(oldw, oldh)\n \n return oldw, oldh, image, log_mess",
"def resize(self, *args):\n if self.parent is None: # when deleted\n return\n if self.parent.render_window is None: # BasePlotter\n return\n\n if self._prior_window_size != self.parent.window_size:\n self._prior_window_size = self.parent.window_size\n\n actor = self._actors['background']\n image_data = actor.GetInput()\n origin = image_data.GetOrigin()\n extent = image_data.GetExtent()\n spacing = image_data.GetSpacing()\n xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]\n yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]\n yd = (extent[3] - extent[2] + 1) * spacing[1]\n dist = self.camera.distance\n\n # make the longest dimensions match the plotting window\n img_dim = np.array(image_data.dimensions[:2])\n self.camera.focus = np.array([xc, yc, 0.0])\n self.camera.position = np.array([xc, yc, dist])\n\n ratio = img_dim / np.array(self.parent.window_size)\n scale_value = 1\n if ratio.max() > 1:\n # images are not scaled if larger than the window\n scale_value = ratio.max()\n\n if self._scale is not None:\n scale_value /= self._scale\n\n self.camera.parallel_scale = 0.5 * yd / self._scale",
"def set_size(self, width, height):\r\n \r\n self.image = pygame.transform.scale(self.image, (width, height))\r\n self.rect = self.image.get_rect()",
"def on_scale (self):\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_foreground()\n\t\t\tself.redraw_background()\n\n\t\tif self.expand2 == _('Use a scrollbar'):\n\t\t\tself.width = int((self.icon_size * 2 * self.rows + ((self.border_size+self.shadow_size)*2)+15 ) + 24/self.scale)\n\t\t\tself.update_scrollbar()",
"def main():\r\n original = SimpleImage(\"images/poppy.png\")\r\n original.show()\r\n # shrink function\r\n after_shrink = shrink('images/poppy.png')\r\n after_shrink.show()",
"def resize(self, size):\n return Image(self.pil_image.resize(size, PIL.Image.ANTIALIAS))",
"def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible(\"image\"):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get(\"image\", None)\n if image_conf:\n scaleconf = image_conf[\"imgsize\"]\n # scale string is something like: 'mini 200:200' and\n # we need the name only: 'mini'\n if scaleconf == \"_original\":\n scale = None\n else:\n scale = scaleconf.split(\" \")[0]\n scales = item.restrictedTraverse(\"@@images\")\n return scales.scale(\"image\", scale)",
"def image_size(size):\n l_max = max(size)\n if l_max > 300:\n num = l_max/300\n else:\n num = 1\n w = round(size[0] / num)\n h = round(size[1] / num)\n new_size = [w, h]\n return new_size",
"def on_scale_changed(self):\n\n options = self.get_options()\n\n label = \"Result: {0}x{1}\".format(int(options[\"width\"]),\n int(options[\"height\"]))\n\n self.scale_result.setText(label)",
"def resizeEvent(self, event):\n self.image_canvas.fit_in_view()",
"def checkFigureSize(self, figure):\n # scale factor\n f = 3\n width, height = self.getWidgetDims(figure)\n\n if (width < 100) and (height < 100):\n return figure.scaled(width*f, height*f, self.ratioOption, QtCore.Qt.SmoothTransformation)\n elif width<100:\n return figure.scaled(width*f, self.height, self.ratioOption, QtCore.Qt.SmoothTransformation)\n\n elif height<100:\n return figure.scaled(self.width, height*f, self.ratioOption, QtCore.Qt.SmoothTransformation)\n else:\n return figure.scaled(self.width, self.height, self.ratioOption, QtCore.Qt.SmoothTransformation)",
"def resizeEvent(self, event):\n event.accept()\n self.overlay.resize(event.size())\n # Move gif to the center of the widget\n self.overlay.move(self.rect().center() - self.overlay.rect().center())",
"def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible('image'):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get('image', None)\n if image_conf:\n scaleconf = image_conf['imgsize']\n # Scale string is something like: 'mini 200:200'.\n # We need the name only: 'mini'.\n scale = scaleconf.split(' ')[0]\n scales = ploneapi.content.get(path='@@images')\n return scales.scale('image', scale)"
] |
[
"0.5512757",
"0.54284984",
"0.530285",
"0.5234716",
"0.5216585",
"0.5185741",
"0.517777",
"0.5155664",
"0.5120164",
"0.5117348",
"0.5089344",
"0.50772893",
"0.5074432",
"0.50080764",
"0.4998123",
"0.4992161",
"0.4959421",
"0.49268898",
"0.49229434",
"0.4913981",
"0.48858967",
"0.48827764",
"0.4878325",
"0.48470604",
"0.48418382",
"0.48325056",
"0.48148745",
"0.48137298",
"0.479622",
"0.47956702"
] |
0.6120272
|
0
|
Set the lowest four bits of every 16-bit TIFF file in a folder to zero.
|
def tiff_low_four_bits_set_zero(path_tiff_source, path_tiff_out):
    # Requires: import glob, os and `from libtiff import TIFF`.
    # Zero the lowest four bits of every 16-bit TIFF in the source folder
    # and write the result to the output folder under the same file name.
    source_files = glob.glob(os.path.join(path_tiff_source, '*.tiff'))
    for source_file in source_files:
        tif = TIFF.open(source_file, mode='r')
        img = tif.read_image()
        img = img - img % 16  # clear the lowest four bits of each pixel
        img_name = os.path.basename(source_file)
        out_file = os.path.join(path_tiff_out, img_name)  # path of the output file
        tif_processed = TIFF.open(out_file, 'w')
        tif_processed.write_image(img)
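
The subtraction trick above (img - img % 16) is the same as masking off the low nibble with a bitwise AND. A minimal sketch of that equivalence on synthetic NumPy data, independent of the TIFF I/O, is shown below; the array contents are illustrative only.

import numpy as np

# Minimal sketch: two equivalent ways to clear the lowest four bits of
# 16-bit pixel values, checked on synthetic data (no TIFF files involved).
pixels = np.array([0x0000, 0x000F, 0x1234, 0xFFFF], dtype=np.uint16)

via_modulo = pixels - pixels % 16           # the approach used above
via_bitmask = pixels & np.uint16(0xFFF0)    # explicit bit mask, same result

assert np.array_equal(via_modulo, via_bitmask)
print(via_bitmask)  # [    0     0  4656 65520] -- low nibble zeroed everywhere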
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reset(self):\n self.all_files_idx = np.arange(self._div*self._nb_dir)\n\n if self.shuffle>1:\n np.random.shuffle(self.all_files_idx)\n\n self.idx_folder = self.all_files_idx//self._div\n self.idx_file = self.all_files_idx % self._div\n self.current_folder = self.idx_folder[0]\n self.current_file = self.idx_file[0]",
"def clear(self):\n self._tiff = np.array([], dtype=\"uint8\")\n self._offset = 0",
"def resetBin(this):\n\t\tthis._BINARY = EmptyFrom(this._FRAME, 1)",
"def reset_file_stat(self):\n # FIXME: this state does not make sense\n self.file_spdx_id_set = False\n self.file_comment_set = False\n self.file_type_set = False\n self.file_chksum_set = False\n self.file_conc_lics_set = False\n self.file_license_comment_set = False\n self.file_notice_set = False\n self.file_copytext_set = False",
"def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)",
"def set_dacs_zero(self):\n # First set all \"parameters\" to zero.\n # this ensures that the safe slow rampdown is used and that the\n # correct values are known to the instrument.\n for ch in self.channel_map:\n self.set(ch, 0)\n\n # \"brute-set\" all sources in known modules to zero, this is because\n # this is also a safety method that should ensure we are in an all\n # zero state.\n for s in self.current_sources.values():\n for dac in range(4):\n s.set_current(dac, 0.0)",
"def reset(self):\n self._write(0x16, 1, 3, 0x08)",
"def reset(self):\r\n self._root_dir = None",
"def clear(self):\n\t\tself.n = 0\n\t\tself.bv = BitVector.BitVector(size = self.m)",
"def zeroLickCount (self, chanList):\n global gLickArray\n for chan in chanList:\n gLickArray [chan] = 0",
"def clear_bit(num, i):\n return num & ~(1 << i)",
"def reset_phis ( self ) :\n for f in self.__phis : f.setVal(0)",
"def _reset(self):\n if self.filename is None and not hasattr(self, 'coco_gt'):\n self.coco_gt = MaskCOCO()",
"def zero_to_nodata(base_raster):\n target_raster = base_raster.copy()\n target_raster[target_raster == 0] = _IC_NODATA\n return target_raster",
"def clearImageFolder():\n filelist = listImageFolder()\n for f in filelist:\n os.remove('{}/{}'.format(imageFolder, f))",
"def zero_mask(self):\n accum = 0\n for i in range(self.data.itemsize):\n accum += (0x55 << (i << 3))\n return accum",
"def clear_exclude_bits(self):\n self.bitcell_array.init_graph_params()",
"def reset_mask(self):\n\n self.mask = np.ones(self.dispersion.shape, dtype=bool)",
"def set_empty(self):\n pattern = [[0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0]]\n self.set_pattern(pattern)",
"def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode",
"def reset(self) -> int:\n self.flags &= 0\n return self.flags",
"def _reset_mask(self, reset_to=False):\n self.data.mask = reset_to",
"def _mask_mode(self):\r\n self._mode_select(0)",
"def reset(self):\n q.system.fs.removeDirTree(self.metadataPath)\n self.__init__(self.metadataPath,self.root)",
"def setZeroes(self, matrix: List[List[int]]) -> None:\n colsToZero = set()\n rowsToZero = set()\n for rowIdx, row in enumerate(matrix):\n for colIdx, num in enumerate(row): \n if num == 0: \n colsToZero.add(colIdx)\n rowsToZero.add(rowIdx)\n \n for col in colsToZero:\n self.writeZeroCol(col, matrix)\n for row in rowsToZero:\n self.writeZeroRow(row, matrix)",
"def clear_images(self):\r\n\r\n # audio = self.MutagenType(self['filename'])\r\n self.audio.pop(\"metadata_block_picture\", None)\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio.save()",
"def reset(self,):\n \n self.i = 0\n self.pi = 1.0\n self.si = 0.0\n self.pi_min = float(\"inf\")\n self.si_min = float(\"inf\")",
"def set_bitmap_size(self):\n self.bitmap = bitarray(self.hash_prime)\n self.bitmap.setall(0)",
"def normalise_image_16bit(file_name, max_possible_intensity=65535):\n print(file_name)\n normalised_image_name = file_name[:-4] + 'normed16bit.png'\n image = cv2.imread(file_name, cv2.IMREAD_ANYDEPTH)\n\n minimum_intensity = np.amin(image)\n maximum_intensity = np.amax(image)\n factor = float(max_possible_intensity) / float(maximum_intensity - minimum_intensity)\n\n\n subtracted_image = np.subtract(image, np.full(np.shape(image), minimum_intensity))\n subtracted_scaled_image = (subtracted_image * factor)\n normalised_image = subtracted_scaled_image.astype(np.uint16)\n cv2.imwrite(normalised_image_name, normalised_image)",
"def resetFlags():\r\n for flag in flags:\r\n flags[flag] = False"
] |
[
"0.5840278",
"0.57339275",
"0.5498971",
"0.54269385",
"0.5342206",
"0.52571565",
"0.52047503",
"0.5122611",
"0.5109874",
"0.5103661",
"0.5101632",
"0.50586116",
"0.50525945",
"0.5020488",
"0.49911335",
"0.4984211",
"0.49590093",
"0.49564266",
"0.49439636",
"0.49412417",
"0.4922759",
"0.49144307",
"0.48915273",
"0.48777407",
"0.48609203",
"0.4859544",
"0.48514292",
"0.48487478",
"0.48432094",
"0.4842304"
] |
0.7614919
|
0
|
This function splits a 16-bit TIFF file into two 8-bit TIFF files. The split strategy is to take the lowest 8 bits of each source pixel as the pixel value of one output file, and the highest 8 bits as the pixel value of the other.
|
def tiff_split(path_tiff_source, path_tiff_out):
    # Requires: import glob, os, numpy as np and `from libtiff import TIFF`.
    # Split every 16-bit TIFF in the source folder into two 8-bit TIFFs:
    # one holding the low byte of each pixel, the other holding the high byte.
    path_out_low = os.path.join(path_tiff_out, 'low8bits')
    path_out_hig = os.path.join(path_tiff_out, 'high8bits')
    path_out_list = [path_out_hig, path_out_low]
    os.makedirs(path_out_low, exist_ok=True)  # replaces the undefined makdir() helper
    os.makedirs(path_out_hig, exist_ok=True)
    for source_file in glob.glob(os.path.join(path_tiff_source, '*.tiff')):
        tif = TIFF.open(source_file, 'r')
        img = tif.read_image()
        img_low_8bits = (img % 256).astype(np.uint8)   # lowest 8 bits of each pixel
        img_hig_8bits = (img // 256).astype(np.uint8)  # highest 8 bits of each pixel
        img_name = os.path.basename(source_file)
        tif_split_low = TIFF.open(os.path.join(path_out_low, img_name), 'w')
        tif_split_low.write_image(img_low_8bits)
        tif_split_hig = TIFF.open(os.path.join(path_out_hig, img_name), 'w')
        tif_split_hig.write_image(img_hig_8bits)
    return path_out_list
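
Because the split is plain byte arithmetic, it is lossless: the original 16-bit pixel is recovered as high * 256 + low. The sketch below checks that round trip on synthetic data; the arrays are stand-ins for the images the function writes, not part of the function itself.

import numpy as np

# Minimal sketch: verify that the high/low byte split is lossless.
original = np.array([0, 255, 256, 4660, 65535], dtype=np.uint16)

low = (original % 256).astype(np.uint8)    # what tiff_split writes to low8bits/
high = (original // 256).astype(np.uint8)  # what tiff_split writes to high8bits/

recombined = high.astype(np.uint16) * 256 + low
assert np.array_equal(recombined, original)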
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def split(bin_lid):\n resolvers = resolver.parse_stream(RESOLVER)\n suffixes = ['_cfa_' + camera for camera in 'LR']\n outdirs = [scratch(bin_lid,bin_lid + suffix) for suffix in suffixes]\n for od in outdirs:\n mkdirs(od)\n imagenames = list(list_images(bin_lid))\n (h,w)=(None,None)\n tiff = None\n # read an image to determine h,w\n for imagename in imagenames:\n for outdir,suffix in zip(outdirs,suffixes):\n LRout = os.path.join(outdir,remove_extension(imagename) + suffix + '.tif')\n if h is None:\n if tiff is None:\n tiff = as_tiff(imagename)\n cfain = resolvers['image'].resolve(pid=as_tiff(imagename)).value\n (h,w) = imread(cfain,plugin='freeimage').shape\n # now fork\n pids = []\n for n in range(NUM_PROCS):\n pid = os.fork()\n if pid == 0:\n for imagename in imagenames[n::NUM_PROCS]:\n tiff = None\n for outdir,suffix,offset in zip(outdirs,suffixes,[0,1]):\n LRout = os.path.join(outdir,remove_extension(imagename) + suffix + '.tif')\n if not os.path.exists(LRout):\n if tiff is None:\n tiff = as_tiff(imagename)\n cfain = resolvers['image'].resolve(pid=as_tiff(imagename)).value\n logging.info('loading %s' % cfain)\n cfa = imread(cfain,plugin='freeimage')\n (h,w) = cfa.shape\n if not os.path.exists(LRout):\n logging.info('splitting %s -> %s' % (cfain, LRout))\n half = w / 2\n off = offset * half\n imsave(LRout,cfa[:,off:off+half],plugin='freeimage')\n os._exit(0)\n else:\n pids += [pid]\n for pid in pids:\n os.waitpid(pid,0)\n logging.info('joined splitting process %d' % pid)\n return (h,w),outdirs",
"def unpack_mraw_frame_10bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*10/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*10/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::5]\n bytes_2 = int_array[1::5] \n bytes_3 = int_array[2::5]\n bytes_4 = int_array[3::5] \n bytes_5 = int_array[4::5]\n\n \n # Here 4 pixels from the image are shared between 5 bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 | byte 4 | byte 5 |\n # |o o o o o o o o | o o | o o o o o o | o o o o | o o o o | o o o o o o | o o | o o o o o o o o|\n # | Pixel 1 | Pixel 2 | Pixel 3 | Pixel 4 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. \n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(4.0*bytes_1 + np.right_shift(bytes_2,6),dtype=np.uint16)\n pix_2 = np.array(16.0*np.bitwise_and(bytes_2,0b111111) + np.right_shift(bytes_3,4),dtype=np.uint16)\n pix_3 = np.array(64.0*np.bitwise_and(bytes_3,0b1111) + np.right_shift(bytes_4,2),dtype=np.uint16)\n pix_4 = np.array(256.0*np.bitwise_and(bytes_4,0b11) + bytes_5,dtype=np.uint16)\n #try:\n image = (np.dstack([pix_1,pix_2,pix_3,pix_4])).reshape((1,n_pixels))[0]\n #except:\n # image = np.zeros(n_pixels)\n return image",
"def _process_image(filename, subset_idx, is_test):\n root_path = test_roots[subset_idx] if is_test else train_roots[subset_idx]\n left_image_path = os.path.join(root_path, left_paths[subset_idx], filename)\n right_image_path = os.path.join(root_path, right_paths[subset_idx], filename) \n _left_image = misc.imread(left_image_path)\n _right_image = misc.imread(right_image_path)\n \n # Check that image converted to RGB, left and right image have the same shape\n assert len(_left_image.shape) == 3\n assert _left_image.shape[2] == 3\n assert np.all(_left_image.shape == _right_image.shape)\n \n if is_test:\n return _left_image, _right_image\n \n disparity_image_path = os.path.join(root_path, disparity_paths[subset_idx], filename)\n _disparity, _mask = load_uint16PNG(disparity_image_path)\n\n return _left_image, _right_image, _disparity, _mask",
"def unpack_mraw_frame_12bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*12/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*12/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::3]\n bytes_2 = int_array[1::3] \n bytes_3 = int_array[2::3]\n\n \n # Here 2 pixels from the image are shared between three bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 |\n # |o o o o o o o o|o o o o | o o o o|o o o o o o o o|\n # | Pixel 1 | Pixel 2 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. \n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(16.0*bytes_1 + np.right_shift(bytes_2,4),dtype=np.uint16)\n pix_2 = np.array(256.0*np.bitwise_and(bytes_2,0b1111) + bytes_3,dtype=np.uint16)\n \n try:\n image = (np.dstack([pix_1,pix_2])).reshape((1,n_pixels))[0]\n except:\n image = np.zeros(n_pixels)\n return image",
"def split_in_half(keys_56bits):\n left_keys, right_keys = keys_56bits[:28], keys_56bits[28:]\n return left_keys, right_keys",
"def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\t# print(path)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\t# print(len(train_imgs))\n\t\t\t# break\n\t\t\tfor imgname in train_imgs:\n\t\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\t\timg = cv2.imread(imgname)\n\t\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\t\timg_label = img[:,:,0]\n\t\t\t\tcv2.imwrite(path_train+\"/\"+midname+\".\"+self.img_type,img_train)\n\t\t\t\tcv2.imwrite(path_label+\"/\"+midname+\".\"+self.img_type,img_label)",
"def split_images(x, y=None, size=(128, 128), num_part=4):\n x_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n x_imgs = x_patches.transform(x)\n # Check if number of channels is the same for grayscale\n if x.shape[-1] != x_imgs.shape[-1]:\n x_imgs = x_imgs[:, :, :, np.newaxis]\n\n if not y is None:\n y_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n y_imgs = y_patches.transform(y)\n\n # Check if number of channels is the same for grayscale\n if y.shape[-1] != y_imgs.shape[-1]:\n y_imgs = y_imgs[:, :, :, np.newaxis]\n\n return x_imgs, y_imgs\n\n return x_imgs",
"def testSplitImage(root):\n\n s1, s2 = splitImage(\"vck.tif\")\n v = OR(s1, s2).view(root)\n return v",
"def testSplitImageG(root):\n\n p, c, v1, v2 = splitImageG(root, \"guido.tif\")\n p.renderOnCanvas(v2.canvas())\n v2.psprint(\"guido-decrypted.ps\")\n return v2",
"def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()",
"def load2Channel(self):\n if self.endBytePos <= self.totalBytes:\n with open(self.inputFilenames['ofd'], 'rb') as f:\n self.rawBScan = cp.fromfile(f, count=self.scanSettings['frameSizeBytes'], offset=self.startBytePos * 2,\n dtype='uint16').copy()\n rawX = cp.reshape(self.rawBScan[0::2], (self.reconstructionSettings['numSamples'],\n self.scanSettings['numAlinesPerRawFrame']), order=\"F\")\n rawY = cp.reshape(self.rawBScan[1::2], (self.reconstructionSettings['numSamples'],\n self.scanSettings['numAlinesPerRawFrame']), order=\"F\")\n self.ch1 = rawX[:, self.scanSettings['AlinesToProcTomo']].astype(cp.int)\n self.ch2 = rawY[:, self.scanSettings['AlinesToProcTomo']].astype(cp.int)\n\n else:\n logging.warning('End of frame byte location: {} , Total number of bytes: {}'.format(self.totalBytes /\n self.endBytePos))",
"def tiff_low_four_bits_set_zero(path_tiff_source, path_tiff_out):\n\n dir = glob.glob(path_tiff_source + '\\\\' + '*.tiff')\n for source_file in dir:\n tif = TIFF.open(source_file, mode='r')\n img = tif.read_image()\n img = img - img % 16 #lowest four bits set zero\n img_name = os.path.basename(source_file)\n out_file = path_tiff_out + '\\\\' + img_name #get the path of the output file\n tif_processed = TIFF.open(out_file, 'w')\n tif_processed.write_image(img)",
"def raw_to_tif(file, channel=None ):\n \n def read_uint12(data_chunk):\n data = np.frombuffer(data_chunk, dtype=np.uint8)\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n # fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n # snd_uint12 = (lst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n fst_uint12 = (fst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n# snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = ((mid_uint8 & 0x0F) << 8) | fst_uint8\n# snd_uint12 = (lst_uint8 << 4) | ((mid_uint8 & 0xF0) >> 4)\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n \n # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n\n # in_path = 'p:\\\\NEMO\\Posnetki\\\\20201014_GoreMorje_data\\cele\\\\'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_image_files = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"d\")]\n\n \n # infile = in_path + in_image_files[i]\n with open(file, 'rb', buffering=10) as f: # problem pri branju podatkov?\n byte = f.read()\n print(file)\n # # ar = open(infile, 'rb')\n # buffer = BytesIO()\n # byte = BytesIO(ar)\n \n img = read_uint12(byte)\n print(img)\n \n if channel==\"P\":\n img = img.reshape((2748, 3664)) # PAN\n else:\n img = img.reshape((2050, 2448)) # MS\n # img = img.reshape((2748, 3664)) # PAN\n\n size = img.shape\n \n \n out = file[:-4]+ \"_py.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n outRaster = driver.Create(out, size[1], size[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(img)\n outband.FlushCache()",
"def img_split(img):\n\tbands = img.shape[2]\n\tif bands is 1:\n\t\treturn \"Image already is 1D. Why would you split it?\"\n\n\tband1 = img[:, :, 0]\n\tband2 = img[:, :, 1]\n\tband3 = img[:, :, 2]\n\tif bands is 4:\n\t\tband4 = img[:, :, 4]\n\t\treturn(band1, band2, band3, band4)\n\treturn(band1, band2, band3)",
"def make_split(self, feature_index, threshold, X_subset, y_subset):\n\n # YOUR CODE HERE\n ind = (X_subset[:, feature_index] < threshold)\n \n X_left, y_left = X_subset[ind], y_subset[ind]\n X_right, y_right = X_subset[~ind], y_subset[~ind]\n \n return (X_left, y_left), (X_right, y_right)",
"def _split_to_fold(\n self,\n img_dir1: Path,\n img_dir2: Path,\n label_dir: Path,\n save_im_dir: Path,\n save_mask_dir: Path,\n fold: int,\n copy: bool = True,\n ) -> None:\n Path(save_im_dir).mkdir(parents=True, exist_ok=True)\n Path(save_mask_dir).mkdir(parents=True, exist_ok=True)\n info_path = label_dir / \"info.csv\"\n info = np.genfromtxt(info_path, dtype=\"str\", delimiter=\",\", skip_header=True)\n info = info[info[:, -1] == str(fold)]\n for i in range(info.shape[0]):\n fn, _, _ = info[i]\n\n p1 = img_dir1 / f\"{fn}.png\"\n p2 = img_dir2 / f\"{fn}.png\"\n src_im = p1 if p1.exists() else p2\n src_mask = label_dir / \"Labels\" / f\"{fn}.mat\"\n\n if copy:\n shutil.copy(src_im, save_im_dir)\n shutil.copy(src_mask, save_mask_dir)\n else:\n src_im.rename(save_im_dir / src_im.name)\n src_mask.rename(save_mask_dir / src_mask.name)",
"def split_data(x, y, ratio, index=None):\n m = x.shape[0]\n splitter = np.cumsum(ratio)\n train_start = 0\n val_start = batch_size * ((splitter[0] * m) // batch_size)\n test_start = batch_size * ((splitter[1] * m) // batch_size)\n test_end = batch_size * ((splitter[2] * m) // batch_size)\n\n val_start = int(val_start)\n test_start = int(test_start)\n test_end = int(test_end)\n\n if index is not None:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n index[train_start:val_start],\n x[val_start:test_start, :], y[val_start:test_start, :],\n index[val_start:test_start],\n x[test_start:test_end, :], y[test_start:test_end, :],\n index[test_start:test_end]\n )\n\n\n\n else:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n x[val_start:test_start, :], y[val_start:test_start, :],\n x[test_start:test_end, :], y[test_start:test_end, :]\n )\n\n return split",
"def split_image(image_name):\n #pil_image = Image.fromarray(image_name)\n red, green, blue = img.split()\n\n return red, green, blue",
"def split_file(filename, split_num):\n root, ext = os.path.splitext(filename)\n with open(filename) as f:\n lines = f.readlines()\n total_line = len(lines)\n\n print lines[0].split('\\t')\n\n size = total_line / split_num\n\n print 'Total line: %d, splited file line number: %d' % (total_line, size)\n\n total_line - size * split_num\n for i in range(0, split_num):\n split_file = root + '_' + str(i+1) + ext\n\n start = i * size;\n end = (i+1) * size;\n if i == split_num - 1:\n end = total_line\n\n print 'splite file %s: line from %d to %d' % (split_file, start, end)\n\n with open(split_file, 'w') as fw:\n for j in range(start, end):\n fw.write('%s' % lines[j])",
"def test_write_read_fif_split_file():\n bids_root = _TempDir()\n tmp_dir = _TempDir()\n bids_path = _bids_path.copy().update(root=bids_root, datatype='meg')\n raw = _read_raw_fif(raw_fname, verbose=False)\n n_channels = len(raw.ch_names)\n n_times = int(2.2e9 / (n_channels * 4)) # enough to produce a split\n data = np.empty((n_channels, n_times), dtype=np.float32)\n raw = mne.io.RawArray(data, raw.info)\n big_fif_fname = pathlib.Path(tmp_dir) / 'test_raw.fif'\n raw.save(big_fif_fname)\n raw = _read_raw_fif(big_fif_fname, verbose=False)\n write_raw_bids(raw, bids_path, verbose=False)\n\n raw1 = read_raw_bids(bids_path=bids_path)\n assert 'split-01' in str(bids_path.fpath)\n\n bids_path.update(split='01')\n raw2 = read_raw_bids(bids_path=bids_path)\n bids_path.update(split='02')\n raw3 = read_raw_bids(bids_path=bids_path)\n assert len(raw) == len(raw1)\n assert len(raw) == len(raw2)\n assert len(raw) > len(raw3)",
"def split_file(self):\n title = \"row_id,x,y,accuracy,time,place_id\\n\"\n print \"splitting files into grid files...\"\n sub_folder = os.path.join(Setting.grid_path, str(self.xsplit)+\"_\"+str(self.ysplit))\n if not os.path.exists(sub_folder):\n os.mkdir(sub_folder)\n for m in range(self.xsplit):\n # to avoid open too many files (ysplit should less than 1000 here)\n print \"starting No.\", m, \" subprocess...\"\n train_writers = []\n for n in range(self.ysplit):\n xfolder = os.path.join(sub_folder, str(m))\n if not os.path.exists(xfolder):\n os.mkdir(xfolder)\n yfolder = os.path.join(xfolder, str(n))\n if not os.path.exists(yfolder):\n os.mkdir(yfolder)\n train_file = os.path.join(yfolder, \"train.csv\")\n train_writers.append(open(train_file, \"w\"))\n train_writers[-1].write(title)\n\n for record in read_record(self.train_path):\n place_id = record[-1]\n rec_str = \",\".join([str(x) for x in record])\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if place_id in self.grid_place[slot]:\n train_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in train_writers:\n writer.close()\n\n test_writers = []\n for n in range(self.ysplit):\n test_file = os.path.join(sub_folder, str(m), str(n), \"test.csv\")\n test_writers.append(open(test_file, \"w\"))\n test_writers[-1].write(title)\n\n for record in read_record(self.test_path):\n x_ind, y_ind = grid_cut(record[0], record[1], self.xsplit, self.ysplit)\n grid_slot = x_ind*self.ysplit + y_ind\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if grid_slot == slot:\n rec_str = \",\".join([str(x) for x in record])\n test_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in test_writers:\n writer.close()",
"def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)",
"def split_image(origindatadir,traindir,overload = False):\n \"\"\"origindatadir: from where to import train_data\"\"\"\n \"\"\"traindir: where to save the split data \"\"\"\n \"\"\"overload: if True and traindir and data already exist, delete traindir and split origin data again\"\"\"\n if not os.path.exists(origindatadir):\n return\n cats_dir = traindir+'/cats'\n dogs_dir = traindir+'/dogs'\n if not os.path.exists(traindir):\n os.mkdir(traindir)\n os.mkdir(cats_dir)\n os.mkdir(dogs_dir)\n else:\n #print(traindir)\n if get_subdir_filenum(traindir) > 0:\n if overload:\n shutil.rmtree(traindir)\n os.mkdir(traindir) \n os.mkdir(cats_dir)\n os.mkdir(dogs_dir)\n else:\n print(\"Destination directory already exist:\",traindir)\n return\n #开始复制\n filenames = os.listdir('train')\n for file in filenames:\n if str(file).startswith('cat'):\n shutil.copyfile(origindatadir+'/'+file, cats_dir+'/'+file) \n elif str(file).startswith('dog'):\n shutil.copyfile(origindatadir+'/'+file, dogs_dir+'/'+file)",
"def split(filepath, nsamples):\n start = np.cumsum([0] + list(nsamples[:-1]))\n if filepath[-10:] == 'analog.brw':\n filename = filepath[:-10]\n analog = read_3brain_analog(filepath)\n for i, (s,n) in enumerate(zip(start, nsamples)):\n name = f\"{filename}_part_{i}_analog.npz\"\n print(f\"Saving {name}\")\n sampling_rate = glia.sampling_rate(filepath)\n np.savez(name, analog=analog[s:s+n],\n sampling_rate=sampling_rate)\n elif filepath[-4:] == \".bxr\":\n filename = filepath[:-4]\n # split spike-sorted data\n with h5py.File(filepath, 'r') as h5:\n # shared setup for the concatenated arrays\n sampling_rate = float(h5[\"3BRecInfo\"][\"3BRecVars\"][\"SamplingRate\"][0])\n channel_map = h5[\"3BRecInfo\"][\"3BMeaStreams\"][\"Raw\"][\"Chs\"][()]\n \n # map 3brain unit num\n # numbers typically from -4 to 9000\n # where negative numbers appear across multiple channels\n # and thus are presumably bad units...?\n # positive-numbered units appear on one channel\n unit_id_2_num = {}\n\n n_unit_nums = 0\n if \"SpikeUnits\" in h5[\"3BResults\"][\"3BChEvents\"]:\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeUnits'], 10000):\n n_unit_nums = max(n_unit_nums, chunk.max())\n \n unit_map = {}\n channel_unit_count = {}\n\n\n # operate on each of the concatenated arrays, one at a time\n for i, (s,n) in enumerate(zip(start, nsamples)):\n startTime = s / sampling_rate\n first_idx = None\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], 10000):\n valid_idxs = np.argwhere(h5[\"3BResults/3BChEvents/SpikeTimes\"] > s)\n if len(valid_idxs) > 0:\n first_idx = valid_idxs[0][0]\n break\n assert not first_idx is None\n print(f\"identified start idx of {first_idx}.\")\n\n # for simplicity, we just iterate again, could have faster implementation\n last_idx = len(h5['3BResults/3BChEvents/SpikeTimes'])\n chunk_size = 10000\n for j, chunk in enumerate(iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], chunk_size)):\n invalid_idxs = np.argwhere(chunk > s + n)\n if len(invalid_idxs) > 0:\n last_idx = invalid_idxs[0][0] + j*chunk_size\n break\n print(f\"identified stop idx of {last_idx}.\")\n \n spike_channel_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeChIDs\"][first_idx:last_idx]\n spike_unit_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeUnits\"][first_idx:last_idx]\n # poorly named; time is in units of 1/sampling_rate\n # aka sample number\n # subtract to adjust start time\n spike_times = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeTimes\"][first_idx:last_idx] - s\n \n\n \n csv_name = f'{filename}_part_{i}_spikes.csv'\n spikes = zip(spike_channel_ids, spike_unit_ids, spike_times)\n tot_spikes = spike_times.shape[0]\n print(f\"creating {csv_name} ...\")\n with open(csv_name, 'w', newline='') as csvfile:\n fieldnames = ['channel_i', 'channel_j', 'unit', \"spike_time\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for channel, unit_id, spike_time in tqdm(spikes,\n total=tot_spikes):\n c = channel_map[channel]\n # convert to tuple\n # account for 1-indexing\n c = (c[0]-1,c[1]-1)\n \n # count num units on channel\n # first check if we've seen this channel before\n if not c in channel_unit_count:\n # if not, initialize channel_unit_count for the channel\n channel_unit_count[c] = 1\n unit_num = 0\n # add unit\n unit_id_2_num[unit_id] = unit_num\n else:\n \n # then check if we've seen this unit before\n if not unit_id in unit_id_2_num:\n # if not, assign unit_num for this new unit\n unit_num = channel_unit_count[c]\n unit_id_2_num[unit_id] = unit_num\n 
channel_unit_count[c] += 1\n else:\n # otherwise, look it up\n unit_num = unit_id_2_num[unit_id]\n \n \n t = spike_time / sampling_rate\n writer.writerow({\"channel_i\": c[0],\n \"channel_j\": c[1],\n \"unit\": unit_num,\n \"spike_time\": t})\n \n np.save(f\"{filename}_channel_map.npy\", channel_map)",
"def split(self):\n if(self.back == 'y'):\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1\n backNames = [self.file_path + str(num) + 'b' for num in range(len(files))]\n for num,file in enumerate(files):\n open(backNames[num],'w').write(file)\n else:\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1",
"def image_splitter(foreground, filename, outfolder_random, outfolder_art, the_class):\n imarray = numpy.random.rand(256, 256, 3) * 255\n background = Image.fromarray(imarray.astype('uint8')).convert('RGBA')\n background2 = Art().redraw()\n foreground = foreground.convert(\"RGBA\")\n datas = foreground.getdata()\n\n new_data = []\n for item in datas:\n if item[0] < 10 and item[1] < 10 and item[2] < 10:\n new_data.append((0, 0, 0, 0))\n else:\n new_data.append(item)\n\n foreground.putdata(new_data)\n\n if not os.path.isdir(outfolder_random + \"/\" + the_class):\n os.makedirs(outfolder_random + \"/\" + the_class)\n if not os.path.isdir(outfolder_art + \"/\" + the_class):\n os.makedirs(outfolder_art + \"/\" + the_class)\n\n background.paste(foreground, (0, 0), foreground)\n new_name = filename[:-17]\n background.save(outfolder_random + \"/\" + the_class + \"/\" + new_name + \"jpg\", \"JPEG\")\n\n background2.paste(foreground, (0, 0), foreground)\n background2.save(outfolder_art + \"/\" + the_class + \"/\" + new_name + 'jpg', \"JPEG\")",
"def splitImage(image, shareFile1=\"share1.tif\", shareFile2=\"share2.tif\"):\n\n _, expandedPad = makePad(Image.open(image).size, shareFile1)\n expandedCiphertext = makeCryptograph(str(image), shareFile2)\n print(expandedPad,expandedCiphertext)\n return expandedPad, expandedCiphertext",
"def splitting():\n n = 1\n with open('numbers.txt', 'r+') as f:\n f.readline()\n seek_2 = f.tell()\n seek_1 = 0\n\n while seek_1 != seek_2:\n print(n)\n n += 1\n with open('numbers.txt', 'r+') as f, open('numbers.txt', 'r+') as f_2:\n f.seek(seek_1)\n f_2.seek(seek_2)\n seek_1, seek_2 = merge(f, f_2)\n\n make_result_file(seek_1)",
"def split(directory='', name=''):\n d = directory\n r_path = build_path(d, path.splitext(name)[0] + '_r.png')\n g_path = build_path(d, path.splitext(name)[0] + '_g.png')\n b_path = build_path(d, path.splitext(name)[0] + '_b.png')\n a_path = build_path(d, path.splitext(name)[0] + '_a.png')\n Image.open(build_path(d, name)).convert('RGBA').getchannel(0).save(r_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(1).save(g_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(2).save(b_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(3).save(a_path)",
"def split_data_by_image(self, test_fraction=0.5):\n image_id = BaseModel.get_image_id(self.inputs)\n test_idx = np.random.random(image_id.max()+1) <= test_fraction\n\n # Low image count edge case (mostly just for testing purposes)\n if True not in test_idx:\n test_idx[0] = True\n elif False not in test_idx:\n test_idx[0] = False\n \n test_idx = test_idx[image_id]\n if BaseModel.is_laue(self.inputs):\n train, test = self.split_laue_data_by_mask(test_idx)\n else:\n train, test = self.split_mono_data_by_mask(test_idx)\n\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test"
] |
[
"0.5542672",
"0.55242944",
"0.5498628",
"0.543143",
"0.54110205",
"0.53161323",
"0.5312914",
"0.5309105",
"0.5258685",
"0.5163846",
"0.5151364",
"0.5114061",
"0.51118064",
"0.5084054",
"0.50608444",
"0.5029268",
"0.49768984",
"0.49599314",
"0.4956773",
"0.49552044",
"0.49548042",
"0.49518806",
"0.49500924",
"0.49382448",
"0.4923333",
"0.4909954",
"0.48954445",
"0.48744327",
"0.48740405",
"0.48727712"
] |
0.6982899
|
0
|
unblock the given ip
|
def unblock_ip_view(request, ip):
if request.method == 'POST':
unblock_ip(ip)
return HttpResponseRedirect(reverse("defender_blocks_view"))
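
A lightweight way to exercise this view is Django's RequestFactory, which skips URL routing for the request itself; the view still calls reverse("defender_blocks_view"), so that route must exist in the project's URL conf. The import path defender_admin.views below is a placeholder, not taken from the source.

from django.test import RequestFactory

# Hypothetical import path -- replace with the module that defines the view.
from defender_admin.views import unblock_ip_view

factory = RequestFactory()
request = factory.post("/unblock/ip/203.0.113.7/")  # the path string is arbitrary here
response = unblock_ip_view(request, ip="203.0.113.7")

# POST unblocks the address and redirects back to the blocks overview.
assert response.status_code == 302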
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unblock_ip(ip, logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.delete(f\"http://{firewall_ip_and_port}/firewall/{ip}\")\n if not request.ok:\n logger.error(f\"Unblocking IP {ip} was unsuccessful. Code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Unblocking IP {ip} was unsuccessful. Code {request.status_code}\",\n \"time\": time.time()})\n return False\n return True\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. {e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n return False",
"def ip_drop(self, ip=None):\n if ip is None:\n self.request('/v1.1/unregister', 'POST')\n else:\n self.request('/v1.1/unregister/%s' % ip, 'POST')",
"async def unlight(self, ip: str) -> None:\n miner = self.miners[ip]\n await miner.unlight()",
"def deny(ip):\n return __apf_cmd(\"-d {}\".format(ip))",
"def block_ip(ip, logger, dashboard_log, firewall_ip_and_port):\n if not is_already_blocked(ip, firewall_ip_and_port):\n try:\n data = {\n \"ip\": ip,\n \"port\": 0,\n \"reason\": \"\"\n }\n request = requests.post(f\"http://{firewall_ip_and_port}/firewall/blocked\", json=data)\n if not request.ok:\n logger.error(f\"Blocking IP {ip} was unsuccessful. Code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Blocking IP {ip} was unsuccessful. Code {request.status_code}\",\n \"time\": time.time()})\n return False\n return True\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. {e}\")\n dashboard_log.append({\"message\": f\"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n return False\n # error, continue program",
"def block_ip_address(self, ip_address):\n\n rule = \"iptables -A INPUT -s \" + ip_address + \" -j DROP\\n\"\n rules = open('resources/rules.sh', 'r')\n regex = re.compile(ip_address, re.MULTILINE)\n match = regex.search(rules.read())\n rules.close()\n # check if a rule to block this ip has already been written, this can happen due to threading\n if not match:\n f = open('resources/rules.sh', 'r')\n rules = f.readlines()\n f.close()\n\n rules.insert(6, rule)\n\n f = open('resources/rules.sh', 'w')\n rules = \"\".join(rules)\n f.write(rules)\n f.close()\n subprocess.call([\"chmod\", \"755\", \"resources/rules.sh\"])\n subprocess.call(\"./resources/rules.sh\")\n print(\"IP address \" + ip_address + \" blocked\")",
"def remove_ban(self, vapor_id_or_ip):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = '{}removeBan {}'.format(self.console, identity)\n self.write_command(cmd)",
"async def unblock(self, ctx, *, url):\n blocked = await self.db.get('blocked', [])\n if url not in blocked:\n return await ctx.send('😾 That image isn\\'t blocked.')\n blocked.remove(url)\n await self.db.set('blocked', blocked)\n await ctx.send('🐱 That image has been unblocked.')",
"def unblock(user_id):\n user = User.query.filter_by(id=user_id).first()\n if not user:\n raise ObjectNotFound(\"This user does not exist\")\n\n user.unblock()\n return jsonify(message='User account succesfully unblocked'), 201",
"def block_nginx(ip, output_file):\n output_file.write(\"deny {ip};\\n\".format(ip=ip))",
"def block_iptables(ip):\n try:\n subprocess.check_call(['iptables', '-A', 'INPUT', '-s', ip, '-j', 'DROP'])\n except OSError as e:\n if (e[0] == errno.EPERM):\n print(\"Since this script modifies the firewall with iptables it must be run with root privileges.\", file=sys.stderr)\n sys.exit(1)\n print(\"Dropping all packets from \" + ip)\n return True",
"def test_exclude_ip_ban(self):\n pass",
"def unblock(self, source):\n raise NotImplementedError",
"def block_ip_address (ip_address):\n update_aging_hash(ip_hash, ip_address, ip_hash_threshold)",
"async def unban(self, ctx, name: str):\n try:\n bans = await self.bot.get_bans(ctx.message.server)\n user = discord.utils.get(bans, name=name)\n if user is not None:\n await self.bot.unban(ctx.message.server, user)\n except discord.Forbidden:\n await self.bot.say('I do not have the proper permissions')\n except discord.HTTPException:\n await self.bot.say('Unbanning failed')\n else:\n await self.bot.say('\\N{OK HAND SIGN}')",
"def unblock_list(blocked_ips_list, to_block_list):\n to_be_unblocked_list = []\n for blocked in blocked_ips_list:\n found_ip = False\n blocked_ip = blocked['ip']\n for host in to_block_list:\n if host['host']['ip_address'] == blocked_ip:\n found_ip = True\n # if the blocked_ip was not found in list of blockings, unblock it\n if not found_ip:\n to_be_unblocked_list.append(blocked_ip)\n return to_be_unblocked_list",
"def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)",
"async def unblock(self, TargetId: int):\n\n e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/users/{TargetId}/unblock',\n method='post',\n )\n return e",
"def unblock(self, node):\n\n self.blocked[node] = False\n Bnode = self.b_map[node]\n while Bnode:\n next_node = Bnode.pop(0)\n if self.blocked[next_node]:\n self.unblock(next_node)",
"async def global_unban(self, ctx: commands.Context, *, name: str):\n if re.match(r'^[\\d\\.-]*$', name) is None:\n query = 'SELECT ip FROM ddnet_bans WHERE name = $1;'\n ips = [r['ip'] for r in await self.bot.pool.fetch(query, name)]\n if not ips:\n return await ctx.send(f'`{escape_backticks(name)}` isn\\'t banned')\n else:\n ips = [name]\n\n for ip in ips:\n try:\n await self.ddnet_unban(ip)\n except RuntimeError as exc:\n await ctx.send(exc)\n else:\n await ctx.send(f'Successfully unbanned `{ip}`')",
"def delete(self, ip): # pylint: disable=invalid-name\n return self.request(\"DELETE\", ip)",
"def remove(ip):\n return __apf_cmd(\"-u {}\".format(ip))",
"def detach_public_ip(self, name=None, ip=None):\n raise NotImplementedError",
"def unban (phenny, input):\n if not input.admin: return\n text = input.group().split()\n argc = len(text)\n if argc < 2: return\n opt = text[1]\n banmask = opt\n channel = input.sender\n if opt.startswith('#'):\n if argc < 3: return\n channel = opt\n banmask = text[2]\n banmask = configureHostMask(banmask)\n if banmask == '': return\n phenny.write(['MODE', channel, '-b', banmask])",
"def ban(sock, user):\r\n chat(sock, \"/ban {}\".format(user))",
"def is_already_blocked(ip, firewall_ip_and_port):\n return requests.get(f\"http://{firewall_ip_and_port}/firewall/{ip}\").ok",
"def ban_ip(self, ip_id, length=BAN_TIME_IP):\n self.sql('UPDATE ip_addresses SET ban_until = UNIX_TIMESTAMP(NOW()) + %s, ban_count = ban_count + 1 WHERE id = %s', length, ip_id)\n \n if not PRODUCTION_SERVER:\n print 'Banned IP {} for {} seconds'.format(ip_id, length)\n \n return length",
"def stop_blocking(block_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n blocked_user = User.query.get_or_404(block_id)\n g.user.blocked_users.remove(blocked_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/blocked-users\")",
"def unblock(self):\n self.failed_logins = 0\n self.blocked = False",
"def deregister_elastic_ip(ElasticIp=None):\n pass"
] |
[
"0.754202",
"0.7157946",
"0.67682874",
"0.67535746",
"0.65734464",
"0.646841",
"0.64320874",
"0.638271",
"0.6320117",
"0.6222433",
"0.6198811",
"0.6190392",
"0.6181117",
"0.60549074",
"0.6036554",
"0.6029521",
"0.6020202",
"0.5999968",
"0.59976965",
"0.59874886",
"0.589189",
"0.5889558",
"0.57924056",
"0.5765341",
"0.57561886",
"0.57538635",
"0.56957203",
"0.56751275",
"0.56712353",
"0.5643511"
] |
0.76020145
|
0
|
unblock the given username
|
def unblock_username_view(request, user_id):
if request.method == 'POST':
username = User.objects.get(id=user_id).username
unblock_username(username)
log_user_unlock.send(sender=unblock_username_view, request=request, username=username)
return HttpResponse(json.dumps({"status": "unlocked"}), content_type='application/json')
else:
return create_custom_JSON_error_response(422, "Wrong request method")
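
The same RequestFactory approach works here and also covers the 422 branch for non-POST requests. The import path, and the assumption that create_custom_JSON_error_response sets the HTTP status it is given, are placeholders rather than facts from the source; a user with the given id must already exist, since the view looks it up before unblocking.

from django.test import RequestFactory

# Hypothetical import path -- replace with the module that defines the view.
from defender_admin.views import unblock_username_view

factory = RequestFactory()

# POST unlocks the account and returns {"status": "unlocked"} as JSON.
response = unblock_username_view(factory.post("/unblock/username/42/"), user_id=42)
assert response.status_code == 200

# Any other method falls through to the custom 422 error response.
response = unblock_username_view(factory.get("/unblock/username/42/"), user_id=42)
assert response.status_code == 422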
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unblock(user_id):\n user = User.query.filter_by(id=user_id).first()\n if not user:\n raise ObjectNotFound(\"This user does not exist\")\n\n user.unblock()\n return jsonify(message='User account succesfully unblocked'), 201",
"async def unban(self, ctx, name: str):\n try:\n bans = await self.bot.get_bans(ctx.message.server)\n user = discord.utils.get(bans, name=name)\n if user is not None:\n await self.bot.unban(ctx.message.server, user)\n except discord.Forbidden:\n await self.bot.say('I do not have the proper permissions')\n except discord.HTTPException:\n await self.bot.say('Unbanning failed')\n else:\n await self.bot.say('\\N{OK HAND SIGN}')",
"def unsafe_block_by_username(self, username: str) -> None:\n uid = InstagramAPI.username_to_id(username)\n self.api.block(uid)",
"def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)",
"def absorb(user):",
"def ban(sock, user):\r\n chat(sock, \"/ban {}\".format(user))",
"def block_user():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n to_block = get_id_from_username(request.form['block_user'])\n if not to_block or to_block==user_id:\n #TODO: some sort of error if blockee doesn't exist\n return redirect(url_for('users.account_page', username=username))\n block_user_db(user_id, to_block)\n return redirect(url_for('users.account_page', username=username))",
"async def xpunblock(self, ctx, *, user_or_role : str = None):\r\n\r\n\t\tusage = 'Usage: `{}xpunblock [user_or_role]`'.format(ctx.prefix)\r\n\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\r\n\t\tif user_or_role == None:\r\n\t\t\tawait ctx.message.channel.send(usage)\r\n\t\t\treturn\r\n\r\n\t\troleName = user_or_role\r\n\t\tis_user = True\r\n\t\tif type(user_or_role) is str:\r\n\t\t\t# Check user first\r\n\t\t\tuser_or_role = DisplayName.memberForName(roleName, ctx.guild)\r\n\t\t\tif not user_or_role:\r\n\t\t\t\tis_user = False\r\n\t\t\t\t# Check role\r\n\t\t\t\tif roleName.lower() == \"everyone\" or roleName.lower() == \"@everyone\":\r\n\t\t\t\t\tuser_or_role = ctx.guild.default_role\r\n\t\t\t\telse:\r\n\t\t\t\t\tuser_or_role = DisplayName.roleForName(roleName, ctx.guild)\r\n\t\t\t\t\t\r\n\t\t\tif not user_or_role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(Nullify.escape_all(roleName))\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\t\t\r\n\t\tif is_user:\r\n\t\t\tur_name = DisplayName.name(user_or_role)\r\n\t\telse:\r\n\t\t\tur_name = Nullify.escape_all(user_or_role.name)\r\n\r\n\t\t# If we're here - then the role is a real one\r\n\t\tpromoArray = self.settings.getServerStat(ctx.message.guild, \"XpBlockArray\")\r\n\r\n\t\tfor aRole in promoArray:\r\n\t\t\t# Check for Name\r\n\t\t\tif str(aRole) == str(user_or_role.id):\r\n\t\t\t\t# We found it - let's remove it\r\n\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\tself.settings.setServerStat(ctx.message.guild, \"XpBlockArray\", promoArray)\r\n\t\t\t\tmsg = '**{}** removed successfully.'.format(ur_name)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we didn't find it\r\n\t\tmsg = '**{}** not found in list.'.format(ur_name)\r\n\t\tawait ctx.message.channel.send(msg)",
"def cmd_disable_private(self, argument):\n if self.is_admin:\n self.bot.admins.remove(self.nick)\n self.send(self.nick, _(\"User %s removed from admins\"), self.nick)\n self.logger.info(\"User %s removed from admins\" % self.nick)",
"async def unban(self, ctx, user_id: int, *, reason: str = None):\r\n author = ctx.message.author\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n action = \"Unban\"\r\n if str(server.id) not in self._time:\r\n self._time[str(server.id)] = {}\r\n dataIO.save_json(self._time_file, self._time)\r\n if \"unbantime\" not in self._time[str(server.id)]:\r\n self._time[str(server.id)][\"unbantime\"] = 0\r\n dataIO.save_json(self._time_file, self._time)\r\n try:\r\n user = await self.bot.get_user_info(user_id)\r\n except discord.errors.NotFound:\r\n await ctx.send(\"The user was not found :no_entry:\")\r\n return\r\n except discord.errors.HTTPException:\r\n await ctx.send(\"The ID specified does not exist :no_entry:\")\r\n return\r\n can_ban = channel.permissions_for(ctx.me).ban_members\r\n if not can_ban:\r\n await ctx.send(\"I need the `BAN_MEMBERS` permission :no_entry:\")\r\n return\r\n ban_list = await server.bans()\r\n invite = await channel.create_invite(max_age=86400, max_uses=1)\r\n s = discord.Embed(title=\"You have been unbanned from {}\".format(server.name),\r\n description=\"Feel free to join back whenever.\", colour=000000,\r\n timestamp=__import__('datetime').datetime.utcnow())\r\n s.set_thumbnail(url=server.icon_url)\r\n s.add_field(name=\"Moderator\", value=\"{} ({})\".format(author, str(author.id)), inline=False)\r\n s.add_field(name=\"Invite\", value=\"{} (This will expire in 1 week)\".format(str(invite)))\r\n if user == author:\r\n await ctx.send(\"You can't unban yourself :no_entry:\")\r\n return\r\n if user == self.bot.user:\r\n await ctx.send(\"I'm not even banned ¯\\_(ツ)_/¯\")\r\n return\r\n i = 0\r\n n = 0\r\n if user in [x.user for x in ban_list]:\r\n pass\r\n else:\r\n await ctx.send(\"That user is not banned :no_entry:\")\r\n return\r\n try:\r\n await server.unban(user, reason=\"Unban made by {}\".format(author))\r\n self._time[str(server.id)][\"unbantime\"] = datetime.datetime.utcnow().timestamp()\r\n dataIO.save_json(self._time_file, self._time)\r\n except discord.errors.Forbidden:\r\n await ctx.send(\"I need the **Ban Members** permission to unban :no_entry:\")\r\n return\r\n await ctx.send(\"**{}** has been unbanned :white_check_mark:\".format(user))\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass\r\n try:\r\n await user.send(embed=s)\r\n except:\r\n pass",
"def disable_user(UserName=None, AuthenticationType=None):\n pass",
"def unban_user(self, session, chat_id: int) -> None:\n\n user = session.query(User).get(chat_id)\n if user.is_banned is True:\n user.is_banned = False\n session.commit()",
"async def hackban(self, ctx, user_id: int):\n try:\n await self.liara.http.ban(str(user_id), str(ctx.guild.id))\n await ctx.send('Done. Good riddance.')\n except discord.NotFound:\n await ctx.send('That user doesn\\'t exist.')\n except discord.Forbidden:\n await ctx.send('Sorry, I don\\'t have permission to ban that person here.')\n except discord.HTTPException:\n await ctx.send('That ID is invalid.')",
"def stop_blocking(block_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n blocked_user = User.query.get_or_404(block_id)\n g.user.blocked_users.remove(blocked_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/blocked-users\")",
"def unmute(self, nick, chan, arg):\n if not arg:\n \tbot.msg(chan, get_doc())\n self.state.unmute(arg)\n self.msg(chan, \"%s: You are now allowed to use this bot\" % (arg))",
"def ban_user(self, user):\n # salvo l'id dell'utente o del bot\n # print(\"Sto negando l'accesso all'user \" + str(user['id']))\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, False, False, True))",
"async def user_unbanned_button(self, payload: discord.RawReactionActionEvent) -> None:\n\n self.bits = flip_action_bits(LoggingActions.USER_UNBANNED, self.bits)\n await self.update_embed()",
"def block_uid(self, uid):",
"def del_user(self, username):\n pass",
"def unban_member(self, *args, **kwargs):\n return self.bot.unban_chat_member(self.id, *args, **kwargs)",
"async def unban(self, ctx, *, member): # don't convert to discord.Member as it isn't a server member, just a string\n banned_users = await ctx.guild.bans() # pulls ban list\n member_name, member_discriminator = member.split('#') # split the member name from the numerical discriminator\n for ban_entry in banned_users:\n user = ban_entry.user\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f'Unbanned {user.name}#{user.discriminator}')\n return",
"async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")",
"async def unban(ctx, *, member):\n banned_users = await ctx.guild.bans()\n member_name, member_discriminator = member.split(\"#\")\n\n for ban_entry in banned_users:\n user = ban_entry.user\n\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f\"Unbanned {user.mention}\")\n return",
"def unblock(self):\n self.failed_logins = 0\n self.blocked = False",
"async def unblock(self, TargetId: int):\n\n e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/users/{TargetId}/unblock',\n method='post',\n )\n return e",
"def ban(sock, chan, user):\n chat(sock, \".ban {}\\r\\n\".format(user))\n console.info(\"banned user {} from channel {}\".format(user, chan))",
"def removeUser(self, username):\r\n try:\r\n self.getUser(username)\r\n for line in fileinput.input(self.filename, inplace=1):\r\n if self.scanner.match(line).groups()[0] != username:\r\n print(line[:-1])\r\n except KeyError:\r\n raise CredentialError('No such user')",
"async def unmute(self, ctx, user: Redeemed):\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")",
"def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of the user you would like to remove: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n command = \"remove_user {0}\\r\\n\".format(userID)\n return(command)",
"def _unlisten(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is: \")\n for i in users:\n print(users[i][\"name\"])\n name = False\n while not name: #Loop until valid name given\n name = input(\"Please enter the user that you would no longer like to be listening to events for: \")\n userID = self._get_user_id(name)\n if not userID:\n name = False\n #Output\n command = \"unlisten {0}\".format(userID)\n return(command)"
] |
[
"0.72230184",
"0.7210059",
"0.71730417",
"0.7066073",
"0.67244923",
"0.65640134",
"0.6544314",
"0.6491108",
"0.64267445",
"0.6417066",
"0.6388073",
"0.63744146",
"0.63177145",
"0.6310767",
"0.62980276",
"0.6290632",
"0.6267844",
"0.62429726",
"0.62315124",
"0.62009305",
"0.619752",
"0.61628956",
"0.61606187",
"0.6132203",
"0.60888267",
"0.607971",
"0.60577947",
"0.6056739",
"0.6040432",
"0.6038635"
] |
0.7502431
|
0
|
Returns adapter implementing the ICAVLService interface using CAVL_SERVICE setting
|
def get_cavl_service() -> ICAVLService:
try:
return import_string(settings.CAVL_SERVICE)()
except ImportError as e:
msg = "Could not import '%s' for API setting 'CAVL_SERVICE'. %s: %s." % (
settings.CAVL_SERVICE,
e.__class__.__name__,
e,
)
raise ImportError(msg)
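
# Usage sketch (hypothetical): get_cavl_service resolves a dotted path stored
# in settings.CAVL_SERVICE and instantiates it. import_string is assumed to be
# django.utils.module_loading.import_string; the stand-in below reproduces the
# same resolution step with importlib so the sketch runs without a configured
# Django project, and every other name here is a placeholder.
from importlib import import_module


def load_from_dotted_path(dotted_path: str):
    """Minimal stand-in for django.utils.module_loading.import_string."""
    module_path, _, class_name = dotted_path.rpartition(".")
    return getattr(import_module(module_path), class_name)


if __name__ == "__main__":
    # Any importable dotted path works; a stdlib class stands in for a concrete
    # ICAVLService implementation such as "myapp.cavl.CAVLService".
    service_cls = load_from_dotted_path("collections.OrderedDict")
    service = service_cls()
    print(type(service).__name__)  # -> OrderedDict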
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_adapter(self):\n\t\timportlib.import_module('app.adapters.{0}'.format(self.builder.name))\n\n\t\tclasses = inspect.getmembers(\n\t\t\tsys.modules['app.adapters.{0}'.format(self.builder.name)],\n\t\t\tinspect.isclass\n\t\t)\n\n\t\tadapter = next(\n\t\t\tcls_ for cls_ in classes \\\n\t\t\tif hasattr(cls_[1], 'tech') \\\n\t\t\t and cls_[1].tech == self.builder.__class__.tech \\\n\t\t\t and hasattr(cls_[1], 'ctx') \\\n\t\t\t and cls_[1].ctx == self.builder.__class__.ctx\n\t\t)[1]\n\n\t\treturn adapter(self.builder())",
"def get_service(self):",
"def get_adapter(cls):\n pass",
"def get_adapter(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Adapter\"\r\n adapter_c = getattr(netius.adapters, name_f)\r\n adapter = adapter_c(*args, **kwargs)\r\n return adapter",
"def get_adapter(group, **adapter_kwargs):\n return ks_loading.load_adapter_from_conf_options(CONF, group,\n **adapter_kwargs)",
"def _find_adapter(self):\n required_interfaces = [GATT_MANAGER_IFACE, LE_ADVERTISING_MANAGER_IFACE]\n object_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, '/'), DBUS_OM_IFACE)\n objects = object_manager.GetManagedObjects()\n\n for object_path, properties in objects.items():\n missing_interfaces = [i for i in required_interfaces if i not in properties.keys()]\n if missing_interfaces:\n continue\n return object_path.rsplit('/', 1)[1]\n\n return None",
"def serviceProvider(self, iTag, srvType, addr):\r\n return ROSServiceProvider(self, iTag, srvType, addr)",
"def getService(self, interfaceClass: java.lang.Class) -> object:\n ...",
"def __init__(self, service_id, service_type, device_configuration):\n\n # Call the Service constructor\n super(Direct_Downlink_APRS_Service,self).__init__(service_id, service_type)\n\n # Configuration settings\n self.update_interval = device_configuration['update_interval']\n self.aprs_fallback_timeout = device_configuration['aprs_fallback_timeout']\n self.aprs_update_timeout = device_configuration['aprs_update_timeout']\n self.api_key = device_configuration['api_key']\n\n # Load the ground station's location\n self._global_config = Configuration\n self._station_longitude = self._global_config.get('station-longitude')\n self._station_latitude = self._global_config.get('station-latitude')\n self._station_altitude = self._global_config.get('station-altitude')\n\n # Set the service attributes\n self._reset_tracker_state()",
"def tempest_ceph_services_vlan(self):\n self.helper_ceph_services('vlan')",
"def __getitem__(self, name):\r\n return Service(self, name)",
"def get_network_adapter() -> network.NetworkAdapter:\n if (ip := os.getenv('ref_ip')) is not None: # noqa: SIM112\n return network.get_adapter_containing_ip(ip)\n # get next available loopback adapter\n return next(adapter for adapter in network.get_adapters() if adapter.is_loopback)",
"def get_listener(self, service, bigip):\n vip = self.service_adapter.get_virtual_name(service)\n obj = self.vs_helper.load(bigip=bigip,\n name=vip[\"name\"],\n partition=vip[\"partition\"])\n return obj",
"def find_service(iface, context, name):",
"def __init__(self, alias, adapter=None):\n\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\n self.bus = dbus.SystemBus()\n\n if not adapter:\n adapter = self._find_adapter()\n if not adapter:\n logger.error(\"Could not find any adapter implementing GattManager1 + LEAdvertisingManager1 interfaces\")\n raise BleNotSupportedException(\n \"No adapter implementing GattManager1 + LEAdvertisingManager1 found\")\n self._adapter_path = '/org/bluez/' + adapter\n self._device_properties_changed_signal = None\n self._adapter_properties_changed_signal = None\n self._main_loop = None\n self.on_remote_disconnected = None\n\n self._adapter_props = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path), DBUS_PROP_IFACE)\n\n self._disable_br_edr()\n\n logger.info(\"Creating BLE Peripheral with alias: %s\" % alias)\n\n self.alias = alias\n self.is_powered = True\n self.discoverable_timeout = 0\n self.is_advertising = False\n\n # Prepare Managers:\n\n self._ad_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n LE_ADVERTISING_MANAGER_IFACE)\n\n self._gatt_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n GATT_MANAGER_IFACE)\n\n # Create Advertisement and GATT Application:\n\n self._advertisement = Advertisement(self.bus, 0, 'peripheral')\n self._app = Application(self.bus)",
"def serviceClient(self, iTag, srvType, addr):\r\n return ROSServiceClient(self, iTag, srvType, addr)",
"def service_instance(self):\n return self.service_class(self)",
"def appMgr( *varg , **kwarg ) :\n import GaudiPython.Bindings\n _g = GaudiPython.Bindings.AppMgr()\n if not 'LoKiSvc' in _g.ExtSvc :\n logger.debug ('appMgr: add LoKiSvc into the list of services')\n _g.ExtSvc += [ 'LoKiSvc']\n return _g",
"def list_virtualfibrechannel_clientadapter(self, ip, logicalpartition_id, x_api_session):\n log.log_debug(\"fc adarpter object list is started\")\n list_object = ListModule.ListModule()\n object_list = list_object.listing(\"uom\", ip, self.root, self.content_type,\n \"VirtualFibreChannelClientAdapter\", x_api_session,\n logicalpartition_id)\n log.log_debug(\"fc adarpter object list is returned\")\n return object_list",
"def get_adapters(self, domain):\n pass",
"def create_controller_query_service():\r\n return ControllerQueryService(settings.OPEN_ENDED_GRADING_INTERFACE, SYSTEM)",
"def base_service(self):\n return self",
"def service(self):\n pass",
"def driver_load(self, name):\r\n return AbstractServiceManager.service_load(self, name)",
"def getService(self):\n return self.serviceClass",
"def __init__(self, **kwargs):\n # Initialise the superclass ApiAdapter - this parses the keyword arguments\n # into the options used below.\n super(InterfaceAdapter, self).__init__(**kwargs)\n\n\n interface_options = {\n\n 'working_dir' : str(self.options.get('working_directory')),\n 'data_dir' : str(self.options.get(\"data_directory\")),\n 'fem_ip' : str(self.options.get(\"fem_ip\")),\n 'fem_port' : str(self.options.get(\"fem_port\")),\n 'server_ctrl_ip' : str(self.options.get(\"server_ctrl_ip\")),\n 'server_data_ip' : str(self.options.get(\"server_data_ip\")),\n 'camera_ctrl_ip' : str(self.options.get(\"camera_ctrl_ip\")),\n 'camera_data_ip' : str(self.options.get(\"camera_data_ip\")),\n 'resistor_defaults' : str(self.options.get('settings')),\n }\n\n #print interface_options\n # Retrieve adapter options from incoming argument list\n self.update_interval = float(self.options.get('update_interval', 0.05))\n \n self.interface_data = InterfaceData(**interface_options)\n\n # Start the update loop\n self.update_loop()",
"def service(self) -> interface.BaseService:\n for protocol in DEFAULT_PRIORITIES:\n service = self._config.get_service(protocol)\n if service:\n return service\n\n raise RuntimeError(\"no service (bug)\")",
"def _get_adapter_config(self):\n proxy = self.core.get_proxy('/')\n try:\n config = proxy.get('/adapters/' + self.adapter_name)\n return config\n except KeyError:\n return None",
"def get_model_adapter(config):\n if config['task'] == 'joint':\n return JointModelAdapter()\n elif config['task'] == 'keypoints':\n return KeypointsModelAdapter()\n elif config['task'] == 'headsegmentation':\n return HeadSegmentationModelAdapter()\n elif config['task'] == 'detect':\n return DetectionModelAdapter(config['model'])\n return ClassificationModelAdapter()",
"def getServices(self, interfaceClass: java.lang.Class) -> List[object]:\n ..."
] |
[
"0.557375",
"0.54590374",
"0.5451513",
"0.5436211",
"0.5365986",
"0.5205334",
"0.5173588",
"0.51281923",
"0.5052206",
"0.4985571",
"0.49650833",
"0.49015048",
"0.4838178",
"0.47969443",
"0.47782078",
"0.47619662",
"0.47539312",
"0.46973038",
"0.46862763",
"0.46629298",
"0.4627983",
"0.46269545",
"0.4617618",
"0.46096262",
"0.46065113",
"0.46059358",
"0.459955",
"0.45986956",
"0.45702538",
"0.45593044"
] |
0.7333546
|
0
|
Returns adapter implementing INotification interface using NOTIFIER setting
|
def get_notifications() -> INotifications:
notifiers = {"django": DjangoNotifier, "govuk-notify": GovUKNotifyEmail}
notifier = getattr(settings, "NOTIFIER", "django")
notifier_class = notifiers[notifier]
return notifier_class()
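
# Usage sketch (hypothetical): the same registry dispatch with stand-in
# classes so it runs outside a Django project. DjangoNotifier and
# GovUKNotifyEmail above are the real registry values; the send() method and
# the settings object below are placeholders. As in get_notifications, an
# unknown NOTIFIER value raises KeyError because the registry is indexed
# directly, and the default backend is "django".
from types import SimpleNamespace


class _StubDjangoNotifier:
    def send(self, recipient: str, message: str) -> None:
        print("django notifier ->", recipient, message)


class _StubGovUKNotifyEmail:
    def send(self, recipient: str, message: str) -> None:
        print("govuk-notify ->", recipient, message)


def build_notifier(settings) -> object:
    registry = {"django": _StubDjangoNotifier, "govuk-notify": _StubGovUKNotifyEmail}
    name = getattr(settings, "NOTIFIER", "django")
    return registry[name]()


if __name__ == "__main__":
    build_notifier(SimpleNamespace(NOTIFIER="govuk-notify")).send(
        "ops@example.com", "AVL feed status changed"
    )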
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_notifier(publisher_id):\n global NOTIFIER\n return NOTIFIER.prepare(publisher_id=publisher_id)",
"def notifier(self, name):\n\n # Look up the notifier\n notifier = self.notifiers.get(name, self.notifiers.get(None))\n\n # Return the driver\n return notifier.driver",
"def notifier(self):\n\n return self.config.notifier(self._notifier)",
"def notification(self, notification_id):\r\n return Notification(self, notification_id)",
"def notification(self, sid):\r\n return notifications.Notification(self, sid)",
"def intf_get_notif_serializer():\n serializer = sl_interface_pb2.SLInterfaceGetNotifMsg()\n return serializer",
"def get_notifiers(self):\n _notifiers = []\n for notifier in (self.notifiers or\n settings.ACTIVITIES_DEFAULT_NOTIFIERS):\n if isinstance(notifier, str):\n notifier = notifier_registry.get(notifier)\n else:\n notifier = notifier_registry.get_or_register(notifier)\n _notifiers.append(notifier)\n return _notifiers",
"def _create_notify(knx_module: XKNX, config: ConfigType) -> XknxNotification:\n return XknxNotification(\n knx_module,\n name=config[CONF_NAME],\n group_address=config[CONF_ADDRESS],\n )",
"def get(self, notifier_id):\n return self.registry.get(notifier_id)",
"def notifications(self):\r\n return notifications.Notifications(self)",
"def notifications(self):\r\n return notifications.Notifications(self)",
"def get_adapter(cls):\n pass",
"def getInfo(notification):",
"def _get_lsp_config_notify_isis(self):\n return self.__lsp_config_notify_isis",
"def notification_config(self) -> 'outputs.NotificationConfigResponse':\n return pulumi.get(self, \"notification_config\")",
"def notification(self):\n return self._notification",
"def get_adapters(self, domain):\n pass",
"def bfd_get_notif_serializer():\n serializer = sl_bfd_common_pb2.SLBfdGetNotifMsg()\n return serializer",
"def on_notify(self, name):\r\n pass",
"def get_adapter(self):\n\t\timportlib.import_module('app.adapters.{0}'.format(self.builder.name))\n\n\t\tclasses = inspect.getmembers(\n\t\t\tsys.modules['app.adapters.{0}'.format(self.builder.name)],\n\t\t\tinspect.isclass\n\t\t)\n\n\t\tadapter = next(\n\t\t\tcls_ for cls_ in classes \\\n\t\t\tif hasattr(cls_[1], 'tech') \\\n\t\t\t and cls_[1].tech == self.builder.__class__.tech \\\n\t\t\t and hasattr(cls_[1], 'ctx') \\\n\t\t\t and cls_[1].ctx == self.builder.__class__.ctx\n\t\t)[1]\n\n\t\treturn adapter(self.builder())",
"def get_adapter(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Adapter\"\r\n adapter_c = getattr(netius.adapters, name_f)\r\n adapter = adapter_c(*args, **kwargs)\r\n return adapter",
"def notification(message: str):\n # initialize the notification\n notify2.init(\"notifywhenLOAD\")\n notifyObj = notify2.Notification(\"Emergency Alert!\", message)\n notifyObj.set_timeout(12000)\n return notifyObj",
"def open_notifications(self) -> 'WebDriver':\n ext_name = 'mobile: openNotifications'\n try:\n self.assert_extension_exists(ext_name).execute_script(ext_name)\n except UnknownMethodException:\n # TODO: Remove the fallback\n self.mark_extension_absence(ext_name).execute(Command.OPEN_NOTIFICATIONS, {})\n return cast('WebDriver', self)",
"def test_get_notifier(self, mock_provider):\n from notifiers import get_notifier\n p = get_notifier('mock')\n assert p\n assert isinstance(p, Provider)",
"def get_comment_notification_session(self, comment_receiver=None):\n raise Unimplemented()",
"def get_imap(self, instrument):\n return self",
"def get_notifiers_list() -> List[AbstractNotifier]:\n\n cfg = read_config()\n notifiers = cfg.get('notifier')\n notifiers_list = list()\n for notifier in notifiers:\n notifier_fabric = notifiers_fabrics_dict.get(str(notifier))\n if notifier_fabric:\n notifier = notifier_fabric.create_notifier()\n notifiers_list.append(notifier)\n else:\n raise NotifierWasNotRealised\n\n if len(notifiers_list) < 0:\n raise NoNotifiersFound\n return notifiers_list",
"def get_notification(self, id):\n url = \"https://api.imgur.com/3/notification/{0}\".format(id)\n resp = self._send_request(url)\n return Notification(resp, self)",
"def intf_notif_op_serializer(batch):\n serializer = sl_interface_pb2.SLInterfaceNotifMsg()\n if 'interfaces' in batch:\n interfaces = []\n for interface in batch['interfaces']:\n entry = sl_common_types_pb2.SLInterface()\n if 'if_name' in interface:\n entry.Name = interface['if_name']\n interfaces.append(entry)\n serializer.Entries.extend(interfaces)\n return serializer",
"def _get_interface(self):\n return self.__interface"
] |
[
"0.66353315",
"0.5884832",
"0.5845214",
"0.5780354",
"0.5691237",
"0.5644371",
"0.5612159",
"0.55944556",
"0.5469027",
"0.5379494",
"0.5379494",
"0.5340072",
"0.5338836",
"0.52982986",
"0.52318954",
"0.52235496",
"0.5178805",
"0.51663077",
"0.5131492",
"0.51082945",
"0.51061565",
"0.5013109",
"0.4998267",
"0.49948844",
"0.49941146",
"0.49940687",
"0.49833274",
"0.49768358",
"0.49700722",
"0.49167672"
] |
0.65746456
|
1
|
saveAsim(samplePt, obj_animatLabModel, fldrSimFiles, ix, indexLen, verbose=3) samplePt Dict of simulation parameters to update with key as [element name].[element property] obj_animatLabModel AnimatLabModel object to update fldrSimFiles Folder path where simulation files are saved ix Incremented file index to avoid overwriting data indexLen Length of file index string for padding with 0's verbose Debugging variable DESCRIPTION saveAsim() loads an AnimatLabModel object, updates its parameters, and saves a new .asim file.
|
def saveAsim(samplePt, obj_animatLabModel, fldrSimFiles, ix, indexLen=3, verbose=3):
#cols = ['ERROR']
cols = []
basename = os.path.split(obj_animatLabModel.asimFile)[-1].split('.')[0]
saveFile = {}
# Generate new .asim file name
filename = basename + '-' + str(ix+1).zfill(indexLen) + '.asim'
# Iterate through each parameter in samplePt
for ptVar in samplePt:
# Add each parameter as a column heading for asims-log.csv file
# This is for auditing purposes!
if ptVar not in cols:
cols.append(ptVar)
# Find the AnimatLab element by name
name, param = ptVar.split('.')
node = obj_animatLabModel.getElementByName(name)
        print("\n\n%s = %s >> %s" % (ptVar, node.find(param).text, samplePt[ptVar]))
# Update the AnimatLab element value
node.find(param).text = str(samplePt[ptVar])
# Save the new .asim file!
obj_animatLabModel.saveXML(fileName=os.path.join(fldrSimFiles, filename), overwrite=True)
#samplePt["ERROR"] = os.path.getsize(os.path.join(fldrSimFiles, filename))
# Update the output dictionary for auditing purposes. See asims-log.csv file.
saveFile[filename] = samplePt
# Do some memory management...
del obj_animatLabModel
return (saveFile, cols)
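
# Usage sketch (hypothetical): the shape of the inputs saveAsim expects. Only
# the "[element name].[element property]" key convention and the zero-padded
# file index come from the function above; the element names, property values
# and folder below are placeholders, and a real call needs an AnimatLabModel
# instance rather than this printout.
import os


def _preview_sim_files(samplePts, fldrSimFiles="SimFiles", basename="ProjectModel", indexLen=3):
    for ix, samplePt in enumerate(samplePts):
        filename = basename + '-' + str(ix + 1).zfill(indexLen) + '.asim'
        # saveAsim would update each element/property pair on the model and
        # write this file; here we only report the resulting path and parameters.
        print(os.path.join(fldrSimFiles, filename), samplePt)


if __name__ == "__main__":
    _preview_sim_files([
        {"Stim_1.CurrentOn": 5e-09, "Muscle_L.Kse": 0.75},   # placeholder parameters
        {"Stim_1.CurrentOn": 7e-09, "Muscle_L.Kse": 0.80},
    ])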
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _save_mayavi_figure(self, fig, filename, azimuth=153, elevation=62,\n distance=400, focalpoint=[25., 63., 60.], aa=16,\n size=(1024, 1024)):\n scene = fig.scene\n\n scene.anti_aliasing_frames = aa\n\n mlab.view(azimuth=azimuth, elevation=elevation, distance=distance,\n focalpoint=focalpoint)\n\n scene.save(filename, size=size)\n\n return filename",
"def save_simulation_file(self):\n a = self.ui.inputfile.text()\n a = self.get_root_file_name(a)\n a = a.split('_a.txt')\n output_suffix = self.ui.output_suffix.text()\n simfile_name = self.input_dir+'/'+sgGL.SIMFILES_PATH + a[0] + '_' +\\\n sgcom.create_file_suffix(self.algorithm,output_suffix,self.ciclos)+\\\n '.sim'\n simulation_selected_filename = QtGui.QFileDialog.getSaveFileName(self,\n \"Save simulation parameters\",\n simfile_name)\n if len(simulation_selected_filename)>0:\n simulation_params.write2file(simulation_selected_filename)",
"def make_asims(self, obj_simSet):\n \n if type(obj_simSet) is not SimulationSet:\n raise TypeError(\"obj_simSet must be a SimulationSet object!\")\n \n cols = ['FileName']\n saveFiles = {} \n \n # Calculate size of text buffer for naming files\n countLength = len(str(obj_simSet.get_size()))\n\n # Instantiate a pool of CPUs to make .asim files\n pool = multiprocessing.Pool()\n # Assign cores in multiprocessing.Pool to generate .asim files\n results = pool.map(saveAsimWrapper, [(pts, copy(self.aproj), self.simRunner.simFiles, ix, countLength, verbose) for ix, pts in enumerate(obj_simSet.samplePts)])\n # Release cores and recover some memory!!\n pool.close()\n \n # Iterate through the resulting .asim files and generate format dicts/lists\n # for the csv log file\n for result in results:\n fileInfo, colInfo = result\n \n fileKey = fileInfo.keys()[0]\n saveFiles[fileKey] = fileInfo[fileKey]\n \n for col in colInfo:\n if col not in cols:\n cols.append(col)\n \n if verbose > 0:\n print \"WRITING LOG FILE...\"\n \n # Create the asims_log.csv log file for auditing purposes\n f = open(os.path.join(self.simRunner.rootFolder, self.projName + '-asims_log.csv'), 'w')\n f.write(self.simRunner.simFiles+'\\n')\n f.write(','.join(cols)+'\\n')\n \n for fName in sorted(saveFiles.keys()):\n colTemplate = ['']*len(cols)\n \n colTemplate[0] = fName\n for key in saveFiles[fName]:\n colTemplate[cols.index(key)] = str(saveFiles[fName][key])\n \n f.write(','.join(colTemplate) + '\\n')\n \n f.close()\n \n return True",
"def analysis_save(plotables,analysis_attributes,file_name):\n#TODO implement error handling\n for (plotable,plt_type), ana_attr in zip(plotables,analysis_attributes):\n scipy.io.savemat(\n \"{0}_{1}\".format(file_name,str(ana_attr)),\n { plt_type: plotable }\n )",
"def analysis_save_dm(analyzed_datas,plotables,analysis_attributes,file_name):\n#TODO implement error handling\n save_dict = {}\n for i,ana_attr in enumerate(analysis_attributes):\n if ana_attr.method == 'FSD':\n save_dict['FSD_SMAP'] = analyzed_datas[i][0]\n np.save(file_name,save_dict)",
"def saveanimation(frames,address=\"./movie.gif\"):\n imageio.mimsave(address, frames)",
"def save(self):\n\n ogassay = getattr(ann.data.sample, f\"_original_{self.assaykey}\")\n assay = getattr(ann.data.sample, self.assaykey)\n for key in dir(self):\n if not key.startswith(\"__\") and key not in self.EXC_ATTR:\n val = getattr(self, key)\n mkey = self.metakey(key)\n\n if key not in self.attributes:\n raise ImplementationError(key)\n\n if isinstance(val, self.STORED_TYPES):\n ogassay.add_metadata(mkey, val)\n assay.add_metadata(mkey, val)\n\n for key in assay.metadata:\n ogassay.add_metadata(key, assay.metadata[key])\n\n for key in assay.row_attrs:\n ogassay.add_row_attr(key, assay.row_attrs[key])",
"def save(self, objectivefunctions, parameter, simulations):\r\n # Save the effiency and the parameters in outfile_param\r\n line=str(objectivefunctions[0])+ \",\" + str(objectivefunctions[1])+ \",\" +str(objectivefunctions[2])+ ','+str(list(parameter)).strip('[]')\r\n self.outfile_param.write(line+'\\n')\r\n self.outfile_param.flush()\r\n # If the model run is ok save the results in outfile_sim\r\n if objectivefunctions[0] > self.simthreshold_NS and abs(objectivefunctions[1]) <= self.simthreshold_pbias and objectivefunctions[2] <= self.simthreshold_rsr:\r\n # shift the whole timeseries by one day to hit peaks better\r\n if self.shift_one_day:\r\n self.outfile_sim.write(line + \",\" + ',' + str(list(simulations)).strip('[]')+'\\n')\r\n self.outfile_sim.flush() \r\n else:\r\n self.outfile_sim.write(line + ',' + str(list(simulations)).strip('[]')+'\\n')\r\n self.outfile_sim.flush()",
"def save(sans, describer, minParams, minPars, stats, location, fitInfo, description):\n\n while path.exists(location) == False:\n print('error: file path does not exist. Please input a valid file path')\n location = input('file path: ')\n\n # for idx, char in enumerate(sans.expData.shear[0]):\n # if char != ' ':\n # continue\n # else:\n # shearIdx = idx\n # break\n\n # Build name for modelled scattering data\n # shear = sans.expData.shear[0][0:shearIdx]\n shear = sans.expData.shear[0]\n\n name = sans.expData.sample[0] + '_' + shear + 'ps'\n post1 = '_sim'\n type1 = '.dat'\n\n saveName1 = name + post1 + describer + '_'\n # versionNum1 = input(\"Input a version number: \" )\n versionNum1 = description\n\n # Write modelled scattering data to 3 column dat file\n write_3_column(location + saveName1 + versionNum1 + type1, sans)\n\n # Build name for modelled scattering data statistics\n post2 = '_simInfo'\n type2 = '.txt'\n\n saveName2 = name + post2 + describer + '_'\n\n output = []\n\n # Build output file\n output.append('qmin = ' + str(sans.qmin))\n output.append('ftol = ' + str(fitInfo[0]))\n output.append('method = ' + str(fitInfo[1]))\n output.append(' ')\n\n for key, val in minParams.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n output.append(' ')\n\n output.append(' static parameters ')\n for key, val in sans.staticPars.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n\n output.append(' ')\n\n output.append('Fitting_performed_over_the_following_parameters:')\n for key in minPars.keys():\n output.append(str(key))\n\n output.append('Returned_the_following_goodness_of_fit_measures:')\n output = output + stats\n output.append(str(datetime.datetime.now()))\n\n # Write output to txt file\n with open(location + saveName2 + versionNum1 + type2, 'w') as file:\n for lines in output:\n file.write(lines)\n file.write(\"\\n\")\n\n print('file was saved with filename: ' + saveName1 + versionNum1 + type1)\n return",
"def saveExperimentAs(self, frameToSave):\n ext = _EXTS_EXPERIMENT[0]\n wildcard = 'Transport experiment (*.%s)|*.%s' % (ext, ext)\n dialog = wx.FileDialog(self, \"Save Experiment As\",\n c.getExperimentFolder(), '', wildcard, wx.FD_SAVE)\n if dialog.ShowModal() == wx.ID_OK:\n newPath = dialog.GetPath()\n if not newPath.endswith(ext):\n newPath += '.' + ext\n frameToSave.experimentPath = newPath\n experimentName = os.path.basename(newPath)\n if experimentName.endswith(ext):\n experimentName = experimentName[:-len(ext)-1]\n frameToSave.experimentName = experimentName\n self._saveExperiment(frameToSave.experiment, newPath)\n self.renameExperiment(frameToSave, experimentName)\n frameToSave.edited = False",
"def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)",
"def writeto(self, save_to_path, method='ascii',\n\t\ttell_sp=None):\n\t\t#pixel = np.delete(np.arange(1024),list(self.mask))\n\t\tpixel = np.arange(len(self.oriWave))\n\t\t## create the output mask array 0=good; 1=bad\n\t\tif (self.apply_sigma_mask) or (self.mask != []):\n\t\t\tmask = np.zeros((len(self.oriWave),),dtype=int)\n\t\t\tnp.put(mask,self.mask,int(1))\n\t\telse:\n\t\t\tmask = np.zeros((len(self.oriWave),),dtype=int)\n\n\t\tif method == 'fits':\n\t\t\t#fullpath = self.path + '/' + self.name + '_' + str(self.order) + '_all.fits'\n\t\t\t#hdulist = fits.open(fullpath, ignore_missing_end=True)\n\t\t\t#hdulist.writeto(save_to_path)\n\t\t\t#hdulist.close()\n\t\t\tif self.header['NAXIS1'] == 1024:\n\t\t\t\tsave_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\\\n\t\t\t\t+ '_O' + str(self.order)\n\t\t\telse:\n\t\t\t\tsave_to_path2 = save_to_path + self.header['OFNAME'].split('.')[0]\\\n\t\t\t\t+ '_O' + str(self.order)\n\t\t\t## wavelength\n\t\t\thdu1 = fits.PrimaryHDU(self.wave/10000, header=self.header)\n\t\t\tsave_to_path2_1 = save_to_path2 + '_wave.fits'\n\t\t\thdu1.writeto(save_to_path2_1)\n\t\t\t## flux\n\t\t\thdu2 = fits.PrimaryHDU(self.flux, header=self.header)\n\t\t\tsave_to_path2_2 = save_to_path2 + '_flux.fits'\n\t\t\thdu2.writeto(save_to_path2_2)\n\t\t\t## uncertainty\n\t\t\thdu3 = fits.PrimaryHDU(self.noise, header=self.header)\n\t\t\tsave_to_path2_3 = save_to_path2 + '_uncertainty.fits'\n\t\t\thdu3.writeto(save_to_path2_3)\n\t\t\t## pixel\n\t\t\thdu4 = fits.PrimaryHDU(pixel, header=self.header)\n\t\t\tsave_to_path2_4 = save_to_path2 + '_pixel.fits'\n\t\t\thdu4.writeto(save_to_path2_4)\n\t\t\t## mask\n\t\t\thdu5 = fits.PrimaryHDU(mask, header=self.header)\n\t\t\tsave_to_path2_5 = save_to_path2 + '_mask.fits'\n\t\t\thdu5.writeto(save_to_path2_5)\n\n\t\t\tif tell_sp is not None:\n\t\t\t\ttell_sp2 = copy.deepcopy(tell_sp)\n\t\t\t\t# the telluric standard model\n\t\t\t\twavelow = tell_sp2.wave[0] - 20\n\t\t\t\twavehigh = tell_sp2.wave[-1] + 20\n\t\t\t\ttell_mdl = smart.getTelluric(wavelow=wavelow,wavehigh=wavehigh)\n\t\t\t\t# continuum correction for the data\n\t\t\t\ttell_sp2 = smart.continuumTelluric(data=tell_sp2, \n\t\t\t\t\tmodel=tell_mdl,order=tell_sp2.order)\n\t\t\t\t# telluric flux\n\t\t\t\thdu6 = fits.PrimaryHDU(tell_sp.flux, header=tell_sp.header)\n\t\t\t\tsave_to_path2_6 = save_to_path2 + '_telluric_flux.fits'\n\t\t\t\thdu5.writeto(save_to_path2_6)\n\t\t\t\t# telluric uncertainty\n\t\t\t\thdu7 = fits.PrimaryHDU(tell_sp.noise, header=tell_sp.header)\n\t\t\t\tsave_to_path2_7 = save_to_path2 + '_telluric_uncertainty.fits'\n\t\t\t\thdu5.writeto(save_to_path2_7)\n\t\t\t\t# telluric model\n\t\t\t\thdu8 = fits.PrimaryHDU(tell_mdl.flux, header=tell_sp.header)\n\t\t\t\tsave_to_path2_8 = save_to_path2 + '_telluric_model.fits'\n\t\t\t\thdu5.writeto(save_to_path2_8)\n\t\t\t\t\n\n\t\telif method == 'ascii':\n\t\t\tif '.txt' not in save_to_path:\n\t\t\t\tif self.header['NAXIS1'] == 1024:\n\t\t\t\t\tsave_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\\\n\t\t\t\t\t+ '_O' + str(self.order) + '.txt'\n\t\t\t\telse:\n\t\t\t\t\tsave_to_path2 = save_to_path + self.header['OFNAME'].split('.')[0]\\\n\t\t\t\t\t+ '_O' + str(self.order) + '.txt'\n\t\t\telse:\n\t\t\t\tsave_to_path2 = save_to_path\n\n\t\t\tif tell_sp is None:\n\t\t\t\tdf = 
pd.DataFrame(data={'wavelength':list(self.oriWave/10000),\n\t\t\t\t\t'flux':list(self.oriFlux),\n\t\t\t\t\t'uncertainty':list(self.oriNoise),\n\t\t\t\t\t'pixel':list(pixel),\n\t\t\t\t\t'mask':list(mask)})\n\t\t\t\tdf.to_csv(save_to_path2, index=None, sep='\\t', mode='a',\n\t\t\t\t\theader=True, columns=['wavelength', 'flux', 'uncertainty',\n\t\t\t\t\t'pixel', 'mask'])\n\t\t\t\n\t\t\telif tell_sp is not None:\n\t\t\t\ttell_sp2 = copy.deepcopy(tell_sp)\n\t\t\t\ttell_sp2 = smart.continuumTelluric(data=tell_sp2\n\t\t\t\t\t,order=self.order)\n\t\t\t\tlsf0 = smart.getLSF(tell_sp2)\n\t\t\t\ttell_sp2.flux = tell_sp2.oriFlux\n\t\t\t\ttell_sp2.wave = tell_sp2.oriWave\n\t\t\t\ttell_mdl = smart.convolveTelluric(lsf0, tell_sp2)\n\n\t\t\t\tprint(len(self.oriWave), len(self.oriFlux), len(self.oriNoise), len(tell_sp.oriFlux),\n\t\t\t\t\tlen(tell_sp.oriNoise), len(tell_mdl.flux), len(pixel), len(mask))\n\n\t\t\t\tdf = pd.DataFrame(data={'wavelength':list(self.oriWave/10000),\n\t\t\t\t\t'flux':list(self.oriFlux),\n\t\t\t\t\t'uncertainty':list(self.oriNoise),\n\t\t\t\t\t'telluric_flux':list(tell_sp.oriFlux),\n\t\t\t\t\t'telluric_uncertainty':list(tell_sp.oriNoise),\n\t\t\t\t\t'telluric_model':list(tell_mdl.flux),\n\t\t\t\t\t'pixel':list(pixel),\n\t\t\t\t\t'mask':list(mask)})\n\n\n\t\t\t\tdf.to_csv(save_to_path2, index=None, sep='\\t', mode='a',\n\t\t\t\t\theader=True, columns=['wavelength', 'flux', 'uncertainty', \n\t\t\t\t\t'telluric_flux', 'telluric_uncertainty', 'telluric_model',\n\t\t\t\t\t'pixel', 'mask'])",
"def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)",
"def save(self):\n from settings import PROCESSORS\n from .filesystem import makedirs\n\n if self.im is None:\n # If we got here something very strange is going on that I can't even\n # predict.\n return # pragma: no cover\n makedirs(self.output_path)\n for action, arg in self.actions:\n action = PROCESSORS[action]\n if self.frames:\n new_frames = []\n for frame in self.frames:\n new_frames.append(action.process(frame, arg))\n self.frames = new_frames\n else:\n self.im = action.process(self.im, arg)\n\n self.im = optimize.optimize(self.im, fmt=self.format, quality=self.quality)\n\n kwargs = {\n 'format': self.format,\n 'optimize': True,\n 'quality': self.quality,\n }\n if self.format == 'jpeg':\n kwargs['progressive'] = True\n\n if self.filename.startswith('s3://'):\n import cStringIO\n from filesystem import s3\n output = cStringIO.StringIO()\n if self.frames:\n images2gif.write_gif(output, self.frames)\n else:\n self.im.save(output, **kwargs)\n output.reset()\n s3.put_file(output, self.filename)\n else:\n if self.frames:\n images2gif.write_gif(self.filename, self.frames)\n else:\n self.im.save(self.filename, **kwargs)",
"def save(self):\n for name, param in self.components.items():\n param_path = os.path.join(self.model_path, \"%s.mat\" % name)\n if hasattr(param, 'params'):\n param_values = {p.name: p.get_value() for p in param.params}\n else:\n param_values = {name: param.get_value()}\n scipy.io.savemat(param_path, param_values)",
"def create_aiodrive_info_file(data_path,\n pkl_prefix='aiodrive',\n save_path=None,\n relative_path=True):\n splits = get_split()\n\n print('Generating info. this may take several minutes.')\n if save_path is None:\n save_path = Path(data_path)\n else:\n save_path = Path(save_path)\n\n # Save Train\n seq_idx_pairs = get_seq_idx_pairs(osp.join(data_path, \"trainval/calib\"), splits['train'])\n aiodrive_infos_train = get_aiodrive_info(\n data_path,\n training=True,\n seq_idx_pairs=seq_idx_pairs,\n relative_path=relative_path\n )\n filename = save_path / f'{pkl_prefix}_infos_train.pkl'\n print(f'AIODrive info train file is saved to {filename}')\n mmcv.dump(aiodrive_infos_train, filename)\n\n # Save Val\n seq_idx_pairs = get_seq_idx_pairs(osp.join(data_path, \"trainval/calib\"), splits['val'])\n aiodrive_infos_val = get_aiodrive_info(\n data_path,\n training=True,\n seq_idx_pairs=seq_idx_pairs,\n relative_path=relative_path\n )\n filename = save_path / f'{pkl_prefix}_infos_val.pkl'\n print(f'AIODrive info val file is saved to {filename}')\n mmcv.dump(aiodrive_infos_val, filename)\n\n # Save TrainVal\n filename = save_path / f'{pkl_prefix}_infos_trainval.pkl'\n print(f'AIODrive info trainval file is saved to {filename}')\n mmcv.dump(aiodrive_infos_train + aiodrive_infos_val, filename)\n\n # Save Test\n seq_idx_pairs = get_seq_idx_pairs(osp.join(data_path, \"test/calib\"), splits['test'])\n aiodrive_infos_test = get_aiodrive_info(\n data_path,\n training=False,\n label_info=False,\n seq_idx_pairs=seq_idx_pairs,\n relative_path=relative_path\n )\n filename = save_path / f'{pkl_prefix}_infos_test.pkl'\n print(f'AIODrive info test file is saved to {filename}')\n mmcv.dump(aiodrive_infos_test, filename)",
"def create_pathsDict_for_im_asr(\n projID, subjLab, expLab, asrID, mod, fname, pathsDict=None\n ):\n \n pathsDict, keys\\\n = create_pathsDict_for_exp(projID, subjLab, expLab, pathsDict)\n \n if not 'assessors' in keys:\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab].update({'assessors' : {}})\n \n keys = list(\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'].keys())\n \n if not asrID in keys:\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'].update({asrID : {}})\n \n keys = list(\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID].keys())\n \n if not 'resources' in keys:\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID].update({'resources' : {}})\n \n keys = list(\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID]['resources'].keys())\n \n if not mod in keys:\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID]['resources'].update({mod : {}})\n \n keys = list(\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID]['resources'][mod].keys()\n )\n \n if not 'files' in keys:\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID]['resources'][mod]\\\n .update({'files' : {}})\n \n keys = list(\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID]['resources'][mod]['files'].keys()\n )\n \n if not fname in keys:\n pathsDict['projects'][projID]['subjects'][subjLab]['experiments']\\\n [expLab]['assessors'][asrID]['resources'][mod]['files']\\\n .update({fname : {}})\n \n return pathsDict, keys",
"def test_atm_psf_save_file(self):\n psf_file = os.path.join(self.test_dir, 'save_atm_psf.pkl')\n config = {\n 'psf': {\n 'type': 'AtmosphericPSF'\n },\n 'input': {\n 'atm_psf': {\n 'airmass': self.opsim_data['airmass'],\n 'rawSeeing': self.opsim_data['rawSeeing'],\n 'band': self.opsim_data['band'],\n 'screen_scale': 6.4,\n 'boresight': {\n 'type': 'RADec',\n 'ra': { 'type': 'Degrees', 'theta': self.opsim_data['rightascension'], },\n 'dec': { 'type': 'Degrees', 'theta': self.opsim_data['declination'], }\n },\n 'save_file': psf_file\n }\n },\n 'image_pos': galsim.PositionD(0,0), # This would get set appropriately during\n # normal config processing.\n 'image' : {\n 'random_seed': 1234,\n 'wcs': {\n 'type' : 'Tan',\n 'dudx' : 0.2,\n 'dudy' : 0.,\n 'dvdx' : 0.,\n 'dvdy' : 0.2,\n 'ra' : '@input.atm_psf.boresight.ra',\n 'dec' : '@input.atm_psf.boresight.dec',\n }\n }\n }\n\n if os.path.isfile(psf_file):\n os.remove(psf_file)\n\n config['wcs'] = galsim.config.BuildWCS(config['image'], 'wcs', config)\n config1 = galsim.config.CopyConfig(config)\n config2 = galsim.config.CopyConfig(config)\n\n # The first time, it will build the psf from scratch and save the screens.\n t0 = time.time()\n galsim.config.ProcessInput(config1)\n t1 = time.time()\n\n assert os.path.isfile(psf_file)\n\n # The second time, it will be faster, since it loads the screens from the file.\n t2 = time.time()\n galsim.config.ProcessInput(config2)\n t3 = time.time()\n\n print('Times = ',t1-t0,t3-t2)\n assert t1-t0 > t3-t2\n\n # Both input objects will make the same PSF at the same location:\n psf1 = galsim.config.BuildGSObject(config1, 'psf')[0]\n psf2 = galsim.config.BuildGSObject(config2, 'psf')[0]\n assert psf1 == psf2",
"def testSave(self):\n\n # Generate temp file path\n index = os.path.join(tempfile.gettempdir(), \"bm25\")\n os.makedirs(index, exist_ok=True)\n\n model = self.method(\"bm25\")\n model.save(index)\n model.load(index)",
"def test_save_load_meta_parameter(self):\n params_devices = ConstantStepResistiveDeviceParameters(w_max=0.987)\n params_forward = AnalogTileInputOutputParameters(inp_noise=0.321)\n params_backward = AnalogTileBackwardInputOutputParameters(inp_noise=0.456)\n params_update = AnalogTileUpdateParameters(desired_bl=78)\n\n # Create the device and the array.\n resistive_device = ConstantStepResistiveDevice(\n params_devices, params_forward, params_backward, params_update)\n\n model = self.get_model(resistive_device=resistive_device)\n\n # Save the model to a file.\n file = TemporaryFile()\n save(model, file)\n\n # Load the model.\n file.seek(0)\n new_model = load(file)\n file.close()\n\n # Assert over the new model tile parameters.\n parameters = new_model.analog_tile.tile.get_parameters()\n self.assertAlmostEqual(parameters.forward_io.inp_noise, 0.321)\n self.assertAlmostEqual(parameters.backward_io.inp_noise, 0.456)\n self.assertAlmostEqual(parameters.update.desired_bl, 78)",
"def ApplyModelAndSaveOutput(model, modelname, imageArray, ImageName,NameArea, noiseReduction, numberOfClasses, classesNamesList, fusionClassesY_N, maskY_N, InfoY_N, imageY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, OutputMaskName, OutputimageName, OutputNFMaskName, ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes):\n ### Create the test data array\n start_timeTestData = time.monotonic()\n \n TestData=creatTestData(imageArray)\n \n end_timeTestData = time.monotonic() \n RunningTime=timedelta(seconds=end_timeTestData - start_timeTestData)\n sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)\n ListTestDataTimes.append(sec)\n \n ### Apply the model to the test data\n start_timeModel = time.monotonic()\n \n Resultmodel=ApplyModel(TestData, modelname, model)\n \n end_timeModel = time.monotonic() \n RunningTime=timedelta(seconds=end_timeModel - start_timeModel)\n sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)\n ListApplyModelTimes.append(sec)\n \n ### Create and save the output \n start_timeOutput = time.monotonic()\n \n Mask=Resultmodel.reshape(np.shape(imageArray)[0],np.shape(imageArray)[1])\n \n #Save the non filtered mask in shades of gray\n if NFMaskY_N=='Y':\n NFMask=Mask.astype('int')\n NFMask=(NFMask/(numberOfClasses-1))*255\n NFMask=NFMask.astype(int)\n cv2.imwrite(OutputNFMaskName,NFMask)\n \n # apply a noise reduction filter to the mask\n FilteredMask=noiseRemoval(Mask, noiseReduction, numberOfClasses) \n \n if numberOfClasses>2 and fusionClassesY_N=='N' :\n #create a colored mask with 1 color=1class\n coloredMask=colorfilter(FilteredMask)\n if maskY_N=='Y': \n cv2.imwrite(OutputMaskName,coloredMask)\n \n if imageY_N=='Y':\n MaskedImage=0.3*coloredMask+0.7*imageArray\n cv2.imwrite(OutputimageName,MaskedImage)\n \n else:\n # create a black and white mask with the class of interest in white\n BandWMask=FilteredMask*0\n List=[]\n for AreaName in chosenArea:\n if AreaName in classesNamesList:\n List.append(classesNamesList.index(AreaName))\n \n for AreaNumber in List:\n BandWMask[FilteredMask==(AreaNumber)]=255\n \n BandWMask=BandWMask.astype('uint8')\n \n #If the user choosed to only keep the biggest blob and do shape analysis\n if BiggestBlobY_N=='Y':\n \n # Detect the blobs and there contour in this black and white mask\n im2, contours, hierarchy = cv2.findContours(BandWMask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n \n #find the biggest blob, erase the others and keep the smaller black blobs in the biggest white blob\n if contours!=[]:\n surfaceMainBlob = 0\n contourMainBlob=[]\n RankMainBlob=0\n for i in range(len(contours)):\n if cv2.contourArea(contours[i])>surfaceMainBlob:\n contourMainBlob=contours[i]\n surfaceMainBlob=cv2.contourArea(contours[i])\n RankMainBlob=i\n \n ListSecondaryBlod=[]\n \n for i in range(len(hierarchy[0])):\n if hierarchy[0,i][3] ==RankMainBlob:\n ListSecondaryBlod.append(contours[i]) \n \n FilteredMask2=imageArray*0\n L=[]\n L.append(contourMainBlob)\n FilteredMask2=cv2.drawContours(FilteredMask2, L, 0, (255,255,255), -1)\n FilteredMask2=cv2.drawContours(FilteredMask2, ListSecondaryBlod, -1, (0,0,0), -1)\n \n #Save the final mask\n if maskY_N=='Y': \n cv2.imwrite(OutputMaskName,FilteredMask2)\n \n # calculate some of the properties of the main blob \n hull = cv2.convexHull(contourMainBlob)\n rect = cv2.minAreaRect(contourMainBlob)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n axes=rect[1]\n axe1=axes[0]\n axe2=axes[1]\n \n if axe1<axe2:\n a=axe1\n axe1=axe2\n 
axe2=a\n \n # Save the masked image and draw some of the blob properties (convexhull, rectangle, main axes...) \n if imageY_N=='Y':\n FilteredMask3=FilteredMask2\n FilteredMask3[FilteredMask2==255]=1\n FilteredMask3[FilteredMask2==0]=0.1 \n MaskedImage=FilteredMask3*imageArray\n \n MaskedImage=cv2.drawContours(MaskedImage,[box],0,(0,255,0),1)\n MaskedImage=cv2.ellipse(MaskedImage,rect,(0,255,0),1)\n \n x1,y1=box[0]\n x2,y2=box[1]\n x3,y3=box[2]\n x4,y4=box[3]\n \n l1x1=int((x3+x2)/2)\n l1y1=int((y3+y2)/2)\n \n l1x2=int((x4+x1)/2)\n l1y2=int((y4+y1)/2) \n \n l2x1=int((x1+x2)/2)\n l2y1=int((y1+y2)/2)\n \n l2x2=int((x4+x3)/2)\n l2y2=int((y4+y3)/2) \n \n MaskedImage=cv2.line(MaskedImage,(l1x1,l1y1),(l1x2,l1y2),(255,255,0),1) # blue\n MaskedImage=cv2.line(MaskedImage,(l2x1,l2y1),(l2x2,l2y2),(255,255,255),1) # white\n L=[]\n L.append(hull)\n MaskedImage=cv2.drawContours(MaskedImage, L, 0, (0,0,255), 1)\n \n cv2.imwrite(OutputimageName,MaskedImage)\n \n #Save the information in ListAirs\n if InfoY_N=='Y':\n for i in ListSecondaryBlod:\n surfaceSecondaryBlobi=cv2.contourArea(i)\n surfaceMainBlob=surfaceMainBlob-surfaceSecondaryBlobi\n \n \n x,y,w,h = cv2.boundingRect(contourMainBlob)\n aspect_ratio = float(w)/h\n rect_area = w*h\n extent = float(surfaceMainBlob)/rect_area\n equi_diameter = np.sqrt(4*surfaceMainBlob/np.pi)\n hull_area = cv2.contourArea(hull)\n solidity = float(surfaceMainBlob)/hull_area\n \n TotalSurface=len(imageArray)*len(imageArray[0])\n ListAirs=np.vstack([ListAirs, [NameArea, ImageName , surfaceMainBlob, surfaceMainBlob/TotalSurface, aspect_ratio, extent, solidity,equi_diameter, axe1, axe2]])\n \n \n else: #if No blob is found, just save a black rectangle\n FilteredMask2=imageArray*0\n if maskY_N=='Y': \n cv2.imwrite(OutputMaskName,FilteredMask2)\n if imageY_N=='Y':\n cv2.imwrite(OutputimageName,FilteredMask2)\n if InfoY_N=='Y':\n ListAirs=np.vstack([ListAirs, [NameArea, ImageName , 0, 0, 0, 0, 0, 0,0,0]])\n \n #If the user decided to keep all the blobes and not do the shape analysis\n else:\n #Save the final mask\n if maskY_N=='Y': \n cv2.imwrite(OutputMaskName,BandWMask)\n \n # Save the masked image and draw some of the blob properties (convexhull, rectangle, main axes...) \n if imageY_N=='Y':\n FilteredMask3=np.zeros((len(BandWMask),len(BandWMask[0]),3))\n FilteredMask3[BandWMask==255]=[1,1,1]\n FilteredMask3[BandWMask==0]=[0.1,0.1,0.1] \n MaskedImage=FilteredMask3*imageArray\n \n cv2.imwrite(OutputimageName,MaskedImage)\n \n #Save the information in ListAirs\n if InfoY_N=='Y':\n surfaceClassOfInterest=np.sum(BandWMask)/255\n TotalSurface=len(imageArray)*len(imageArray[0])\n ListAirs=np.vstack([ListAirs, [NameArea, ImageName , surfaceClassOfInterest, surfaceClassOfInterest/TotalSurface]])\n \n \n \n \n \n end_timeOutput = time.monotonic() \n RunningTime=timedelta(seconds=end_timeOutput - start_timeOutput)\n sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)\n ListSaveOutputTimes.append(sec)\n return ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes",
"def save_nifti(self, path):\n meta = {'te': self.te, 'tr': self.tr, 'sw': self.sw}\n if self.sequence_type == 'STEAM':\n meta['tm'] = self.tm\n\n # store real and imaginary components in last 2 dims\n component_fid = np.stack((np.real(self.fid),np.imag(self.fid)), -2)\n nifti = nib.Nifti2Image(component_fid, self.transform.get_matrix(), extra=meta)\n nib.save(nifti, path)",
"def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')",
"def SAV(self, loc):\n cmd = f\"*SAV {loc}\"\n self.instr.write(cmd)",
"def save_data(interpolated_acti, norm_acti, smoothed_acti, flag_roi, trials_to_drop, roi_tensor, meta_df, animal, name, arguments, selection):\r\n\r\n\tconfiguration = pd.concat([pd.DataFrame(arguments, index=[0]), pd.DataFrame(selection, index=[0])], axis=1)\r\n\t\r\n\tpath = os.path.join(paths.path2Output, animal, str(arguments['Function']), \r\n\t\t\t\t\t\tstr(arguments['Init']), str(arguments['Rank']), name)\r\n\ttry:\r\n\t os.makedirs(path)\r\n\texcept:\r\n\t FileExistsError\r\n\t\r\n\tconfiguration.to_csv(os.path.join(path, 'configuration.csv'))\r\n\tnp.save(os.path.join(path, 'acti'), interpolated_acti)\r\n\tnp.save(os.path.join(path, 'norm_acti'), norm_acti)\r\n\tnp.save(os.path.join(path, 'smoothed_acti'), smoothed_acti)\r\n\tnp.save(os.path.join(path,'flag_roi'), flag_roi)\r\n\tnp.save(os.path.join(path,'trials_to_drop'), trials_to_drop)\r\n\tnp.save(os.path.join(path,'roi_tensor'), roi_tensor)\r\n\tmeta_df.to_csv(os.path.join(path, 'meta_df.csv'))",
"def saveAnim(self):\n animLabel = str(self.nameEditLine.text())\n selectionList = cmds.ls(sl=1)\n if animLabel:\n if selectionList:\n currentItem = self.fileDir.selectedItems()\n if currentItem:\n # export anim curve\n\n currentAnimGIFPath = exportAnimCurve.exportAnimCurve(selectionList, currentItem[-1].toolTip(0),\n animLabel, self.tempGIFDir, self.iconsDir)\n\n self.nameEditLine.clear()\n self.recordBtn.loadGIF2Button(path=currentAnimGIFPath)\n\n # refresh\n # self.loadCurrentFolder()\n logger.info('Successfully Save Anim Curve')\n else:\n QtWidgets.QMessageBox.warning(self, 'Warning', 'No folder selected!\\nPlease select the folder!',\n QtWidgets.QMessageBox.Ok)\n logger.warning('No folder selected!\\nPlease select the folder!')\n else:\n QtWidgets.QMessageBox.warning(self, 'Warning',\n 'No item selected!\\nPlease select the Control Object(s)!',\n QtWidgets.QMessageBox.Ok)\n logger.warning('No item selected!\\nPlease select the Control Object(s)!')\n else:\n QtWidgets.QMessageBox.warning(self, 'Warning',\n 'No pose file name enter!\\nPlease input the pose file name!',\n QtWidgets.QMessageBox.Ok)\n logger.warning('No pose file name enter!\\nPlease input the pose file name!')",
"def save_ma(self, Interferogram, mask_array):\n phsFile = self.DataPrefix + 'Data_' + Interferogram.Name[:-4]\n mskFile = self.MaskPrefix + 'Mask_' + Interferogram.Name[:-4]\n np.save(os.path.join(self.ProcDir, phsFile), mask_array.data)\n np.save(os.path.join(self.ProcDir, mskFile), mask_array.mask)\n\n return phsFile",
"def save_ims(self, save_dir):\n save_ims = Save_Ims(model = self, save_dir = save_dir)\n save_ims()",
"def intf_MMSAVEAS(E):\n global SAVEFILE\n check= ''\n if E.The.StackSize() >= 1: # Ensure something is here.\n checkob= E.The.StackCopyItemLast() # Input verification. Next item on stack now.\n check=checkob.whatami\n if not check == \"TXT\":\n print(\"Input Error: mmsaveas\")\n print(intf_MMSAVEAS.__doc__)\n return # Without doing much of anything.\n SAVEFILE= E.The.StackPop().val\n print(\"Current file set to: %s\\n\" % SAVEFILE)\n intf_MMSAVE(E) # Actual saving done in one location.",
"def save_fida(self, path):\n # input is Dimensions are channel x rep x mega x isis x t\n # FID-A seems to accomodate only 4: t x chan x rep x subSpecs\n # TODO: see if ISIS and MEGA subspecs are differentiated\n\n # permute the axes to t x chan x rep x mega x isis\n fids = np.transpose(self.fid, (4, 0,1,2,3))\n specs = np.transpose(self.spec, (4, 0,1,2,3))\n # reshape to combine subspecs\n dims = list(fids.shape[0:-2])\n dims.append(-1)\n fids = np.reshape(fids, tuple(dims))\n specs = np.reshape(specs, tuple(dims))\n\n # remove last dimensi if there are no subSpecs\n fids = np.squeeze(fids)\n specs = np.squeeze(specs)\n\n # fp to avoid int64 errors\n dim_dict = {'t': 1.0, 'coils': 2.0, 'averages': 3.0, 'subSpecs': 0.0, 'extras': 0.0}\n\n # there are still subSpectra\n if fids.ndim == 4:\n subspecs = fids.shape[-1]\n rawSubspecs = fids.shape[-1]\n dim_dict['subSpecs'] = 4.0\n else:\n subspecs = 0\n rawSubspecs = 0\n\n if self.fid.shape[0]==1:\n addedrcvrs = 1\n else:\n addedrcvrs = 0\n\n B0 = self.larmor/util.GYROMAGNETIC_RATIO[self.nucleus]\n\n n_averages = float(self.fid.shape[self.dimnames['rep']])\n # fids - time domain MRS data.\n # specs - frequency domain MRS data.\n # t - vector of time values for plotting in the time domain [s]\n # ppm - vector of frequency values for plotting in the frequency domain\n # [ppm]\n # sz - size of the fids and specs arrays\n # date - date that the data was acquired or simulated\n # averages - number of averages in the dataset (possibly altered by\n # processing)\n # rawAverages - number of averages in the original dataset (not altered by\n # processing).\n # subspecs - number of subspectra (ISIS, edit on/off, etc) in the dataset\n # (possibly altered by processing).\n # rawSubspecs - number of subspectra (ISIS, edit on/off, etc) in the original\n # dataset (not altered by processing). Bo - magnetic field strength [Tesla]\n # txfrq - Centre frequnecy [MHz];\n # linewidth - linewidth of data (only used for simulated data) [Hz]\n # n - number of spectral points\n # dwelltime - dwell time of the data in the time domain [s] (dwelltime =\n # 1/spectralwidth)\n # sim - type of simulation (ideal vs. shaped pulses), only used for\n # simulated data.\n # te seq dims\n # - echo time of acquisition [ms], only used for simulated data - type of sequence used (only used for simulated data).\n # - structure specifying which data dimensions are stored along\n # which dimensions of the fids/specs arrays. Fields include:\n # t - time/frequency dimension (usually this is 1, the first\n # dimension of the fids/specs array).\n # coils - for multiple receiver array, this is the dimension of\n # the arrayed receiver data (can be 2, 3 or 4). 
averages - for multiple averages, this is the dimension of the\n # averages (can be 2, 3 or 4).\n # subSpecs - in the case of subtraction data (ISIS, MEGA-PRESS), this\n # is the dimension of the subSpectra (can be 2, 3 or 4).\n\n\n mdict = {'fids': fids, 'specs': specs, 't': self.t,\n 'ppm': self.ppm, 'sz': np.float_(fids.shape), 'date': '',\n 'averages': n_averages, 'rawAverages': n_averages,\n 'subspecs': float(subspecs), 'rawSubspecs': float(rawSubspecs), 'Bo': B0,\n 'txfrq': self.larmor, 'dwelltime': 1.0/self.sw,\n 'spectralwidth': self.sw, 'seq': self._sequence_name,\n 'dims': dim_dict, 'te': self.te * 1e3, 'tr': self.tr * 1e3,\n 'pointsToLeftshift': 0}\n\n # writtentostruct\n # gotparams\n # filtered\n # zeropadded\n # freqcorrected\n # phasecorrected\n # averaged\n # addedrcvrs\n # Subtracted\n # Writtentotext\n # Downsampled\n # avgNormalized\n # isISIS\n # - Has the dataset been written to a structure (1 or 0)\n # - Have the parameters been retrieved from the dataset (1 or 0)\n # - Has the dataset been filtered (1 or 0)\n # - Has the dataset been zeropadded (1 or 0)\n # - Has the dataset been frequency corrected (1 or 0) - Has the dataset been phase corrected (1 or 0)\n # - Have the averages been combined (1 or 0)\n # - Have the rcvr channels been combined (1 or 0).\n # - Have the subspecs been subtracted (1 or 0)\n # - Has the data been written to text file (1 or 0) - has the data been resampled to a different\n # spectral resolution (1 or 0)\n # - Has the data been amplitude scaled following\n # combination of the averages (1 or 0)\n # - Does the dataset contain ISIS subspectra (1 or 0)\n\n flags = {'writtentostruct': 1, 'gotparams': 1, 'filtered': 0,\n 'zeropadded': 0, 'freqcorrected': 0, 'phasecorrected': 0,\n 'averaged': int(n_averages == 1), 'addedrcvrs': addedrcvrs,\n 'subtracted': 0, 'Writtentotext': 0, 'Downsampled': 0,\n 'avgNormalized': 0, 'isISIS': int(self.is_special),\n 'leftshifted': 0}\n\n if self.sequence_type == 'STEAM':\n mdict['tm'] = self.tm\n\n mdict['flags'] = flags\n scipy.io.savemat(path, {'svs': mdict}, format='5', long_field_names=True)"
] |
[
"0.5425702",
"0.5371059",
"0.53118604",
"0.5278772",
"0.52680564",
"0.5233465",
"0.51136476",
"0.50579697",
"0.5051095",
"0.5047712",
"0.50206965",
"0.50204813",
"0.4988163",
"0.49863473",
"0.49829757",
"0.4980291",
"0.49471554",
"0.4935383",
"0.49254158",
"0.49232134",
"0.4891882",
"0.4877691",
"0.48648068",
"0.48593464",
"0.48590472",
"0.48526898",
"0.48506382",
"0.4848934",
"0.48443425",
"0.48270246"
] |
0.87497056
|
0
|
__init__(projName, obj_aproj=None, obj_simRunner=None, obj_simSet=None) Initiate ProjectManager object projName Unique name for ProjectManager object obj_aproj AnimatLabModel object obj_simrunner SimRunner object
|
def __init__(self, projName, obj_aproj=None, obj_simRunner=None):
self.projName = projName
self.activityLog = {}
self.errorLog = {}
if (type(obj_aproj) == AnimatLabModel) or (obj_aproj is None):
self.aproj = obj_aproj
else:
raise TypeError("obj_aproj must be an AnimatLabModel object!")
if (type(obj_simRunner) == AnimatLabSimulationRunner) or (obj_simRunner is None):
self.simRunner = obj_simRunner
else:
raise TypeError("obj_simRunner must be an AnimatLabSimulationRunner object!")
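
# Usage sketch (hypothetical): how a ProjectManager is typically constructed.
# Only the None defaults and the TypeError guard are guaranteed by __init__
# above; the import paths and constructor arguments in the comments are
# assumptions about the wider AnimatLab toolkit and may differ.
#
# from AnimatLabModel import AnimatLabModel
# from AnimatLabSimulationRunner import AnimatLabSimulationRunner
#
# pm = ProjectManager("TestProject")                       # bare manager, attach objects later
# model = AnimatLabModel(...)                              # load an existing AnimatLab project
# runner = AnimatLabSimulationRunner(...)                  # points at simulator/sim files
# pm = ProjectManager("TestProject", obj_aproj=model, obj_simRunner=runner)
#
# Anything other than the expected types (or None) is rejected:
# ProjectManager("TestProject", obj_aproj="not a model")   # raises TypeError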
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, main_params, projs):\n self.__dict__ = vars(main_params)\n self.projs = projs\n self.proj, self.model, self.var, self.region = (\n self.str_to_aospy_iterable(main_params.proj, main_params.model,\n main_params.var, main_params.region)\n )\n self.run = self.create_child_run_obj(self.model, self.run, self.proj)\n self.region = [aospy.utils.io.dict_name_keys(self.region)]",
"def __init__(self):\n\n # Public attributes\n self.model = None\n \"\"\"Model: A Model object\"\"\"\n\n self.domain = None\n \"\"\"Domain: A Domain object\"\"\"\n\n self.jobs = []\n \"\"\"list: a list containing Job objects\"\"\"\n\n self.scheduler = None\n \"\"\"Scheduler: A scheduler object to use for each Job in self.jobs\"\"\"\n\n self.output = SimulationOutput()\n \"\"\"CompletedSim: A CompletedSim object returned by the self.collect() method\"\"\"\n\n self.base_hydro_namelist = Namelist()\n \"\"\"dict: base hydro namelist produced from model and domain\"\"\"\n\n self.base_hrldas_namelist = Namelist()\n \"\"\"dict: base hrldas namelist produced from model and domain\"\"\"",
"def __init__(self, task_queue, results_queue, individuals):\n Process.__init__(self)\n \n self.proc_name = self.name\n \n logger.info(\"Setting up variant_annotator: {0}\".format(\n self.proc_name))\n \n logger.debug(\"Setting up task queue\")\n self.task_queue = task_queue\n \n logger.debug(\"Setting up results queue\")\n self.results_queue = results_queue\n\n logger.debug(\"Setting up individuals\")\n self.individuals = individuals\n \n if len(self.individuals) == 1:\n self.models = ['AR_comp', 'AR_comp_dn', 'AD', 'AD_dn']\n else:\n self.models = ['AR_comp', 'AR_comp_dn']",
"def __init__(self, project, job_name):\n super(ParameterMaster, self).__init__(project, job_name)\n self.__name__ = \"ParameterMaster\"\n self.__version__ = \"0.0.1\"\n self._job_generator = ParameterJobGenerator(self)\n self.iteration_frame = pandas.DataFrame({\"ENCUT\": [], \"KPOINT_MESH\": []})",
"def __init__(self, model_filename, sim_filename, include_paths = None):\n\n self.model_filename = model_filename\n self.sim_filename = sim_filename\n self.include_paths = include_paths\n \n self.simulation = None\n self.fit_input = None",
"def __init__(\n self, \n name: str, \n projects: ProjectCache, \n backend: VizierBackend, \n packages: Dict[str,PackageIndex]\n ):\n self.name = name\n self.projects = projects\n self.backend = backend\n self.packages = packages\n # Maintain an internal dictionary of running tasks\n self.tasks: Dict[str,Any] = dict()",
"def __init__(self,simulation_manager):\n self.simulation_manager = simulation_manager",
"def __init__(self):\n\n ProjectBase.__init__(self)\n\n self.title = Title() # TITLE project title\n self.options = General() # OPTIONS analysis options\n self.report = Report() # REPORT output reporting instructions\n self.files = Files() # FILES interface file options\n self.backdrop = BackdropOptions() # BACKDROP bounding rectangle and file name of backdrop image\n self.map = MapOptions() # MAP map's bounding rectangle and units\n self.raingages = SectionAsList(\"[RAINGAGES]\") # (list of RainGage) # RAINGAGES rain gage information\n\n self.hydrographs = SectionAsList(\"[HYDROGRAPHS]\") # (list of UnitHydrograph)\n # unit hydrograph data used to construct RDII inflows\n\n self.evaporation = Evaporation() # EVAPORATION evaporation data\n self.temperature = Temperature() # TEMPERATURE air temperature and snow melt data\n self.adjustments = Adjustments() # ADJUSTMENTS monthly climate adjustments\n self.subcatchments = SectionAsList(\"[SUBCATCHMENTS]\") # (list of Subcatchment)\n # basic subcatchment information\n\n # self.subareas = [Section] # SUBAREAS subcatchment impervious/pervious sub-area data\n\n self.infiltration = SectionAsList(\"[INFILTRATION]\") # (list of str)\n # subcatchment infiltration parameters\n\n self.lid_controls = SectionAsList(\"[LID_CONTROLS]\") # (list of LIDControl)\n # low impact development control information\n\n self.lid_usage = SectionAsList(\"[LID_USAGE]\") # (list of LIDUsage)\n # assignment of LID controls to subcatchments\n\n self.aquifers = SectionAsList(\"[AQUIFERS]\") # (list of Aquifer)\n # groundwater aquifer parameters\n\n self.groundwater = SectionAsList(\"[GROUNDWATER]\") # (list of Groundwater)\n # subcatchment groundwater parameters\n\n self.gwf = SectionAsList(\"[GWF]\") # (list of Groundwater Flow Equations)\n # groundwater flow equation parameters\n\n self.snowpacks = SectionAsList(\"[SNOWPACKS]\") # (list of SnowPack)\n # subcatchment snow pack parameters\n\n self.junctions = SectionAsList(\"[JUNCTIONS]\") # (list of Junction)\n # junction node information\n\n self.outfalls = SectionAsList(\"[OUTFALLS]\") # (list of Outfall)\n # outfall node information\n\n self.dividers = SectionAsList(\"[DIVIDERS]\") # (list of Divider)\n # flow divider node information\n\n self.storage = SectionAsList(\"[STORAGE]\") # (list of StorageUnit)\n # storage node information\n\n self.conduits = SectionAsList(\"[CONDUITS]\") # (list of Conduit)\n # conduit link information\n\n self.pumps = SectionAsList(\"[PUMPS]\") # (list of Pump)\n # pump link information\n\n self.orifices = SectionAsList(\"[ORIFICES]\") # (list of Orifice)\n # orifice link information\n\n self.weirs = SectionAsList(\"[WEIRS]\") # (list of Weir)\n # weir link information\n\n self.outlets = SectionAsList(\"[OUTLETS]\") # (list of Outlet)\n # outlet link information\n\n self.xsections = SectionAsList(\"[XSECTIONS]\") # (list of CrossSection)\n # conduit, orifice, and weir cross-section geometry\n\n self.transects = Transects() # transect geometry for conduits with irregular cross-sections\n\n self.controls = Controls()\n # rules that control pump and regulator operation\n\n self.events = SectionAsList(\"[EVENTS]\") # (list of Events)\n\n self.landuses = SectionAsList(\"[LANDUSES]\") # (list of Landuse) # land use categories\n\n self.buildup = SectionAsList(\"[BUILDUP]\") # (list of Buildup)\n # buildup functions for pollutants and land uses\n\n self.washoff = SectionAsList(\"[WASHOFF]\") # (list of Washoff)\n # washoff functions for pollutants and land uses\n\n self.pollutants = 
SectionAsList(\"[POLLUTANTS]\") # (list of Pollutant)\n # pollutant information\n\n self.coverages = Coverages() # COVERAGES assignment of land uses to subcatchments\n self.treatment = SectionAsList(\"[TREATMENT]\") # (list of Treatment)\n # pollutant removal functions at conveyance system nodes\n\n self.inflows = SectionAsList(\"[INFLOWS]\") # (list of DirectInflow)\n # INFLOWS # external hydrograph/pollutograph inflow at nodes\n\n self.dwf = SectionAsList(\"[DWF]\") # (list of DryWeatherInflow)\n # baseline dry weather sanitary inflow at nodes\n\n self.patterns = SectionAsList(\"[PATTERNS]\") # (list of Pattern)\n # periodic variation in dry weather inflow\n\n self.rdii = SectionAsList(\"[RDII]\") # (list of RDIInflow)\n # rainfall-dependent I/I information at nodes\n\n self.loadings = InitialLoadings()\n # initial pollutant loads on subcatchments\n\n self.curves = SectionAsList(\"[CURVES]\") # (list of Curve)\n # CURVES x-y tabular data referenced in other sections\n\n self.timeseries = SectionAsList(\"[TIMESERIES]\") # (list of TimeSeries)\n # time series data referenced in other sections\n\n self.labels = SectionAsList(\"[LABELS]\") # (list of Label)\n # X, Y coordinates and text of labels\n\n self.subcentroids = SectionAsList(\"[SUBCENTROIDS]\") # (list of subcentroids)\n # X, Y coordinates and text of subcentroids\n\n self.sublinks = SectionAsList(\"[SUBLINKS]\") # (list of sublinks)\n # sublinks information\n\n self.sections = [\n self.title,\n self.options,\n self.evaporation,\n self.raingages,\n self.subcatchments,\n self.infiltration,\n self.junctions,\n self.dividers,\n self.storage,\n self.outfalls,\n self.conduits,\n self.pumps,\n self.orifices,\n self.weirs,\n self.outlets,\n self.xsections,\n self.landuses,\n self.coverages,\n self.pollutants,\n self.timeseries,\n self.patterns,\n self.curves,\n self.dwf,\n self.rdii,\n self.loadings,\n self.buildup,\n self.washoff,\n self.report,\n self.events,\n self.files,\n self.backdrop,\n self.map,\n self.hydrographs,\n self.temperature,\n self.adjustments,\n self.lid_controls,\n self.lid_usage,\n self.aquifers,\n self.groundwater,\n self.snowpacks,\n self.transects,\n self.controls,\n self.treatment,\n self.inflows,\n self.labels,\n self.subcentroids,\n self.sublinks] # Start with a sensible order of sections.\n self.add_sections_from_attributes() # Add any sections not added in the line above, should not be any left.",
"def __init__(self, travel_model_dir_name, mode='full', years_to_run=None, procedure_file=\"opus.par\"):\n\n\ttravel_model_configuration = {}\n\t\n\ttravel_model_configuration.update( {'visum_version_number': 10} )\n\t\n\t### mapping from visum matrice name to urbansim travel_data variable name\n\t## dict key is used as matrix number for VisumPy.helpers.GetODMatrix and VisumPy.helpers.GetSkimMatrix\n\t## dict value is used as attribute name for urbansim travel_data table\n\ttm_to_urbansim_variables = {\n\t'od':{\n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1:'transit_trips', #'transit (PuT - public transport) trips',\n\t2:'auto_trips', #'auto trips',\n\t}, \n\t'skim':{ \n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1: 'auto_travel_time', #'auto assigned travel time (ttc)',\n\t2: 'transit_in_vehicle_time' #'PuT in-vehicle time (ivt)',\n\t} \n\t}\n \n\t### TAZ attributes to be transferred from urbansim to visum\n\turbansim_to_tm_variables = [\n\t 'TAZ=(zone.zone_id).astype(int16)',\n\t 'retail_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_retail)', \n\t ## the employment groups below need to be defined in employment_adhoc_sector_groups and \n\t ## employment_adhoc_sector_group_definitions before they can be used\n\t #'fires_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_fires)',\n\t #'gov_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_gov)',\n\t #\"educ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_educ)\",\n\t #\"wtcu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_wtcu)\",\n\t #\"manu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_manu)\",\n\t #\"univ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_univ)\",\n\t ## need to change income categories to 4 instead of 3\n\t \"low_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_low_income_households)\",\n\t \"mid_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_mid_income_households)\",\n\t #\"upper_mid_income_hh_by_taz=?\",\n\t \"upper_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_high_income_households)\",\n\t ## need variable specification\n\t #\"pctmf=?\",\n\t #\"gqi=?\",\n\t #\"gqn=?\",\n\t #\"fteuniv=?\",\n\t #\"density=?\"\n ]\n \n\ttravel_model_configuration.update( {\n\t \"tm_to_urbansim_variables\":tm_to_urbansim_variables,\n\t \"urbansim_to_tm_variables\":urbansim_to_tm_variables,\n\t} )\n\t\n\tself.__add_models(travel_model_configuration, mode)\n\tself.__add_years(travel_model_configuration, travel_model_dir_name, years_to_run, procedure_file)\n\n\tself.merge(travel_model_configuration)",
"def __init__(__self__, *,\n agent_id: pulumi.Input[int],\n project: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"agent_id\", agent_id)\n pulumi.set(__self__, \"project\", project)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if name is not None:\n pulumi.set(__self__, \"name\", name)",
"def __init__(self, xml_name, recompile_cpp=False, rendering=True):\n if recompile_cpp:\n self._update_wrapper()\n\n if sys.platform.startswith('darwin'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.dylib\")\n elif sys.platform.startswith('linux'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.so\")\n elif sys.platform.startswith('win32'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.dll\")\n else:\n raise EnvironmentError(\"Unknown operating system found.\")\n\n model_path = os.path.join(pathlib.Path(__file__).parent, \"mujoco_model/\", xml_name).encode('utf-8')\n self.rendering = rendering\n\n # C++ control engine.\n self.wrapper = ctypes.CDLL(cdll_path)\n self.instance = self.wrapper.get_instance(ctypes.c_char_p(model_path), ctypes.c_bool(rendering))\n\n # Indices of the object bodies.\n self.obstacle_body_index = self.get_body_index(\"obstacle\")\n self.agent_body_index = self.get_body_index(\"agent\")\n\n # Indices of the joints.\n self.obstacle_jnt_index = self.get_jnt_index(\"slider:obstacle\")\n self.agent_jnt_x_index = self.get_jnt_index(\"slider:agent-obstacle_x\")\n self.agent_jnt_y_index = self.get_jnt_index(\"slider:agent-y\")\n\n # Initial positions from the configuration.\n self.obstacle_pos = self.get_body_ini_pos(self.obstacle_body_index)\n self.agent_pos = self.get_body_ini_pos(self.agent_body_index)",
"def __init__(self, configs, simulator, wait_time=3):\n self.configs = configs\n self.sim = simulator.sim\n self.gripper = VREP_Gripper()\n self.open()",
"def __init__(self, **kwargs):\n\n self.options = {**self.DEFAULTS, **kwargs}\n self.engine = self.start_matlab_engine()\n self.spm_directory = self.get_spm_directory()",
"def __init__(self, name=None, params=None, params_from_file=False, params_from_user=False):\n\n print(\"\")\n if name:\n self._name = name\n else:\n self._name = input(\"Simulation Name : \")\n\n print(\"Name : \"+str(self._name))\n\n self.plot_path = os.getcwd()+'/session/'+self._name+'_plots/'\n try:\n os.mkdir(self.plot_path)\n except (FileExistsError, FileNotFoundError):\n beep = lambda x: os.system(\"echo '\\a';sleep 0.5;\" * x)\n beep(1)\n print(\"WARNING : FOLDER PATH ALREADY EXISTS\")\n print(self.plot_path)\n print(\"WRITING OVER\")\n for fn in os.listdir(self.plot_path):\n os.remove(self.plot_path+fn)\n\n if params:\n self.params = params\n else:\n if params_from_file:\n self.params = load_input_pickle(params_from_file)\n elif params_from_user:\n self.params = get_user_params()\n else:\n #Define default params\n self.params = load_input_pickle('default')\n\n self.default_runs = [] # array of simulation runs with default parameters\n self.mod_runs = [] # array of tuples that contain 0) a list of simulation runs\n # and 1) a dictionary clarifying which parameter was given\n # which value for each run. (for convenience, can also\n # determine by comparing the simulation_run.params\n # directly\n\n\n print(\"Running Model with Default Parameters...\")\n self.run_default()\n print(\"\")",
"def __init__(__self__, *,\n project: Optional[pulumi.Input[str]] = None):\n if project is not None:\n pulumi.set(__self__, \"project\", project)",
"def __init__(self, jco, par_info_file=None):\n\n self._Pest = Pest(jco, par_info_file=par_info_file)\n self.parinfo = self._Pest.parinfo\n\n self.la = errvar(jco)\n self.parinfo = None\n if par_info_file is not None:\n self.parinfo = pd.read_csv(par_info_file, index_col='Name')\n self.ident_df = None",
"def __init__(self, opts: dict, solver_opts: dict):\n self.name = opts.get(\"name\", \"Undefined\") # Name of the problem\n self.gp = opts.get(\"grid_points\") # Number of grid points\n self.nadir_p = opts.get(\"nadir_points\") # Nadir points\n self.eps = opts.get(\"penalty_weight\", 1e-3) # Penalty weight\n self.round = opts.get(\"round_decimals\", 9) # Decimal places to round to\n self.nadir_r = opts.get(\"nadir_ratio\", 1) # Nadir ratio\n self.logdir = opts.get(\"logging_folder\", \"logs\") # Folder to save logs\n self.early_exit = opts.get(\"early_exit\", True) # Whether to enable early exit\n self.bypass = opts.get(\"bypass_coefficient\", True) # Whether to enable bypass coefficient\n self.flag = opts.get(\"flag_array\", True) # Whether to use flag array\n self.cpu_count = opts.get(\"cpu_count\", cpu_count()) # Number of CPUs to use\n self.redivide_work = opts.get(\"redivide_work\", True) # Whether to redivide work\n self.model_fn = opts.get(\"pickle_file\", \"model.p\") # Pickle file name\n self.shared_flag = opts.get(\"shared_flag\", True) # Whether to use shared flag array\n self.output_excel = opts.get(\"output_excel\", True) # Whether to output to Excel\n self.process_logging = opts.get(\"process_logging\", False) # Whether to enable process logging\n self.process_timeout = opts.get(\"process_timeout\", None) # Timeout for processes\n self.solver_name = opts.get(\"solver_name\", \"gurobi\") # Name of solver\n self.solver_io = opts.get(\"solver_io\", \"python\") # IO mode of solver\n\n self.solver_opts = solver_opts # Solver options\n self.solver_opts[\"MIPGap\"] = solver_opts.get(\"MIPGap\", 0.0) # MIP gap\n self.solver_opts[\"NonConvex\"] = solver_opts.get(\"NonConvex\", 2) # Nonconvex setting\n\n # Remove None values from dict when user has overriden them\n for key, value in dict(self.solver_opts).items():\n if value is None or value:\n del self.solver_opts[key]\n\n self.time_created = time.strftime(\"%Y%m%d-%H%M%S\") # Time the options object was created\n self.log_name = self.name + \"_\" + str(self.time_created) # Name of log file",
"def __init__(self, WORKDIR, JOBNAME, debug):\n #read in parameters\n self.workdir = WORKDIR\n self.jobname = JOBNAME",
"def __init__(self, name, cover_photo):\r\n self.__name = name\r\n self.__cover_photo = cover_photo\r\n self.__projects = [None] * NUM_PROJS",
"def __init__(self, job, project=None):\n self.job = job\n self.build_definition = job.build_definition\n if project:\n self.project = project\n else:\n self.project = self.job.project\n self.current_index = -1\n self.plan = []",
"def __init__(self, project_info):\n self.global_conf.subjects = project_info.subjects\n self.global_conf.subject = project_info.subject\n\n if len(project_info.subject_sessions) > 0:\n self.global_conf.subject_session = project_info.subject_session\n self.subject_directory = os.path.join(\n project_info.base_directory,\n project_info.subject,\n project_info.subject_session,\n )\n else:\n self.global_conf.subject_session = \"\"\n self.subject_directory = os.path.join(\n project_info.base_directory, project_info.subject\n )\n\n self.derivatives_directory = os.path.abspath(project_info.output_directory)\n self.output_directory = os.path.abspath(project_info.output_directory)\n\n self.stages = {\n \"Preprocessing\": PreprocessingStage(\n bids_dir=project_info.base_directory, output_dir=self.output_directory\n ),\n \"Registration\": RegistrationStage(\n pipeline_mode=\"Diffusion\",\n fs_subjects_dir=project_info.freesurfer_subjects_dir,\n fs_subject_id=os.path.basename(project_info.freesurfer_subject_id),\n bids_dir=project_info.base_directory,\n output_dir=self.output_directory,\n ),\n \"Diffusion\": DiffusionStage(\n bids_dir=project_info.base_directory, output_dir=self.output_directory\n ),\n \"Connectome\": ConnectomeStage(\n bids_dir=project_info.base_directory, output_dir=self.output_directory\n ),\n }\n\n Pipeline.__init__(self, project_info)\n\n self.subject = project_info.subject\n self.diffusion_imaging_model = project_info.diffusion_imaging_model\n\n self._init_and_add_listeners_to_stage_traits()",
"def __init__(self, cfgfile, args):\n logging.info(\"Initialising SimController\")\n # set defaults\n self.lmap = None # Ref to LogicalMap object\n self.gui = None # Ref to Gui object\n self.agent = None # Ref to Agent object\n self.gen = None # Ref to step generator\n self.current = None # current search coordinates\n self.pathcost, self.pathsteps, self.pathtime = 0, 0, 0\n self.timeremaining = float('inf')\n self.timeout = float('inf')\n\n self.path = set() # set of all coordinates displayed as part of path\n self.keptpath = None\n self.fullsearchflag = False # set to True if map is populated with extra coords\n self.coordsets = None # sets of coordinates that will need to be reset\n\n self.cfg = args # Default params as modified via CLI\n self.gotscript = False\n self.script = {} # Allows for dynamic changes\n\n # we distinguish 3 modes - config file, CLI or batch\n if cfgfile is not None:\n self.readConfig()\n self.gen = self.stepGenerator(self.cfg[\"START\"], self.cfg[\"GOAL\"])\n elif self.cfg[\"BATCH\"] is not None:\n try:\n self.runBatch(*self.cfg[\"BATCH\"])\n logging.info(\"\\nBatch process completed. Results written to \" + self.cfg[\"BATCH\"][1] + \".\\n\")\n except Exception as e:\n logging.warning(\n \"\\nAn error has occurred. Batch results may be incomplete. l\"\n \" the exception: \\n {}\".format(e))\n finally:\n raise SystemExit()\n else:\n try:\n self.setStart(ast.literal_eval(self.cfg.get(\"START\")))\n self.setGoal(ast.literal_eval(self.cfg.get(\"GOAL\")))\n\n self.initAgent()\n self.processMap() # imports map to model may return BadMap exception\n self.processPrefs() # passes heuristic and deadline preferences to model\n self.resetVars()\n\n except p4.BadAgentException:\n logging.error(\"Bad Agent. Irrecoverable error. Terminating...\")\n raise SystemExit()\n\n except p4.BadMapException:\n logging.error(\"Bad Map. Irrecoverable error. Terminating...\")\n raise SystemExit()\n\n except:\n logging.error(\"Irrecoverable error. Terminating...\")\n logging.error(\"Trace-back: \\n {}\".format(traceback.format_exc()))\n raise SystemExit()\n\n if self.cfg.get(\"GUI\"):\n self.initGui()\n else:\n self.search()",
"def __init__(self, ea_optimizer, is_chief, task_index):\n self._ea_optimizer = ea_optimizer\n self._is_chief = is_chief\n self._task_index = task_index",
"def __init__(self, random_org_parameters, composition_space, constraints):\n\n self.name = 'random organism creator'\n\n # defaults\n #\n # number of random organisms to make (only used for epa searches)\n self.default_number = 28\n # max number of atoms\n if composition_space.objective_function == 'epa':\n # make sure we can sample cells with two formula units\n target_number = constraints.min_num_atoms + 6\n num_formulas = target_number/composition_space.endpoints[\n 0].num_atoms\n if num_formulas < 2:\n min_of_max = int(2*composition_space.endpoints[0].num_atoms)\n else:\n min_of_max = int(round(\n num_formulas)*composition_space.endpoints[0].num_atoms)\n else:\n min_of_max = constraints.min_num_atoms + 6\n self.default_max_num_atoms = min(min_of_max, constraints.max_num_atoms)\n # allow structure with compositions at the endpoints (for pd searches)\n self.default_allow_endpoints = True\n # volume scaling behavior\n # default volumes per atom of elemental ground state structures\n # computed from structures on materials project (materialsproject.org)\n self.all_default_vpas = {'H': 13.89, 'He': 15.79, 'Li': 20.12,\n 'Be': 7.94, 'B': 7.25, 'C': 10.58,\n 'N': 42.73, 'O': 13.46, 'F': 16.00,\n 'Ne': 19.93, 'Na': 37.12, 'Mg': 23.04,\n 'Al': 16.47, 'Si': 20.44, 'P': 23.93,\n 'S': 36.03, 'Cl': 34.90, 'Ar': 44.87,\n 'K': 73.51, 'Ca': 42.42, 'Sc': 24.64,\n 'Ti': 17.11, 'V': 13.41, 'Cr': 11.57,\n 'Mn': 11.04, 'Fe': 11.55, 'Co': 10.92,\n 'Ni': 10.79, 'Cu': 11.82, 'Zn': 15.56,\n 'Ga': 20.34, 'Ge': 23.92, 'As': 22.45,\n 'Se': 38.13, 'Br': 37.53, 'Kr': 65.09,\n 'Rb': 90.44, 'Sr': 54.88, 'Y': 32.85,\n 'Zr': 23.50, 'Nb': 18.31, 'Mo': 15.89,\n 'Tc': 14.59, 'Ru': 13.94, 'Rh': 14.25,\n 'Pd': 15.45, 'Ag': 18.00, 'Cd': 23.28,\n 'In': 27.56, 'Sn': 36.70, 'Sb': 31.78,\n 'Te': 35.03, 'I': 50.34, 'Xe': 83.51,\n 'Cs': 116.17, 'Ba': 63.64, 'Hf': 22.50,\n 'Ta': 18.25, 'W': 16.19, 'Re': 15.06,\n 'Os': 14.36, 'Ir': 14.55, 'Pt': 15.72,\n 'Au': 18.14, 'Hg': 31.45, 'Tl': 31.13,\n 'Pb': 32.30, 'Bi': 36.60, 'La': 37.15,\n 'Ce': 26.30, 'Pr': 36.47, 'Nd': 35.44,\n 'Pm': 34.58, 'Sm': 33.88, 'Eu': 46.28,\n 'Gd': 33.33, 'Tb': 32.09, 'Dy': 31.57,\n 'Ho': 31.45, 'Er': 30.90, 'Tm': 30.30,\n 'Yb': 40.45, 'Lu': 29.43, 'Ac': 45.52,\n 'Th': 32.03, 'Pa': 25.21, 'U': 19.98,\n 'Np': 18.43, 'Pu': 18.34}\n\n self.default_vpas = self.get_default_vpas(composition_space)\n\n # set to defaults\n if random_org_parameters in (None, 'default'):\n self.number = self.default_number\n self.max_num_atoms = self.default_max_num_atoms\n self.allow_endpoints = self.default_allow_endpoints\n self.vpas = self.default_vpas\n # parse the parameters and set to defaults if necessary\n else:\n # the number to make\n if 'number' not in random_org_parameters:\n self.number = self.default_number\n elif random_org_parameters['number'] in (None, 'default'):\n self.number = self.default_number\n else:\n self.number = random_org_parameters['number']\n\n # the max number of atoms\n if 'max_num_atoms' not in random_org_parameters:\n self.max_num_atoms = self.default_max_num_atoms\n elif random_org_parameters['max_num_atoms'] in (None, 'default'):\n self.max_num_atoms = self.default_max_num_atoms\n elif random_org_parameters['max_num_atoms'] > \\\n constraints.max_num_atoms:\n print('The value passed to the \"max_num_atoms\" keyword in the '\n 'InitialPopulation block may not exceed the value passed'\n ' to the \"max_num_atoms\" keyword in the Constraints '\n 'block.')\n print('Quitting...')\n quit()\n elif random_org_parameters['max_num_atoms'] < \\\n 
constraints.min_num_atoms:\n print('The value passed to the \"max_num_atoms\" keyword in the '\n 'InitialPopulation block may not be smaller than the '\n 'value passed to the \"min_num_atoms\" keyword in the '\n 'Constraints block.')\n print('Quitting...')\n quit()\n else:\n self.max_num_atoms = random_org_parameters['max_num_atoms']\n\n # allowing composition space endpoints (only used for pd searches)\n if 'allow_endpoints' not in random_org_parameters:\n self.allow_endpoints = self.default_allow_endpoints\n elif random_org_parameters['allow_endpoints'] in (None, 'default'):\n self.allow_endpoints = self.default_allow_endpoints\n else:\n self.allow_endpoints = random_org_parameters['allow_endpoints']\n\n # volume scaling\n self.vpas = self.default_vpas\n if 'volumes_per_atom' not in random_org_parameters:\n pass\n elif random_org_parameters['volumes_per_atom'] in (None,\n 'default'):\n pass\n else:\n # replace the specified volumes per atom with the given values\n for symbol in random_org_parameters['volumes_per_atom']:\n self.vpas[symbol] = random_org_parameters[\n 'volumes_per_atom'][symbol]\n\n self.num_made = 0 # number added to initial population\n self.is_successes_based = True # it's based on number added\n self.is_finished = False",
"def __init__(self, project=None):\n HyppopySolver.__init__(self, project)",
"def __init__(self, model_info, alg_config, **kwargs):\n import_config(globals(), alg_config)\n super().__init__(\n alg_name=kwargs.get(\"name\") or \"muzero\",\n model_info=model_info[\"actor\"],\n alg_config=alg_config,\n )\n # self.buff = ReplayBuffer(BUFFER_SIZE)\n self.buff = PrioritizedReplayBuffer(BUFFER_SIZE, alpha=1)\n self.discount = GAMMA\n self.unroll_step = UNROLL_STEP\n self.td_step = TD_STEP\n self.async_flag = False",
"def __init__(self):\n self.label = \"RAPID Tools\"\n self.alias = \"RAPIDTools\"\n\n # List of tool classes associated with this toolbox\n self.tools = [AddSPTFields,\n AutomaticRAPIDfileGenerator, \n CopyDataToServer,\n CreateNetworkConnectivityFile,\n CreateNetworkConnectivityFileNHDPlus,\n CreateMuskingumParameterFiles,\n CreateMuskingumKFile,\n CreateMuskingumKfacFile,\n CreateMuskingumXField, \n CreateMuskingumXFile, \n CreateRivIDGageFile, \n CreateSubsetFile,\n CreateWeightTableFromWRFGeogrid,\n CreateInflowFileFromWRFHydroRunoff,\n CreateWeightTableFromECMWFRunoff,\n CreateInflowFileFromECMWFRunoff,\n CreateWeightTableFromLDASRunoff,\n CreateWeightTableFrom2DLatLonRunoff,\n CreateDischargeTable,\n CreateDischargeMap,\n FlowlineToPoint,\n DEMtoStreamNetwork,\n PublishDischargeMap,\n StreamNetworktoRAPID,\n StreamNetworktoSPT,\n UpdateWeightTable,\n UpdateDischargeMap,\n ]",
"def __init__(self):\n self.robot = None\n self.humans = None\n self.global_time = None\n self.human_times = None\n # Simulation configuration\n self.config = None\n self.time_limit = None\n self.time_step = None\n self.end_on_collision = True\n self.side = None\n self.pixel_side = None\n self.closed = None\n self.goal_radius = None\n self.max_humans = None\n self.min_humans = None\n self.human_num_mode = None\n self.human_num = None\n self.perpetual = None\n self.rotate_path = None\n self.randomize_attributes = None\n self.square_width = None\n self.circle_radius = None\n # Reward function\n self.success_reward = None\n self.collision_penalty = None\n self.discomfort_dist = None\n self.discomfort_scale = None\n self.discomfort_penalty_factor = None\n self.group_discomfort_penalty = None\n self.time_penalty = None\n self.progress_reward = None\n self.initial_distance = None\n self.previous_distance = None\n # Internal environment configuration\n self.case_capacity = None\n self.case_size = None\n self.case_counter = None\n self.parallel = None\n self.max_tries = None\n self.train_val_sim = None\n self.test_sim = None\n # For visualization\n self.force_list = [\n \"desired_force\",\n \"social_force\",\n \"obstacle_force\",\n \"group_coherence_force\",\n \"group_repulsive_force\",\n \"group_gaze_force\",\n ] # TODO Configure this?\n self.forces = None\n self.states = None\n self.action_values = None\n self.attention_weights = None\n # For information return\n self.obs_history = np.array([])\n self.episode_info = dict()\n self.movie_file = \"\"\n\n self.scene_manager = None\n self.use_groups = None\n self.min_group_num = None\n self.max_group_num = None\n self.centralized_planning = None\n self.centralized_planner = None\n\n self.enable_intent = None\n self.intent_type = None\n\n self.obstacles = [] # xmin,xmax,ymin,ymax\n\n self.app = None",
"def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)",
"def __init__(self):\r\n self.label = \"ExportPRJ\"\r\n self.description = \"This tool takes an input WRF Geogrid file in NetCDF format\" + \\\r\n \" and uses the specified variable's projection parameters\" + \\\r\n \" to produce a projection file.\"\r\n self.canRunInBackground = True\r\n self.category = \"Utilities\""
] |
[
"0.6410015",
"0.6122314",
"0.6073041",
"0.6057213",
"0.59716004",
"0.5962218",
"0.5923332",
"0.59220415",
"0.58743334",
"0.5863776",
"0.58579",
"0.5841253",
"0.5841239",
"0.5840288",
"0.5816639",
"0.5808202",
"0.5787629",
"0.57743895",
"0.57699895",
"0.5768949",
"0.57636404",
"0.57378685",
"0.5696346",
"0.56746316",
"0.5670438",
"0.5660087",
"0.5647027",
"0.56396097",
"0.56315976",
"0.56280047"
] |
0.8615024
|
0
|
set_aproj(obj_aproj) Sets the AnimatLabModel object for the ProjectManager. obj_aproj: AnimatLabModel object that serves as the basis for simulations.
|
def set_aproj(self, obj_aproj):
if type(obj_aproj) == AnimatLabModel:
self.aproj = obj_aproj
else:
raise TypeError("obj_aproj must be an AnimatLabModel object!")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, projName, obj_aproj=None, obj_simRunner=None):\n \n self.projName = projName\n self.activityLog = {}\n self.errorLog = {}\n \n if (type(obj_aproj) == AnimatLabModel) or (obj_aproj is None):\n self.aproj = obj_aproj\n else:\n raise TypeError(\"obj_aproj must be an AnimatLabModel object!\")\n \n if (type(obj_simRunner) == AnimatLabSimulationRunner) or (obj_simRunner is None):\n self.simRunner = obj_simRunner\n else:\n raise TypeError(\"obj_simRunner must be an AnimatLabSimulationRunner object!\")",
"def set_model(self, pth, testModel=True):\n # make sure this is a valid path\n if not os.path.isdir(pth):\n assert False, f\"{pth} is not a valid directory\"\n\n self.simpath = pth\n\n # get MODFLOW 6 output file names\n fpth = os.path.join(pth, \"mfsim.nam\")\n mf6inp, mf6outp = get_mf6_files(fpth)\n self.outp = mf6outp\n\n # determine comparison model\n self.setup_comparison(pth, pth, testModel=testModel)\n # if self.mf6_regression:\n # self.action = \"mf6_regression\"\n # else:\n # self.action = get_mf6_comparison(pth)\n if self.action is not None:\n if \"mf6\" in self.action or \"mf6_regression\" in self.action:\n cinp, self.coutp = get_mf6_files(fpth)",
"def probinit(self, aaa, n_obj):\n # Set algorithm...\n if aaa == 'nsga':\n algo = nsga_II(m=0.05)\n else:\n algo = jde(memory=True)\n #algo = mde_pbx()\n #algo = de_1220()\n\n # ...and initialize problem with instance atributes\n prob = mga_1dsm(seq = self.FBseq,\n multi_objective = n_obj,\n dsm_dv_barrier = self.MAX_DV)\n\n prob.set_vinf((self.C3)**0.5)\n prob.set_tof(self.TOF[0], self.TOF[1])\n prob.set_entry_barrier(self.entry_barrier)\n prob.set_launch_window(self.EPOCHSTART, self.EPOCHEND)\n return prob, algo",
"def set_model(self, likelihood_model_instance):\n pass",
"def set_model(self, likelihood_model_instance):\n pass",
"def __init__(self, model = None, cso = None, fast_classification = True, paper = None):\n self.cso = cso #Stores the CSO Ontology\n self.paper = paper #Paper to analyse\n self.model = model #contains the cached model\n self.min_similarity = 0.90 #Initialises the min_similarity\n self.fast_classification = fast_classification # if will use the full model or not\n self.explanation = dict()",
"def setUp(cls):\n arkane = Arkane()\n job_list = arkane.load_input_file(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'data', 'methoxy.py'))\n pdepjob = job_list[-1]\n cls.kineticsjob = job_list[0]\n pdepjob.active_j_rotor = True\n network = pdepjob.network\n cls.Nisom = len(network.isomers)\n cls.Nreac = len(network.reactants)\n cls.Nprod = len(network.products)\n cls.Npath = len(network.path_reactions)\n cls.PathReaction2 = network.path_reactions[2]\n cls.TminValue = pdepjob.Tmin.value\n cls.Tmaxvalue = pdepjob.Tmax.value\n cls.TmaxUnits = pdepjob.Tmax.units\n cls.TlistValue = pdepjob.Tlist.value\n cls.PminValue = pdepjob.Pmin.value\n cls.Pcount = pdepjob.Pcount\n cls.Tcount = pdepjob.Tcount\n cls.GenTlist = pdepjob.generate_T_list()\n cls.PlistValue = pdepjob.Plist.value\n cls.maximum_grain_size_value = pdepjob.maximum_grain_size.value\n cls.method = pdepjob.method\n cls.rmgmode = pdepjob.rmgmode",
"def SetObject(self, obj):\n return _gmat_py.EphemManager_SetObject(self, obj)",
"def __init__(self, api_config):\n AbstractOptimizer.__init__(self, api_config)\n \n api_space = BoEI.api_manipulator(api_config) # used for GPyOpt initialization\n\n self.space_x = JointSpace(api_config) # used for warping & unwarping of new suggestions & observations\n\n self.hasCat, self.cat_vec = BoEI.is_cat(api_config)\n \n self.dim = len(self.space_x.get_bounds())\n\n self.objective = GPyOpt.core.task.SingleObjective(None)\n\n self.space = GPyOpt.Design_space(api_space)\n \n self.model = GPyOpt.models.GPModel(optimize_restarts=5,verbose=False)\n \n self.aquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(self.space)\n \n \n self.aquisition = AcquisitionEI(self.model, self.space, optimizer=self.aquisition_optimizer, cost_withGradients=None)\n \n self.batch_size = None",
"def objective(self, objective):\n\n self._objective = objective",
"def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)",
"def set_Model(newModel):\n global model\n model = newModel\n print(\"model is set\")\n print(model)",
"def opt_settings(self, objective=None,\n pipe_model=None, allow_flow_reversal=None):\n if objective is not None: # TODO Do we need this to be defined at the top level of modesto?\n self.set_objective(objective)\n if pipe_model is not None:\n self.pipe_model = pipe_model\n if allow_flow_reversal is not None:\n self.allow_flow_reversal = allow_flow_reversal",
"def set_model(self):\n self.model = self.get_model()",
"def __init__(self, model_info, alg_config, **kwargs):\n import_config(globals(), alg_config)\n super().__init__(\n alg_name=kwargs.get(\"name\") or \"muzero\",\n model_info=model_info[\"actor\"],\n alg_config=alg_config,\n )\n # self.buff = ReplayBuffer(BUFFER_SIZE)\n self.buff = PrioritizedReplayBuffer(BUFFER_SIZE, alpha=1)\n self.discount = GAMMA\n self.unroll_step = UNROLL_STEP\n self.td_step = TD_STEP\n self.async_flag = False",
"def set_by_gui(self):\n\n # Use the GetFromGui class (below):\n user_choice = GetFromGui(None, -1, 'Params')\n # success is achieved if the user presses 'done': \n if user_choice.success: \n user_params = {\n \"subject\" : user_choice.subject,\n \"texture_dur\" : float(user_choice.SOA)/1000.,\n \"demo\": user_choice.demo,\n }\n else:\n user_choice.Destroy()\n raise ValueError(\"Program stopped by user\")\n # Stop execution of the window\n user_choice.Destroy()\n \n for k in user_params.keys():\n self.__setattr__(k,user_params[k])",
"async def gpt2_set_model(self, ctx, *, arg=None):\n print('Command gpt2_set_model triggered')\n if arg:\n if arg in VALID_DEFAULT_MODELS:\n self.update_config(model_name=arg)\n else:\n await ctx.send(f\"ERROR: Invalid model name {arg}\")\n else:\n await ctx.send(\"ERROR: Argument required\")",
"def __init__(self, mode, path):\n\n\t\tmodel = load_model('data/model.h5') \n\n\t\tif mode == \"test\":\n\n\t\t\tX_test, Y_test = self._load_dataset(path)\n\t\t\tpreds = model.evaluate(X_test, Y_test)\n\t\t\tprint (\"Loss = \" + str(preds[0]))\n\t\t\tprint (\"Test Accuracy = \" + str(preds[1]))\n\n\n\t\telif mode == \"predict\":\t\t\t\n\t\t\t\n\t\t\tlabel_dict = {'airplane':0, 'automobile':1, 'bird':2, 'cat':3, 'deer':4,\n\t\t\t'dog':5, 'frog':6, 'horse':7, 'ship':8, 'truck':9}\n\n\t\t\timg = image.load_img(path, target_size=(64, 64))\n\t\t\tx = image.img_to_array(img)\n\t\t\tx = np.reshape(x, (1,64,64,3))\n\t\t\ttemp_pred = model.predict(x)\n\t\t\tidx = np.argmax(temp_pred)\n\t\t\t\n\t\t\tprint(\"The object detected in the picture is a(n) : \" + \n\t\t\t\tlist(label_dict.keys())[list(label_dict.values()).index(idx)])",
"def __init__(self, ea_optimizer, is_chief, task_index):\n self._ea_optimizer = ea_optimizer\n self._is_chief = is_chief\n self._task_index = task_index",
"def set_objective_fn(self, objective_fn):\n self.objective_fn = objective_fn",
"def set_simRunner(self, obj_simRunner):\n \n if type(obj_simRunner) == AnimatLabSimulationRunner:\n self.simRunner = obj_simRunner\n else:\n raise TypeError(\"obj_simRunner must be an AnimatLabSimulationRunner object!\")",
"def set_best_model(self):\n if (self.metric == 'bic'):\n self.best_gmm = self.best_gmm_bic\n elif(self.metric == 'aic'):\n self.best_gmm = self.best_gmm_aic",
"def SetActiveObject(self):",
"def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n #modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo",
"def set_model(self, model):\n\n # attach the model to the object\n self._likelihood_model = model\n\n # the position for the point source is freed\n for key in self._likelihood_model.point_sources.keys():\n self._likelihood_model.point_sources[key].position.ra.free = True\n self._likelihood_model.point_sources[key].position.dec.free = True\n\n # set proper priors for the coordinates\n self._likelihood_model.point_sources[key].position.ra.prior = Uniform_prior(lower_bound=0., upper_bound=360)\n self._likelihood_model.point_sources[key].position.dec.prior = Cosine_Prior(lower_bound=-90., upper_bound=90)",
"def set_permmodel(dat, zonelist, index, permmodel_dict):\n perm_mod = fdata.fmodel('permmodel', index=index,\n zonelist=zonelist)\n # Set required permeability\n for key, value in permmodel_dict.iteritems():\n perm_mod.param[key] = value\n dat.add(perm_mod)\n return dat",
"def init_process(mech):\n gases[mech] = ct.Solution(mech)\n gases[mech].transport_model = 'Multi'",
"def setAnnotation(self, *args):\n return _libsbml.Model_setAnnotation(self, *args)",
"def setPredictor(self, predictor, id):\n\n def getoutvar(predictors):\n \"\"\"return outcome variable, if consistent among predictors, else None\"\"\"\n if not len(predictors):\n return None\n ov = predictors[0].classVar\n for predictor in predictors[1:]:\n if ov != predictor.classVar:\n self.warning(0, \"Mismatch in class variable (e.g., predictors %s and %s)\" % (predictors[0].name, predictor.name))\n return None\n return ov\n\n # remove the classifier with id, if empty\n if not predictor:\n if self.predictors.has_key(id):\n del self.predictors[id]\n if len(self.predictors) == 0:\n self.clear()\n else:\n self.predictorlabel = \"%d\" % len(self.predictors)\n return\n\n # set the classifier\n self.predictors[id] = predictor\n self.predictorlabel = \"%d\" % len(self.predictors)\n\n # set the outcome variable\n ov = getoutvar(self.predictors.values())\n if len(self.predictors) and not ov:\n self.tasklabel = \"N/A (type mismatch)\"\n self.classes = []\n self.selectedClasses = []\n self.clear()\n self.outvar = None\n return\n self.warning(0) # clear all warnings\n\n if ov != self.outvar:\n self.outvar = ov\n # regression or classification?\n if self.outvar.varType == orange.VarTypes.Continuous:\n self.copt.hide();\n self.tasklabel = \"Regression\"\n else:\n self.copt.show()\n self.classes = [str(v) for v in self.outvar.values]\n self.selectedClasses = []\n self.tasklabel = \"Classification\"\n\n if self.data:\n self.setTable()\n self.table.show()\n self.checksendpredictions()\n self.checkenable()",
"def __init__(self, env, args):\n super(Agent_PG,self).__init__(env)\n\n ##################\n # YOUR CODE HERE #\n ##################\n self.print_every = args.print_every\n self.n_episode = args.episode\n self.gamma = args.gamma\n self.episode_len = args.episode_len\n self.update_every = args.update_every\n self.var_reduce = args.var_reduce\n self.gae = args.gae\n self.step_upd = args.step_upd\n self.max_step = args.step_train\n self.clip = args.clip\n\n if not self.gae:\n if args.cnn:\n self.model = Model2()\n else:\n self.model = Model()\n else:\n if args.cnn:\n self.model = ModelGAE2()\n else:\n self.model = ModelGAE()\n\n print(self.model)\n\n if args.cnn:\n self.opt = optim.RMSprop(self.model.parameters(), lr=args.learning_rate, weight_decay=0.99)\n else:\n self.opt = optim.Adam(self.model.parameters(), lr=args.learning_rate)\n\n self.state = np.zeros((1, 80, 80))\n self.log_probs = []\n self.rewards = []\n if self.gae:\n self.values = []\n self.entropies = []\n\n self.model_fn = args.model\n if self.model_fn == '':\n self.model_fn = 'agent_pg.pt'\n if args.test_pg:\n self.model_fn = 'pg.baseline.pt'\n\n if args.test_pg:\n #you can load your model here\n print('loading trained model :%s.' % self.model_fn)\n state_dict = torch.load(self.model_fn, map_location=lambda storage, location: storage)\n self.model.load_state_dict(state_dict)\n if USE_CUDA:\n self.model.cuda()"
] |
[
"0.63675725",
"0.54510826",
"0.51495695",
"0.5112119",
"0.5112119",
"0.5082331",
"0.5074758",
"0.49699888",
"0.49410748",
"0.49406776",
"0.49163842",
"0.48847675",
"0.4861831",
"0.48597294",
"0.48274288",
"0.48030266",
"0.47882882",
"0.47839254",
"0.47794473",
"0.477288",
"0.47579643",
"0.47380498",
"0.47338495",
"0.47327426",
"0.47273058",
"0.47207633",
"0.4699933",
"0.46972215",
"0.46950915",
"0.46865004"
] |
0.8719895
|
0
|
set_simRunner(obj_simRunner) Sets the simRunner object for the ProjectManager. obj_simRunner: SimulationRunner object for organizing simulations.
|
def set_simRunner(self, obj_simRunner):
if type(obj_simRunner) == AnimatLabSimulationRunner:
self.simRunner = obj_simRunner
else:
raise TypeError("obj_simRunner must be an AnimatLabSimulationRunner object!")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setSimulation(self, simulation):\r\n raise NotImplementedError()",
"def run_simulator(self):\n\n self.update_settings()\n\n # Pass in the progress bar and the master so that the simulator can\n # update the progress bar and then refresh the screen when the progress\n # checkpoints are hit\n\n self.sim_results = self.sim.run(self.progress_bar, self.master)\n self.graph_results()",
"def setUpClass(cls):\n cls.sim1 = Simulation(logging_level=50)\n cls.sim1.set_simulation_parameters(\n seed=1, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim1.set_map_files(\n \"null\", fine_file=\"sample/SA_sample_fine.tif\", reproduction_map=\"sample/SA_sample_reproduction.tif\"\n )\n cls.sim1.run()\n cls.sim2 = Simulation(logging_level=50)\n cls.sim2.set_simulation_parameters(\n seed=1, task=48, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim2.set_map_files(\"null\", fine_file=\"sample/SA_sample_fine.tif\")\n cls.sim2.run()\n cls.sim3 = Simulation(logging_level=50)\n cls.sim3.set_simulation_parameters(\n seed=2, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim3.set_map_files(\n \"null\",\n fine_file=\"sample/SA_sample_fine.tif\",\n death_map=\"sample/SA_sample_reproduction.tif\",\n reproduction_map=\"sample/SA_sample_reproduction.tif\",\n )\n cls.sim3.run()\n cls.sim4 = Simulation(logging_level=50)\n cls.sim4.set_simulation_parameters(\n seed=4, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n cls.sim4.set_map_files(\"null\", fine_file=\"sample/SA_sample_coarse_pristine.tif\")\n cls.sim4.add_reproduction_map(reproduction_map=\"sample/SA_reproduction_coarse.tif\")\n cls.sim4.add_death_map(death_map=\"sample/SA_death.tif\")\n cls.sim4.add_dispersal_map(dispersal_map=\"sample/dispersal_fine2.tif\")\n cls.sim4.run()\n cls.coal1 = CoalescenceTree(cls.sim1)\n cls.coal2 = CoalescenceTree(cls.sim2)\n cls.coal3 = CoalescenceTree(cls.sim3)\n cls.coal4 = CoalescenceTree(cls.sim4)",
"def _addsimulation(\n self,\n sim: Simulation\n ):\n\n if type(sim) is not Simulation:\n raise ValueError(\"A non-simulation object can not be \"\n \"added to the cycle object as a simulation.\")\n\n if sim.model.compile_log is None:\n raise ValueError(\"Only simulations with compiled model objects \"\n \"can be added to an ensemble simulation.\")\n\n sim_copy = copy.deepcopy(sim)\n\n # Ensure that the jobs and scheduler are empty and None\n sim_copy.jobs = []\n sim_copy.scheduler = None\n\n self._simulation = sim_copy",
"def run_spec(self, run_spec):\n\n self._run_spec = run_spec",
"def set_run(self, run_id: str):\n self.run_id = run_id",
"def __init__(self,simulation_manager):\n self.simulation_manager = simulation_manager",
"def _run_simulator(self):\n os.chdir(self.test_cases_path)\n\n simulator_config_filename = self.simulator_config_filename\n script, options = runner.parse_commands(simulator_config_filename)\n\n if sys.platform.startswith('win'):\n subprocess.call([script] + options, shell=True)\n else:\n subprocess.call([script] + options)\n\n os.chdir(self.this_file_path)",
"def __init__(self, configs, simulator, wait_time=3):\n self.configs = configs\n self.sim = simulator.sim\n self.gripper = VREP_Gripper()\n self.open()",
"def choose_trial_to_run(self, trial_runner):\n\n raise NotImplementedError",
"def test_simulation(self):\n\t\tprint \"Simulation is being tested\"\n\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Debug Flag Set!\"\n\t\t\tprint self.getConfig()\n\n\t\tif toggles.PACKING:\n\t\t\ttoggles.OUTPUT_PATH = toggles.OUTPUT_PATH+toggles.RUN_NAME+'/'\n\t\t\tpackageMaker(toggles.OUTPUT_PATH,self.getConfig())\n\t\tif toggles.IDEAL_GRID:\n\t\t\tself.consensusGrid()\n\n\t\tif toggles.REAL_DATA:\n\t\t\tsampleData = self.load_data()\n\t\t\tif toggles.RUN_DATA_STATS:\n\t\t\t\tself.output_data_stats(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_AVERAGE_COST:\n\t\t\t\tself.sim_average_cost(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_SINGLE_PAIR:\n\t\t\t\tself.sim_single_pair_cost(sampleData, pending_eddy(self.pick_worker([0], [0])))\n\t\t\t\tself.reset_database()\n\t\telse:\n\t\t\tsampleData = {}\n\t\t\tsyn_load_data()\n\n\t\tif toggles.RUN_ITEM_ROUTING and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: item Routing\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif PRED_SCORE_COUNT and not (RUN_TASKS_COUNT or RUN_MULTI_ROUTING):\n\t\t\tif DEBUG_FLAG:\n\t\t\t\tprint \"Running: Pred Score count\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\n\n\t\tif toggles.COUNT_TICKETS and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: ticket counting\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif toggles.SELECTIVITY_GRAPH and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: selectivity amounts over time\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\tcorrectAnswers = self.get_correct_answers(toggles.INPUT_PATH + toggles.ITEM_TYPE + '_correct_answers.csv')\n\t\t\tpassedItems = self.get_passed_items(correctAnswers)\n\n\n\t\tif toggles.RUN_OPTIMAL_SIM:\n\t\t\tcountingArr=[]\n\t\t\tself.reset_database()\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running optimal_sim \" +str(i)\n\t\t\t\tself.num_tasks = self.optimal_sim(sampleData)\n\t\t\t\tcountingArr.append(self.num_tasks)\n\t\t\t\tself.reset_database()\n\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_optimal_tasks'\n\t\t\tgeneric_csv_write(dest+'.csv',[countingArr])\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \" + dest+'.csv'\n\n\n\n\t\tif toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING or toggles.RUN_CONSENSUS_COUNT:\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\t#print \"Running: task_count\"\n\t\t\t\t#f = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv', 'a')\n\t\t\t\t#f1 = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_incorrect_count.csv', 'a')\n\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\toutputArray = []\n\n\t\t\trunTasksArray = []\n\t\t\tgoodArray, badArray = [], []\n\t\t\tgoodPoints, badPoints = [], []\n\t\t\taccCount = []\n\t\t\tlocArray = [[],[],[],[]]\n\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running simulation \" + str(i+1)\n\t\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\t\trunTasksArray.append(self.num_tasks)\n\n\t\t\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\t\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\t\t\tnum_incorrect = 
self.final_item_mismatch(passedItems)\n\t\t\t\t\taccCount.append(num_incorrect)\n\t\t\t\tif toggles.RUN_CONSENSUS_COUNT or toggles.VOTE_GRID:\n\t\t\t\t\tdonePairs = IP_Pair.objects.filter(Q(num_no__gt=0)|Q(num_yes__gt=0))\n\t\t\t\t\tif toggles.TEST_ACCURACY:\n\t\t\t\t\t\tgoodPairs, badPairs = [], []\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tval = bool((pair.num_yes-pair.num_no)>0)\n\t\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\t\tcorrect = ((correctAnswers[(pair.item,pair.predicate)]) == val)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcorrect = (pair.true_answer == val)\n\t\t\t\t\t\t\tif correct:\n\t\t\t\t\t\t\t\tgoodArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbadArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tbadPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tgoodArray.append(pair.num_no + pair.num_yes)\n\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\n\t\t\t\t\t#print \"This is number of incorrect items: \", num_incorrect\n\n\t\t\t\tself.reset_database()\n\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\tgeneric_csv_write(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_tasks_count.csv',[runTasksArray])\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv'\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tif len(runTasksArray)>1:\n\t\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.png'\n\t\t\t\t\t\ttitle = toggles.RUN_NAME + ' Cost distribution'\n\t\t\t\t\t\thist_gen(runTasksArray, dest, labels = ('Cost','Frequency'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + dest\n\t\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"only ran one sim, not running hist_gen\"\n\n\t\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_Eddy_sys_' + str(toggles.EDDY_SYS) + '_multi_routing.png'\n\t\t\t\t\ttitle = toggles.RUN_NAME + ' Average Predicate Routing'\n\t\t\t\t\tquestions = toggles.CHOSEN_PREDS\n\t\t\t\t\tarrayData = []\n\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\tarrayData.append([])\n\t\t\t\t\tfor routingL in ROUTING_ARRAY:\n\t\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\t\tarrayData[i].append(routingL[i])\n\t\t\t\t\tmrsavefile = open(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv','w')\n\t\t\t\t\tmrwriter = csv.writer(mrsavefile)\n\t\t\t\t\tmrwriter.writerow(questions)\n\t\t\t\t\tfor row in arrayData:\n\t\t\t\t\t\tmrwriter.writerow(row)\n\t\t\t\t\tmrsavefile.close()\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \"+toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\tstats_bar_graph_gen(arrayData, questions, dest, labels = ('Predicate','# of Items Routed'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.png'\n\t\t\tif toggles.ACCURACY_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_acc_count'\n\t\t\t\tgeneric_csv_write(dest+'.csv',[accCount])\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\thist_gen(accCount, dest+'.png')\n\n\t\t\tif toggles.RUN_CONSENSUS_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_consensus_count'\n\t\t\t\tif len(goodArray)>1:\n\t\t\t\t\tif len(badArray) == 
0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray])\n\t\t\t\t\t\t#print goodArray\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray,badArray])\n\t\t\t\t\t\t#print goodArray,badArray\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \" + dest + '.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = 'Normalized Distribution of Tasks before Consensus'\n\t\t\t\t\t\tlabels = ('Number of Tasks', 'Frequency')\n\t\t\t\t\t\tif len(badArray) < 2:\n\t\t\t\t\t\t\thist_gen(goodArray, dest+'.png',labels=labels,title=title)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleg = ('Correctly Evaluated IP pairs','Incorrectly Evaluated IP pairs')\n\t\t\t\t\t\t\tmulti_hist_gen([goodArray,badArray],leg,dest+'.png',labels=labels,title=title)\n\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"only ran one sim, ignoring results\"\n\t\t\tif toggles.VOTE_GRID:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_vote_grid'\n\t\t\t\tif len(goodPoints)>1:\n\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',goodPoints)\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'_good.csv',goodPoints)\n\t\t\t\t\t\tgeneric_csv_write(dest+'_bad.csv',badPoints)\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = \"Vote Grid Graph\"\n\t\t\t\t\t\tlabels = (\"Number of No Votes\",\"Number of Yes Votes\")\n\t\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\t\txL,yL=zip(*goodPoints)\n\t\t\t\t\t\t\tline_graph_gen(xL,yL,dest+'.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tgX,gY = zip(*goodPoints)\n\t\t\t\t\t\t\tbX,bY = zip(*badPoints)\n\t\t\t\t\t\t\tmulti_line_graph_gen((gX,bX),(gY,bY),('Correct','Incorrect'),dest+'_both.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(gX,gY,dest+'_good.png',title=title+\" goodPoints\",labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(bX,bY,dest+'_bad.png',title=title+\" badPoints\",labels=labels,scatter=True,square=True)\n\t\tif toggles.TIME_SIMS:\n\t\t\tself.timeRun(sampleData)\n\n\t\tif toggles.RUN_ABSTRACT_SIM:\n\t\t\tself.abstract_sim(sampleData, toggles.ABSTRACT_VARIABLE, toggles.ABSTRACT_VALUES)",
"def __init__(self, simulator):\r\n self.initialize(simulator)",
"def _register_simulator(cls, simulator_factory):\n if not callable(simulator_factory):\n raise ValueError(\"Expected a callable for simulator_factory\")\n cls._simulator_factory = simulator_factory",
"def update(self, work_dir: Path, tb_name: str, sim: Optional[SimAccess] = None) -> None:\n if work_dir is not None:\n self._work_dir = work_dir.resolve()\n self._work_dir.mkdir(parents=True, exist_ok=True)\n if tb_name:\n self._tb_name = tb_name\n if sim is not None:\n self._sim = sim",
"def start_simulation(self):\n regime_name = str(self.regime_list.item(self._current_regime_index).text())\n self.statusLabel.setText(u\"simulating {}\".format(regime_name))\n self._logger.info(u\"Simulating: {}\".format(regime_name))\n\n self.actSimulate.setDisabled(True)\n self.shortRunSimulation.setEnabled(False)\n self.shortRunRegimeBatch.setEnabled(False)\n self.actExecuteRegimes.setDisabled(True)\n self.guiProgress = QtGui.QProgressBar(self)\n self.sim.simulationProgressChanged.connect(self.guiProgress.setValue)\n self.statusBar().addWidget(self.guiProgress)\n self.runSimulation.emit()",
"def __init__(self, runManager,runs):\n self.runs = runs\n self.runManager = runManager",
"def runAnalyticalSim(self, sim_rounds = 10**7, factor=\"mu\"):\n # create simulation agents\n M = self.getPopulationSize()\n N = self.getSampleSize()\n\n available_strategies = self.getAvailableStrategies()\n sim_agents = [Agent(available_strategies) for i in range(M)]\n tot_count = [0 for strategy in available_strategies]\n\n # count strategies in current population\n strat_count = [0 for strategy in available_strategies]\n for sim_agent in sim_agents:\n strat_count[available_strategies.index(sim_agent.getStrategy())] += 1\n\n # repeat 10 million times\n for i in range(sim_rounds):\n\n # handle each agent\n for focal_player in sim_agents:\n\n # update frequencies for avg payoffs\n self.clearFrequencies()\n for i, strategy in enumerate(available_strategies):\n self.setFrequency(strategy, strat_count[i])\n\n # option 1: random switch strategy\n mu_proba = np.random.random()\n if mu_proba <= self.getExplorationRate():\n strat_count[available_strategies.index(focal_player.getStrategy())] -= 1\n focal_player.switchToOtherAvailableStrategy()\n strat_count[available_strategies.index(focal_player.getStrategy())] += 1\n\n # option 2: choose model to (maybe) imitate\n else:\n # select model player\n model_player_index = np.random.randint(0, M-1)\n while model_player_index == sim_agents.index(focal_player):\n model_player_index = np.random.randint(0, M-1)\n model_player = sim_agents[model_player_index]\n\n # define imitation outcome\n proba_copy = self.Fermi(self.getPayoff(model_player.getStrategy()), self.getPayoff(focal_player.getStrategy()))\n proba_event = np.random.random()\n if proba_event <= proba_copy:\n strat_count[available_strategies.index(focal_player.getStrategy())] -= 1\n focal_player.setStrategy(model_player.getStrategy())\n strat_count[available_strategies.index(focal_player.getStrategy())] += 1\n\n # remember population strategies\n for i in range(len(tot_count)):\n tot_count[i] += strat_count[i]\n\n # obtain final frequency\n for i in range(len(strat_count)):\n strat_count[i] = strat_count[i] / M\n\n # obtain total frequency\n for i, strategy in enumerate(available_strategies):\n tot_count[i] = tot_count[i] / (sim_rounds * M)\n\n # export to file: strat_count (enables comparison of both results)\n self.saveResults(tot_count, \"{}\".format(self.getCase()), factor)",
"def _setup_simulation(self\n ) -> None:\n pass",
"def runSim(self):\n self.simKillResults = {}\n self.simHitResults = {}\n if self.fromArmy == False:\n self.attackingSquad = copy.deepcopy(squad.Squads[self.attackingSpin.get()])\n for num in range(eval(self.simulationSpin.get())):\n defSquad = copy.deepcopy(squad.DefSquads[self.defendingSpin.get()])\n result = self.attackingSquad.squadFire(defSquad)\n if result[0] not in self.simHitResults:\n self.simHitResults[result[0]] = 0\n self.simHitResults[result[0]] += 1\n if result[1] not in self.simKillResults:\n self.simKillResults[result[1]] = 0\n self.simKillResults[result[1]] += 1\n self.simResultsFrame = Frame(self.__mainWindow, padx=15, pady=15)\n self.simResultsFrame.grid(row=2,column=0,sticky=\"nsew\")\n self.hitResultsFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.hitResultsFrame.grid(row=0, column=0,sticky=\"nsew\")\n self.killResultsFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.killResultsFrame.grid(row=0, column=1,sticky=\"nsew\")\n self.maxPosFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.maxPosFrame.grid(row=1, sticky=\"nsew\")\n numHitPoss = 0\n numWoundsPoss = 0\n if isinstance(self.attackingSquad, squad.Squad):\n for unit in self.attackingSquad.units:\n numHitPoss += eval(unit.ranged_weapon.attacks)\n else:\n for i in range(self.attackingSquad.current_size):\n for weapon in self.attackingSquad.ranged_weapons:\n numHitPoss += eval(weapon.attacks)\n for unit in squad.DefSquads[self.defendingSpin.get()].units:\n numWoundsPoss += unit.wounds\n rf = 1\n Label(self.hitResultsFrame, text=\"{} hits possible\".format(min(numWoundsPoss,numHitPoss)), font=__item_format__).grid(row=0)\n for hit in self.simHitResults:\n percent = self.simHitResults[hit]/eval(self.simulationSpin.get())*100\n t = \"{} hits: {:6.2f}%\".format(hit, percent)\n Label(self.hitResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1\n Label(self.killResultsFrame, text=\"{} kills possible\".format(defSquad.current_size), font=__item_format__).grid(row=0)\n for kill in self.simKillResults:\n percent = self.simKillResults[kill]/eval(self.simulationSpin.get())*100\n t = \"{} kills: {:6.2f}%\".format(kill, percent)\n Label(self.killResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1",
"def _simulate(ctx, gui):\n ctx.env['SFFUnits'] = load_SFFUnits(ctx)\n\n \"\"\"\n Creates the directory path and nodes in the build directory.\n Creates a taskgen from each other library in units_hdl\n \"\"\"\n\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n\n for u in top.synu_deps + top.simu_deps:\n lib = u.script.parent.get_bld().make_node('work_vlib')\n lib.mkdir()\n u.b['vlib'] = lib\n\n if u.use('use'):\n tsk = ModelsimTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n includes=u.use('includes'),\n after=u.use('use'),\n output=lib,\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n else:\n tsk = ModelsimTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n output=lib,\n includes=u.use('includes'),\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n\n\n \"\"\"\n Create the testbench taskgen last as it is always at the top dep\n \"\"\"\n ctx.add_group()\n tb_lib = top.script.parent.get_bld().make_node('work_vlib')\n tb_lib.mkdir()\n top.b['tbvlib'] = tb_lib\n\n tsk = ModelsimTask(\n name=top.use('tb'),\n target=tb_lib,\n source=top.use('tb_src'),\n output=tb_lib,\n includes=top.use('tb_includes'),\n after=ctx.env.top_level,\n scan=SFF_verilog_scan,\n env=ctx.env )\n ctx.add_to_group(tsk)\n ctx.add_group()\n\n \"\"\"\n Run the Modelsim command with gui options provided.\n \"\"\"\n ##Run vsim\n ctx(name='vsim',\n rule='vsim %s -lib %s %s' % (gui,top.b['tbvlib'], top.use('tb')[0]),\n always = True)",
"def startSimulation(self):\n self.saveParameters()\n self.simulation.main()",
"def get_simulator(self) -> Game:\n return self.__sim",
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def simulate(self, x_obj_f=1.5, y_obj_f=0,\n theta_init=-np.pi / 4, theta_final=-3 * np.pi / 4,\n t_sim=5, kappa_c=5, kappa_s=0.2,\n gif_name=None, gif_fps=3, plot_fps=3):\n # Initialize parameters\n obj_coords = self._sim_init(\n x_obj_f, y_obj_f, theta_init, theta_final, t_sim,\n kappa_c, kappa_s, gif_name, gif_fps, plot_fps\n )\n\n # Main simulation\n for t_step in range(int(self._t_sim / self._dt) + 1):\n\n # Object motion\n obj_coords = self._object_step(obj_coords)\n\n # Robot Kinematic Equation\n self._compute_jacobian()\n\n # Forward kinematics\n self._forward_kinematics_step(t_step)\n\n # Update q considering both control subtasks\n self._q += (\n self._visual_servo_control(t_step, obj_coords)\n + self._relative_angle_control(obj_coords)\n )\n\n # Save Plot Data\n self._obj_coords_plot[t_step] = obj_coords\n\n # Plot simulation\n config_plots = self._plot_arm()\n self._plot_camera_view()\n if self._gif_name is not None:\n mimsave(gif_name, config_plots, fps=self._gif_fps)\n return self",
"def set_execution_target(backend_id='qasm_simulator',\n provider_module_name=None, provider_name=None, provider_backend=None,\n hub=None, group=None, project=None):\n global backend\n authentication_error_msg = \"No credentials for {0} backend found. Using the simulator instead.\"\n \n # if a custom provider backend is given, use it ...\n if provider_backend != None:\n backend = provider_backend\n \n # handle QASM simulator specially\n elif backend_id == 'qasm_simulator':\n backend = Aer.get_backend(\"qasm_simulator\") \n \n # otherwise use the given backend_id to find the backend\n else:\n if provider_module_name and provider_name:\n # if provider_module and provider_name is provided, assume a custom provider\n provider = getattr(importlib.import_module(provider_module_name), provider_name)\n try:\n # not all custom providers have the .stored_account() method\n provider.load_account()\n backend = provider.get_backend(backend_id)\n except:\n print(authentication_error_msg.format(provider_name))\n else:\n # otherwise, assume IBMQ\n if IBMQ.stored_account():\n # load a stored account\n IBMQ.load_account()\n \n # then create backend from selected provider\n provider = IBMQ.get_provider(hub=hub, group=group, project=project)\n backend = provider.get_backend(backend_id)\n else:\n print(authentication_error_msg.format(\"IBMQ\"))\n\n # create an informative device name\n device_name = backend_id\n metrics.set_plot_subtitle(f\"Device = {device_name}\")\n #metrics.set_properties( { \"api\":\"qiskit\", \"backend_id\":backend_id } )",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n raise NotImplementedError",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n raise NotImplementedError",
"def run_simulator(scene, output_dir):\n with tempfile.TemporaryDirectory() as tmpdir:\n status = subprocess.run([\n SIMULATOR_BIN, '--no-cache', '--no-gui', '--no-initial-pause',\n '--output-dir', output_dir, scene\n ])",
"def __init__(\n\t\tself, executable_sim=None, directory_template=None,\n\t\tcolumn_name_gid=\"genome_ID\", column_name_ncbi=\"NCBI_ID\", column_name_source=\"source\", separator='\\t',\n\t\tfilename_prefix=\"simulated_\", keep_original=True,\n\t\tmax_processors=1, tmp_dir=None, logfile=None, verbose=True, debug=False, seed=None):\n\t\tsuper(StrainSimulationWrapper, self).__init__(logfile, verbose)\n\t\tassert isinstance(keep_original, bool)\n\t\tassert isinstance(separator, str)\n\t\tassert isinstance(column_name_gid, str)\n\t\tassert isinstance(column_name_ncbi, str)\n\t\tassert isinstance(column_name_source, str)\n\t\tassert isinstance(filename_prefix, str)\n\t\tassert isinstance(debug, bool)\n\n\t\tif tmp_dir is None:\n\t\t\ttmp_dir = tempfile.gettempdir()\n\n\t\tself._debug = debug\n\t\tif debug:\n\t\t\tself._logger.set_level(self._logger.DEBUG)\n\n\t\tif seed is not None:\n\t\t\trandom.seed(seed)\n\t\t\tnp_random.seed(abs(hash(seed)) % 4294967295) # numpy accepts only 32 bit integers\n\n\t\tassert isinstance(max_processors, int)\n\t\tself._max_processors = max_processors\n\n\t\tself._separator = separator\n\t\tself._column_name_gid = column_name_gid\n\t\tself._column_name_ncbi = column_name_ncbi\n\t\tself._column_name_source = column_name_source\n\t\tself._filename_prefix = filename_prefix\n\t\tself._keep_original = keep_original\n\t\tself._directory_template = directory_template\n\n\t\tdirectory_sgevolver = self.get_full_path(os.path.join(os.path.dirname(__file__), \"sgEvolver\"))\n\t\tself._executable_sim = executable_sim\n\t\tif self._executable_sim is None:\n\t\t\tself._executable_sim = os.path.join(directory_sgevolver, \"simujobrun.pl\")\n\t\tassert self.validate_file(self._executable_sim, executable=True)\n\n\t\tif self._directory_template is None:\n\t\t\tself._directory_template = self.get_full_path(os.path.join(os.path.dirname(__file__), \"sgEvolver\", \"simulation_dir\"))\n\t\tassert self.validate_dir(self._directory_template, file_names=[self._filename_tree, self._filename_parameter])\n\n\t\tself._tmp_dir = tmp_dir\n\t\tassert self.validate_dir(self._tmp_dir)\n\n\t\tself._directory_strain = self.get_full_path(os.path.join(self._tmp_dir, \"{gid}.strains\"))\n\t\tfile_path_template_newick_tree = os.path.join(self._directory_template, self._directory_template_filenames[1])\n\t\tself._filenames_strains = self.get_filenames_strains(file_path_template_newick_tree)\n\t\tassert len(self._filenames_strains) > 0",
"def run_sim(self):\n \n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n \n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n if OS.haveOcculter == True:\n self.currentSep = Obs.occulterSep\n \n # choose observing modes selected for detection (default marked with a flag)\n allModes = OS.observingModes\n det_modes = list(filter(lambda mode: 'imag' in mode['inst']['name'], allModes))\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = list(filter(lambda mode: 'spec' in mode['inst']['name'], allModes))\n if np.any(spectroModes):\n char_modes = spectroModes\n # if no spectro mode, default char mode is first observing mode\n else:\n char_modes = [allModes[0]]\n \n # begin Survey, and loop until mission is finished\n log_begin = 'OB%s: survey beginning.'%(TK.OBnumber + 1)\n self.logger.info(log_begin)\n self.vprint(log_begin)\n t0 = time.time()\n sInd = None\n ObsNum = 0\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n \n # acquire the NEXT TARGET star index and create DRM\n old_sInd = sInd #used to save sInd if returned sInd is None\n DRM, sInd, det_intTime, waitTime, det_mode = self.next_target(sInd, det_modes)\n \n if sInd is not None:\n ObsNum += 1\n\n if OS.haveOcculter == True:\n # advance to start of observation (add slew time for selected target)\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n \n # beginning of observation, start to populate DRM\n DRM['star_ind'] = sInd\n DRM['star_name'] = TL.Name[sInd]\n DRM['arrival_time'] = TK.currentTimeNorm.copy().to('day')\n DRM['OB_nb'] = TK.OBnumber\n DRM['ObsNum'] = ObsNum\n pInds = np.where(SU.plan2star == sInd)[0]\n DRM['plan_inds'] = pInds.astype(int)\n log_obs = (' Observation #%s, star ind %s (of %s) with %s planet(s), ' \\\n + 'mission time at Obs start: %s')%(ObsNum, sInd, TL.nStars, len(pInds), \n TK.currentTimeNorm.to('day').copy().round(2))\n self.logger.info(log_obs)\n self.vprint(log_obs)\n\n # PERFORM DETECTION and populate revisit list attribute\n DRM['det_info'] = []\n detected, det_fZ, det_systemParams, det_SNR, FA = \\\n self.observation_detection(sInd, det_intTime, det_mode)\n # update the occulter wet mass\n if OS.haveOcculter == True:\n DRM = self.update_occulter_mass(DRM, sInd, det_intTime, 'det')\n det_data = {}\n det_data['det_status'] = detected\n det_data['det_SNR'] = det_SNR\n det_data['det_fZ'] = det_fZ.to('1/arcsec2')\n det_data['det_params'] = det_systemParams\n det_data['det_mode'] = dict(det_mode)\n det_data['det_time'] = det_intTime.to('day')\n del det_data['det_mode']['inst'], det_data['det_mode']['syst']\n DRM['det_info'].append(det_data)\n\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n DRM['char_info'] = []\n if char_modes[0]['SNR'] not in [0, np.inf]:\n characterized, char_fZ, char_systemParams, char_SNR, char_intTime = \\\n self.observation_characterization(sInd, char_modes)\n else:\n char_intTime = None\n lenChar = len(pInds) + 1 if True in FA else len(pInds)\n characterized = np.zeros((lenChar,len(char_modes)), dtype=float)\n char_SNR = np.zeros((lenChar,len(char_modes)), dtype=float)\n char_fZ = np.array([0./u.arcsec**2, 0./u.arcsec**2])\n char_systemParams = SU.dump_system_params(sInd)\n\n for mode_index, char_mode in enumerate(char_modes):\n char_data = {}\n assert char_intTime != 0, \"Integration time can't be 0.\"\n # update the occulter wet mass\n if OS.haveOcculter == True and char_intTime is not None:\n 
char_data = self.update_occulter_mass(char_data, sInd, char_intTime, 'char')\n if np.any(characterized):\n vprint(' Char. results are: {}'.format(characterized[:-1, mode_index]))\n # populate the DRM with characterization results\n char_data['char_time'] = char_intTime.to('day') if char_intTime else 0.*u.day\n char_data['char_status'] = characterized[:-1, mode_index] if FA else characterized[:,mode_index]\n char_data['char_SNR'] = char_SNR[:-1, mode_index] if FA else char_SNR[:, mode_index]\n char_data['char_fZ'] = char_fZ[mode_index].to('1/arcsec2')\n char_data['char_params'] = char_systemParams\n # populate the DRM with FA results\n char_data['FA_det_status'] = int(FA)\n char_data['FA_char_status'] = characterized[-1, mode_index] if FA else 0\n char_data['FA_char_SNR'] = char_SNR[-1] if FA else 0.\n char_data['FA_char_fEZ'] = self.lastDetected[sInd,1][-1]/u.arcsec**2 \\\n if FA else 0./u.arcsec**2\n char_data['FA_char_dMag'] = self.lastDetected[sInd,2][-1] if FA else 0.\n char_data['FA_char_WA'] = self.lastDetected[sInd,3][-1]*u.arcsec \\\n if FA else 0.*u.arcsec\n \n # populate the DRM with observation modes\n char_data['char_mode'] = dict(char_mode)\n del char_data['char_mode']['inst'], char_data['char_mode']['syst']\n DRM['char_info'].append(char_data)\n \n DRM['exoplanetObsTime'] = TK.exoplanetObsTime.copy()\n\n # append result values to self.DRM\n self.DRM.append(DRM)\n \n else:#sInd == None\n sInd = old_sInd#Retain the last observed star\n if(TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]): # currentTime is at end of OB\n #Conditional Advance To Start of Next OB\n if not TK.mission_is_over(OS, Obs, det_mode):#as long as the mission is not over\n TK.advancetToStartOfNextOB()#Advance To Start of Next OB\n elif(waitTime is not None):\n #CASE 1: Advance specific wait time\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint('waitTime is not None')\n else:\n startTimes = TK.currentTimeAbs.copy() + np.zeros(TL.nStars)*u.d # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(TL,np.arange(TL.nStars),startTimes,self.koMap,self.koTimes,self.mode)[0]\n #CASE 2 If There are no observable targets for the rest of the mission\n if((observableTimes[(TK.missionFinishAbs.copy().value*u.d > observableTimes.value*u.d)*(observableTimes.value*u.d >= TK.currentTimeAbs.copy().value*u.d)].shape[0]) == 0):#Are there any stars coming out of keepout before end of mission\n self.vprint('No Observable Targets for Remainder of mission at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n #Manually advancing time to mission end\n TK.currentTimeNorm = TK.missionLife\n TK.currentTimeAbs = TK.missionFinishAbs\n else:#CASE 3 nominal wait time if at least 1 target is still in list and observable\n #TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[observableTimes.value*u.d > TK.currentTimeAbs.copy().value*u.d]\n inds2 = np.intersect1d(self.intTimeFilterInds, inds1) #apply intTime filter\n inds3 = self.revisitFilter(inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)) #apply revisit Filter #NOTE this means stars you added to the revisit list \n self.vprint(\"Filtering %d stars from advanceToAbsTime\"%(TL.nStars - len(inds3)))\n oTnowToEnd = observableTimes[inds3]\n if not oTnowToEnd.value.shape[0] == 0: #there is at least one observableTime between now and the end of the mission\n tAbs = np.min(oTnowToEnd)#advance to that observable time\n else:\n tAbs = TK.missionStart + TK.missionLife#advance to end of mission\n 
tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n success = TK.advanceToAbsTime(tAbs)#Advance Time to this time OR start of next OB following this time\n self.vprint('No Observable Targets a currentTimeNorm= %.2f Advanced To currentTimeNorm= %.2f'%(tmpcurrentTimeNorm.to('day').value, TK.currentTimeNorm.to('day').value))\n else:#TK.mission_is_over()\n dtsim = (time.time() - t0)*u.s\n log_end = \"Mission complete: no more time available.\\n\" \\\n + \"Simulation duration: %s.\\n\"%dtsim.astype('int') \\\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n self.logger.info(log_end)\n print(log_end)"
] |
[
"0.57825583",
"0.5381525",
"0.535339",
"0.5258198",
"0.51377577",
"0.5133689",
"0.50597525",
"0.50033647",
"0.49955642",
"0.49677426",
"0.49385235",
"0.48960555",
"0.48833895",
"0.48766336",
"0.48765796",
"0.48746789",
"0.48729342",
"0.48449382",
"0.48236495",
"0.4810407",
"0.4804181",
"0.4803524",
"0.47958007",
"0.479552",
"0.47713205",
"0.4764969",
"0.4764969",
"0.4748936",
"0.47428596",
"0.47391832"
] |
0.87587506
|
0
|
make_asims(obj_simSet) — obj_simSet: SimulationSet object used to generate parameter combinations. Description: Generates .asim files used as the basis for AnimatLab simulations, based on the parameter dictionary formatted by obj_simSet.
|
def make_asims(self, obj_simSet):
if type(obj_simSet) is not SimulationSet:
raise TypeError("obj_simSet must be a SimulationSet object!")
cols = ['FileName']
saveFiles = {}
        # Width of the zero-padded index appended to generated .asim file names
countLength = len(str(obj_simSet.get_size()))
# Instantiate a pool of CPUs to make .asim files
pool = multiprocessing.Pool()
# Assign cores in multiprocessing.Pool to generate .asim files
results = pool.map(saveAsimWrapper, [(pts, copy(self.aproj), self.simRunner.simFiles, ix, countLength, verbose) for ix, pts in enumerate(obj_simSet.samplePts)])
        # Close the pool; map has already collected all results, so workers can exit
pool.close()
# Iterate through the resulting .asim files and generate format dicts/lists
# for the csv log file
for result in results:
fileInfo, colInfo = result
fileKey = fileInfo.keys()[0]
saveFiles[fileKey] = fileInfo[fileKey]
for col in colInfo:
if col not in cols:
cols.append(col)
if verbose > 0:
print "WRITING LOG FILE..."
# Create the asims_log.csv log file for auditing purposes
f = open(os.path.join(self.simRunner.rootFolder, self.projName + '-asims_log.csv'), 'w')
f.write(self.simRunner.simFiles+'\n')
f.write(','.join(cols)+'\n')
for fName in sorted(saveFiles.keys()):
colTemplate = ['']*len(cols)
colTemplate[0] = fName
for key in saveFiles[fName]:
colTemplate[cols.index(key)] = str(saveFiles[fName][key])
f.write(','.join(colTemplate) + '\n')
f.close()
return True
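
# --- Editor's sketch: the module-level helper assumed by the pool.map call above.
# multiprocessing.Pool.map passes each work item as a single argument, so the
# tuple built in make_asims has to be unpacked before delegating to saveAsim
# (signature: saveAsim(samplePt, obj_animatLabModel, fldrSimFiles, ix, indexLen,
# verbose)). This is an assumption about the original wrapper, not its
# confirmed implementation.
def saveAsimWrapper(args):
    # Unpack the work item prepared in make_asims
    samplePt, obj_model, fldrSimFiles, ix, indexLen, verbose = args
    # saveAsim returns (fileInfo, colInfo), which make_asims consumes per result
    return saveAsim(samplePt, obj_model, fldrSimFiles, ix, indexLen, verbose)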
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def createArmySims(self):\n # create army sims for player\n for systemID in self.game.myArmies.keys():\n self.createPlayerArmySim(systemID)\n \n # create army sims representing other empires\n for systemID in self.game.otherArmies.keys():\n self.createOtherArmySim(systemID)",
"def make_sims(self):\n self.sims = [Simulation(conf=c) for c in self.sim_confs]",
"def createArmadaSims(self):\n # create armada sims for player\n for systemID in self.game.myArmadas.keys():\n self.createPlayerArmadaSim(systemID)\n \n # create armada sims representing other empires\n for systemID in self.game.otherArmadas.keys():\n self.createOtherArmadaSim(systemID)",
"def generate_modelSED_photo_fit(sp=None,sfh_form=4,filters=None,add_igm_absorption=0,igm_type=0,params_fsps=None,DL_Gpc=0.0,cosmo='flat_LCDM',\n\tH0=70.0,Om0=0.3,params_val=None,interp_filters_waves=[],interp_filters_trans=[]):\n\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\t# input model parameters to FSPS:\n\tfor pp in range(len(params_fsps)):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass\n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,age=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type == 0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type == 1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\tdust_mass = dust_mass0*norm0\n\n\t# filtering:\n\tphoto_SED_flux = filtering_interp_filters(redsh_wave,redsh_spec,interp_filters_waves,interp_filters_trans)\n\n\treturn photo_SED_flux",
"def makeSimObj(self, scat):\n\n dz = np.array(list(itertools.repeat(0, len(self.expData_sort[:, 0]))))\n self.simData = sasmodels.data.Data2D(x=self.expData_sort[:, 0],\n y=self.expData_sort[:, 1],\n z=scat,\n dz=dz)\n return",
"def asn_parameters(model='spk',**kwargs):\n\n pars = {'rhoe' : 6.5e-4,\n 'Ou' : 0.,\n 'Ku' : 100.,\n 'taue' : 1./60,\n 'Gtot' : 200., # MUST BE in [mM]\n 'Og' : 1.5,\n 'taug' : 30.,\n 'alpha': 0.5\n }\n pars = gu.merge_dicts(pars, gtrelease_parameters(),exocytosis_parameters())\n pars['ICs'] = np.asarray([0.,0.,0.05,0.99]) # [G_A,\\Gamma_S,c,h]\n pars['ICr'] = np.asarray([1,0.,0.,1.]) # [x_S,y_S,u_S,x_A]\n ## User-defined parameters\n pars = gu.varargin(pars, **kwargs)\n ## Takes only the first two elements of ICs in the MF model\n if model=='ave':\n pars['ICs'] = pars['ICs'][:2]\n if 'js' in kwargs:\n pars['js'] = kwargs['js']\n else:\n pars['js'] = pars['rhoe']*pars['Og']*1e3*pars['Gtot']*pars['taue']\n for k,item in pars.iteritems():\n if isscalar(item):\n pars[k] = float(item)\n else:\n pars[k] = array(item,dtype=float)\n # pars['Gtot'] *= 1e3 # Convert to [uM]\n return pars",
"def prepare_experiment(assumptions):\n print(\"\\nGenerate species parameters\")\n np.random.seed(assumptions['seed']) \n params = MakeParams(assumptions) \n if assumptions[\"selected_function\"] == \"f5_invader_suppression\":\n print(\"\\nDraw invader feature\")\n params = create_invader(params, assumptions)\n print(params[\"c\"])\n \n print(\"\\nDraw per-capita function and cost\")\n f1_species_smooth, f1_species_rugged, f2_species_smooth, f2_species_rugged = draw_species_function(assumptions)\n params.update({\"f1_species_smooth\": f1_species_smooth, \"f1_species_rugged\": f1_species_rugged, \"f2_species_smooth\": f2_species_smooth, \"f2_species_rugged\": f2_species_rugged})\n gi = draw_species_cost(f1_species_smooth, assumptions)\n params.update({\"g\": gi})\n \n print(\"\\nConstruct plate\")\n np.random.seed(assumptions['seed']) \n plate = make_plate(assumptions,params)\n \n print(\"\\nAdd community function to plate\")\n plate = add_community_function(plate, assumptions, params)\n \n if not pd.isnull(assumptions[\"overwrite_plate\"]) :\n print(\"\\nUpdating the initial plate composition by overwrite_plate\")\n plate = overwrite_plate(plate, assumptions)\n \n print(\"\\nPrepare Protocol\")\n #Extract Protocol from protocol database\n algorithms = make_algorithms(assumptions)\n params_algorithm = algorithms[algorithms['algorithm_name'] == assumptions['protocol']]\n \n #Params_simulation by default contains all assumptions not stored in params.\n params_simulation = dict((k, assumptions[k]) for k in assumptions.keys() if k not in params.keys())\n \n return params, params_simulation , params_algorithm, plate",
"def make_simulations(self):\n pass",
"def gen_spec(model_params):\n xarr11 = SpectroscopicAxis(np.linspace(-40,40,1000)*u.km/u.s,\n velocity_convention='radio',\n refX=freq_dict['oneone']).as_unit(u.GHz)\n xarr22 = SpectroscopicAxis(np.linspace(-40,40,1000)*u.km/u.s,\n velocity_convention='radio',\n refX=freq_dict['twotwo']).as_unit(u.GHz)\n xarr = SpectroscopicAxes([xarr11,xarr22])\n tkin = model_params[0]\n tex = model_params[1]\n ntot = model_params[2]\n width = model_params[3]\n fortho = 0.0\n synthspec = pyspeckit.spectrum.models.ammonia.cold_ammonia(xarr,tkin=tkin,tex=tex,ntot=ntot,\n width=width,fortho=fortho)\n spectrum = pyspeckit.Spectrum(xarr=xarr,data=synthspec)\n return spectrum",
"def prep_matlab(self):\n #allparams = self.__dict__ #NOTE: change to include just needed parameters\n #allparams.update(self.Set.__dict__)\n #print allparams\n # Quick Fix\n if not os.path.isdir(self.ProcDir): os.mkdir(self.ProcDir)\n if not os.path.isdir(self.OutDir): os.mkdir(self.OutDir)\n settings = {'DataDir':self.DataDir,\n 'ProcDir':self.ProcDir,\n 'ScriptDir':self.ScriptDir,\n 'OutDir':self.OutDir,\n 'AuxDir':self.AuxDir,\n 'Cothresh':self.Cothresh,\n 'Igthresh':self.Igthresh,\n 'Damping':self.Damping,\n 'Width':self.Set.Width,\n 'Length':self.Set.Length,\n 'Dates':'\\n'.join(self.Set.Dates.astype('S8')),\n 'DatesSerial':'\\n'.join(self.Set.DatesSerial.astype('S8')),\n 'TimeIntervals':'\\n'.join(self.Set.TimeIntervals.astype('S4')),\n 'TimeIndex':self.Set.TimeIndexString,\n 'Pairs':'\\n'.join(self.Set.PairsString),\n 'PairsSerial':'\\n'.join(self.Set.PairsSerialString),\n #'Names':'\\n'.join(self.Set.Names),\n #'Paths':'\\n'.join(self.Set.Names),\n 'ChronList':'\\n'.join(self.Set.ChronList),\n 'Omissions':'\\n'.join(self.Set.Omissions),\n 'Tandems':'\\n'.join(self.Set.Tandems)}\n\n fullpath = os.path.join(self.RunDir,'defaults.m')\n prerun = open(fullpath, 'w')\n prerun.write(\n\"\"\"\n%% Automatically created parameters file for RunTS.m\n%% created with roi_py.py\n%% =============================================================================\n%% Raw Data Directory\ndataDir = '{DataDir}';\n%% Masked/Tweaked Data Directory\nprocDir = '{ProcDir}';\n%% Output directory\noutDir = '{OutDir}';\n%% Scripts directory\nscriptDir = '{ScriptDir}';\n%% Auxilary files directory\nauxDir = '{AuxDir}';\n\n%% Coherence threshold (pixels with coherence less than 'maskthresh' will be\n%% marked as NaNs for scrapping or interpolation if desired.\nmaskThresh = {Cothresh};\n\n%% IGdensity threshold (pixels with # of non-interpolated data points less\n%% than IGthresh will be set to NaN in deformation_mod.m\nigThresh = {Igthresh};\n\n%% WDLS damping term in inversion_mod.m\ndamping = {Damping};\n\n%% Master scene dimensions\nwidth = {Width};\nleng = {Length};\n\n%% List of SAR acquisition dates for interferogram set\ndates = [{Dates}];\n\n%% SAR acquisition dates in python 'datetime' serial format\ndatesSerial = [{DatesSerial}];\n\n%% Number of days between consecutive SAR acquisitions\ndt = [{TimeIntervals}];\n\n%% Time Index\ntimeIndex = [{TimeIndex}];\n\n%% Interferogram master & slave dates\nigrams = [{Pairs}];\n\n%% Interferogram master & slave dates in serial format\nigramsSerial = [{PairsSerial}];\n\n%% Chronological list of interferogram file names used in matlab routines\nigramsList = [{ChronList}];\n\n%% User-specified ommissions\nomitList = [{Omissions}];\n\n%% Tandem pairs = [{Tandems}];\n\"\"\".format(**settings))\n prerun.close()\n print('Wrote %s, ready for RunTS.m' % fullpath)\n\n #pickle the omissions list for easy re-use later\n #NOTE: ultimately write this all in python and use input/output ascii files\n if hasattr(self,'Omissions'):\n pickle.dump(list(self.Omissions.keys()), os.path.join(self.RunDir,'omissions.p'))\n #to reload set10.omit(IG=pickle.load('omissions.p'))",
"def saveAsim(samplePt, obj_animatLabModel, fldrSimFiles, ix, indexLen=3, verbose=3):\n\n #cols = ['ERROR']\n cols = []\n \n basename = os.path.split(obj_animatLabModel.asimFile)[-1].split('.')[0]\n saveFile = {}\n\n # Generate new .asim file name\n filename = basename + '-' + str(ix+1).zfill(indexLen) + '.asim'\n \n # Iterate through each parameter in samplePt\n for ptVar in samplePt:\n # Add each parameter as a column heading for asims-log.csv file\n # This is for auditing purposes!\n if ptVar not in cols:\n cols.append(ptVar)\n \n # Find the AnimatLab element by name\n name, param = ptVar.split('.')\n node = obj_animatLabModel.getElementByName(name)\n \n print \"\\n\\n%s = %s >> %s\" % (ptVar, node.find(param).text, samplePt[ptVar])\n \n # Update the AnimatLab element value\n node.find(param).text = str(samplePt[ptVar])\n \n # Save the new .asim file!\n obj_animatLabModel.saveXML(fileName=os.path.join(fldrSimFiles, filename), overwrite=True) \n\n #samplePt[\"ERROR\"] = os.path.getsize(os.path.join(fldrSimFiles, filename))\n \n # Update the output dictionary for auditing purposes. See asims-log.csv file.\n saveFile[filename] = samplePt\n \n # Do some memory management...\n del obj_animatLabModel\n \n return (saveFile, cols)",
"def createSystemSims(self):\n # create systems\n import anwp.sims\n self.systemSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n empireDict = self.game.allEmpires[systemDict['myEmpireID']]\n imageFileName = '%s%s.png' % (self.game.app.simImagePath, systemDict['imageFile']) \n \n # create sim\n sim = SystemEntity(self, anwp.sims.categories.ClickableCategory(imageFileName,'system'), systemDict, empireDict)\n \n # add sim to world\n self.systemSims.append(sim)\n x = systemDict['x']\n y = systemDict['y']\n facing = 0\n speed = 0\n sim.turnRate = 0\n self.world.addToWorld(sim, x, y, facing, speed)",
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def make_phys():\n for rn in dcm_dict.keys():\n # PPG\n if not dcm_dict[rn]['ppg_file'] == 'File missing':\n # Files\n ppg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ppg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['ppg_file'],ppg_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 100.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(ppg_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # Respiration\n if not dcm_dict[rn]['resp_file'] == 'File missing':\n # Files\n resp_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.tsv.gz')\n resp_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 25.0\n data['StartTime'] = -30.0\n data['Columns'] = 'respiratory'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # ECG\n # What to do if they have PPG and ECG?\n if not dcm_dict[rn]['ecg_file'] == 'File missing':\n # Files\n ecg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ecg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 1000.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)",
"def make_all(self):\n # General matrices #\n self.tsv_seq_to_concepts()\n self.tsv_seq_to_names()\n self.list_sequence_concept()\n # Only in the with 'samples' case #\n if self.a.abundances: self.tsv_samples_to_names()\n if self.a.abundances: self.biom_output()\n # Graphical outputs #\n self.per_seq_dot_files()\n if self.a.abundances: self.per_sample_dot_files()",
"def _write_gener(parameters):\n from ._common import generators\n\n # Handle multicomponent generators\n generator_data = []\n keys = [key for key in generators.keys() if key != \"type\"]\n for k, v in parameters[\"generators\"].items():\n # Load data\n data = deepcopy(generators)\n data.update(v)\n\n # Check that data are consistent\n if not isinstance(data[\"type\"], str):\n # Number of components\n num_comps = len(data[\"type\"])\n\n # Check that values in dict have the same length\n for key in keys:\n if data[key] is not None:\n if not isinstance(data[key], (list, tuple, numpy.ndarray)):\n raise TypeError()\n if len(data[key]) != num_comps:\n raise ValueError()\n\n # Split dict\n for i in range(num_comps):\n generator_data.append(\n (\n k,\n {\n key: (data[key][i] if data[key] is not None else None)\n for key in generators.keys()\n },\n )\n )\n else:\n # Only one component for this element\n # Check that values are scalar or 1D array_like\n for key in keys:\n if numpy.ndim(data[key]) not in {0, 1}:\n raise ValueError()\n generator_data.append((k, data))\n\n # Format\n label_length = len(max(parameters[\"generators\"], key=len))\n fmt = block_to_format[\"GENER\"]\n fmt1 = str2format(fmt[label_length])\n fmt2 = str2format(fmt[0])\n\n out = []\n for k, v in generator_data:\n # Table\n ltab = None\n if v[\"times\"] is not None and isinstance(\n v[\"times\"], (list, tuple, numpy.ndarray)\n ):\n ltab = len(v[\"times\"])\n for key in [\"rates\", \"specific_enthalpy\"]:\n if v[key] is not None:\n if not isinstance(v[key], (list, tuple, numpy.ndarray)):\n raise TypeError()\n if not (ltab > 1 and ltab == len(v[key])):\n raise ValueError()\n else:\n # Rates and specific enthalpy tables cannot be written without a\n # time table\n for key in [\"rates\", \"specific_enthalpy\"]:\n if v[key] is not None and numpy.ndim(v[key]) != 0:\n raise ValueError()\n\n itab = (\n 1\n if isinstance(v[\"specific_enthalpy\"], (list, tuple, numpy.ndarray))\n else None\n )\n\n # Record 1\n values = [\n k,\n v[\"name\"],\n v[\"nseq\"],\n v[\"nadd\"],\n v[\"nads\"],\n ltab,\n None,\n v[\"type\"],\n itab,\n None if ltab else v[\"rates\"],\n None if ltab else v[\"specific_enthalpy\"],\n v[\"layer_thickness\"],\n ]\n out += write_record(values, fmt1)\n\n # Record 2\n out += write_record(v[\"times\"], fmt2, multi=True) if ltab else []\n\n # Record 3\n out += write_record(v[\"rates\"], fmt2, multi=True) if ltab else []\n\n # Record 4\n if ltab and v[\"specific_enthalpy\"] is not None:\n if isinstance(v[\"specific_enthalpy\"], (list, tuple, numpy.ndarray)):\n specific_enthalpy = v[\"specific_enthalpy\"]\n else:\n specific_enthalpy = numpy.full(ltab, v[\"specific_enthalpy\"])\n\n out += write_record(specific_enthalpy, fmt2, multi=True)\n\n return out",
"def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p",
"def main():\n\t#Necessary Parameters for Simulation\n\tAmplitudes = ['230','260','290']\n\tConditions = ['No EES','EES','EES+A08','EES+A08+ProIncrease']\n\n\n\n\t#eesAmplitude = \"230\"\n\teesAmplitudeName = \"230\"\n\tdelay = \"2\"\n\ttoAddname = \"\"\n\tspecies = \"rat\"\n\t#Paramters initialization\n\ttotSimTime = rp.get_tot_sim_time()\n\tgaitCyclesFileName = rp.get_gait_cycles_file()\n\tmuscles = rp.get_muscles()\n\ttemplateFile = \"templateFrwSimRORaReal.txt\"\n\tw1 = 0.011\n\tw2 = -0.005\n\n\ttemplateFile = \"A08.txt\"\n\n\ttls.modify_network_structure(templateFile,templateFile,delay,[w1,w2])\n\n\teesFrequencies = range(0,41,40)\n\tnProc = 4\n\tseed = \"1\"\n\n\tnSim = len(eesFrequencies)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\n\t# run simulations\n\tfor j,eesAmplitude in enumerate(Amplitudes):\n\t\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t\tfor condition in Conditions:\n\t\t\t\t#name = \"Tonic_FFS_\"+inputFileName+\"_freq_\"+str(eesFrequency)\n\t\t\t\tinputFileName = condition\n\t\t\t\tinputFile = \"generatedStructures/\"+inputFileName+\".txt\"\n\t\t\t\tname = \"Tonic_FFS_\"+condition+\"_freq_\"+str(eesFrequency)\n\t\t\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\t\t\tif not resultFile:\n\t\t\t\t\tprogram = ['python','./scripts/runForSimMuscleSpindles_RORa.py',\\\n\t\t\t\t\t\tstr(eesFrequency),eesAmplitude,inputFile,name,\"--simTime\",str(totSimTime),\"--seed\",seed,\"--noPlot\"]\n\n\t\t\t\tif not resultFile: gt.run_subprocess(program)\n\n\t\t\t\tcount+=1\n\t\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\n\n\n\t\"\"\" create plots \"\"\"\n\terrParams = dict(lw=0.5, capsize=1, capthick=0.5)\n\twith open(gaitCyclesFileName, 'r') as pickle_file:\n\t\theelStrikes = pickle.load(pickle_file)\n\t\tfootOffs = pickle.load(pickle_file)\n\n\n\t# Figure 5 plot all gait cycles- afferent and efferents\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get data\n\t\tprint name\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tprint resultFile\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# get gait 
cycles\n\t\tif not 'heelStrikeSamples' in locals():\n\t\t\tnSamples = len(meanFr[muscles[0]][\"Mn\"])\n\t\t\tdtMeanFr = float(totSimTime)/nSamples\n\t\t\theelStrikeSamples = [int(x) for x in heelStrikes*1000./dtMeanFr]\n\t\t\tfootOffSamples = [int(x) for x in footOffs*1000./dtMeanFr]\n\t\t\tsamples = range(nSamples)\n\t\t\tstance = np.zeros(nSamples).astype(bool)\n\t\t\tfor strike,off in zip(heelStrikeSamples,footOffSamples):\n\t\t\t\tif strike>nSamples: break\n\t\t\t\tstance[strike:off]=True\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'],color=colors[i])\n\t\t\tax[j,0].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'],color=colors[i])\n\t\t\tax[j,1].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].plot(meanFr[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,2].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,3].plot(estimatedEmg[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,3].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\n\n\tfor j,muscle in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,200])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time (ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,200])\n\t\tax[j,2].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,2].set_xlabel(\"Time (ms)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,200])\n\t\tax[j,3].set_title(\"EMG - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n# FIgure 5 plot 2 single gait cycles- afferent and efferents + mn phasicity score\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 6,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get data\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# compute stats\n\t\tiaIntModDepth = 
{}\n\t\tactiveMnFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepth[muscle]=[]\n\t\t\tactiveMnFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in muscles:\n\t\t\t\tiaIntModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tmnActivityDuringCycle = meanFr[muscle]['Mn'][heelStrikeSamples[j]:heelStrikeSamples[j+1]]\n\t\t\t\tactiveMnFr[muscle].append(\\\n\t\t\t\t\tmnActivityDuringCycle[mnActivityDuringCycle>=0.8*mnActivityDuringCycle.max()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=1.5*mnActivityDuringCycle.std()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=np.percentile(mnActivityDuringCycle,90)].mean())\n\t\tiaIntModDepthStats = {}\n\t\tactiveMnFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepthStats[muscle] = {\"mean\":np.mean(iaIntModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaIntModDepth[muscle])/(np.sqrt(len(iaIntModDepth[muscle])-1))}\n\t\t\tactiveMnFrStats[muscle] = {\"mean\":np.mean(activeMnFr[muscle]),\n\t\t\t\t\"sem\":np.std(activeMnFr[muscle])/(np.sqrt(len(activeMnFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 200, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,1].fill_between(reducedSamples, 0, 250, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].bar(eesFrequency,iaIntModDepthStats[muscle][\"mean\"],bar_width,yerr=iaIntModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = np.linspace(0,bar_width*0.9,len(iaIntModDepth[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,2].scatter(xValsScatter,iaIntModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,3].plot(meanFr[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,3].fill_between(reducedSamples, 0, 40, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,4].bar(eesFrequency,activeMnFrStats[muscle][\"mean\"],bar_width,yerr=activeMnFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,4].scatter(xValsScatter,activeMnFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,5].plot(estimatedEmg[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,5].fill_between(reducedSamples, -50, 50, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\n\tfor j,muscle in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,250])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time 
(ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,250])\n\t\tax[j,2].set_title(\"Mean IaInr Fr while active\")\n\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,40])\n\t\tax[j,3].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,4].set_ylim([0,40])\n\t\tax[j,4].set_title(\"Mean Mn Fr while active\")\n\t\tax[j,4].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,4].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,5].set_ylim([-50,50])\n\t\tax[j,5].set_title(\"EMG - \"+muscle)\n\t\tax[j,5].set_xlabel(\"Time (ms)\")\n\t\tax[j,5].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n\n\n\n\t# FIgure 2-7 plot\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tmeanPerEraserApIaf = []\n\toffsetMeanFr = 0\n\toffsetMeanModDepth = 0\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\t\t\tmeanPerEraserApIaf.append(pickle.load(pickle_file))\n\n\t\t# compute stats\n\t\tiaModDepth = {}\n\t\tiaMeanFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepth[muscle]=[]\n\t\t\tiaMeanFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in muscles:\n\t\t\t\tiaModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tiaMeanFr[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].mean())\n\t\tiaModDepthStats = {}\n\t\tiaMeanFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepthStats[muscle] = 
{\"mean\":np.mean(iaModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaModDepth[muscle])/(np.sqrt(len(iaModDepth[muscle])-1))}\n\t\t\tiaMeanFrStats[muscle] = {\"mean\":np.mean(iaMeanFr[muscle]),\n\t\t\t\t\"sem\":np.std(iaMeanFr[muscle])/(np.sqrt(len(iaMeanFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 125, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].bar(eesFrequency,iaMeanFrStats[muscle][\"mean\"],bar_width,yerr=iaMeanFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = np.linspace(0,bar_width*0.9,len(iaMeanFr[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,1].scatter(xValsScatter,iaMeanFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,2].bar(eesFrequency,iaModDepthStats[muscle][\"mean\"],bar_width,yerr=iaModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,2].scatter(xValsScatter,iaModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,3].bar(eesFrequency,meanPerEraserApIaf[-1],5,color=colors[i])\n\n\t\t\tax[j,0].set_ylim([0,125])\n\t\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\t\tax[j,1].set_ylim([0,125])\n\t\t\tax[j,1].set_title(\"Mean Ia firing rate \")\n\t\t\tax[j,1].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,1].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,2].set_ylim([0,80])\n\t\t\tax[j,2].set_title(\"modulation depth\")\n\t\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,2].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,3].set_ylim([0,100])\n\t\t\tax[j,3].set_title(\"Percentage erased APs\")\n\t\t\tax[j,3].set_xlabel(\"Stimulation frequency (Hz)\")\n\t\t\tax[j,3].set_ylabel(\"Percentage\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)",
"def set_parameters(self):\n params = {}\n if self.modelname == 'SI':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after splot\n # Ts: Time from split to present, in 2*Na generation units\n names = ['N1', 'N2', 'Ts']\n values = [1, 1, 1]\n upper_bounds = [20, 20, 10]\n lower_bounds = [0.01, 0.01, 0]\n elif self.modelname == 'IM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Ts']\n values = [1, 1, 1, 1, 1]\n upper_bounds = [20, 20, 20, 20, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0]\n elif self.modelname == 'AM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']\n values = [1, 1, 1, 1, 0.1, 1]\n upper_bounds = [20, 20, 20, 20, 2, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'SC':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']\n values = [1, 1, 1, 1, 1, 0.1]\n upper_bounds = [20, 20, 20, 20, 10, 2]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'IM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'AM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Tam', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'SC2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]\n upper_bounds = [20, 20, 30, 
30, 5, 5, 10, 2, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n params['Names'] = names\n params['Values'] = values\n params['Upper'] = upper_bounds\n params['Lower'] = lower_bounds\n return params",
"def generate(self):\n\n # create temporary folder for copying image files\n try:\n os.mkdir(ReportGenerator.TEMP_FOLDER)\n except FileExistsError:\n shutil.rmtree(ReportGenerator.TEMP_FOLDER)\n os.mkdir(ReportGenerator.TEMP_FOLDER)\n\n # find all simulations to be included in report\n group_directory = self.config['group_directory']\n sim_directory = self.config['sim_directory']\n assert (group_directory or sim_directory) and not(group_directory and sim_directory), \\\n \"A group directory or a simulation directory must be specified, but not both\"\n\n if group_directory:\n assert os.path.exists(f'results/{group_directory}'), \"ERROR: Group directory does not exist\"\n\n sim_list = os.listdir(f'results/{group_directory}')\n sim_label = group_directory.split('-')[0]\n sim_list = [f'{group_directory}/{x}' for x in sim_list if sim_label in x]\n\n # filters\n if self.config['filters']:\n \n seed_filters = self.config['filters']['seed']\n dilution_filters = self.config['filters']['dilution']\n order_filters = self.config['filters']['order']\n\n for symbol, filters in zip(['S', 'D', 'O'], [seed_filters, dilution_filters, order_filters]):\n if type(filters) == int:\n sim_list = [x for x in sim_list if f'{symbol}{filters}' in x.split('-')[2]]\n elif type(filters) == list:\n sim_list = [x for x in sim_list if any([f\"{symbol}{f}\" in x.split('-')[-2] for f in filters])]\n\n if len(sim_list) == 0:\n raise Exception('Simulation filters resulted in no satisfactory simulations')\n\n else:\n assert os.path.exists(f'results/{sim_directory}'), \"ERROR: Simulation directory does not exist\"\n sim_list = [sim_directory]\n\n # REPORT HEADER\n self.add_heading(level=1, text=self.config['title'])\n now = datetime.now()\n self.add_text(text=f'Report generated on {now.strftime(\"%B %d, %Y\")} at {now.strftime(\"%H:%M:%S\")}')\n\n # REPORT CONTENTS\n for i, sim in enumerate(sim_list):\n for content in self.config['content']:\n content_type = list(content.keys())[0]\n params = content[content_type]\n\n self.add_content(content_type, params, sim)\n\n if i+1 < len(sim_list):\n self.add_content(content_type='break')\n\n # CREATE HTML TEMPLATE\n base_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/base.html')\n base_output = base_template.render(content='\\n\\n'.join(self.contents))\n with open(f'{ReportGenerator.TEMP_FOLDER}/template.html', 'w') as html_file:\n html_file.write(base_output)\n\n # CREATE PDF FROM TEMPLATE\n if group_directory:\n HTML(f'{ReportGenerator.TEMP_FOLDER}/template.html').write_pdf(\n f'results/{group_directory}/{self.config[\"title\"]}.pdf', stylesheets=['reports/style.css'])\n else:\n HTML(f'{ReportGenerator.TEMP_FOLDER}/template.html').write_pdf(\n f'results/{sim_directory}/{self.config[\"title\"]}.pdf', stylesheets=['reports/style.css'])\n\n # delete temp folder for storing image files\n shutil.rmtree(f'{ReportGenerator.TEMP_FOLDER}')\n return None",
"def create_sim_path_air(Env):\n \n sim_air = Env.create_path('sim')\n sim_air.set_globals(params={'GearXMLFile': 'gear_air.xml' , 'MaxRecordNumber' : nevents_air}) \n sim_air.add_processor(name=\"InfoSetter\")\n sim_air.add_processor(name=\"ParticleGun\")\n sim_air.add_processor(name=\"FastSim\")\n sim_air.add_processor(name=\"M26Digitizer\")\n sim_air.add_processor(name=\"LCIOOutput\",params={\"LCIOOutputFile\" : rawfile_air })\n\n sim_alu = Env.create_path('sim2')\n sim_alu.set_globals(params={'GearXMLFile': gearfile , 'MaxRecordNumber' : nevents_alu}) \n sim_alu.add_processor(name=\"InfoSetter\")\n sim_alu.add_processor(name=\"ParticleGun\")\n sim_alu.add_processor(name=\"FastSim\")\n sim_alu.add_processor(name=\"M26Digitizer\")\n sim_alu.add_processor(name=\"LCIOOutput\",params={\"LCIOOutputFile\" : rawfile_alu })\n \n simpath = [ sim_air,\n sim_alu, \n ]\n\n return simpath",
"def make_asimov_fit_parameter_plots(self, combined=False):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n \n if combined:\n outdir = os.path.join(self.outdir, 'CombinedBestFits')\n else:\n outdir = os.path.join(self.outdir, 'IndividualBestFits')\n mkdir(outdir)\n \n maintitle = self.make_main_title(\n end='Asimov Analysis'\n )\n\n hrange = self.inj_param_vals[-1]-self.inj_param_vals[0]\n xlims = [self.inj_param_vals[0]-0.1*hrange,\n self.inj_param_vals[-1]+0.1*hrange]\n\n th = self.labels[self.labels.keys()[0]].dict[\n '%s_name'%self.th_to_wh[0]['params']['bestfit']]\n wh = self.labels[self.labels.keys()[0]].dict[\n '%s_name'%self.th_to_wh[0]['params']['altfit']]\n\n th_to_wh_label = \"%s fit to %s fiducial\"%(\n self.tex_axis_label(th),\n self.tex_axis_label(wh)\n )\n wh_to_th_label = \"%s fit to %s fiducial\"%(\n self.tex_axis_label(wh),\n self.tex_axis_label(th)\n )\n fitlabels = [th_to_wh_label, wh_to_th_label]\n\n subtitle = \"True %s Best Fit Parameters\\end{center}\"%(self.tex_axis_label(th))\n\n # Set up multi-plot if needed\n if combined:\n num_rows = self.get_num_rows(\n data=self.th_to_wh[0]['params'],\n omit_metric=False\n )\n plt.figure(figsize=(20, 5*num_rows+2))\n subplotnum = 1\n else:\n subplotnum = None\n\n for param in self.th_to_wh[0]['params'].keys():\n if param not in ['bestfit', 'altfit']:\n ymax = None\n ymin = None\n for fit, fitname, fitlabel in zip(\n [self.th_to_wh, self.wh_to_th],\n ['th_to_wh', 'wh_to_th'],\n fitlabels):\n vals = []\n for param_val in fit[0]['params'][param]:\n val, units = self.parse_pint_string(\n pint_string=param_val\n )\n if param == 'deltam31':\n vals.append(np.abs(float(val)))\n else:\n vals.append(float(val))\n # Specify the subplot, if necessary\n if combined:\n plt.subplot(num_rows, 4, subplotnum)\n self.make_1d_graph(\n xvals=self.inj_param_vals,\n yvals=vals,\n xlabel=self.inj_param_name,\n xunits=self.inj_param_units,\n ylabel=param,\n yunits=units,\n marker=self.marker_style(fitname),\n color=self.plot_colour(fitname),\n plotlabel=fitlabel,\n xlims=xlims\n )\n\n if ymax is None:\n ymax = max(vals)\n else:\n ymax = max(ymax, max(vals))\n if ymin is None:\n ymin = min(vals)\n else:\n ymin = min(ymin, min(vals))\n\n yrange = ymax - ymin\n plt.ylim(ymin-0.1*yrange, ymax+0.2*yrange)\n plt.legend(loc='upper left')\n # Advance the subplot number, if necessary\n if combined:\n subplotnum += 1\n # Else, save/close this plot\n else:\n plt.title(r'%s \\\\ %s'%(maintitle,subtitle))\n plt.tight_layout()\n save_end = \"%s_%s_best_fit_values\"%(self.inj_param_name,\n param)\n self.save_plot(outdir=outdir, end=save_end, truth=th)\n plt.close()\n # Save the whole canvas, if necessary\n if combined:\n plt.suptitle(r'%s \\\\ %s'%(maintitle,subtitle), fontsize=36)\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n save_end = \"%s_all_best_fit_values\"%(self.inj_param_name)\n self.save_plot(outdir=outdir, end=save_end, truth=th)\n plt.close()",
"def generate(gui):\n\n global robot_obj\n global ftm_list\n global btm_list\n global fk_list\n global jac_list\n global com_list\n global com_jac_list\n\n ftm = ftm_list if gui.checkBox_ftm.isChecked() else []\n btm = btm_list if gui.checkBox_btm.isChecked() else []\n fk = fk_list if gui.checkBox_fk.isChecked() else []\n jac = jac_list if gui.checkBox_jac.isChecked() else []\n com = com_list if gui.checkBox_com.isChecked() else []\n com_jac = com_jac_list if gui.checkBox_com_jac.isChecked() else []\n\n language = Language(settings[\"language\"])\n optimization_level = settings[\"optimization_level\"]\n\n generate_everything(robot_obj, ftm, btm,\n fk, jac, com, com_jac,\n polynomial_trajectories,\n control_loops_list,\n optimization_level,\n language,\n path + '../generated/' + settings[\"filename\"],\n progressbar=gui.progressBar)",
"def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1",
"def generate_params(sw):\n\n # List of vlan ids to use for this permutation\n vlan_ids = []\n # Physical ports required for this permutation per L3 interface\n phy_ports = []\n # L3 interfaces to be created\n l3_interfaces = 0\n # List of ip address required for this permutation\n ip_address_sw = []\n # List of ip address for every host\n ip_address_hs = []\n # VxLAN interfaces to be created\n vxlan_ids = []\n # VNIs to be created\n vnis = {}\n # VTEP Peers to be created\n vtep_peers = []\n\n vlan_ids = [VLAN1, VLAN2]\n vxlan_ids = [TUN_NUM]\n vnis = {VNI: {'vlan': [VLAN1], 'vtep_peer': [H2_IP]}}\n l3_interfaces = 1\n phy_ports = [sw.vtysh_ports['if01'], sw.vtysh_ports['if02']]\n ip_address_sw = [S1_IP]\n ip_address_hs = [H1_IP, H2_IP]\n vtep_peers = [H2_IP]\n\n return {'vlan_ids': vlan_ids,\n 'vxlan_ids': vxlan_ids,\n 'vnis': vnis,\n 'vtep_peers': vtep_peers,\n 'l3_interfaces': l3_interfaces,\n 'phy_ports': phy_ports,\n 'ip_address_sw': ip_address_sw,\n 'ip_address_hs': ip_address_hs}",
"def generateParameters(self, seed=_default_seed, output='atmos_db'):\n self.initPointingSequence()\n # Instantiate the Atmosphere class\n self.atmos = Atmosphere(\n self.mjds, self.mjde, self.npoints, seed)\n # Generate main atmosphere parameters sequence\n self.atmos.init_main_parameters()\n # Associate a value of these parameters for each pointing\n for opsim_dict in self.opsim_visits:\n # Get coordinates\n RA, DEC = (opsim_dict['fieldRA'], opsim_dict['fieldDEC'])\n # Get ID and date\n obsid, mjd = (opsim_dict['obsHistID'], opsim_dict['expMJD'])\n # Compute azimuth and elevation angle\n azimuth, z_angle = modtranTools.equatorial2local(RA, DEC,\n mjd, unit='rad')\n # Get atmosphere parameters\n modtran_dict = self.fillModtranDictionary(mjd, obsid, z_angle)\n self.modtran_visits.append(modtran_dict)\n self.aerosol_visits.append(self.atmos.aerosols(mjd) + (z_angle,))\n if output:\n megatupl = (self.modtran_visits, self.aerosol_visits,)\n parmdir = os.getenv('ATMOSPHERE_PARAMETERS_DIR')\n outname = output + '.pck'\n parmpath = os.join.path(parmdir, outname)\n with open(parmpath, 'w') as parmf:\n pickle.dump(megatupl, parmf, seed)\n # Done",
"def _generate(self, **kwargs):\n self._create_parameter_set_hashes()\n self._create_parameter_set_names()\n self._create_parameter_study()\n if self.previous_parameter_study:\n self._merge_parameter_studies()",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def setups():\n setups = []\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F2 = dict()\n kotani2017_F2['name'] = 'kotani2017_F2'\n kotani2017_F2['piltemplate'] = kotani2017_F2_pil\n kotani2017_F2['pilparams'] = [None]\n kotani2017_F2['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F2['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=1'),\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.5'),\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.05')]\n kotani2017_F2['reporter'] = 'D'\n kotani2017_F2['exp_results'] = [(7733, 7.42), (11333, 6.18), (25533, 1.40)]\n setups.append(kotani2017_F2)\n\n\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F3 = dict()\n kotani2017_F3['name'] = 'kotani2017_F3'\n kotani2017_F3['piltemplate'] = kotani2017_F3_pil\n kotani2017_F3['pilparams'] = [None]\n kotani2017_F3['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F3['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.001')]\n kotani2017_F3['reporter'] = 'D'\n kotani2017_F3['exp_results'] = [(21220, 7.72), (64203, 3.12), (86996, 0.69)]\n setups.append(kotani2017_F3)\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F4 = dict()\n kotani2017_F4['name'] = 'kotani2017_F4'\n kotani2017_F4['piltemplate'] = kotani2017_F4_pil\n kotani2017_F4['pilparams'] = [None]\n kotani2017_F4['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F4['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.001'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0')]\n kotani2017_F4['reporter'] = 'D'\n kotani2017_F4['exp_results'] = [(6815, 6.06), (9004, 4.78), (10278, 4.03), (10795, 3.73)]\n setups.append(kotani2017_F4)\n\n return setups"
] |
[
"0.55694175",
"0.54515797",
"0.5435121",
"0.54233396",
"0.5224288",
"0.5181293",
"0.51750064",
"0.51612604",
"0.51083934",
"0.51014996",
"0.5083064",
"0.50465715",
"0.5015743",
"0.49896726",
"0.49784303",
"0.49782255",
"0.4967333",
"0.4945393",
"0.49368885",
"0.48952085",
"0.48840344",
"0.48791572",
"0.48753074",
"0.48485836",
"0.4816727",
"0.48063177",
"0.48044407",
"0.4784224",
"0.4768117",
"0.4767233"
] |
0.70615596
|
0
|
run(cores=1). cores: Number of cores to use to run simulations. None >> Run simulations in serial (longest time); + [] >> Use up to [] of cores; 1 >> Use ALL CPU cores. DESCRIPTION: ProjectManager.run() is a simple interface function that runs AnimatLab simulations. The cores argument is passed to the simulationRunner class, which then processes the simulations accordingly.
|
def run(self, cores=-1):
self.simRunner.do_simulation(cores=cores)
return True
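
# Minimal, runnable sketch of the delegation shown above. SimulationRunnerStub
# is a stand-in for the real simulationRunner class; its name and the
# do_simulation(cores=...) signature are assumed from the method above, not
# taken from the AnimatLab sources.
class SimulationRunnerStub:
    def do_simulation(self, cores=-1):
        # The real runner dispatches AnimatLab simulations here. Per the
        # docstring: cores=None runs serially, a positive int caps the pool.
        print("running simulations with cores={}".format(cores))

class ProjectManagerSketch:
    def __init__(self):
        self.simRunner = SimulationRunnerStub()

    def run(self, cores=-1):
        self.simRunner.do_simulation(cores=cores)
        return True

pm = ProjectManagerSketch()
pm.run()            # the default (-1) is forwarded unchanged to do_simulation()
pm.run(cores=None)  # serial execution, per the docstring
pm.run(cores=4)     # cap the runner at 4 cores, per the docstring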
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)",
"def main(arg_list: Optional[List[str]] = None) -> Optional[Run]: # pragma: no cover\n return run_simulator_pipeline(\n arg_list,\n CELLSIG_SCRIPT_DIR / \"run_cell_simulations_in_pipeline.py\",\n CELLSIG_SCRIPT_DIR / \"plot_cellsig_predicted_optimum_convergence.py\",\n CellSignallingOptimizerConfig,\n )",
"def compile_sim(self, n_jobs=(1 + mp.cpu_count()//2), args=[]):\n\t\tsp.call(['make', '-j', str(n_jobs), '-f', self.makefile_fn] + args)",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n raise NotImplementedError",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n raise NotImplementedError",
"def set_cores(self, cores):\n self.cores = cores\n return",
"def _run(self):\n\n self.init_simulation()\n\n # Inject ourselves into the core\n self.simthread.inject()\n\n # register steppables in queue\n for _ in range(len(self._steppable_queue)):\n steppable, frequency = self._steppable_queue.pop(0)\n if not isinstance(steppable, SteppablePy):\n steppable = steppable(frequency=frequency)\n CompuCellSetup.register_steppable(steppable=steppable)\n for _ in range(len(self._corespecs_queue)):\n CompuCellSetup.register_specs(self._corespecs_queue.pop(0))\n\n # Load model specs from file if specified\n if self.cc3d_sim_fname is not None:\n assert os.path.isfile(self.cc3d_sim_fname), f\"Could not find simulation file: {self.cc3d_sim_fname}\"\n rollback_importer = RollbackImporter()\n CompuCellSetup.run_cc3d_project(self.cc3d_sim_fname)\n rollback_importer.uninstall()\n\n # Call startup if loaded simulation file didn't already do it\n if CompuCellSetup.persistent_globals.simulator is None:\n CompuCellSetup.run()",
"def run(self):\n self._display_sims(self._compute_sims())",
"def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())",
"def test_run_jobs(self):\n qm = QMCalculator(software='mopac',\n method='pm3',\n fileStore=self.fileStore,\n onlyCyclics=True,\n maxRadicalNumber=0,\n )\n output_directory = os.path.join(qm.settings.fileStore, '..', '..')\n qm.set_default_output_directory(output_directory)\n\n spc1 = Species().from_smiles('c1ccccc1')\n spc2 = Species().from_smiles('CC1C=CC=CC=1')\n spc_list = [spc1, spc2]\n\n qm.run_jobs(spc_list, procnum=1)",
"def run(self, steps):\n self.sim.run(steps)",
"def main(out_path, runs, repeats = 128, workers = 0, local = False):\n\n logger.info(\"simulating %i runs\", len(runs))\n\n get_run_data = borg.util.memoize(borg.storage.RunData.from_bundle)\n\n def yield_jobs():\n for run in runs:\n all_data = get_run_data(run[\"bundle\"])\n validation = sklearn.cross_validation.ShuffleSplit(len(all_data), repeats, test_fraction = 0.2, indices = False)\n\n if run[\"portfolio_name\"] == \"-\":\n makers = map(borg.experiments.simulate_runs.SolverMaker, all_data.solver_names)\n else:\n makers = [borg.experiments.simulate_runs.PortfolioMaker(run[\"portfolio_name\"])]\n\n max_instances = len(all_data) * 0.8\n\n for (train_mask, test_mask) in validation:\n for instances in map(int, map(round, numpy.r_[10.0:max_instances:32j])):\n for maker in makers:\n yield (\n simulate_run,\n [\n run,\n maker,\n all_data,\n train_mask,\n test_mask,\n instances,\n run[\"independent\"],\n run[\"mixture\"],\n ],\n )\n\n with borg.util.openz(out_path, \"wb\") as out_file:\n writer = csv.writer(out_file)\n\n writer.writerow([\"description\", \"solver\", \"instances\", \"successes\", \"mean_time\", \"median_time\"])\n\n for (_, row) in condor.do(yield_jobs(), workers, local):\n writer.writerow(row)\n\n out_file.flush()",
"def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )",
"def run_simulator(self):\n\n self.update_settings()\n\n # Pass in the progress bar and the master so that the simulator can\n # update the progress bar and then refresh the screen when the progress\n # checkpoints are hit\n\n self.sim_results = self.sim.run(self.progress_bar, self.master)\n self.graph_results()",
"def compute_cores(config):\n cores = config.getint('General','cores')\n if cores > mp.cpu_count():\n cores = mp.cpu_count()\n return cores",
"def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)",
"def run(self):\n msg = sfmt.format(\"Run test\", self.name)\n print(msg)\n\n # Set nam as namefile name without path\n nam = None\n\n # run mf6 models\n exe = str(self.targets[\"mf6\"].absolute())\n msg = sfmt.format(\"using executable\", exe)\n print(msg)\n\n if self.parallel:\n print(\"running parallel on\", self.ncpus, \"processes\")\n try:\n success, buff = self.run_parallel(\n exe,\n )\n except Exception as exc: \n msg = sfmt.format(\"MODFLOW 6 run\", self.name)\n print(msg)\n print(exc)\n success = False\n else:\n try:\n success, buff = flopy.run_model(\n exe,\n nam,\n model_ws=self.simpath,\n silent=False,\n report=True,\n )\n msg = sfmt.format(\"MODFLOW 6 run\", self.name)\n if success:\n print(msg)\n else:\n print(msg)\n except:\n msg = sfmt.format(\"MODFLOW 6 run\", self.name)\n print(msg)\n success = False\n\n # set failure based on success and require_failure setting\n if self.require_failure is None:\n msg = \"MODFLOW 6 model did not terminate normally\"\n if success:\n failure = False\n else:\n failure = True\n else:\n if self.require_failure:\n msg = \"MODFLOW 6 model should have failed\"\n if not success:\n failure = False\n else:\n failure = True\n else:\n msg = \"MODFLOW 6 model should not have failed\"\n if success:\n failure = False\n else:\n failure = True\n\n # print end of mfsim.lst to the screen\n if failure and self.is_CI:\n fpth = os.path.join(self.simpath, \"mfsim.lst\")\n msg = self._get_mfsim_listing(fpth) + msg\n\n # test for failure\n assert not failure, msg\n\n self.nam_cmp = None\n if success:\n if self.action is not None:\n if self.action.lower() == \"compare\":\n msg = sfmt.format(\"Comparison files\", self.name)\n print(msg)\n else:\n cpth = os.path.join(self.simpath, self.action)\n key = self.action.lower().replace(\".cmp\", \"\")\n exe = str(self.targets[key].absolute())\n msg = sfmt.format(\"comparison executable\", exe)\n print(msg)\n if (\n \"mf6\" in key\n or \"libmf6\" in key\n or \"mf6_regression\" in key\n ):\n nam = None\n else:\n npth = get_namefiles(cpth)[0]\n nam = os.path.basename(npth)\n self.nam_cmp = nam\n try:\n if self.api_func is None:\n success_cmp, buff = flopy.run_model(\n exe,\n nam,\n model_ws=cpth,\n silent=False,\n report=True,\n )\n else:\n success_cmp, buff = self.api_func(\n exe, self.idxsim, model_ws=cpth\n )\n msg = sfmt.format(\n \"Comparison run\", self.name + \"/\" + key\n )\n print(msg)\n\n # print end of mfsim.lst to the screen\n if \"mf6\" in key:\n if not success and self.is_CI:\n fpth = os.path.join(cpth, \"mfsim.lst\")\n print(self._get_mfsim_listing(fpth))\n\n except:\n success_cmp = False\n msg = sfmt.format(\n \"Comparison run\", self.name + \"/\" + key\n )\n print(msg)\n\n assert success_cmp, \"Unsuccessful comparison run\"\n\n return",
"def run_simulation(run):\n # Write the argument file used by metrosim.\n simulation = run.simulation\n metrosim_dir = settings.BASE_DIR + '/metrosim_files/'\n metrosim_file = '{0}execs/metrosim'.format(metrosim_dir)\n arg_file = (\n '{0}arg_files/simulation_{1!s}_run_{2!s}.txt'.format(metrosim_dir,\n simulation.id,\n run.id)\n )\n with open(arg_file, 'w') as f:\n database = settings.DATABASES['default']\n db_host = database['HOST']\n db_name = database['NAME']\n db_user = database['USER']\n db_pass = database['PASSWORD']\n log = metrosim_dir + 'logs/run_{}.txt'.format(run.id)\n tmp = metrosim_dir + 'output'\n stop = metrosim_dir + 'stop_files/run_{}.stop'.format(run.id)\n arguments = ('-dbHost \"{0}\" -dbName \"{1}\" -dbUser \"{2}\" '\n + '-dbPass \"{3}\" -logFile \"{4}\" -tmpDir \"{5}\" '\n + '-stopFile \"{6}\" -simId \"{7!s}\" -runId \"{8!s}\"'\n ).format(db_host, db_name, db_user, db_pass, log, tmp,\n stop, simulation.id, run.id)\n f.write(arguments)\n\n # Run the script 'prepare_run.py' then run metrosim then run the script \n # 'run_end.py'.\n # The two scripts are run with the run.id as an argument.\n prepare_run_file = settings.BASE_DIR + '/metro_app/prepare_run.py'\n build_results_file = settings.BASE_DIR + '/metro_app/build_results.py'\n log_file = (\n '{0}/website_files/script_logs/run_{1}.txt'.format(\n settings.BASE_DIR, run.id\n )\n )\n # Command looks like: \n #\n # python3 ./metro_app/prepare_results.py y\n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n # && ./metrosim_files/execs/metrosim\n # ./metrosim_files/arg_files/simulation_x_run_y.txt \n # && python3 ./metro_app/build_results.py y \n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n #\n # 2>&1 | tee is used to redirect output and errors to file.\n command = ('python3 {first_script} {run_id} 2>&1 | tee {log} && '\n + '{metrosim} {argfile} && '\n + 'python3 {second_script} {run_id} 2>&1 | tee {log}')\n command = command.format(first_script=prepare_run_file, run_id=run.id,\n log=log_file, metrosim=metrosim_file,\n argfile=arg_file,\n second_script=build_results_file)\n subprocess.Popen(command, shell=True)",
"def test_simulation(self):\n\t\tprint \"Simulation is being tested\"\n\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Debug Flag Set!\"\n\t\t\tprint self.getConfig()\n\n\t\tif toggles.PACKING:\n\t\t\ttoggles.OUTPUT_PATH = toggles.OUTPUT_PATH+toggles.RUN_NAME+'/'\n\t\t\tpackageMaker(toggles.OUTPUT_PATH,self.getConfig())\n\t\tif toggles.IDEAL_GRID:\n\t\t\tself.consensusGrid()\n\n\t\tif toggles.REAL_DATA:\n\t\t\tsampleData = self.load_data()\n\t\t\tif toggles.RUN_DATA_STATS:\n\t\t\t\tself.output_data_stats(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_AVERAGE_COST:\n\t\t\t\tself.sim_average_cost(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_SINGLE_PAIR:\n\t\t\t\tself.sim_single_pair_cost(sampleData, pending_eddy(self.pick_worker([0], [0])))\n\t\t\t\tself.reset_database()\n\t\telse:\n\t\t\tsampleData = {}\n\t\t\tsyn_load_data()\n\n\t\tif toggles.RUN_ITEM_ROUTING and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: item Routing\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif PRED_SCORE_COUNT and not (RUN_TASKS_COUNT or RUN_MULTI_ROUTING):\n\t\t\tif DEBUG_FLAG:\n\t\t\t\tprint \"Running: Pred Score count\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\n\n\t\tif toggles.COUNT_TICKETS and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: ticket counting\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif toggles.SELECTIVITY_GRAPH and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: selectivity amounts over time\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\tcorrectAnswers = self.get_correct_answers(toggles.INPUT_PATH + toggles.ITEM_TYPE + '_correct_answers.csv')\n\t\t\tpassedItems = self.get_passed_items(correctAnswers)\n\n\n\t\tif toggles.RUN_OPTIMAL_SIM:\n\t\t\tcountingArr=[]\n\t\t\tself.reset_database()\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running optimal_sim \" +str(i)\n\t\t\t\tself.num_tasks = self.optimal_sim(sampleData)\n\t\t\t\tcountingArr.append(self.num_tasks)\n\t\t\t\tself.reset_database()\n\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_optimal_tasks'\n\t\t\tgeneric_csv_write(dest+'.csv',[countingArr])\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \" + dest+'.csv'\n\n\n\n\t\tif toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING or toggles.RUN_CONSENSUS_COUNT:\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\t#print \"Running: task_count\"\n\t\t\t\t#f = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv', 'a')\n\t\t\t\t#f1 = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_incorrect_count.csv', 'a')\n\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\toutputArray = []\n\n\t\t\trunTasksArray = []\n\t\t\tgoodArray, badArray = [], []\n\t\t\tgoodPoints, badPoints = [], []\n\t\t\taccCount = []\n\t\t\tlocArray = [[],[],[],[]]\n\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running simulation \" + str(i+1)\n\t\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\t\trunTasksArray.append(self.num_tasks)\n\n\t\t\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\t\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\t\t\tnum_incorrect = 
self.final_item_mismatch(passedItems)\n\t\t\t\t\taccCount.append(num_incorrect)\n\t\t\t\tif toggles.RUN_CONSENSUS_COUNT or toggles.VOTE_GRID:\n\t\t\t\t\tdonePairs = IP_Pair.objects.filter(Q(num_no__gt=0)|Q(num_yes__gt=0))\n\t\t\t\t\tif toggles.TEST_ACCURACY:\n\t\t\t\t\t\tgoodPairs, badPairs = [], []\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tval = bool((pair.num_yes-pair.num_no)>0)\n\t\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\t\tcorrect = ((correctAnswers[(pair.item,pair.predicate)]) == val)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcorrect = (pair.true_answer == val)\n\t\t\t\t\t\t\tif correct:\n\t\t\t\t\t\t\t\tgoodArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbadArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tbadPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tgoodArray.append(pair.num_no + pair.num_yes)\n\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\n\t\t\t\t\t#print \"This is number of incorrect items: \", num_incorrect\n\n\t\t\t\tself.reset_database()\n\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\tgeneric_csv_write(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_tasks_count.csv',[runTasksArray])\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv'\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tif len(runTasksArray)>1:\n\t\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.png'\n\t\t\t\t\t\ttitle = toggles.RUN_NAME + ' Cost distribution'\n\t\t\t\t\t\thist_gen(runTasksArray, dest, labels = ('Cost','Frequency'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + dest\n\t\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"only ran one sim, not running hist_gen\"\n\n\t\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_Eddy_sys_' + str(toggles.EDDY_SYS) + '_multi_routing.png'\n\t\t\t\t\ttitle = toggles.RUN_NAME + ' Average Predicate Routing'\n\t\t\t\t\tquestions = toggles.CHOSEN_PREDS\n\t\t\t\t\tarrayData = []\n\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\tarrayData.append([])\n\t\t\t\t\tfor routingL in ROUTING_ARRAY:\n\t\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\t\tarrayData[i].append(routingL[i])\n\t\t\t\t\tmrsavefile = open(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv','w')\n\t\t\t\t\tmrwriter = csv.writer(mrsavefile)\n\t\t\t\t\tmrwriter.writerow(questions)\n\t\t\t\t\tfor row in arrayData:\n\t\t\t\t\t\tmrwriter.writerow(row)\n\t\t\t\t\tmrsavefile.close()\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \"+toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\tstats_bar_graph_gen(arrayData, questions, dest, labels = ('Predicate','# of Items Routed'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.png'\n\t\t\tif toggles.ACCURACY_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_acc_count'\n\t\t\t\tgeneric_csv_write(dest+'.csv',[accCount])\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\thist_gen(accCount, dest+'.png')\n\n\t\t\tif toggles.RUN_CONSENSUS_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_consensus_count'\n\t\t\t\tif len(goodArray)>1:\n\t\t\t\t\tif len(badArray) == 
0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray])\n\t\t\t\t\t\t#print goodArray\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray,badArray])\n\t\t\t\t\t\t#print goodArray,badArray\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \" + dest + '.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = 'Normalized Distribution of Tasks before Consensus'\n\t\t\t\t\t\tlabels = ('Number of Tasks', 'Frequency')\n\t\t\t\t\t\tif len(badArray) < 2:\n\t\t\t\t\t\t\thist_gen(goodArray, dest+'.png',labels=labels,title=title)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleg = ('Correctly Evaluated IP pairs','Incorrectly Evaluated IP pairs')\n\t\t\t\t\t\t\tmulti_hist_gen([goodArray,badArray],leg,dest+'.png',labels=labels,title=title)\n\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"only ran one sim, ignoring results\"\n\t\t\tif toggles.VOTE_GRID:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_vote_grid'\n\t\t\t\tif len(goodPoints)>1:\n\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',goodPoints)\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'_good.csv',goodPoints)\n\t\t\t\t\t\tgeneric_csv_write(dest+'_bad.csv',badPoints)\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = \"Vote Grid Graph\"\n\t\t\t\t\t\tlabels = (\"Number of No Votes\",\"Number of Yes Votes\")\n\t\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\t\txL,yL=zip(*goodPoints)\n\t\t\t\t\t\t\tline_graph_gen(xL,yL,dest+'.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tgX,gY = zip(*goodPoints)\n\t\t\t\t\t\t\tbX,bY = zip(*badPoints)\n\t\t\t\t\t\t\tmulti_line_graph_gen((gX,bX),(gY,bY),('Correct','Incorrect'),dest+'_both.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(gX,gY,dest+'_good.png',title=title+\" goodPoints\",labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(bX,bY,dest+'_bad.png',title=title+\" badPoints\",labels=labels,scatter=True,square=True)\n\t\tif toggles.TIME_SIMS:\n\t\t\tself.timeRun(sampleData)\n\n\t\tif toggles.RUN_ABSTRACT_SIM:\n\t\t\tself.abstract_sim(sampleData, toggles.ABSTRACT_VARIABLE, toggles.ABSTRACT_VALUES)",
"async def async_run_simulation(self, tb_lib, tb_cell, outputs, precision=6, sim_tag=None):\n # type: (str, str, Dict[str, str], int, Optional[str]) -> str\n pass",
"def run(self):\n for worker in self.simulation_workers:\n worker.start()",
"def run(self):\n # Create queue of experiment configurations\n queue = collections.deque(self.settings.EXPERIMENT_QUEUE)\n # Calculate number of experiments and number of processes\n self.n_exp = len(queue) * self.settings.N_REPLICATIONS\n self.n_proc = self.settings.N_PROCESSES \\\n if self.settings.PARALLEL_EXECUTION \\\n else 1\n logger.info('Starting simulations: %d experiments, %d process(es)'\n % (self.n_exp, self.n_proc))\n\n if self.settings.PARALLEL_EXECUTION:\n # This job queue is used only to keep track of which jobs have\n # finished and which are still running. Currently this information\n # is used only to handle keyboard interrupts correctly\n job_queue = collections.deque()\n # Schedule experiments from the queue\n while queue:\n experiment = queue.popleft()\n for _ in range(self.settings.N_REPLICATIONS):\n job_queue.append(self.pool.apply_async(run_scenario,\n args=(self.settings, experiment,\n self.seq.assign(), self.n_exp),\n callback=self.experiment_callback))\n self.pool.close()\n # This solution is probably not optimal, but at least makes\n # KeyboardInterrupt work fine, which is crucial if launching the\n # simulation remotely via screen.\n # What happens here is that we keep waiting for possible\n # KeyboardInterrupts till the last process terminates successfully.\n # We may have to wait up to 5 seconds after the last process\n # terminates before exiting, which is really negligible\n try:\n while job_queue:\n job = job_queue.popleft()\n while not job.ready():\n time.sleep(5)\n except KeyboardInterrupt:\n self.pool.terminate()\n self.pool.join()\n\n else: # Single-process execution\n while queue:\n experiment = queue.popleft()\n for _ in range(self.settings.N_REPLICATIONS):\n self.experiment_callback(run_scenario(self.settings,\n experiment, self.seq.assign(),\n self.n_exp))\n if self._stop:\n self.stop()\n\n logger.info('END | Planned: %d, Completed: %d, Succeeded: %d, Failed: %d',\n self.n_exp, self.n_fail + self.n_success, self.n_success, self.n_fail)",
"def test_run_simulator_with_threads_and_numpy_array():\n cluster = LocalCluster(n_workers=2, processes=False, threads_per_worker=1)\n simulator = Simulator(model, sim_shapes=dict(x=(10,)), cluster=cluster)\n\n pars = np.random.random((100, 2))\n sims = dict(x=np.zeros((100, 10)))\n sim_status = np.full(100, SimulationStatus.RUNNING, dtype=np.int)\n\n # the following is non-blocking (it immediately returns)\n simulator.run(\n pars=pars,\n sims=sims,\n sim_status=sim_status,\n indices=np.arange(100, dtype=np.int),\n collect_in_memory=False,\n batch_size=20,\n )\n\n # need to wait for tasks to be completed\n _wait_for_all_tasks()\n\n assert np.all(sim_status == SimulationStatus.FINISHED)\n assert not np.all(np.isclose(sims[\"x\"].sum(axis=1), 0.0))\n simulator.client.close()\n cluster.close()",
"def run_sim(mass, start, stop, sampling_rate):\n axion = Axion(mass=mass)\n return axion.do_fast_axion_sim(start,\n stop,\n sampling_rate)",
"def _run_interface(self, runtime):\n self.mlab.inputs.script = self._make_matlab_command() \t\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\t\t\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime",
"def run_simulation(**kwargs):\n print(\"executing run_simulation() in file\", __file__)\n print(\"got the dictionary kwargs =\", kwargs)\n\n # HERE is where you would usually run your simulation (e.g. DMRG).\n # simulate some heavy calculations:\n for i in range(30):\n print(\"step \", i, flush=True) # (remove `flush=True` for Python 2)\n # the flush=True makes the output appear immediately\n time.sleep(5)\n\n results = {'kwargs': kwargs, 'example_data': np.random.random((2, 2))}\n\n output_filename = kwargs['output_filename']\n print(\"save results to \", output_filename)\n with open(output_filename, 'wb') as f:\n pickle.dump(results, f)",
"def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)",
"def submit_cpucores():\n # TODO(soltesz): move static value to an external, inventory table.\n submit_generic(_root_hostname, 'cpu_cores', 'gauge', _CPU_COUNT)",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')",
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)"
] |
[
"0.64820915",
"0.6010382",
"0.591841",
"0.58024275",
"0.58024275",
"0.57946324",
"0.5790396",
"0.57668465",
"0.5740148",
"0.56931937",
"0.5638104",
"0.5587805",
"0.5572932",
"0.55053955",
"0.5466158",
"0.5466106",
"0.5464781",
"0.5447125",
"0.5446936",
"0.54381967",
"0.5390556",
"0.536743",
"0.535407",
"0.5336607",
"0.53270906",
"0.5315948",
"0.5308019",
"0.5302754",
"0.5297063",
"0.52940845"
] |
0.80584097
|
0
|
m is the message encoded as a polynomial
|
def encrypt(self, m):
    if m._N <= self._P.get_N():
        r = self._P.gen_rPoly()  # random blinding polynomial
        # e = (p*r*h + m) mod q
        e = (r.scale(self._P.get_p())*self._h + m) % self._P.get_q()
        return e  # Polynomial representing the encrypted message
    else:
        raise Exception("m is too large, must be equal or under size %d" % self._P.get_N())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_mom_poly(p,m):\n # polynomial.\n pol = 1.\n for key in p.keys():\n pol += (p[key] - m[key])**2\n return pol",
"def encode(generator_polynomial, message):\n power = msb(generator_polynomial)\n message <<= power\n return message ^ divide_polynomials(polynomial1=message, polynomial2=generator_polynomial)[1]",
"def polyder_vec(p, m):\n factorial = np.math.factorial\n m = np.asarray(m, dtype=int) # order of derivative\n p = np.atleast_2d(p)\n order = p.shape[1] - 1\n\n D = np.arange(order, -1, -1)\n num = np.array([factorial(i) for i in D], dtype=object)\n den = np.array([factorial(max(i - m, 0)) for i in D], dtype=object)\n D = (num // den).astype(p.dtype)\n\n p = np.roll(D * p, m, axis=1)\n idx = np.arange(p.shape[1])\n p = np.where(idx < m, 0, p)\n\n return p",
"def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p",
"def associated_legendre_polynomials(l,m):\n z = sym.symbols('z')\n \n P_l_m = [[0]*(j+1) for j in range(m+1)]\n P_l_m[0][0]=1\n P_l_m[1][0]=z\n for j in range(2,l+1):\n P_l_m[j][0] = sym.simplify(((2*j-1)*z*P_l_m[j-1][0] - (j-1)*P_l_m[j-2][0])/j)\n for i in range(1,m):\n P_l_m[i][i] = sym.simplify((1-2*i)*P_l_m[i-1][i-1])\n P_l_m[i+1][i] = sym.simplify((2*i+1)*z*P_l_m[i][i])\n for j in range(i+2,l+1):\n P_l_m[j][i] = sym.simplify(((2*j-1)*z*P_l_m[j-1][i] - (i+j-1)*P_l_m[j-2][i])/(j-i))\n \n P_l_m[m][m] = sym.simplify((1-2*m)*P_l_m[m-1][m-1])\n return P_l_m",
"def encodeMessageString (s, n, e):\n l = list(s)\n l1 = list(map ( lambda c: ord(c), l))\n l2 = list(map ( lambda j: computePow(j,n,e), l1))\n return l2",
"def _AffineGrothendieckPolynomial(self, la, m):\n return self._AffineGrothendieck(la.to_core(self.k).to_grassmannian(),m)",
"def fit_polynomial(self,x,t,m,lambda_reg=0):\n\n phi = self.designMatrix(x,m)\n phi_trans = np.transpose(phi)\n\n a = phi_trans.dot(phi) + lambda_reg*np.identity(phi.shape[1])\n b = np.linalg.inv(a)\n c = b.dot(phi_trans)\n\n w_ml = c.dot(t)\n\n return w_ml, phi",
"def BER_Func(self, Pm):\r\n \r\n SNR = Pm/(1-Pm)\r\n BER = special.erfc(SNR/np.sqrt(2))/2\r\n \r\n return BER",
"def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError",
"def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly",
"def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)",
"def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi",
"def decode_affine(msg, a, b):\n #Inverse of the modulo\n m = find_coprime(a)\n \n decoded_message = [ RVALUES[(m * (VALUES[i] - b)) % 26] for i in msg ]\n \n return ''.join(decoded_message)",
"def display_poly(f_remote, m):\n # If it is a string return the given value\n if isinstance(f_remote, str):\n return f_remote\n\n # Get the values we need from the object\n f = f_remote.copy()\n\n # Reduce the polynomial\n f = reduce_poly(f, m)\n\n return poly_string(f)",
"def poly_desc(W, b):\n result = 'y = '\n for i, w in enumerate(W):\n result += '{:+.2f} x^{} '.format(w, len(W) - i)\n result += '{:+.2f}'.format(b[0])\n return result",
"def encode_affine(msg, a, b):\n \n #Code to numbers\n encoded_message = [ RVALUES[(a * VALUES[i] + b) % 26] for i in msg ]\n \n return ''.join(encoded_message)",
"def E_polynomial(self):\n\n from nodepy import stability_function\n p, q = self.stability_function()\n return stability_function.E_polynomial(p, q)",
"def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]",
"def vernam(msg):\n global v\n\n l = len(msg)\n\n for i in range(0, len(v)):\n v[i] = 0\n\n for i in range(0, l):\n v[i] = (i_rand_a()) ^ msg[i]\n\n return bytes(v)",
"def parse_polynomial(s):\n\n def parse_n(s):\n '''Parse the number part of a polynomial string term'''\n if not s:\n return 1\n elif s == '-':\n return -1\n elif s == '+':\n return 1\n return float(eval(s))\n\n def parse_p(s, powerPattern):\n '''Parse the power part of a polynomial string term'''\n if not s:\n return 0\n multipliers = powerPattern.findall(s)[0]\n if not multipliers:\n return 1\n return int(multipliers)\n s = str(s).replace(' ', '') # remove all whitespace from string\n m = re.search('[a-zA-Z]+', s)\n try:\n varLetter = m.group(0)\n except AttributeError:\n varLetter = 'P'\n termPattern = re.compile(\n '([+-]?\\d*\\.?\\d*)\\**({var}?\\^?\\d?)'.format(var=varLetter))\n powerPattern = re.compile('{var}\\^?(\\d)?'.format(var=varLetter))\n order_multipliers = {}\n\n for n, p in termPattern.findall(s):\n n, p = n.strip(), p.strip()\n if not n and not p:\n continue\n n, p = parse_n(n), parse_p(p, powerPattern)\n if p in order_multipliers:\n order_multipliers[p] += n\n else:\n order_multipliers[p] = n\n highest_order = max(\n max(order_multipliers.keys()), 1) # order must be at least linear\n multipliers = [0] * (highest_order + 1)\n for key, val in order_multipliers.items():\n multipliers[key] = val\n\n return multipliers",
"def cheby_coeff2(m,s):\r\n c = np.zeros(m+1)\r\n for j in range(m+1):\r\n c[j] = 2*np.exp(-s)*j1(-s)\r\n \r\n return c",
"def to_monomial_basis(self):\n if self.n == 1:\n a = np.empty(get_dimension(self.r, self.m))\n else:\n a = np.empty((get_dimension(self.r, self.m), self.n))\n\n q = dual_monomial_basis(self.r, self.m)\n for i in range(len(q)):\n a[i] = q[i](self)\n\n return Polynomial(a, self.r, self.m)",
"def decode(self,m):\n raise NotImplementedError('subclasses must override decode()!')",
"def buildSumPolySystem(FF, SM3, m, Rx = False):\n \n #number of bounding variables 'U'\n numBoundVars = m - 3\n if Rx == True: #last summation polynomial will be S_3(x_m, u_(m-2), Rx)\n numBoundVars += 1\n SMPR = PolynomialRing(FF, 'x', m + numBoundVars, order='degrevlex')\n \n #X-variables\n variablesX = SMPR.objgens()[1][0:m]\n #bounding variables\n variablesU = SMPR.objgens()[1][m:]\n \n generators = [] \n for k in range(0, numBoundVars):\n if k != 0:\n generators.append(SM3(variablesU[k - 1], variablesU[k], variablesX[k + 1]))\n else:\n generators.append(SM3(variablesX[0], variablesX[1], variablesU[0])) \n \n #Hotfix: in case when we don't need a bounding variable <=> only 1 summation polynomial will be used.\n #And is added manually.\n if len(variablesU) == 0:\n variablesU = [variablesX[0]]\n return generators, variablesX, variablesU, SMPR",
"def encrypt(self,m):\n n,g = self.pubkey\n mods = n*n\n gm = pow(g,m,mods)\n r = 0\n while GCD(r,n) != 1 :\n r = random.randint(0,n-1)\n return (gm * pow(r,n,mods)) % mods",
"def __init__(self, m, x):\r\n mx = m*x\r\n n = int(x + 4*x**(1/3) + 2.5)\r\n psi,dpsi = riccati_jn(n,x)\r\n chi,dchi = riccati_yn(n,x)\r\n psm,dpsm = riccati_jn(n,mx)\r\n xi = psi - chi*1j\r\n dxi = dpsi - dchi*1j\r\n # Bohren and Huffman, eq(4.56),(4.57)\r\n a = (m*psi*dpsi - psi*dpsm)/(m*psm*dxi - xi*dpsm)\r\n b = (psm*dpsi - m*psi*dpsm)/(psm*dxi - m*xi*dpsm)\r\n\r\n self.n = n\r\n self.a = a[1:]\r\n self.b = b[1:]\r\n self.x = x",
"def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2",
"def get_coefficients_MaxMSP(self):\n B, A = self.get_coefficients()\n return (B[0], B[1], B[2], A[1], A[2]) # FIXME: Verify this",
"def conv_encode(self, message=None):\n output = []\n if message:\n for bit in message + [0, 0]:\n output += [np.mod(bit + self.flop2, 2), np.mod(bit + self.flop1 + self.flop2, 2)]\n self.flop2 = self.flop1\n self.flop1 = bit\n else:\n bucket = list(generate_bits(10000, 0.5)) # generate lots at once\n self.message += bucket\n # decrease the weights equally every so often\n current_min = min(self.zero_zero_w, self.zero_one_w, self.one_zero_w, self.one_one_w)\n self.zero_zero_w -= current_min\n self.zero_one_w -= current_min\n self.one_zero_w -= current_min\n self.one_one_w -= current_min\n for bit in bucket:\n output += [np.mod(bit + self.flop2, 2), np.mod(bit + self.flop1 + self.flop2, 2)]\n self.flop2 = self.flop1\n self.flop1 = bit\n return output"
] |
[
"0.70983243",
"0.61698157",
"0.587901",
"0.5877278",
"0.5715824",
"0.5712316",
"0.56301624",
"0.5588554",
"0.55859685",
"0.55293375",
"0.5523438",
"0.54995716",
"0.5489549",
"0.5468917",
"0.5468211",
"0.54662186",
"0.54412967",
"0.541935",
"0.5413557",
"0.5356289",
"0.5350215",
"0.53448594",
"0.5330849",
"0.53046745",
"0.5293648",
"0.5289122",
"0.52840656",
"0.52789974",
"0.52674997",
"0.5245091"
] |
0.7049733
|
1
|
Maps a list of strings to their shortest unique suffixes. Maps all original strings to the smallest number of chunks, as specified by delim, that are not a suffix of any other original string. If the original string was a suffix of another string, map it to its unaltered self.
|
def _get_shortest_unique_suffix_dict(
input_str_list: List[str], delim: str = "."
) -> Dict[str, str]:
# all input strings must be unique
assert len(input_str_list) == len(set(input_str_list))
if delim == "":
raise ValueError("delim must be a non-empty string.")
suffix_dict = defaultdict(list)
# initialize suffix_dict with last chunk
for istr in input_str_list:
suffix_dict[_get_suffix(istr, delim=delim, n_chunks=1)].append(istr)
max_chunks = max(len(istr.split(delim)) for istr in input_str_list)
if max_chunks == 1:
return {istr: istr for istr in input_str_list}
# the upper range of this loop is `max_chunks + 2` because:
# - `i` needs to take the value of `max_chunks`, hence one +1
# - the contents of the loop are run one more time to check if `all_unique`,
# hence the other +1
for i in range(2, max_chunks + 2):
new_dict = defaultdict(list)
all_unique = True
for suffix, suffix_str_list in suffix_dict.items():
if len(suffix_str_list) > 1:
all_unique = False
for istr in suffix_str_list:
new_dict[_get_suffix(istr, delim=delim, n_chunks=i)].append(istr)
else:
new_dict[suffix] = suffix_str_list
if all_unique:
if len(set(input_str_list)) != len(suffix_dict.keys()):
break
return {
suffix_str_list[0]: suffix
for suffix, suffix_str_list in suffix_dict.items()
}
suffix_dict = new_dict
# If this function has not yet exited, some input strings still share a suffix.
# This is not expected, but in this case, the function will return the identity
# mapping, i.e., a dict with the original strings as both keys and values.
logger.warning(
"Something went wrong. Returning dictionary with original strings as keys and "
"values."
)
return {istr: istr for istr in input_str_list}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def join_bpe(lst:list, s:str):\n\n if s[:2] == \"##\":\n # How to handle situation where first term is double hashed?\n base = lst.pop() # Remove from last position\n new_term = base + s.strip(\"#\")\n return lst + [new_term]\n return lst + [s]",
"def _repair_names_unique(\n names: Sequence[str],\n quiet: bool = False,\n sanitizer: Callable = None,\n base0_: bool = None,\n) -> List[str]:\n base = int(not base0_)\n min_names = _repair_names_minimal(names)\n neat_names = [\n re.sub(r\"(?:(?<!_)_{1,2}\\d+|(?<!_)__)+$\", \"\", name)\n for name in min_names\n ]\n if callable(sanitizer):\n neat_names = [sanitizer(name) for name in neat_names]\n\n new_names = []\n changed_names = []\n for i, name in enumerate(neat_names):\n if neat_names.count(name) > 1 or name == \"\":\n name = f\"{name}__{i + base}\"\n if name != names[i]:\n changed_names.append((names[i], name))\n new_names.append(name)\n if not quiet:\n _log_changed_names(changed_names)\n return new_names",
"def longest_common_prefix(strings: list):\n raise NotImplemented",
"def abbreviate_target_ids(arr):\r\n split_keys = [tuple(a.split('.')) for a in arr]\r\n\r\n split_keys_by_subseq = {}\r\n\r\n def subseq_map(arr, subseq_fn=None, result_cmp_fn=None):\r\n def subseq_map_rec(remaining_arr, subseq, indent=''):\r\n if not remaining_arr:\r\n if subseq_fn:\r\n subseq_fn(arr, subseq)\r\n return subseq\r\n\r\n next_segment = remaining_arr.pop()\r\n next_subseq = tuple([next_segment] + list(subseq))\r\n\r\n skip_value = subseq_map_rec(remaining_arr, subseq, indent + '\\t')\r\n\r\n add_value = subseq_map_rec(remaining_arr, next_subseq, indent + '\\t')\r\n\r\n remaining_arr.append(next_segment)\r\n\r\n if result_cmp_fn:\r\n if not subseq:\r\n # Empty subsequence should always lose.\r\n return add_value\r\n if result_cmp_fn(skip_value, add_value):\r\n return skip_value\r\n return add_value\r\n\r\n return None\r\n\r\n val = subseq_map_rec(list(arr), tuple())\r\n return val\r\n\r\n def add_subseq(arr, subseq):\r\n if subseq not in split_keys_by_subseq:\r\n split_keys_by_subseq[subseq] = set()\r\n if split_key not in split_keys_by_subseq[subseq]:\r\n split_keys_by_subseq[subseq].add(arr)\r\n\r\n for split_key in split_keys:\r\n subseq_map(split_key, add_subseq)\r\n\r\n def return_min_subseqs(subseq1, subseq2):\r\n collisions1 = split_keys_by_subseq[subseq1]\r\n collisions2 = split_keys_by_subseq[subseq2]\r\n return (len(collisions1) < len(collisions2)\r\n or (len(collisions1) == len(collisions2)\r\n and len(subseq1) <= len(subseq2)))\r\n\r\n min_subseq_by_key = {}\r\n\r\n for split_key in split_keys:\r\n min_subseq = subseq_map(split_key, result_cmp_fn=return_min_subseqs)\r\n if not min_subseq:\r\n raise Exception(\"No min subseq found for %s: %s\" % (str(split_key), str(min_subseq)))\r\n min_subseq_by_key['.'.join(str(segment) for segment in split_key)] = '.'.join(min_subseq)\r\n\r\n return min_subseq_by_key",
"def combine_strings(\n strings: Iterable[str],\n delimiter: str = \" \",\n min_parts: int = 0,\n max_parts: int = None,\n) -> Generator[str, None, None]:\n\n min_parts = 0 if min_parts is None else min_parts\n\n if min_parts < 0:\n raise ValueError(\"min_parts cannot be negative.\")\n\n if max_parts is not None and max_parts < 0:\n raise ValueError(\"max_parts cannot be negative.\")\n\n strings_list = list(strings)\n iterations = len(strings_list) + 1\n if max_parts is not None:\n iterations = min(iterations, max_parts + 1)\n\n for i in range(min_parts, iterations):\n string_combinations = itertools.combinations(strings_list, i)\n for combination in string_combinations:\n yield delimiter.join(combination)",
"def rem_str(prelist,names):\n\n for prefix in prelist:\n names=[name.replace(prefix,'') for name in names]\n\n return names",
"def _optimize_separators(separated):\n for udict in separated:\n if udict['content'][0] is None:\n udict['complete'] = (None, None)\n elif udict['noelement']:\n udict['complete'] = udict['content']\n else:\n left = \"%s%s%s\" % (\n udict['encoder'].starttag(\n udict['tagname'],\n udict['attr'].itervalues(),\n udict['closed'],\n ),\n udict['content'][0],\n udict['endtag'],\n )\n right = \"%s%s%s\" % (\n udict['encoder'].starttag(\n udict['tagname'],\n udict['attr'].itervalues(),\n udict['closed'],\n ),\n udict['content'][1],\n udict['endtag'],\n )\n if left == right:\n right = left\n udict['complete'] = (left, right)",
"def rem_str(prelist,names):\n \n for prefix in prelist:\n names=[name.replace(prefix,'') for name in names]\n \n return names",
"def prefixes(S):\r\n result = set()\r\n for s in S:\r\n for i in range(len(s) + 1):\r\n result.add(s[:i])\r\n return result",
"def _get_manber_myers_suffixes(self, seq:str=None) -> List:\n if not seq: seq = self.seq\n return self._sort_manber_myers([i for i in range(len(seq))])",
"def compute_multiple_minhashes(strings: List[str]) -> List[LeanMinHash]:\n return [\n LeanMinHash(mh)\n for mh in MinHash.bulk(\n [string_encoder(s) for s in strings], num_perm=PERMUTATIONS\n )\n ]",
"def prefixes(s):\r\n if s:\r\n yield from prefixes(s[:-1]) # First yield prefixes from s[:-1], then yield the last one s.\r\n yield s",
"def _sort_manber_myers(self, suffix_pos: List) -> List:\n bucket = self._create_bucket(suffix_pos)\n for _, v in sorted(bucket.items()):\n if self.debug: print(f\"_sort_manber_myers function: bucket value: {v}\") \n if len(v) > 1:\n # recursive call for next stage\n self.stage *= 2\n self._sort_manber_myers(v)\n else:\n # otherwise add starting position of suffix to result\n self.suffixes.append(v[0]) \n if self.debug: print(f\"_sort_manber_myers function: suffixes: {self.suffixes}\\n\") \n return self.suffixes",
"def gen_eq_suf_lists(string):\n\n # Reverse the string\n string = string[::-1]\n\n # Split the string into list of sensible words and sort them\n words = re.split('\\W', string)\n words = list(filter(lambda word : word != '', words))\n words.sort()\n\n # Initialise output list with an empty group\n suffix_groups = [ [] ]\n\n # Walk through words...\n cur_suffix = words[0][:3]\n for word in words:\n # Add word to last group if it has the same suffix\n if word[:3] == cur_suffix:\n suffix_groups[-1].append(word[::-1])\n\n # Make a new group on the encounter of a new suffix\n else:\n suffix_groups.append( [ word[::-1] ] )\n\n # Update the suffix that is compare with\n cur_suffix = word[:3]\n\n return suffix_groups",
"def get_unambiguous_suffixes(self, sort_on=None):\n suffixes = self.suffix_dict.keys()\n monos = [suffix for suffix in suffixes if len(self.suffix_dict[suffix])==1]\n if sort_on=='sum':\n monos.sort(lambda x, y: cmp(dict_sum(suffix_dict[y]), dict_sum(suffix_dict[x])))\n else:\n monos.sort()\n return monos",
"def longestCommonPrefix(self, strs: List[str]) -> str:\r\n common = \"\"\r\n if not strs:\r\n return common\r\n shortest_str = min(strs, key=len)\r\n for i in range(len(shortest_str)):\r\n char = shortest_str[i]\r\n for item in strs:\r\n if item[i] != char:\r\n return common\r\n common += char\r\n return common",
"def _get_common_prefix(list_of_strings):\n # https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings\n def all_same(x):\n return all(x[0] == y for y in x)\n\n char_tuples = zip(*list_of_strings)\n prefix_tuples = itertools.takewhile(all_same, char_tuples)\n return \"\".join(x[0] for x in prefix_tuples).strip(\"-\")",
"def prefixes(s):\n if s:\n yield from prefixes(s[:-1])\n yield s",
"def find_common_prefix(strs):\n\n common = []\n for cgroup in zip(*strs):\n if all(x == cgroup[0] for x in cgroup[1:]):\n common.append(cgroup[0])\n else:\n break\n return ''.join(common)",
"def disambiguate(names: list[str], mark: str = \"1\") -> list[str]:\n names_seen = set()\n new_names = []\n for name in names:\n new_name = name\n while new_name in names_seen:\n new_name += mark\n new_names.append(new_name)\n names_seen.add(new_name)\n\n return new_names",
"def _repair_names_universal(\n names: Iterable[str], quiet: bool = False, base0_: bool = None\n) -> List[str]:\n min_names = _repair_names_minimal(names)\n neat_names = [re.sub(r\"[^\\w]\", \"_\", name) for name in min_names]\n new_names = _repair_names_unique(\n neat_names,\n quiet=True,\n sanitizer=lambda name: (\n f\"_{name}\"\n if keyword.iskeyword(name) or (name and name[0].isdigit())\n else name\n ),\n base0_=base0_,\n )\n if not quiet:\n changed_names = [\n (orig_name, new_name)\n for orig_name, new_name in zip(names, new_names)\n if orig_name != new_name\n ]\n _log_changed_names(changed_names)\n return new_names",
"def find_longest_common_prefix_reduce(words:list):\n if not words:\n return ''\n \n def common_start(w1, w2):\n shorter = w1 if len(w1) < len(w2) else w2\n for i in range(0, len(shorter)):\n if w1[i] != w2[i]:\n return shorter[:i]\n return shorter\n \n return reduce(common_start, words)",
"def strip_any_ends(s: str, prefixes: Union[str, Sequence[str]], suffixes: Union[str, Sequence[str]]) -> str:\n\t\tprefixes = [str(z) for z in prefixes] if StringTools.is_true_iterable(prefixes) else [str(prefixes)]\n\t\tsuffixes = [str(z) for z in suffixes] if StringTools.is_true_iterable(suffixes) else [str(suffixes)]\n\t\ts = str(s)\n\t\tfor pre in prefixes:\n\t\t\tif s.startswith(pre):\n\t\t\t\ts = s[len(pre):]\n\t\tfor suf in suffixes:\n\t\t\tif s.endswith(suf):\n\t\t\t\ts = s[:-len(suf)]\n\t\treturn s",
"def neuronyms(input_str, k):\n n = len(input_str)\n result = []\n\n for length in range(k, n-k+1):\n for start in range (1, n - length):\n prefix = input_str[:start]\n suffix = input_str[(start+length):]\n res_str = prefix+str(length)+suffix\n result.append(res_str)\n\n return result",
"def findLongestCommonSubstringManyStrings(listOfStrings):",
"def minpart2(d: str = open(DIR+\"input.txt\", \"r\").read(), s: Set[str] = None, a: int = 0, b: int = 1, p: int = 0) -> str:\n seen: Set[str] = s if s is not None else set()\n lines: List[str] = d.split(\"\\n\")\n if p >= len(lines[a]) or b >= len(lines) or f\"{a}-{b}-{p}\" in seen or \"found\" in seen:\n return \"\"\n seen.add(f\"{a}-{b}-{p}\")\n if lines[a][:p] + lines[a][p + 1:] == lines[b][:p] + lines[b][p + 1:] and seen.add(\"found\") is None:\n return lines[a][:p] + lines[a][p + 1:]\n return max(minpart2(d, seen, a, b + 1, p), minpart2(d, seen, a + 1, b + 1, p), minpart2(d, seen, a, b, p + 1))",
"def suffixes(word: str) -> Iterator[str]:\n if not word:\n return\n for i, _ in enumerate(word):\n yield word[i:]",
"def generate_greedy(lists):\n \n def greedy_step(lists, base=[]):\n \"\"\"Add a single item from the list of strings to the base list.\"\"\"\n lists_copy = lists[:]\n if base == []:\n # Start with any string\n s = lists_copy.pop()\n else:\n l = find_match(lists_copy, base)\n s = add_string(l, base)\n lists_copy.remove(l)\n return lists_copy, s\n\n # This is probably nicer if it's recursive?\n base = []\n while lists:\n lists, base = greedy_step(lists, base)\n return base",
"def solve_conflicts(phrase_list, useSuperSetToken=True):\n to_add = []\n to_remove = []\n length = len(phrase_list)\n for i, entry in enumerate(phrase_list):\n if (entry in to_remove\n or entry in to_add):\n continue\n to_add.append(entry)\n for j in range(i + 1, length):\n if overlap(to_add[-1], phrase_list[j]):\n if (a_contain_b(phrase_list[j], to_add[-1])\n and useSuperSetToken):\n to_remove.append(to_add.pop())\n to_add.append(phrase_list[j])\n else:\n to_remove.append(phrase_list[j])\n to_add = sorted(to_add, key=lambda x: x['start'])\n return to_add",
"def solve(in_str):\n\n return ','.join(sorted(imap(lambda x: ''.join(x),permutations(in_str))))"
] |
[
"0.5399903",
"0.5360808",
"0.5333351",
"0.53276676",
"0.5230604",
"0.518405",
"0.51806355",
"0.51512223",
"0.5076588",
"0.50518227",
"0.5034453",
"0.4994702",
"0.49840856",
"0.49824935",
"0.49758986",
"0.4959224",
"0.494619",
"0.49393862",
"0.4936973",
"0.4900453",
"0.4897777",
"0.48891217",
"0.485917",
"0.48496792",
"0.4839659",
"0.4837408",
"0.48126075",
"0.48110646",
"0.48039275",
"0.47826454"
] |
0.72810227
|
0
|
Extract standard plots for single-objective optimization. Extracts a list of plots from an Experiment and GenerationStrategy of general interest to an Ax user. Currently not supported are
|
def get_standard_plots(
experiment: Experiment, generation_strategy: Optional[GenerationStrategy]
) -> List[go.Figure]:
objective = not_none(experiment.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"get_standard_plots does not currently support MultiObjective "
"optimization experiments. Returning an empty list."
)
return []
if isinstance(objective, ScalarizedObjective):
logger.warning(
"get_standard_plots does not currently support ScalarizedObjective "
"optimization experiments. Returning an empty list."
)
return []
if experiment.fetch_data().df.empty:
logger.info(f"Experiment {experiment} does not yet have data, nothing to plot.")
return []
output_plot_list = []
output_plot_list.append(
_get_objective_trace_plot(
experiment=experiment,
metric_name=not_none(experiment.optimization_config).objective.metric.name,
# TODO: Adjust `model_transitions` to case where custom trials are present
# and generation strategy does not start right away.
model_transitions=not_none(generation_strategy).model_transitions
if generation_strategy is not None
else [],
optimization_direction=(
"minimize"
if not_none(experiment.optimization_config).objective.minimize
else "maximize"
),
)
)
# Objective vs. parameter plot requires a `Model`, so add it only if model
    # is already available. In cases where initially custom trials are attached,
# model might not yet be set on the generation strategy.
if generation_strategy and generation_strategy.model:
model = not_none(not_none(generation_strategy).model)
try:
output_plot_list.append(
_get_objective_v_param_plot(
search_space=experiment.search_space,
model=model,
metric_name=not_none(
experiment.optimization_config
).objective.metric.name,
trials=experiment.trials,
)
)
output_plot_list.append(_get_cross_validation_plot(model))
except NotImplementedError:
# Model does not implement `predict` method.
pass
return [plot for plot in output_plot_list if plot is not None]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')",
"def test_all_plots_working(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n LAss.init_experiment(name + \"2\", optimizer, param_defs, minimization=minimization)\n cand = LAss.get_next_candidate(name)\n cand.result = 1\n LAss.update(name, cand)\n LAss.write_out_plots_current_step()\n LAss.plot_result_per_step([name], show_plot=False)\n LAss.exp_assistants[name].experiment.minimization_problem = False\n LAss.plot_result_per_step(name, show_plot=False)",
"def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)",
"def plots():\n out = interactive_output(generate_plots, {'gsize':gridSlider, 'ra':RABox, 'ra':RASlider, 'dec':DECBox, 'dec':DECSlider, 'ang':radBox, 'ang':radSlider, 'style':hexDrop})\n return display(widgrid, out)",
"def get_multiobjective_plot(self):\n fig, ax = plt.subplots()\n\n values = self.stats['multiobj_stats']['episode_totals']\n for i in range(values.shape[1]):\n ax.plot(np.arange(len(values[:, i])), values[:, i],\n color=_COLORS[i % len(_COLORS)], lw=2, alpha=.9,\n label='Objective {}'.format(i))\n ax.legend()\n ax.set_ylabel('Objective value')\n ax.set_xlabel('Episode')\n return fig",
"def make_plots(self,indices=None,hardcopy=False,hardcopydir='.',hardcopyprefix='',hardcopytype='png'):\n\t\tfor (i,E) in enumerate(self.experiments):\n\t\t\tif(indices==None) or (i in indices):\n\t\t\t\tE.show_plot(hardcopy,hardcopydir,hardcopyprefix,hardcopytype)",
"def plot(self,experiment_id = None,image_path = None):\n c = self.cursor()\n where_experiment_id = ''\n if not experiment_id is None:\n if isinstance(experiment_id, list):\n exp_ids = ','.join([ str(f) for f in experiment_id ])\n where_experiment_id = ' WHERE id in ({})'.format(exp_ids)\n else:\n where_experiment_id = ' WHERE id = {}'.format(experiment_id)\n c.execute(\n 'SELECT exp_id,exp_name,exp_description,var_name FROM experiment'\n + where_experiment_id\n )\n experiments = c.fetchall()\n exp_count = len(experiments)\n fig, axs = plt.subplots(exp_count)\n if exp_count == 1:\n axs = [axs]\n trend = lambda a,b: np.poly1d(np.polyfit(a, b, 1))(a)\n for i in range(exp_count):\n axs[i].set_title(experiments[i]['exp_name'])\n axs[i].set_xlabel(experiments[i]['exp_description'])\n # build x-axis \n x_axis = []\n c.execute(\n '''\n SELECT val FROM fact\n WHERE var_name = ?\n AND exp_id = ?\n ORDER BY step_id ASC\n ''',\n (\n experiments[i]['var_name'],\n experiments[i]['exp_id']\n )\n )\n x_axis = [r['val'] for r in c.fetchall()]\n c.execute(\n '''\n SELECT DISTINCT var_name FROM fact \n WHERE exp_id = ? AND var_name != ?\n ORDER BY var_name ASC\n ''',\n (experiments[i]['exp_id'],experiments[i]['var_name'])\n )\n variables = [r['var_name'] for r in c.fetchall()]\n for variable in variables:\n c.execute(\n '''\n SELECT val FROM fact\n WHERE exp_id = ? AND var_name = ?\n ORDER BY step_id ASC \n ''',\n (experiments[i]['exp_id'], variable)\n )\n y_axis = [r['val'] for r in c.fetchall()]\n axs[i].scatter(x_axis, y_axis)\n axs[i].plot(x_axis,trend(x_axis, y_axis),label=variable)\n axs[i].legend()\n fig.tight_layout()\n # save into image on headless machine\n if not image_path is None:\n plt.savefig(image_path)\n else:\n try:\n plt.show()\n except:\n plt.savefig(\"plot.png\") \n self.commit()",
"def plot_modelparametercollections(plotname, parametercollection_SF, parametercollection_AGN,\n stat_SF, stat_AGN, AGNcol='blue',SFcol='red', constraintsstr=None,\n fluxratiodictionarylist=None, verbose=True):\n\n Nobj = len(parametercollection_SF)\n if verbose: print(' - Will generate plots of NEOGAL \"PDFs\" for all '+str(Nobj)+' objects in parameter collections')\n for oo in np.arange(Nobj):\n objid = parametercollection_SF[oo]['id']\n if verbose:\n infostr = ' plotting info for '+str(objid)+' ('+str(\"%.5d\" % (oo+1))+' / '+str(\"%.5d\" % Nobj)+') '\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n plotname_obj = plotname.replace('.pdf','_id'+str(objid)+'.pdf')\n # if verbose: print(' - Generating the figure '+plotname_obj)\n figuresize_x = 6\n figuresize_y = 5\n fig = plt.figure(figsize=(figuresize_x,figuresize_y))\n Fsize = 9\n LW = 2\n plt.rc('text', usetex=True) # enabling LaTex rendering of text\n plt.rc('font', family='serif',size=Fsize) # setting text font\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n\n left = 0.10 # the left side of the subplots of the figure\n right = 0.95 # the right side of the subplots of the figure\n bottom = 0.10 # the bottom of the subplots of the figure\n top = 0.90 # the top of the subplots of the figure\n wspace = 1.50 # the amount of width reserved for blank space between subplots\n hspace = 0.50 # the amount of height reserved for white space between subplots\n plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)\n\n Nrows, Ncols = 3, 6\n ylabel = 'Number of NEOGAL SF ('+str(SFcol)+') and AGN ('+str(AGNcol)+') models'\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n Nmodels_SF = float(len(parametercollection_SF[oo]['Zgas']))\n Nmodels_AGN = float(len(parametercollection_AGN[oo]['Zgas']))\n\n titlestr = 'Models satisfying ID='+str(objid)+' cuts: SF='+str(Nmodels_SF)+'; AGN='+str(Nmodels_AGN)\n if (Nmodels_AGN > 0) & (Nmodels_SF > 0):\n Nmodels_ratio = Nmodels_SF/Nmodels_AGN\n titlestr_addition = '; SF/AGN='+str(\"%.4f\" % Nmodels_ratio)\n titlestr = titlestr+titlestr_addition\n\n if fluxratiodictionarylist is not None:\n constraints = fluxratiodictionarylist[oo]\n constraintslist = [key+':['+str(\"%.2f\" % constraints[key][0])+','+str(\"%.2f\" % constraints[key][1])+']'\n for key in constraints.keys() if key not in ['id']]\n\n if len(constraintslist) < 4:\n constraintsstr = '; '.join(constraintslist)\n elif (len(constraintslist) > 3) & (len(constraintslist) < 7):\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])\n elif (len(constraintslist) > 6) & (len(constraintslist) < 10):\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])+\\\n '\\n'+'; '.join(constraintslist[6:])\n else:\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])+\\\n '\\n'+'; '.join(constraintslist[6:9])+'\\n'+'; '.join(constraintslist[9:])\n\n constraintsstr = constraintsstr.replace('10000000000.00','1e10')\n\n titlestr = titlestr+'\\n'+constraintsstr\n # titlestr = r'{\\fontsize{'+str(Fsize)+'pt}{3em}\\selectfont{}{'+titlestr+'\\r}{\\fontsize{'+str((Fsize-2.))+'pt}{3em}\\selectfont{}('+constraintsstr+'}'\n\n # plt.text(x=0.5, y=0.94, s=titlestr, fontsize=Fsize, ha=\"center\", transform=fig.transFigure)\n # plt.text(x=0.5, y=0.88, s=constraintsstr, fontsize=Fsize-2, ha=\"center\", transform=fig.transFigure)\n # 
fig.title(titlestr,fontsize=Fsize)\n fig.suptitle(titlestr,fontsize=Fsize-2)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Zgas\n plt.subplot(Nrows, Ncols, (1,3))\n\n bindefs = np.array([0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.004, 0.006, 0.008, 0.014,\n 0.017, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07])-0.00001\n\n plotkey = 'Zgas'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xscale('log')\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([0.00001,0.1])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # logUs\n plt.subplot(Nrows, Ncols, (4,6))\n\n bindefs = np.arange(-4.75, -0.25, 0.5)\n\n plotkey = 'logUs'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-5,-0.5])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # xid\n plt.subplot(Nrows, Ncols, (7,9))\n\n bindefs = np.array([0.0, 0.2, 0.4, 0.6])\n\n plotkey = 'xid'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-0.05,0.65])\n plt.ylabel(ylabel)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # nh\n plt.subplot(Nrows, Ncols, (10,12))\n\n bindefs = 10**np.array([1.5, 2.5, 3.5, 4.5])\n\n plotkey = 'nh'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n plt.xscale('log')\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([10,1e5])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # COCOsol\n plt.subplot(Nrows, Ncols, (13,14))\n\n #bindefs = np.array([0.10, 0.14, 0.20, 0.27, 0.38, 0.52, 0.72, 1.00, 1.40])\n bindefs = np.arange(0.05,1.5,0.06)\n\n plotkey = 'COCOsol'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],None,\n stat_SF[oo][plotkey],None,\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([0.00,1.55])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # mup\n plt.subplot(Nrows, Ncols, (15,16))\n\n bindefs = np.array([0,200,400])\n\n plotkey = 'mup'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],None,\n stat_SF[oo][plotkey],None,\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-10,410])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # alpha\n plt.subplot(Nrows, Ncols, (17,18))\n\n bindefs = np.array([-2.15,-1.85,-1.55,-1.25,-0.95])\n\n plotkey = 'alpha'\n Nbins = 10\n nm.plot_modelparametercollections_addhist(None,parametercollection_AGN[oo][plotkey],\n None,stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=None,Nbins=Nbins)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-2.2,-0.9])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n plt.savefig(plotname_obj)\n plt.clf()\n plt.close('all')\n # if verbose: print(' - 
Successfully saved figure to file')\n if verbose: print('\\n done...')",
"def get_multiobjective_plot(evaluator_list, stride=500):\n num_objectives = (\n evaluator_list[0].stats['multiobj_stats']['episode_totals'].shape[1])\n values = [collections.defaultdict(list) for _ in range(num_objectives)]\n for e in evaluator_list:\n for i in range(num_objectives):\n values[i][e.task_name].append(\n e.stats['multiobj_stats']['episode_totals'][:, i])\n means = [None] * num_objectives\n stds = [None] * num_objectives\n for i in range(num_objectives):\n values[i] = _map(np.vstack, values[i])\n means[i] = _map(functools.partial(np.mean, axis=0), values[i])\n stds[i] = _map(functools.partial(np.std, axis=0), values[i])\n\n fig, axes = plt.subplots(num_objectives, 1, figsize=(8, 6 * num_objectives))\n for objective_idx in range(num_objectives):\n ax = axes[objective_idx]\n for i, task_name in enumerate(means[objective_idx]):\n m = means[objective_idx][task_name]\n s = stds[objective_idx][task_name]\n idx = i % len(_COLORS)\n x = np.arange(len(m))\n ax.plot(x, m, lw=2, color=_COLORS[idx], alpha=.6, label=None)\n ax.plot(x[::stride], m[::stride], 'o', lw=2, marker=_MARKERS[idx],\n markersize=10, color=_COLORS[idx], label=task_name)\n ax.fill_between(x, m - s, m + s, alpha=.4, lw=2, color=_COLORS[idx])\n ax.legend()\n ax.set_ylabel('Objective {}'.format(objective_idx))\n ax.set_xlabel('Episode')\n return fig",
"def ex1_plots(instance, destination, prefix, save, animate):\n \n plts = ukf_plots(instance, destination, prefix, save, animate)\n\n truths = truth_parser(instance)\n nan_array= nan_array_parser(instance, truths, instance.base_model)\n #obs, obs_key = obs_parser(instance, True)\n obs_key = obs_key_parser(instance, True)\n preds = preds_parser(instance, True)\n #forecasts = forecasts_parser(instance, True)\n \n ukf_params = instance.ukf_params\n index2 = ukf_params[\"index2\"]\n \n \"remove agents not in model to avoid wierd plots\"\n #obs *= nan_array\n truths *= nan_array\n preds *= nan_array\n #forecasts*= nan_array\n \n \"indices for unobserved agents\"\n not_index2 = np.array([i for i in np.arange(truths.shape[1]) if i not in index2])\n plts.pair_frame(truths, preds, obs_key, 10, destination)\n plts.error_hist(truths[::instance.sample_rate,index2], \n preds[::instance.sample_rate,index2],\"Observed Errors\")\n if len(not_index2)>0:\n plts.error_hist(truths[::instance.sample_rate, not_index2], \n preds[::instance.sample_rate, not_index2],\"Unobserved Errors\")\n \n #plts.path_plots(obs[::instance.sample_rate] , \"Observed\")\n plts.path_plots(preds[::instance.sample_rate], \"Predicted\")\n plts.path_plots(truths, \"True\")\n #plts.path_plots(forecasts[::instance.sample_rate], \"Forecasts\")\n\n if animate:\n #plts.trajectories(truths, \"plots/\")\n plts.pair_frames(truths, preds, obs_key,\n truths.shape[0], \"../../plots/\")",
"def plot_results(model):\n\n # Is is a system of equation\n is_system = max(model.n_input, model.n_output) > 1\n\n # Choose the plotting function depending on the type of training data\n if model.dimension == 1 and not (is_system):\n plot_1d_results(model)\n\n elif model.dimension == 1 and is_system:\n plot_1d_systems(model)\n\n elif model.dimension > 1 and not (is_system):\n plot_2d_results(model)\n\n else:\n # Plot and save slices of the Green's matrix\n for i in range(1, 5):\n plot_2d_systems(model, Green_slice=i)",
"def plot_tsnes():\n # Two environments (for main paper figure. All for final figure)\n ENVS = [\n \"BipedalWalker-v3\",\n #\"LunarLander-v2\",\n #\"Pendulum-v0\"\n \"Acrobot-v1\",\n #\"CartPole-v1\"\n ]\n ALGO_TYPES = [\n \"stablebaselines\",\n \"stablebaselines\",\n \"wann\",\n \"wann\",\n ]\n ALGO_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMAES\",\n ]\n ALGO_PRETTY_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMA-ES\"\n ]\n\n REWARD_SCALES = {\n \"Pendulum-v0\": [-1600, -200],\n \"Acrobot-v1\": [-500, -100],\n \"LunarLander-v2\": [-230, 200],\n \"BipedalWalker-v3\": [-100, 300],\n \"CartPole-v1\": [0, 500]\n }\n\n figure, axs = pyplot.subplots(\n figsize=[6.4 * 2, 4.8],\n nrows=2,\n ncols=4,\n gridspec_kw={'hspace': 0, 'wspace': 0},\n )\n\n for plot_i in range(2):\n env = ENVS[plot_i]\n reward_scale = REWARD_SCALES[env]\n for algo_i in range(len(ALGO_TYPES)):\n column_idx = (algo_i % 2) + plot_i * 2\n row_idx = 0 if algo_i <= 1 else 1\n ax = axs[row_idx, column_idx]\n algo_type = ALGO_TYPES[algo_i]\n algo_name = ALGO_NAMES[algo_i]\n algo_pretty_name = ALGO_PRETTY_NAMES[algo_i]\n\n experiment_glob = \"experiments/{}_{}_{}_*\".format(algo_type, env, algo_name)\n experiment_paths = glob(experiment_glob)\n tsnes = []\n rewards = []\n for experiment_path in experiment_paths:\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n population_tsnes = []\n population_rewards = []\n for path in pivector_paths:\n data = np.load(path)\n population_tsnes.append(data[\"tsne\"])\n population_rewards.append(data[\"average_episodic_reward\"])\n data.close()\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n tsnes = np.concatenate(tsnes, axis=0)\n rewards = np.concatenate(rewards, axis=0)\n\n # Min-max normalization\n rewards = (rewards - reward_scale[0]) / (reward_scale[1] - reward_scale[0])\n\n scatter = ax.scatter(\n tsnes[:, 0],\n tsnes[:, 1],\n c=rewards,\n cmap=\"plasma\",\n s=1,\n vmin=0,\n vmax=1\n )\n\n ax.text(0.98, 0.98, algo_pretty_name, horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes)\n ax.set_xticks([])\n ax.set_yticks([])\n # Hide spines, the outer edges\n ax.spines[\"top\"].set_alpha(0.2)\n ax.spines[\"bottom\"].set_alpha(0.2)\n ax.spines[\"left\"].set_alpha(0.2)\n ax.spines[\"right\"].set_alpha(0.2)\n # Hide edge spines and bolden mid-spines\n if row_idx == 0:\n ax.spines[\"top\"].set_visible(False)\n else:\n ax.spines[\"bottom\"].set_visible(False)\n if column_idx == 0:\n ax.spines[\"left\"].set_visible(False)\n elif column_idx == 1:\n ax.spines[\"right\"].set_alpha(1.0)\n elif column_idx == 2:\n ax.spines[\"left\"].set_alpha(1.0)\n elif column_idx == 3:\n ax.spines[\"right\"].set_visible(False)\n\n # Add titles\n if row_idx == 0 and (column_idx == 0 or column_idx == 2):\n ax.set_title(env.split(\"-\")[0], x=1.0)\n\n cbaxes = figure.add_axes([0.4, 0.94, 0.2, 0.02])\n cbar = figure.colorbar(scatter, orientation=\"horizontal\", cax=cbaxes)\n cbar.set_ticks([0.0, 0.5, 1.0])\n cbar.set_ticklabels([\"Min\", \"Reward\", \"Max\"])\n cbar.ax.xaxis.set_ticks_position('top')\n cbar.ax.xaxis.set_label_position('top')\n cbar.ax.tick_params(labelsize=\"small\", length=0)\n figure.tight_layout()\n figure.savefig(\"figures/tsnes.png\", dpi=200, bbox_inches=\"tight\", pad_inches=0.0)",
"def do_plots(self, i, axis1, axis2, finder):\n fig_index = finder.plot_phase_space_matplotlib(\"%s [mm]\"%axis1, \"%s [MeV/c]\"%axis2)\n name = os.path.join(self.plot_dir,\n \"tune_\"+str(i)+\"_\"+axis1+\"_phase-space\")\n fig = matplotlib.pyplot.figure(fig_index)\n for format in [\"png\",]:\n fig.savefig(name+\".\"+format)\n matplotlib.pyplot.close(fig_index)\n\n fig_index = finder.plot_cholesky_space_matplotlib()\n name = os.path.join(self.plot_dir,\n \"tune_\"+str(i)+\"_\"+axis1+\"_cholesky-space\")\n fig = matplotlib.pyplot.figure(fig_index)\n for format in [\"png\",]:\n fig.savefig(name+\".\"+format)\n matplotlib.pyplot.close(fig_index)",
"def plot_ideal_asa_bad_all(self, env):\n print(f'Plotting ideal, ASA, and bad skill - all runs in subplots - {env}')\n rcParams.update({'figure.autolayout': False})\n params = self.env_params[env]\n fig, _ = plt.subplots(3, 1, sharex='all', sharey='all')\n fig.subplots_adjust(hspace=0, left=0.11, right=0.94, top=0.98, bottom=0.07)\n\n # Plot for all three skills\n # color tool: https://www.cssfontstack.com/oldsites/hexcolortool/\n skills = [\n (1, 'ideal', 'Manually added ideal skill', '#2BAB2B', ['#61E161', '#4FCF4F', '#3DBD3D', '#2BAB2B', '#199919', '#078707', '#007500']),\n (2, 'asa', 'Manually triggered ASA', '#4464C9', ['#91B1FF', '#7797FC', '#5E7EE3', '#4464C9', '#2B4BB0', '#113196', '#00187D']),\n (3, 'bad', 'Manually added bad skill', '#FF3333', ['#FF8F8F', '#FF7070', '#FF5252', '#FF3333', '#ED2121', '#DB0F0F', '#C90000'])\n ]\n for subplot, dataset, label, main_color, colors in skills:\n plt.subplot(3, 1, subplot)\n # Basic run\n plt.plot(\n *self.data(env, 'asa')\n .filter_basic_runs()\n .get_reward_mean(),\n color='black'\n )\n # With skills\n if env == 'gw':\n colors = colors[:1] + colors[2:-2] + colors[-1:]\n for i, itr in enumerate(params['other_asa_runs']):\n plt.plot(\n *self.data(env, dataset)\n .filter_resumed_from(itr)\n .append_prev_itr()\n .get_reward_mean(),\n color=colors[i]\n )\n plt.plot(-1, -1, color=main_color, label=label)\n\n # Format subplot\n plt.legend(loc='lower right')\n plt.grid()\n plt.xlim(*self.env_params[env]['plot_x_lim'])\n if subplot < 3:\n plt.gca().xaxis.set_ticklabels([])\n plt.ylim(*self.env_params[env]['plot_y_lim'])\n y_tics = list(self.env_params[env]['plot_y_tics'])\n y_tics[0] = y_tics[0][:-1]\n plt.yticks(*y_tics)\n if env == 'gw' and subplot == 2:\n plt.ylabel('Average discounted reward')\n\n # Finalize\n w, h = 12, 20\n plt.gcf().set_size_inches(w=w/2.54, h=h/2.54) # convert to inches\n plt.xticks(*self.env_params[env]['plot_x_tics'])\n plt.xlabel('Iteration')\n self.show_save_plot(f'ideal-asa-bad-all-{env}')\n rcParams.update({'figure.autolayout': True})",
"def plot_novelty_results():\n RESULTS_DIR = \"experiments\"\n STDOUT_FILE = \"log.txt\"\n REWARD_PATTERN = r\" EpRewMean[ ]*\\| ([0-9\\-\\.]+)\"\n TIMESTEP_PATTERN = r\" TimestepsSoFar[ ]*\\| ([0-9\\-\\.]+)\"\n ITERATION_PATTERN = r\" Iteration ([0-9]+)\"\n\n GLOBS = [\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_es_*\"),\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_nsres_*\"),\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_nsresgaussian_*\"),\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_nsressupervector_*\")\n ]\n\n COLORS = [\n \"C0\",\n \"C1\",\n \"C2\",\n \"C3\"\n ]\n\n LEGENDS = [\n \"ES\",\n \"NSR-ES (Terminal)\",\n \"NSR-ES (Gaussian)\",\n \"NSR-ES (Supervector)\"\n ]\n\n fig = pyplot.figure(figsize=[4.8, 4.8])\n\n for glob_pattern, legend, color in zip(GLOBS, LEGENDS, COLORS):\n experiment_paths = glob(glob_pattern)\n if len(experiment_paths) == 0:\n raise ValueError(\n \"Looks like there are no novelty experiments. Please see README.md on \"+\n \"running novelty search before plotting. Alternatively comment out call to `plot_novelty_results()`.\"\n )\n # Collect all lines and average over later\n xs = []\n ys = []\n for experiment_path in experiment_paths:\n # We just parse results from stdout file\n stdout_log = open(os.path.join(experiment_path, STDOUT_FILE), encoding=\"utf-8\").read()\n # Take maximum fitness of each generation.\n # We have only one printout for one result\n mean_rewards = list(map(float, re.findall(REWARD_PATTERN, stdout_log)))\n iteration = []\n max_rewards = []\n # Plot elite results\n for mean_reward in mean_rewards:\n max_reward = mean_reward\n if len(max_rewards) > 0:\n max_reward = max(max(max_rewards), max_reward)\n max_rewards.append(max_reward)\n iteration.append(len(max_rewards))\n\n xs.append(iteration)\n ys.append(max_rewards)\n\n # Average over curves\n xs = np.array(xs)\n ys = np.array(ys)\n average_x, average_y, std_y, lower_y, upper_y = interpolate_and_average(xs, ys, confidence_interval=True)\n\n pyplot.plot(average_x, average_y, c=color, label=legend)\n pyplot.fill_between(\n average_x,\n lower_y,\n upper_y,\n alpha=0.2,\n color=color,\n linewidth=0.0\n )\n\n pyplot.tick_params(axis='both', which='both', labelsize=\"x-large\")\n pyplot.grid(alpha=0.2)\n pyplot.xlabel(\"Generation\", fontsize=\"x-large\")\n pyplot.ylabel(\"Average Return\", fontsize=\"x-large\")\n pyplot.legend(prop={\"size\": \"large\"})\n pyplot.tight_layout()\n pyplot.savefig(\"figures/novelty_results.pdf\", bbox_inches=\"tight\", pad_inches=0.0)",
"def make_asimov_fit_parameter_plots(self, combined=False):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n \n if combined:\n outdir = os.path.join(self.outdir, 'CombinedBestFits')\n else:\n outdir = os.path.join(self.outdir, 'IndividualBestFits')\n mkdir(outdir)\n \n maintitle = self.make_main_title(\n end='Asimov Analysis'\n )\n\n hrange = self.inj_param_vals[-1]-self.inj_param_vals[0]\n xlims = [self.inj_param_vals[0]-0.1*hrange,\n self.inj_param_vals[-1]+0.1*hrange]\n\n th = self.labels[self.labels.keys()[0]].dict[\n '%s_name'%self.th_to_wh[0]['params']['bestfit']]\n wh = self.labels[self.labels.keys()[0]].dict[\n '%s_name'%self.th_to_wh[0]['params']['altfit']]\n\n th_to_wh_label = \"%s fit to %s fiducial\"%(\n self.tex_axis_label(th),\n self.tex_axis_label(wh)\n )\n wh_to_th_label = \"%s fit to %s fiducial\"%(\n self.tex_axis_label(wh),\n self.tex_axis_label(th)\n )\n fitlabels = [th_to_wh_label, wh_to_th_label]\n\n subtitle = \"True %s Best Fit Parameters\\end{center}\"%(self.tex_axis_label(th))\n\n # Set up multi-plot if needed\n if combined:\n num_rows = self.get_num_rows(\n data=self.th_to_wh[0]['params'],\n omit_metric=False\n )\n plt.figure(figsize=(20, 5*num_rows+2))\n subplotnum = 1\n else:\n subplotnum = None\n\n for param in self.th_to_wh[0]['params'].keys():\n if param not in ['bestfit', 'altfit']:\n ymax = None\n ymin = None\n for fit, fitname, fitlabel in zip(\n [self.th_to_wh, self.wh_to_th],\n ['th_to_wh', 'wh_to_th'],\n fitlabels):\n vals = []\n for param_val in fit[0]['params'][param]:\n val, units = self.parse_pint_string(\n pint_string=param_val\n )\n if param == 'deltam31':\n vals.append(np.abs(float(val)))\n else:\n vals.append(float(val))\n # Specify the subplot, if necessary\n if combined:\n plt.subplot(num_rows, 4, subplotnum)\n self.make_1d_graph(\n xvals=self.inj_param_vals,\n yvals=vals,\n xlabel=self.inj_param_name,\n xunits=self.inj_param_units,\n ylabel=param,\n yunits=units,\n marker=self.marker_style(fitname),\n color=self.plot_colour(fitname),\n plotlabel=fitlabel,\n xlims=xlims\n )\n\n if ymax is None:\n ymax = max(vals)\n else:\n ymax = max(ymax, max(vals))\n if ymin is None:\n ymin = min(vals)\n else:\n ymin = min(ymin, min(vals))\n\n yrange = ymax - ymin\n plt.ylim(ymin-0.1*yrange, ymax+0.2*yrange)\n plt.legend(loc='upper left')\n # Advance the subplot number, if necessary\n if combined:\n subplotnum += 1\n # Else, save/close this plot\n else:\n plt.title(r'%s \\\\ %s'%(maintitle,subtitle))\n plt.tight_layout()\n save_end = \"%s_%s_best_fit_values\"%(self.inj_param_name,\n param)\n self.save_plot(outdir=outdir, end=save_end, truth=th)\n plt.close()\n # Save the whole canvas, if necessary\n if combined:\n plt.suptitle(r'%s \\\\ %s'%(maintitle,subtitle), fontsize=36)\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n save_end = \"%s_all_best_fit_values\"%(self.inj_param_name)\n self.save_plot(outdir=outdir, end=save_end, truth=th)\n plt.close()",
"def visualize_and_save_experiments(experiment_dict, output_dir, can_plots_show_value_and_weight=True, show_problem_stats=False, save_problem_stats=True, show_manual_solution_plots=False, save_manual_solution_plots=True, show_algorithm_solution_plots=False, save_algorithm_solution_plots=True, show_value_evolution_plots=False, save_value_evolution_plots=True, show_time_division_plots=False, save_time_division_plots=True, show_algorithm_comparison=False, save_algorithm_comparison=True, show_aggregated_result_tables=True, save_aggregated_result_tables=True):\n\n # if needed, save statistics about the problems and their manual solutions\n if show_problem_stats or save_problem_stats:\n problem_names = list(experiment_dict.keys())\n fields = [\"Item num.\", \"Opt. % item num. in cont.\", \"Opt. % item value in cont.\", \"Item weight % of max weight\", \"Item area % of max area\", \"Cont. weight satur. %\", \"Cont. area satur. %\"]\n data_frame = pd.DataFrame(index=problem_names, columns=fields)\n for problem_name in experiment_dict.keys():\n problem = experiment_dict[problem_name][\"problem\"]\n solution = experiment_dict[problem_name][\"manual_solution\"]\n if type(solution) == Solution:\n problem_results = [\n len(problem.items),\n round(len(solution.placed_items) / len(problem.items) * 100, 2),\n round(sum([problem.items[item_index].value for item_index in solution.placed_items.keys()]) / sum([item.value for item in problem.items.values()]) * 100, 2),\n round(sum([item.weight for item in problem.items.values()]) / problem.container.max_weight * 100, 2),\n round(sum([item.shape.area for item in problem.items.values()]) / problem.container.shape.area * 100, 2),\n round(sum([problem.items[item_index].weight for item_index in solution.placed_items.keys()]) / problem.container.max_weight * 100, 2),\n round(sum([problem.items[item_index].shape.area for item_index in solution.placed_items.keys()]) / problem.container.shape.area * 100, 2),\n ]\n data_frame.loc[problem_name] = problem_results\n if len(data_frame) > 0:\n data_frame.index = [(\"Problem \" + name if len(name) < 5 else name) for name in data_frame.index]\n min_row = data_frame.min()\n min_row = min_row.astype(float)\n max_row = data_frame.max()\n max_row = max_row.astype(float)\n mean_row = data_frame.mean()\n mean_row = mean_row.astype(float)\n std_row = data_frame.std()\n std_row = std_row.astype(float)\n data_frame.loc[\"Min\"] = round(min_row, 2)\n data_frame.loc[\"Max\"] = round(max_row, 2)\n data_frame.loc[\"Mean\"] = round(mean_row, 2)\n data_frame.loc[\"Std\"] = round(std_row, 2)\n data_frame.loc[\"Std / (max - min) %\"] = round(std_row / (max_row - min_row) * 100, 2)\n if show_problem_stats:\n print(data_frame.to_string())\n if save_problem_stats:\n data_frame.to_excel(output_dir + \"problem_stats.xlsx\")\n data_frame.to_latex(output_dir + \"problem_stats.tex\")\n\n # create the problem results directory (if not done yet)\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n # for each problem's results, show or save plots if needed\n if show_manual_solution_plots or save_manual_solution_plots or show_algorithm_solution_plots or save_algorithm_solution_plots or show_algorithm_comparison or save_algorithm_comparison:\n for problem_name in experiment_dict.keys():\n\n problem, manual_solution, algorithm_dict = experiment_dict[problem_name].values()\n\n # create a subdirectory to store the solutions of the problem (if not done yet)\n problem_dir_path = output_dir + problem_name + \"/\"\n if not 
os.path.exists(problem_dir_path):\n os.mkdir(problem_dir_path)\n\n plotted_problem_name = \"Problem \" + problem_name if len(problem_name) < 5 else problem_name\n\n # if needed, show/save a plot of the initial state (empty solution), and the final state of a manual solution\n if show_manual_solution_plots or save_manual_solution_plots:\n empty_solution = Solution(problem)\n empty_solution.visualize(title_override=plotted_problem_name + \" - Initial state\", show_plot=show_manual_solution_plots, save_path=problem_dir_path + \"empty_solution.png\" if save_manual_solution_plots else None, show_item_value_and_weight=can_plots_show_value_and_weight, show_value_weight_ratio_bar=can_plots_show_value_and_weight)\n if type(manual_solution) == Solution:\n manual_solution.visualize(title_override=plotted_problem_name + \" - Manual solution\", show_plot=show_manual_solution_plots, save_path=problem_dir_path + \"manual_solution.png\" if save_manual_solution_plots else None, show_item_value_and_weight=can_plots_show_value_and_weight, show_value_weight_ratio_bar=can_plots_show_value_and_weight)\n\n # if required, show/save plots of the solutions of each algorithm\n if show_algorithm_solution_plots or save_algorithm_solution_plots:\n for algorithm_name, subdict in algorithm_dict.items():\n for i, solution in enumerate(subdict[\"solutions\"]):\n solution.visualize(title_override=plotted_problem_name + \" - \" + algorithm_name + \" solution\", show_plot=show_algorithm_solution_plots, save_path=problem_dir_path + \"\" + algorithm_name.lower() + \"_exec\" + str(i + 1) + \"_solution.png\" if save_algorithm_solution_plots else None, show_item_value_and_weight=can_plots_show_value_and_weight, show_value_weight_ratio_bar=can_plots_show_value_and_weight)\n\n # if required, show/save plots of the value evolution of each algorithm\n if show_value_evolution_plots or save_value_evolution_plots:\n for algorithm_name, subdict in algorithm_dict.items():\n for i, value_evolution in enumerate(subdict[\"value_evolutions\"]):\n if value_evolution:\n if type(value_evolution) == list and type(value_evolution[0]) == list:\n visualize_boxplot_for_data_sequence(data_lists=value_evolution, title=plotted_problem_name + \" - Population fitness per generation\", show_plot=show_value_evolution_plots, save_path=problem_dir_path + \"\" + algorithm_name.lower() + \"_exec\" + str(i + 1) + \"_fitness_evolution.png\" if save_value_evolution_plots else None)\n else:\n visualize_plot(values=value_evolution, title=plotted_problem_name + \" - \" + algorithm_name + \" solution value per iteration\", show_plot=show_value_evolution_plots, save_path=problem_dir_path + \"\" + algorithm_name.lower() + \"_exec\" + str(i + 1) + \"_value_evolution.png\" if save_value_evolution_plots else None)\n\n # if required, show/save plots of the time division in tasks of each algorithm\n if show_time_division_plots or save_time_division_plots:\n for algorithm_name, subdict in algorithm_dict.items():\n for i, time_division in enumerate(subdict[\"time_divisions\"]):\n visualize_bar_plot(values=[value_pair[0] for value_pair in time_division.values()], labels=[add_newlines_by_spaces(label, 7) for label in list(time_division.keys())], title=\"Problem \" + problem_name + \" - \" + algorithm_name + \" time per task (milliseconds)\", show_plot=show_algorithm_solution_plots, save_path=problem_dir_path + \"\" + algorithm_name.lower() + \"_exec\" + str(i + 1) + \"_time_division.png\" if save_algorithm_solution_plots else None)\n\n # if needed, show/save plots that compare 
the value and time of each algorithm considering multiple executions\n if show_algorithm_comparison or save_algorithm_comparison:\n visualize_boxplot_for_data_sequence(data_lists=[experiment_dict[problem_name][\"algorithms\"][algo_name][\"values\"] for algo_name in experiment_dict[problem_name][\"algorithms\"].keys()], title=\"Problem \" + problem_name + \" - Solution value by algorithm\", labels=experiment_dict[problem_name][\"algorithms\"].keys(), show_plot=show_algorithm_comparison, save_path=problem_dir_path + \"value_comparison.png\" if save_algorithm_comparison else None)\n visualize_boxplot_for_data_sequence(data_lists=[experiment_dict[problem_name][\"algorithms\"][algo_name][\"times\"] for algo_name in experiment_dict[problem_name][\"algorithms\"].keys()], title=\"Problem \" + problem_name + \" - Computational time (milliseconds) by algorithm\", labels=experiment_dict[problem_name][\"algorithms\"].keys(), y_scale_override=\"log\", show_plot=show_algorithm_comparison, save_path=problem_dir_path + \"time_comparison.png\" if save_algorithm_comparison else None)\n\n # if needed, save tables with an aggregation of the value and time results of the executions of each problem (or just show them)\n if show_aggregated_result_tables or save_aggregated_result_tables:\n problem_names = list(experiment_dict.keys())\n algorithm_names = [algo_name for algo_name in experiment_dict[problem_names[0]][\"algorithms\"].keys()]\n fields = [\"mean\", \"std\", \"min\", \"med\", \"max\"]\n for concept in [\"value\", \"time\"]:\n algo_field_tuples = [(algo_name, field) for algo_name in algorithm_names for field in fields]\n if concept == \"value\":\n algo_field_tuples += [(\"Manual\", \"optim.\")]\n multi_index = pd.MultiIndex.from_tuples(algo_field_tuples, names=[\"Algorithm\", \"Statistic\"])\n data_frame = pd.DataFrame(index=problem_names, columns=multi_index)\n for problem_name in experiment_dict.keys():\n problem_results = list()\n for algo_name in algorithm_names:\n mean, std, min_, median, max_ = get_stats(experiment_dict[problem_name][\"algorithms\"][algo_name][concept + \"s\"], 2 if concept == \"value\" else 0)\n problem_results.extend([mean, std, min_, median, max_])\n if concept == \"value\":\n if type(experiment_dict[problem_name][\"manual_solution\"]) == Solution:\n problem_results.append(experiment_dict[problem_name][\"manual_solution\"].value)\n else:\n problem_results.append(experiment_dict[problem_name][\"manual_solution\"])\n data_frame.loc[problem_name] = problem_results\n data_frame.index = [(\"Problem \" + name if len(name) < 5 else name) for name in data_frame.index]\n if show_aggregated_result_tables:\n print(\"{} results:\\n{}\\n\".format(concept.capitalize(), data_frame.to_string()))\n if save_aggregated_result_tables:\n data_frame.to_excel(output_dir + concept + \"_results.xlsx\")\n data_frame.to_latex(output_dir + concept + \"_results.tex\")",
"def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n 
#flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n #outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n 
#coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()",
"def plot_summary(self, **kwargs):\n ncols = 2\n nparams = len(self.distribution_parameter_names)\n d, r = divmod(nparams, 2)\n if r > 0:\n nrows = d + 1\n else:\n nrows = d\n\n gs = gridspec.GridSpec(nrows, ncols)\n fig = plt.figure(num=1, figsize=(16, 3*nrows))\n ax = []\n for n, parameter in enumerate(self):\n r, c = divmod(n, 2)\n ax.append(fig.add_subplot(gs[r, c]))\n\n if c == 0 and (n <= nparams-1):\n self._plot_posterior_pdf(parameter, ax[-1],\n y_label='Posterior pdf',\n x_label=parameter)\n elif n <= nparams-1:\n self._plot_posterior_pdf(parameter, ax[-1],\n y_label=None,\n x_label=parameter)\n\n fig.tight_layout()\n\n return fig, ax",
"def analyse_plots(plot_dict, data_dict) :\n for component in [ '_x', '_y' ] :\n z_pos = array.array( 'd' )\n trans_pos = array.array( 'd' )\n errors = array.array( 'd' )\n zeros = array.array( 'd' )\n\n plot = plot_dict['beam_positions'+component]\n\n for i in range( plot.GetXaxis().GetNbins()+2 ) :\n projection = plot.ProjectionY( \\\n 'profile'+component+'_pro_'+str(i), i, i )\n if projection.GetEntries() == 0 :\n continue\n\n pro_mean, pro_mean_err, pro_std, pro_std_err = \\\n scifi.fit_gaussian( projection )\n\n errors.append( pro_mean_err )\n trans_pos.append( pro_mean )\n z_pos.append( data_dict['station_positions'][ i-6 ] )\n zeros.append(0.0)\n\n position_graph = ROOT.TGraphErrors( len(zeros), z_pos, trans_pos, \\\n zeros, errors )\n position_graph.SetName('beam_profile'+component)\n plot_dict['beam_profile'+component] = position_graph\n\n profile_x = plot_dict['beam_profile_x']\n profile_y = plot_dict['beam_profile_y']\n\n up_x_func = ROOT.TF1( \"up_fit_x\", \"pol1\", -5000.0, 0.0 )\n up_y_func = ROOT.TF1( \"up_fit_y\", \"pol1\", -5000.0, 0.0 )\n down_x_func = ROOT.TF1( \"down_fit_x\", \"pol1\", 0.0, 5000.0 )\n down_y_func = ROOT.TF1( \"down_fit_y\", \"pol1\", 0.0, 5000.0 )\n\n up_fit_x = profile_x.Fit( 'up_fit_x', \"QSR\" )\n up_fit_y = profile_y.Fit( 'up_fit_y', \"QSR\" )\n down_fit_x = profile_x.Fit( 'down_fit_x', \"QSR\" )\n down_fit_y = profile_y.Fit( 'down_fit_y', \"QSR\" )\n\n plot_dict['beam_profile_x_up_fit'] = up_x_func\n plot_dict['beam_profile_y_up_fit'] = up_y_func\n plot_dict['beam_profile_x_down_fit'] = down_x_func\n plot_dict['beam_profile_y_down_fit'] = down_y_func\n\n\n up_beam_gra_x = up_x_func.GetParameter(1)\n up_beam_gra_x_err = up_x_func.GetParError(1)\n up_beam_gra_y = up_y_func.GetParameter(1)\n up_beam_gra_y_err = up_y_func.GetParError(1)\n\n up_beam_pos_x = data_dict['station_positions'][-1]*up_beam_gra_x + up_x_func.GetParameter(0)\n up_beam_pos_x_err = up_x_func.GetParError(0)\n up_beam_pos_y = data_dict['station_positions'][-1]*up_beam_gra_y + up_y_func.GetParameter(0)\n up_beam_pos_y_err = up_y_func.GetParError(0)\n\n up_beam_rot_x = math.atan( up_beam_gra_x )\n up_beam_rot_x_err = up_beam_gra_x_err # Approx linear\n up_beam_rot_y = math.atan( up_beam_gra_y )\n up_beam_rot_y_err = up_beam_gra_y_err # Approx linear\n\n\n\n down_beam_gra_x = down_x_func.GetParameter(1)\n down_beam_gra_x_err = down_x_func.GetParError(1)\n down_beam_gra_y = down_y_func.GetParameter(1)\n down_beam_gra_y_err = down_y_func.GetParError(1)\n\n down_beam_pos_x = data_dict['station_positions'][1]*down_beam_gra_x + down_x_func.GetParameter(0)\n down_beam_pos_x_err = down_x_func.GetParError(0)\n down_beam_pos_y = data_dict['station_positions'][1]*down_beam_gra_y + down_y_func.GetParameter(0)\n down_beam_pos_y_err = down_y_func.GetParError(0)\n\n down_beam_rot_x = math.atan( down_beam_gra_x )\n down_beam_rot_x_err = down_beam_gra_x_err # Approx linear\n down_beam_rot_y = math.atan( down_beam_gra_y )\n down_beam_rot_y_err = down_beam_gra_y_err # Approx linear\n\n\n# down_pos_x = down_beam_pos_x - data_dict['station_positions'][1]*up_beam_gra_x + up_x_func.GetParameter(0)\n# down_pos_x_err = math.sqrt( up_x_func.GetParError(0)**2 + down_beam_pos_x_err**2 )\n# down_pos_y = down_beam_pos_y - data_dict['station_positions'][1]*up_beam_gra_y + up_y_func.GetParameter(0)\n# down_pos_y_err = math.sqrt( up_y_func.GetParError(0)**2 + down_beam_pos_y_err**2 )\n\n length = TRACKER_SEPARATION\n down_pos_x = down_beam_pos_x - ( up_beam_pos_x + length*up_beam_gra_x )\n down_pos_x_err = math.sqrt( 
up_beam_pos_x_err**2 + down_beam_pos_x_err**2 + (length*up_beam_gra_x_err)**2 )\n down_pos_y = down_beam_pos_y - ( up_beam_pos_y + length*up_beam_gra_y )\n down_pos_y_err = math.sqrt( up_beam_pos_y_err**2 + down_beam_pos_y_err**2 + (length*up_beam_gra_y_err)**2 )\n\n down_rot_x = down_beam_rot_x - up_beam_rot_x\n down_rot_x_err = math.sqrt( down_beam_rot_x_err**2 + up_beam_rot_x_err**2 )\n down_rot_y = down_beam_rot_y - up_beam_rot_y\n down_rot_y_err = math.sqrt( down_beam_rot_y_err**2 + up_beam_rot_y_err**2 )\n\n\n print\n print \"Incoming Beam Misalignments:\"\n print\n print \"Displacement and rotation of beam with respect to upstream tracker:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( up_beam_pos_x, up_beam_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( up_beam_pos_y, up_beam_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( up_beam_rot_x*1000.0, up_beam_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( up_beam_rot_y*1000.0, up_beam_rot_y_err*1000.0 )\n print\n\n print\n print \"Downstream Tracker Beam Misalignments:\"\n print\n print \"Displacement and rotation of beam with respect to downstream tracker:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_beam_pos_x, down_beam_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_beam_pos_y, down_beam_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_beam_rot_x*1000.0, down_beam_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_beam_rot_y*1000.0, down_beam_rot_y_err*1000.0 )\n print\n\n print\n print \"Downstream Tracker Alignment:\"\n print\n print \"Displacement and rotation of between the two trackers:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_pos_x, down_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_pos_y, down_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_rot_x*1000.0, down_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_rot_y*1000.0, down_rot_y_err*1000.0 )\n print",
"def plot(model, results, filename):\n\n # c = model.compartments.get_one(id='c')\n #\n # rna_1 = model.species_types.get_one(id='rna_1').species.get_one(compartment=c)\n # rna_2 = model.species_types.get_one(id='rna_2').species.get_one(compartment=c)\n # rna_3 = model.species_types.get_one(id='rna_3').species.get_one(compartment=c)\n #\n pops = results.get('populations')\n time = pops.index\n pop_rna_1 = pops['rna_1[c]']\n pop_rna_2 = pops['rna_2[c]']\n pop_rna_3 = pops['rna_3[c]']\n\n pop_atp = pops['atp[c]']\n pop_gtp = pops['gtp[c]']\n pop_utp = pops['ctp[c]']\n pop_ctp = pops['utp[c]']\n\n pop_amp = pops['amp[c]']\n pop_gmp = pops['gmp[c]']\n pop_ump = pops['cmp[c]']\n pop_cmp = pops['ump[c]']\n\n print(pop_rna_1, pop_atp, pop_gtp, pop_utp, pop_ctp)\n\n fig1, axes1 = pyplot.subplots(nrows=3, ncols=1)\n\n axes1[0].plot(time / 3600, pop_rna_1)\n axes1[0].plot(time / 3600, pop_rna_2)\n axes1[0].plot(time / 3600, pop_rna_3)\n axes1[0].set_xlim((time[0] / 3600, time[-1] / 3600))\n axes1[0].set_ylim((0., 10.0))\n axes1[0].legend(loc='upper right')\n\n axes1[1].plot(time / 3600, pop_atp)\n axes1[1].plot(time / 3600, pop_gtp)\n axes1[1].plot(time / 3600, pop_utp)\n axes1[1].plot(time / 3600, pop_ctp)\n axes1[1].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[1].set_ylim((0., 10.0))\n axes1[1].legend(loc='upper right')\n\n axes1[2].plot(time / 3600, pop_amp)\n axes1[2].plot(time / 3600, pop_gmp)\n axes1[2].plot(time / 3600, pop_ump)\n axes1[2].plot(time / 3600, pop_cmp)\n axes1[2].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[2].set_ylim((0., 10.0))\n axes1[2].legend(loc='upper right')\n\n fig1.savefig(filename.format('species'))\n pyplot.close(fig1)",
"def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n\n # Figure 1, position in 3 subplots\n pos_fig_sub = self.plot_subplots(0, self.trial_name + ' Torso Intrinsic Position', 'Position (mm)',\n self.torso_pos_labeled, self.torso_pos_filled, self.torso_pos_smoothed)\n figs.append(pos_fig_sub)\n\n # Figure 2, orientation in 3 subplots\n eul_fig_sub = self.plot_subplots(1, self.trial_name + ' Torso Intrinsic Euler Angles', 'Angle (deg)',\n self.torso_eul_labeled, self.torso_eul_filled, self.torso_eul_smoothed)\n figs.append(eul_fig_sub)\n\n # Figure 3, position in one axes\n pos_fig_one = self.plot_one_axes(2, self.trial_name + ' Torso Intrinsic Position', 'Position (mm)',\n self.torso_pos_labeled, self.torso_pos_filled, self.torso_pos_smoothed,\n {'labeled': 'Labeled (X)', 'filled': 'Filled (Y)', 'smoothed': 'Smoothed (Z)'})\n figs.append(pos_fig_one)\n\n # Figure 3, position in one axes\n eul_fig_one = self.plot_one_axes(3, self.trial_name + ' Torso Intrinsic Euler Angles', 'Angle (deg)',\n self.torso_eul_labeled, self.torso_eul_filled, self.torso_eul_smoothed,\n {'labeled': 'Labeled (Flex/Ext)', 'filled': 'Filled (Lat Flex)',\n 'smoothed': 'Smoothed (Axial)'})\n figs.append(eul_fig_one)\n\n return figs",
"def basic_stats_and_plots():\n \n basename = sys.argv[1]\n ops = (\"two_opt\", \"twoh_opt\", \"three_opt\", \"three_opt_broad\", \"swap\", \"swap_adj\")\n opfs = {\n \"two_opt\": tsp.two_opt,\n \"twoh_opt\": tsp.twoh_opt,\n \"three_opt\": tsp.three_opt,\n \"three_opt_broad\": tsp.three_opt_broad,\n \"swap\": tsp.swap_two,\n \"swap_adj\": tsp.swap_adj\n }\n \n lengths = range(6, 11)\n for length in lengths:\n stddev = []\n gini = []\n nneighbours = []\n prop_unique = []\n for op in ops:\n filename = os.path.join(basename,\n \"tsp_length_%d_%s\" % (length, op),\n \"TP_row0.dat\")\n print op, length\n x = np.genfromtxt(filename)\n # stats to get:\n stddev.append(np.std(x))\n gini.append(random_walks.gini_coeff(x))\n nneighbours.append(np.sum(x > 0))\n mu, sigma = rw_experiment_with_op(length, opfs[op])\n prop_unique.append((mu, sigma))\n\n gini_barchart(length, gini, ops)\n stddev_barchart(length, stddev, ops)\n plot_gini_v_nneighbours(length, gini, nneighbours, ops)\n plot_stddev_v_nneighbours(length, stddev, nneighbours, ops)\n plot_gini_v_prop_unique(length, gini, prop_unique, ops)\n plot_stddev_v_prop_unique(length, stddev, prop_unique, ops)",
"def generate_plots():\n\n hmp = homemonitor_plot()\n hmp.load_data()\n hmp.plot_day()\n hmp.plot_hist()",
"def runExpt_and_makePlots(n, d, grid_size, reps, tho_scale=0.1, is_classification=True):\n\n args = [n, d, grid_size, reps]\n df_std_signal, df_tho_signal = repeatexp(*args,\n is_classification=is_classification,\n tho_scale=tho_scale,\n no_signal=False)\n \n df_std_nosignal, df_tho_nosignal = repeatexp(*args,\n is_classification=is_classification,\n tho_scale=tho_scale,\n no_signal=True)\n\n f, ax = plt.subplots(2, 2, figsize=(8,10), sharex=True, sharey=False)\n sb.set_style('whitegrid')\n \n kw_params = {'x':'dataset',\n 'y':'performance',\n 'units':'perm'}\n \n sb.barplot(data=df_std_signal,\n ax=ax[0,0],\n **kw_params)\n ax[0,0].set_title('Standard, HAS Signal')\n \n sb.barplot(data=df_tho_signal,\n ax=ax[0,1],\n **kw_params)\n ax[0,1].set_title('Thresholdout, HAS Signal')\n\n sb.barplot(data=df_std_nosignal,\n ax=ax[1,0],\n **kw_params)\n ax[1,0].set_title('Standard, NO Signal')\n\n sb.barplot(data=df_tho_nosignal,\n ax=ax[1,1],\n **kw_params)\n ax[1,1].set_title('Thresholdout, NO Signal')\n \n return f, ax",
"def all(folder, mt=False):\n handles = []\n experiments = get_experiment_series(folder, mT=mt)\n for ex in experiments:\n if mt:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm {}mT'.format(ex.height, ex.magnet))[0])\n else:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm'.format(ex.height))[0])\n plt.legend()\n plt.show()",
"def plot_objective_(OptimizeResult, dimensions, fig_savepath, figsize=(7.48, 7.48), format='PNG', dpi=300):\n plot_objective(OptimizeResult, figsize=figsize, dimensions=dimensions)\n plt.tight_layout()\n # plt.subplots_adjust(left=0.08, bottom=0.12, right=0.98, top=0.98, hspace=0.1, wspace=0.2)\n plt.savefig(fig_savepath, format=format, dpi=dpi)\n # plt.show()",
"def plot_test_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n\n df_i = process_for_test_objective(\n df_i.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = df_i.loc[df_i[\"max_idx\"]][\"timestamp_end\"].values\n y = df_i.loc[df_i[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = exp_config.get(\"best_objective\", 1) - f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n\n else:\n\n exp_df = process_for_test_objective(\n exp_df.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = exp_df.loc[exp_df[\"max_idx\"]][\"timestamp_end\"].values\n y = exp_df.loc[exp_df[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n idx = np.unique(x, return_index=True, axis=0)[1]\n\n x = x[idx]\n y = y[idx]\n\n x = np.clip(np.concatenate([x, [exp_config[\"t_max\"]]]), 0, exp_config[\"t_max\"])\n y = np.clip(exp_config.get(\"best_objective\", 1) - np.concatenate([y, [y[-1]]]), 0, 1)\n \n area = aulc(x, y)\n exp_config[\"data\"][exp_name][\"AULC\"] = area\n \n plt.step(\n x[:],\n y[:],\n where=\"post\",\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n # if MODE == \"min\":\n # plt.legend(loc=\"upper right\")\n # else:\n # plt.legend(loc=\"lower right\")\n plt.legend(loc=exp_config.get(\"legend\", \"best\"))\n\n plt.ylabel(\"Test Regret\")\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid(which=\"minor\", color=\"gray\", linestyle=\":\")\n plt.grid(which=\"major\", linestyle=\"-\")\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()",
"def get_plots(self):\n return list(self.plots.values())",
"def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n\n title_prefix = self.trial_name + ' ' + self.segment_name + ' '\n # Figure 1, position in 3 subplots\n pos_fig_sub = self.plot_subplots(self.fig_num_start, title_prefix + 'Position (mm)', self.pos_raw,\n self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_sub)\n\n # Figure 2, orientation in 3 subplots\n eul_fig_sub = self.plot_subplots(self.fig_num_start + 1, title_prefix + 'Euler Angles (deg)', self.eul_raw,\n self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_sub)\n\n # Figure 3, velocity in 3 subplots\n vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 2, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel)\n figs.append(vel_fig_sub)\n\n # Figure 4, angular velocity in 3 subplots\n ang_vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 3, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel)\n figs.append(ang_vel_fig_sub)\n\n # Figure 5, position in one axes\n pos_fig_one = self.plot_one_axes(self.fig_num_start + 4, title_prefix + 'Position', 'Position (mm)',\n self.pos_raw, self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_one)\n\n # Figure 6, orientation in one axes\n eul_fig_one = self.plot_one_axes(self.fig_num_start + 5, title_prefix + 'Euler Angles', 'Angle (deg)',\n self.eul_raw, self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_one)\n\n # Figure 7, velocity in one axes\n vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 6, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel, self.pos_legend)\n figs.append(vel_fig_one)\n\n # Figure 8, angular velocity in one axes\n ang_vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 7, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel, self.pos_legend)\n figs.append(ang_vel_fig_one)\n\n return figs"
] |
[
"0.6394151",
"0.6230003",
"0.61507076",
"0.6087635",
"0.6050366",
"0.59917736",
"0.5969156",
"0.5965259",
"0.59232706",
"0.58762634",
"0.5772327",
"0.57710993",
"0.57626706",
"0.57371104",
"0.57312965",
"0.571158",
"0.56957924",
"0.56929827",
"0.5684071",
"0.56773794",
"0.5672687",
"0.5671708",
"0.56309503",
"0.56202906",
"0.5617537",
"0.56112164",
"0.56017536",
"0.5598069",
"0.55943674",
"0.554934"
] |
0.7944752
|
0
|
Transforms an experiment to a DataFrame. Only supports Experiment and SimpleExperiment. Transforms an Experiment into a dataframe with rows keyed by trial_index and arm_name, metrics pivoted into one row.
|
def exp_to_df(
    exp: Experiment,
    metrics: Optional[List[Metric]] = None,
    run_metadata_fields: Optional[List[str]] = None,
    trial_properties_fields: Optional[List[str]] = None,
    **kwargs: Any,
) -> pd.DataFrame:
    def prep_return(
        df: pd.DataFrame, drop_col: str, sort_by: List[str]
    ) -> pd.DataFrame:
        return not_none(not_none(df.drop(drop_col, axis=1)).sort_values(sort_by))
    # Accept Experiment and SimpleExperiment
    if isinstance(exp, MultiTypeExperiment):
        raise ValueError("Cannot transform MultiTypeExperiments to DataFrames.")
    key_components = ["trial_index", "arm_name"]
    # Get each trial-arm with parameters
    arms_df = pd.DataFrame()
    for trial_index, trial in exp.trials.items():
        for arm in trial.arms:
            arms_df = arms_df.append(
                {"arm_name": arm.name, "trial_index": trial_index, **arm.parameters},
                ignore_index=True,
            )
    # Fetch results; in case arms_df is empty, return empty results (legacy behavior)
    results = exp.fetch_data(metrics, **kwargs).df
    if len(arms_df.index) == 0:
        if len(results.index) != 0:
            raise ValueError(
                "exp.fetch_data().df returned more rows than there are experimental "
                "arms. This is an inconsistent experimental state. Please report to "
                "Ax support."
            )
        return results
    # Create key column from key_components
    arms_df["trial_index"] = arms_df["trial_index"].astype(int)
    key_col = "-".join(key_components)
    key_vals = arms_df[key_components[0]].astype("str") + arms_df[
        key_components[1]
    ].astype("str")
    arms_df[key_col] = key_vals
    # Add trial status
    trials = exp.trials.items()
    trial_to_status = {index: trial.status.name for index, trial in trials}
    arms_df["trial_status"] = [
        trial_to_status[trial_index] for trial_index in arms_df.trial_index
    ]
    # Add generator_run model keys
    arms_df["generator_model"] = [
        # This accounts for the generic case that generator_runs is a list of arbitrary
        # length. If all elements are `None`, this yields an empty string. Repeated
        # generator models within a trial are condensed via a set comprehension.
        ", ".join(
            {
                not_none(generator_run._model_key)
                for generator_run in exp.trials[trial_index].generator_runs
                if generator_run._model_key is not None
            }
        )
        if trial_index in exp.trials
        else ""
        for trial_index in arms_df.trial_index
    ]
    # replace all unknown generator_models (denoted by empty strings) with "Unknown"
    arms_df["generator_model"] = [
        "Unknown" if generator_model == "" else generator_model
        for generator_model in arms_df["generator_model"]
    ]
    # Add any trial properties fields to arms_df
    if trial_properties_fields is not None:
        if not (
            isinstance(trial_properties_fields, list)
            and all(isinstance(field, str) for field in trial_properties_fields)
        ):
            raise ValueError(
                "trial_properties_fields must be List[str] or None. "
                f"Got {trial_properties_fields}"
            )
        # add trial._properties fields
        for field in trial_properties_fields:
            trial_to_properties_field = {
                index: (
                    trial._properties[field] if field in trial._properties else None
                )
                for index, trial in trials
            }
            if any(trial_to_properties_field.values()): # field present for any trial
                if not all(
                    trial_to_properties_field.values()
                ): # not present for all trials
                    logger.warning(
                        f"Field {field} missing for some trials' properties. "
                        "Returning None when missing."
                    )
                arms_df["trial_properties_" + field] = [
                    trial_to_properties_field[key] for key in arms_df.trial_index
                ]
            else:
                logger.warning(
                    f"Field {field} missing for all trials' properties. "
                    "Not appending column."
                )
    # Add any run_metadata fields to arms_df
    if run_metadata_fields is not None:
        if not (
            isinstance(run_metadata_fields, list)
            and all(isinstance(field, str) for field in run_metadata_fields)
        ):
            raise ValueError(
                "run_metadata_fields must be List[str] or None. "
                f"Got {run_metadata_fields}"
            )
        # add run_metadata fields
        for field in run_metadata_fields:
            trial_to_metadata_field = {
                index: (
                    trial.run_metadata[field] if field in trial.run_metadata else None
                )
                for index, trial in trials
            }
            if any(trial_to_metadata_field.values()): # field present for any trial
                if not all(
                    trial_to_metadata_field.values()
                ): # not present for all trials
                    logger.warning(
                        f"Field {field} missing for some trials' run_metadata. "
                        "Returning None when missing."
                    )
                arms_df[field] = [
                    trial_to_metadata_field[key] for key in arms_df.trial_index
                ]
            else:
                logger.warning(
                    f"Field {field} missing for all trials' run_metadata. "
                    "Not appending column."
                )
    if len(results.index) == 0:
        logger.info(
            f"No results present for the specified metrics `{metrics}`. "
            "Returning arm parameters and metadata only."
        )
        exp_df = arms_df
    elif not all(col in results.columns for col in key_components):
        logger.warn(
            f"At least one of key columns `{key_components}` not present in results df "
            f"`{results}`. Returning arm parameters and metadata only."
        )
        exp_df = arms_df
    else:
        # prepare results for merge
        key_vals = results[key_components[0]].astype("str") + results[
            key_components[1]
        ].astype("str")
        results[key_col] = key_vals
        metric_vals = results.pivot(
            index=key_col, columns="metric_name", values="mean"
        ).reset_index()
        # dedupe results by key_components
        metadata = results[key_components + [key_col]].drop_duplicates()
        metrics_df = pd.merge(metric_vals, metadata, on=key_col)
        # merge and return
        exp_df = pd.merge(
            metrics_df, arms_df, on=key_components + [key_col], how="outer"
        )
    return prep_return(df=exp_df, drop_col=key_col, sort_by=["arm_name"])
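A minimal usage sketch for the function above (illustrative only, not part of this dataset row): it assumes Ax's testing stub `get_branin_experiment` is importable from `ax.utils.testing.core_stubs` and that its batch trial can be run with a synthetic runner; treat those details as assumptions.
# Illustrative sketch: exercise exp_to_df on a hypothetical synthetic Ax experiment.
from ax.utils.testing.core_stubs import get_branin_experiment  # assumed test stub

exp = get_branin_experiment(with_batch=True)  # experiment with one batch trial (assumed behavior)
exp.trials[0].run()                           # attach run metadata via the stub runner
df = exp_to_df(exp)                           # one row per (trial_index, arm_name), metrics as columns
print(df[["trial_index", "arm_name", "trial_status", "generator_model"]].head())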
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exp_to_df(\n exp: Experiment,\n metrics: Optional[List[Metric]] = None,\n key_components: Optional[List[str]] = None,\n **kwargs: Any,\n) -> pd.DataFrame:\n key_components = key_components or [\"trial_index\", \"arm_name\"]\n\n # Accept Experiment and SimpleExperiment\n if isinstance(exp, MultiTypeExperiment):\n raise ValueError(\"Cannot transform MultiTypeExperiments to DataFrames.\")\n\n results = exp.fetch_data(metrics, **kwargs).df\n if len(results.index) == 0: # Handle empty case\n return results\n key_col = \"-\".join(key_components)\n key_vals = results[key_components[0]].astype(\"str\")\n for key in key_components[1:]:\n key_vals = key_vals + results[key].astype(\"str\")\n results[key_col] = key_vals\n\n metric_vals = results.pivot(\n index=key_col, columns=\"metric_name\", values=\"mean\"\n ).reset_index()\n metadata = results[key_components + [key_col]].drop_duplicates()\n metric_and_metadata = pd.merge(metric_vals, metadata, on=key_col)\n arm_names_and_params = pd.DataFrame(\n [{\"arm_name\": name, **arm.parameters} for name, arm in exp.arms_by_name.items()]\n )\n exp_df = pd.merge(metric_and_metadata, arm_names_and_params, on=\"arm_name\")\n # pyre-fixme[16]: `Optional` has no attribute `sort_values`.\n return exp_df.drop(key_col, axis=1).sort_values(key_components)",
"def experiment_to_dataframe(\n experiment_guid: str, connection: WetLabAzureConnection, config: combi.CombinatoricsConfig\n) -> pd.DataFrame: # pragma: no cover\n # Retrieve the experiment\n experiment: domain.Experiment = connection.get_experiment_by_id(experiment_guid) # type: ignore # auto\n\n # Build the data frame gathering observation for each sample\n df = pd.DataFrame(\n [\n _process_sample(sample, experiment=experiment, connection=connection, config=config)\n for sample in experiment.samples\n ]\n )\n\n return df",
"def _repivot_dataframe(armscore_df: pd.DataFrame) -> pd.DataFrame:\n\n transform = (\n armscore_df.set_index([\"trial_index\", \"arm_name\", \"metric_name\"])\n .unstack(\"metric_name\")\n .reset_index()\n )\n new_cols = transform.columns.to_flat_index()\n parameters_holder = transform[\n list(filter(lambda x: \"parameters\" in x, new_cols))[0]\n ]\n transform.drop(columns=\"parameters\", level=0, inplace=True)\n new_cols = new_cols.drop(labels=filter(lambda x: \"parameters\" in x, new_cols))\n transform.columns = [\"trial_index\", \"arm_name\"] + [\n \"_\".join(tpl) for tpl in new_cols[2:]\n ]\n transform[\"parameters\"] = parameters_holder\n # pyre-fixme[7]: Expected `DataFrame` but got `Union[DataFrame, Series]`.\n return transform",
"def dataframe_from_sample(\n sample: domain.Sample, experiment: domain.Experiment, connection: WetLabAzureConnection\n) -> pd.DataFrame: # pragma: no cover\n # Retrieve the human-readable names\n signal_map = {signal.guid: _signal_to_common_name(signal) for signal in experiment.signals}\n\n # Get the timeseries associated to a sample\n timeseries: pd.DataFrame = connection.get_timeseries_from_sample(sample=sample)\n return timeseries.rename(columns=signal_map)",
"def get_experiment_data(experiment_names):\n\n snapshots_query = db_utils.query(\n Experiment.git_hash,\\\n Trial.experiment, Trial.fuzzer, Trial.benchmark,\\\n Trial.time_started, Trial.time_ended,\\\n Snapshot.trial_id, Snapshot.time, Snapshot.edges_covered)\\\n .select_from(Experiment)\\\n .join(Trial)\\\n .join(Snapshot)\\\n .filter(Experiment.name.in_(experiment_names))\\\n .filter(Trial.preempted.is_(False))\n\n return pd.read_sql_query(snapshots_query.statement, db_utils.engine)",
"def convert_measuerment_dict_to_DataFrame(measurement_dict):\n df = pd.DataFrame.from_dict(measurement_dict, orient='index')\n return(df)",
"def experiment_show_table_format(experiment):\n from msrestazure.tools import parse_resource_id\n row = OrderedDict()\n row['Name'] = experiment['name']\n row['Resource Group'] = experiment['resourceGroup']\n row['Workspace'] = parse_resource_id(experiment['id'])['name']\n row['State'] = experiment['provisioningState']\n return row",
"def experiment(task, eid, event_type, output, metric, sort, output_fields):\n event_type = EVENT_TYPES[event_type]\n ServerManager.get()\n try:\n result = ServerManager.api.experiment_details(task, eid, event_type=event_type, metric=metric)\n prop_name_loc = {k: i for i, k in enumerate(output_fields)}\n result_df = experiment_to_df(exp=result, prop_name_loc=prop_name_loc, event_type=event_type, sort=sort)\n if output is None:\n click.echo(result_df)\n else:\n result_df.to_csv(output)\n except ApiException as e:\n click.echo(click.style(json.loads(e.body)['detail'], fg='red'))",
"def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n label = \"{}, {}, {}\".format(nets, critic, loss)\n rows = list(\n zip(\n itertools.repeat(exp_name),\n itertools.repeat(nets),\n itertools.repeat(critic),\n itertools.repeat(loss),\n itertools.repeat(seed),\n result.iterations,\n [-loss for loss in result.testing_losses], # Loss -> bound.\n result.classification_accuracies,\n itertools.repeat(label)))\n df_eval = pd.DataFrame(\n rows,\n columns=(\"exp_name\", \"nets\", \"Critic\", \"Estimator\",\n \"run\", \"iteration\", \"bound_value\", \"accuracy\", \"label\"))\n\n df_eval[\"Estimator\"] = df_eval[\"Estimator\"].replace(\n to_replace={\n \"cpc\": \"$CPC$\",\n \"pcc\": \"$PCC$\",\n \"drfc\": \"$D-RFC$\",\n \"wpc\": \"$WPC$\"\n })\n df_eval[\"Critic\"] = df_eval[\"Critic\"].replace(\n to_replace={\n \"concat\": \"MLP\",\n \"separable\": \"Separable\",\n \"innerprod\": \"Inner product\",\n \"bilinear\": \"Bilinear\"\n })\n return df_eval",
"def _trial(self, trial, X):\n\n # Evaluate TA defined as optuna trial string\n res = eval(self.function)\n\n # If return is tuple, convert to DF\n if isinstance(res, tuple):\n res = pd.DataFrame(res).T\n\n # Index result with X index\n res = pd.DataFrame(res, index=X.index)\n\n # Create consistent column names with function string and params\n name = col_name(self.function, trial.params)\n\n # Append integer identifier to DF with multiple columns\n if len(res.columns) > 1:\n res.columns = [f\"{name}_{i}\" for i in range(len(res.columns))]\n else:\n res.columns = [f\"{name}\"]\n return res",
"def to_timeseries(benchmark_data, x_label='Episode', y_label='Average Episode Reward',\n target=rewards_by_episode, cut_x=1e12, smooth=0):\n data_experiments, data_times, data_values = [], [], []\n\n for experiment_id, experiment_data in enumerate(benchmark_data):\n extended_results = experiment_data.extended_results()\n\n if smooth > 0:\n extended_results['rewards'] = np.array(pd.Series(extended_results['rewards']).ewm(span=smooth).mean())\n\n x, y = target(cut_x=cut_x, **extended_results)\n\n data_times.extend(x)\n data_values.extend(y)\n data_experiments.extend([experiment_id] * len(x))\n\n return pd.DataFrame({'experiment': data_experiments, x_label: data_times, y_label: data_values})",
"def run_tests():\n with open(FILENAME) as file:\n # Loads testing parameters from the yaml file.\n tests = yaml.safe_load(file)\n\n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results['Last Average Score'] = \"\"\n results['No of Q-Learning episodes'] = \"\"\n\n # run experiments:\n for i, test in enumerate(test_dict):\n grid = Rooms(test[\"env_size\"], testing=True)\n learning = QLearning(grid, test[\"gamma\"], test[\"alpha\"], test[\"agent_start_pos\"])\n e_greedy = Policy(\"e-greedy\", test[\"epsilon\"], test[\"decay\"])\n greedy = Policy(policy_type=\"greedy\")\n experiment = Experiments(grid, learning, greedy, test[\"iters\"],\n test[\"agent_start_pos\"], test[\"test_no\"])\n\n for session in range(test[\"iters\"]):\n learning.run_multiple_episodes(test[\"batch_episodes\"], e_greedy)\n mean_reward = experiment.run_experiments(test[\"exp_per_batch\"])\n\n results.loc[i,'Last Average Score'] = mean_reward\n results.loc[i,'No of Q-Learning episodes'] = (session + 1) * test[\"batch_episodes\"]\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n # plot & save graphs\n experiment.generate_results(test[\"test_no\"], test)\n\n return results",
"def metrics_dataframe(metrics):\n general_metrics = {}\n precision = []\n recall = []\n\n general_metrics[\"rmse\"] = [np.mean(i) for i in metrics[\"cv_rmse\"]]\n general_metrics[\"fit_time\"] = [np.mean(i) for i in metrics[\"cv_fit_time\"]]\n general_metrics[\"pred_time\"] = [\n np.mean(i) for i in metrics[\"cv_pred_time\"]]\n general_metrics[\"personalization\"] = [\n np.mean(i) for i in metrics[\"cv_personalization\"]]\n general_metrics[\"algo_name\"] = metrics[\"algo_name\"]\n\n for i in metrics[\"cv_precision\"]:\n precision.append({k: np.mean(v) for k, v in i.items()})\n for i in metrics[\"cv_recall\"]:\n recall.append({k: np.mean(v) for k, v in i.items()})\n precision, recall = mean_average_precision_recall(precision, recall)\n df_general_metrics = pd.DataFrame(general_metrics).set_index(\"algo_name\")\n df_precision = pd.DataFrame(precision, index=metrics[\"algo_name\"]).T\n df_recall = pd.DataFrame(recall, index=metrics[\"algo_name\"]).T\n\n return df_precision, df_recall, df_general_metrics",
"def creating_DataFrame(group_name,experiments,parameter):\n series = []\n keys_name = []\n for exp in experiments:\n keys_name.append('%s_%s_%s'%(group_name,parameter,exp))\n if parameter == 'II':\n series.append(experiments[exp].II)\n elif parameter == 'KI':\n series.append(experiments[exp].KI)\n elif parameter == 'Reads':\n series.append(experiments[exp].reads)\n elif parameter == 'Bias':\n series.append(experiments[exp].bias)\n \n \n fusion = pd.concat(series, axis = 1, keys= keys_name)#concatantaion of the different experiments\n \n if len(keys_name) > 1:\n \n fusion['%s_%s_mean'%(group_name,parameter)] = fusion.mean(axis = 1)\n fusion['%s_%s_stdev'%(group_name,parameter)] = fusion.std(axis = 1) \n return fusion",
"def experiment_to_dict(experiment: Experiment) -> Dict[str, Any]:\n return {\n \"__type\": experiment.__class__.__name__,\n \"name\": experiment._name,\n \"description\": experiment.description,\n \"experiment_type\": experiment.experiment_type,\n \"search_space\": experiment.search_space,\n \"optimization_config\": experiment.optimization_config,\n \"tracking_metrics\": list(experiment._tracking_metrics.values()),\n \"runner\": experiment.runner,\n \"status_quo\": experiment.status_quo,\n \"time_created\": experiment.time_created,\n \"trials\": experiment.trials,\n \"is_test\": experiment.is_test,\n \"data_by_trial\": experiment.data_by_trial,\n \"properties\": experiment._properties,\n \"default_data_type\": experiment._default_data_type,\n }",
"def make_output_df(self):\n df = pd.concat([pd.DataFrame(dat) for dat in [self.qdata, self.pdata]], axis=1)\n columns = np.hstack(([['{}{}'.format(x, c) for c in self.actions] for x in ['q', 'p']]))\n df.columns = columns\n df.insert(0, 'trial', np.arange(1, df.shape[0]+1))\n df['choice'] = self.choices\n df['feedback'] = self.feedback\n# r = np.array(self.bandits.rvalues)\n# p = np.array(self.bandits.preward)\n df['optimal'] = self.demand\n df.insert(0, 'agent', 1)\n self.data = df.copy()",
"def to_df(self) -> pd.DataFrame:\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]",
"def prepareDataframeForPivot(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n if self._isIndexedDataframe(df):\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.select_dtypes(include=['float64', 'int64'])\n if df.size == 0:\n df[\"values\"] = np.nan\n # try to keep group measures\n try:\n df.groupMeasures = result.groupMeasures\n except:\n pass\n # try to keep aggMeasures\n try:\n df.aggMeasures = result.aggMeasures\n except:\n pass\n\n return df",
"def tflog2pandas(path: str) -> pd.DataFrame:\n DEFAULT_SIZE_GUIDANCE = {\n \"compressedHistograms\": 1,\n \"images\": 1,\n \"scalars\": 0, # 0 means load all\n \"histograms\": 1,\n }\n runlog_data = pd.DataFrame({\"metric\": [], \"value\": [], \"step\": []})\n try:\n event_acc = EventAccumulator(path, DEFAULT_SIZE_GUIDANCE)\n event_acc.Reload()\n tags = event_acc.Tags()[\"scalars\"]\n # tags = event_acc.Tags()[\"images\"]\n for tag in tags:\n event_list = event_acc.Scalars(tag)\n values = list(map(lambda x: x.value, event_list))\n step = list(map(lambda x: x.step, event_list))\n r = {\"metric\": [tag] * len(step), \"value\": values, \"step\": step}\n r = pd.DataFrame(r)\n runlog_data = pd.concat([runlog_data, r])\n # Dirty catch of DataLossError\n except Exception:\n print(\"Event file possibly corrupt: {}\".format(path))\n traceback.print_exc()\n return runlog_data",
"def create_evaluation_df(predictions, test_inputs, H, scaler):\n eval_df = pd.DataFrame(predictions, columns=['t+'+str(t) for t in range(1, H+1)])\n eval_df['timestamp'] = test_inputs.dataframe.index\n eval_df = pd.melt(eval_df, id_vars='timestamp', value_name='prediction', var_name='h')\n eval_df['actual'] = np.transpose(test_inputs['target']).ravel()\n eval_df[['prediction', 'actual']] = scaler.inverse_transform(eval_df[['prediction', 'actual']])\n return eval_df",
"def main():\n parser = argparse.ArgumentParser(description=\"Process the results of an experiment.\")\n parser.add_argument(\"experiment\")\n arguments = parser.parse_args()\n path = f\"experiments/{arguments.experiment}\"\n if not os.path.exists(path):\n raise SystemExit(f\"Path {path} does not exists.\")\n\n # For efficiency, one should generate the results from the parts without merging them.\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n frames = []\n for file in files:\n device, experiment, _ = file.split(\".\")\n frame = pandas.read_csv(\n os.path.join(path, file),\n index_col=\"variable\",\n usecols=[\"variable\", \"group_index\", \"value_i\"], dtype={\"value_i\": \"Int64\"}\n )\n frame[\"board\"] = device\n frame[\"experiment\"] = experiment\n frames.append(frame)\n dataframe = pandas.concat(frames)\n frames = None\n\n current_grouping = dataframe.groupby([\"group_index\", \"variable\"])\n \n data = current_grouping.agg([\n numpy.median,\n _percentile_factory(95),\n numpy.mean,\n numpy.std,\n \"count\"\n ])\n\n print(data)\n \n data = data.droplevel([0], axis=1)\n data = data.unstack()\n data.columns = data.columns.map('_'.join)\n data.to_csv(f\"{arguments.experiment}.csv\")",
"def fetch_trial_data(self, trial: BaseTrial, **kwargs: Any) -> MetricFetchResult:\n\n try:\n if self.multiprocessing:\n with Pool(processes=min(len(trial.arms), MAX_NUM_PROCESSES)) as pool:\n records = pool.map(copy.deepcopy(self.evaluate_arm), trial.arms)\n pool.close()\n else:\n records = list(map(self.evaluate_arm, trial.arms))\n if isinstance(records[0], list):\n # Evaluation result output contains multiple metrics\n records = [metric for record in records for metric in record]\n for record in records:\n record.update({\"trial_index\": trial.index})\n return Ok(value=Data(df=pd.DataFrame.from_records(records)))\n except Exception as e:\n return Err(\n MetricFetchE(message=f\"Failed to fetch {self.name}\", exception=e)\n )",
"def resampler_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.resampler_records(run_idxs))",
"def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output",
"def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)",
"def lime_explanation_as_df(self, instance_ind=None, instance_interval=None,\n explainer_type='tabular', class_names=None, num_features=10):\n explainer = LimeExplainer(self.x_train, self.model, explainer_type=explainer_type, class_names=class_names)\n return explainer.get_explanation_as_df(instance_ind=instance_ind, instance_interval=instance_interval,\n num_features=num_features)",
"def to_dataframe(self, timeout_sec: int = DEFAULT_TIMEOUT_SEC) -> pd.DataFrame:\n records = [r for r in self.result(timeout_sec=timeout_sec)]\n return pd.DataFrame.from_records(records)",
"def logreg_results_to_pandas(common_molids_cache=False):\n results = ResultInDisk.collect_results_under_dir(MALARIA_LOGREGS_EXPERIMENT_ROOT,\n factory=malaria_result_factory)\n\n # --- molids cache\n molids_cache = None\n if common_molids_cache:\n rf_lab, rf_amb, rf_unl, rf_scr = malaria_logreg_fpt_providers(None)\n # Labelled molids\n lab_molids = rf_lab.ids()\n amb_molids = rf_amb.ids() # To prioritize confirmatory tests on labelled data\n # Unlabelled molids\n unl_molids = rf_unl.ids()\n scr_molids = rf_scr.ids()\n # Let's avoid the need to reread them...\n molids_cache = {\n 'lab': lab_molids,\n 'amb': amb_molids,\n 'unl': unl_molids,\n 'scr': scr_molids\n }\n\n results_dict_of_dicts = {}\n for result in results:\n if common_molids_cache:\n result.ids_cache = molids_cache # dodgy, rework with a copying constructor\n rdict = copy(result.info())\n rdict['result'] = result\n rdict['class_weight'] = 'uniform' if rdict['class_weight'] is None else rdict['class_weight']\n # Some more ad-hoc keys for the model\n rdict['num_present_folds'] = result.num_present_folds()\n rdict['auc_mean'] = result.auc_mean()\n rdict['enrichement5_mean'] = result.enrichement5_mean()\n # Some more ad-hoc keys for the fingerprint folder\n folder = result.fingerprint_folder()\n rdict['folder_seed'] = int(folder.seed) if folder is not None else -1\n rdict['folder_size'] = int(folder.fold_size) if folder is not None else 0\n # Add this result to the data frame\n results_dict_of_dicts[result.root_key()] = rdict\n\n return DataFrame(results_dict_of_dicts).T",
"def model_to_df(self, transpose=True):\n X = np.vstack([self.sales(), self.unit_contribution(),\n self.net_revenue(), self.depreciation(),\n self.before_tax_profit(), self.after_tax_profit(), self.cash_flow()])\n\n if transpose:\n X = np.transpose(X)\n df = pd.DataFrame(X, columns=['sales', 'unit_contribution', 'net_revenue',\n 'depreciation', 'before_tax_profit', 'after_tax_profit',\n 'cash_flow'])\n else:\n df = pd.DataFrame(X, index=['sales', 'unit_contribution', 'net_revenue',\n 'depreciation', 'before_tax_profit', 'after_tax_profit',\n 'cash_flow'])\n\n return df",
"def json2pd(json_results):\n\n data = []\n for line in json_results.split(\"\\n\"):\n if line:\n data.append(json.loads(line))\n\n df = pd.DataFrame(data)\n # process some of the fields\n df.timestamp = pd.to_datetime(df.timestamp, unit=\"s\")\n # drop rows whose \"metric\" is \"Timestamp\"\n df = df[[\"Timestamp\" not in x for x in df.metric]]\n # Set a multiindex\n df = df.set_index([\"test\", \"metric\", \"timestamp\"])\n # Keep only some columns\n df = df[[\"labels\", \"value\", \"unit\", \"run_uri\"]]\n return df"
] |
[
"0.70922107",
"0.6628502",
"0.6381592",
"0.60324854",
"0.5789459",
"0.56973666",
"0.5482064",
"0.5477098",
"0.5394673",
"0.5329763",
"0.5311363",
"0.52702683",
"0.5265646",
"0.52604896",
"0.52600574",
"0.5229816",
"0.5196886",
"0.51170677",
"0.5105222",
"0.5060614",
"0.5018467",
"0.50172496",
"0.49845308",
"0.49748915",
"0.49727845",
"0.49645674",
"0.49609503",
"0.49528995",
"0.4951232",
"0.4921087"
] |
0.7879921
|
0
|
Finds the optimal trial given an experiment, based on raw objective value. Returns a 1-row dataframe. Should match the row of ``exp_to_df`` with the best raw objective value, given the same arguments.
|
def get_best_trial(
    exp: Experiment,
    additional_metrics: Optional[List[Metric]] = None,
    run_metadata_fields: Optional[List[str]] = None,
    **kwargs: Any,
) -> Optional[pd.DataFrame]:
    objective = not_none(exp.optimization_config).objective
    if isinstance(objective, MultiObjective):
        logger.warning(
            "No best trial is available for `MultiObjective` optimization. "
            "Returning None for best trial."
        )
        return None
    if isinstance(objective, ScalarizedObjective):
        logger.warning(
            "No best trial is available for `ScalarizedObjective` optimization. "
            "Returning None for best trial."
        )
        return None
    if (additional_metrics is not None) and (
        objective.metric not in additional_metrics
    ):
        additional_metrics.append(objective.metric)
    trials_df = exp_to_df(
        exp=exp,
        metrics=additional_metrics,
        run_metadata_fields=run_metadata_fields,
        **kwargs,
    )
    if len(trials_df.index) == 0:
        logger.warning("`exp_to_df` returned 0 trials. Returning None for best trial.")
        return None
    metric_name = objective.metric.name
    minimize = objective.minimize
    if metric_name not in trials_df.columns:
        logger.warning(
            f"`exp_to_df` did not have data for metric {metric_name}. "
            "Returning None for best trial."
        )
        return None
    metric_optimum = (
        trials_df[metric_name].min() if minimize else trials_df[metric_name].max()
    )
    return pd.DataFrame(trials_df[trials_df[metric_name] == metric_optimum].head(1))
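Illustrative follow-up sketch (not part of this dataset row), reusing the hypothetical `exp` from the exp_to_df sketch above; it pulls the single best row and prints the objective column.
# Illustrative sketch: continues the previous example (same hypothetical `exp`).
best = get_best_trial(exp)
if best is not None:
    objective = not_none(exp.optimization_config).objective
    # the returned row should match the exp_to_df row with the optimal raw objective value
    print(best[["trial_index", "arm_name", objective.metric.name]])
else:
    print("No single best trial (multi-objective, scalarized, or no data).")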
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def objective(self, trial):\n # Suggest start row IDs\n selected = [self.all_df.index.max()]\n for i in range(self.n_points):\n id_min = 3 * (self.n_points - len(selected) + 1)\n id_max = selected[-1] - 3\n if id_min + 3 > id_max:\n return np.inf\n new = trial.suggest_int(str(i), id_min, id_max)\n selected.append(new)\n start_ids = sorted(selected)[:-1]\n # Create subsets\n subsets = self.create_subsets(start_ids)\n # Curve fitting for each subset\n df_list = [self.curve_fit(df, num)\n for (num, df) in enumerate(subsets, start=1)]\n # Calculate the error\n return self.error_f(df_list)",
"def evaluate_optimum(dataset: Dataset) -> pd.DataFrame:\n # Get the index of data point with highest observed objective\n optimum_idx = dataset.pretransform_df[dataset.pretransform_output_name].argmax()\n # Get the inputs of the data point with highest observed objective\n optimum_loc = dataset.pretransform_df[dataset.pretransform_input_names].iloc[[optimum_idx]]\n return optimum_loc",
"def _trial(self, trial, X):\n\n # Evaluate TA defined as optuna trial string\n res = eval(self.function)\n\n # If return is tuple, convert to DF\n if isinstance(res, tuple):\n res = pd.DataFrame(res).T\n\n # Index result with X index\n res = pd.DataFrame(res, index=X.index)\n\n # Create consistent column names with function string and params\n name = col_name(self.function, trial.params)\n\n # Append integer identifier to DF with multiple columns\n if len(res.columns) > 1:\n res.columns = [f\"{name}_{i}\" for i in range(len(res.columns))]\n else:\n res.columns = [f\"{name}\"]\n return res",
"def exp_to_df(\n exp: Experiment,\n metrics: Optional[List[Metric]] = None,\n run_metadata_fields: Optional[List[str]] = None,\n trial_properties_fields: Optional[List[str]] = None,\n **kwargs: Any,\n) -> pd.DataFrame:\n\n def prep_return(\n df: pd.DataFrame, drop_col: str, sort_by: List[str]\n ) -> pd.DataFrame:\n return not_none(not_none(df.drop(drop_col, axis=1)).sort_values(sort_by))\n\n # Accept Experiment and SimpleExperiment\n if isinstance(exp, MultiTypeExperiment):\n raise ValueError(\"Cannot transform MultiTypeExperiments to DataFrames.\")\n\n key_components = [\"trial_index\", \"arm_name\"]\n\n # Get each trial-arm with parameters\n arms_df = pd.DataFrame()\n for trial_index, trial in exp.trials.items():\n for arm in trial.arms:\n arms_df = arms_df.append(\n {\"arm_name\": arm.name, \"trial_index\": trial_index, **arm.parameters},\n ignore_index=True,\n )\n\n # Fetch results; in case arms_df is empty, return empty results (legacy behavior)\n results = exp.fetch_data(metrics, **kwargs).df\n if len(arms_df.index) == 0:\n if len(results.index) != 0:\n raise ValueError(\n \"exp.fetch_data().df returned more rows than there are experimental \"\n \"arms. This is an inconsistent experimental state. Please report to \"\n \"Ax support.\"\n )\n return results\n\n # Create key column from key_components\n arms_df[\"trial_index\"] = arms_df[\"trial_index\"].astype(int)\n key_col = \"-\".join(key_components)\n key_vals = arms_df[key_components[0]].astype(\"str\") + arms_df[\n key_components[1]\n ].astype(\"str\")\n arms_df[key_col] = key_vals\n\n # Add trial status\n trials = exp.trials.items()\n trial_to_status = {index: trial.status.name for index, trial in trials}\n arms_df[\"trial_status\"] = [\n trial_to_status[trial_index] for trial_index in arms_df.trial_index\n ]\n\n # Add generator_run model keys\n arms_df[\"generator_model\"] = [\n # This accounts for the generic case that generator_runs is a list of arbitrary\n # length. If all elements are `None`, this yields an empty string. Repeated\n # generator models within a trial are condensed via a set comprehension.\n \", \".join(\n {\n not_none(generator_run._model_key)\n for generator_run in exp.trials[trial_index].generator_runs\n if generator_run._model_key is not None\n }\n )\n if trial_index in exp.trials\n else \"\"\n for trial_index in arms_df.trial_index\n ]\n\n # replace all unknown generator_models (denoted by empty strings) with \"Unknown\"\n arms_df[\"generator_model\"] = [\n \"Unknown\" if generator_model == \"\" else generator_model\n for generator_model in arms_df[\"generator_model\"]\n ]\n\n # Add any trial properties fields to arms_df\n if trial_properties_fields is not None:\n if not (\n isinstance(trial_properties_fields, list)\n and all(isinstance(field, str) for field in trial_properties_fields)\n ):\n raise ValueError(\n \"trial_properties_fields must be List[str] or None. \"\n f\"Got {trial_properties_fields}\"\n )\n\n # add trial._properties fields\n for field in trial_properties_fields:\n trial_to_properties_field = {\n index: (\n trial._properties[field] if field in trial._properties else None\n )\n for index, trial in trials\n }\n if any(trial_to_properties_field.values()): # field present for any trial\n if not all(\n trial_to_properties_field.values()\n ): # not present for all trials\n logger.warning(\n f\"Field {field} missing for some trials' properties. 
\"\n \"Returning None when missing.\"\n )\n arms_df[\"trial_properties_\" + field] = [\n trial_to_properties_field[key] for key in arms_df.trial_index\n ]\n else:\n logger.warning(\n f\"Field {field} missing for all trials' properties. \"\n \"Not appending column.\"\n )\n\n # Add any run_metadata fields to arms_df\n if run_metadata_fields is not None:\n if not (\n isinstance(run_metadata_fields, list)\n and all(isinstance(field, str) for field in run_metadata_fields)\n ):\n raise ValueError(\n \"run_metadata_fields must be List[str] or None. \"\n f\"Got {run_metadata_fields}\"\n )\n\n # add run_metadata fields\n for field in run_metadata_fields:\n trial_to_metadata_field = {\n index: (\n trial.run_metadata[field] if field in trial.run_metadata else None\n )\n for index, trial in trials\n }\n if any(trial_to_metadata_field.values()): # field present for any trial\n if not all(\n trial_to_metadata_field.values()\n ): # not present for all trials\n logger.warning(\n f\"Field {field} missing for some trials' run_metadata. \"\n \"Returning None when missing.\"\n )\n arms_df[field] = [\n trial_to_metadata_field[key] for key in arms_df.trial_index\n ]\n else:\n logger.warning(\n f\"Field {field} missing for all trials' run_metadata. \"\n \"Not appending column.\"\n )\n\n if len(results.index) == 0:\n logger.info(\n f\"No results present for the specified metrics `{metrics}`. \"\n \"Returning arm parameters and metadata only.\"\n )\n exp_df = arms_df\n elif not all(col in results.columns for col in key_components):\n logger.warn(\n f\"At least one of key columns `{key_components}` not present in results df \"\n f\"`{results}`. Returning arm parameters and metadata only.\"\n )\n exp_df = arms_df\n else:\n # prepare results for merge\n key_vals = results[key_components[0]].astype(\"str\") + results[\n key_components[1]\n ].astype(\"str\")\n results[key_col] = key_vals\n metric_vals = results.pivot(\n index=key_col, columns=\"metric_name\", values=\"mean\"\n ).reset_index()\n\n # dedupe results by key_components\n metadata = results[key_components + [key_col]].drop_duplicates()\n metrics_df = pd.merge(metric_vals, metadata, on=key_col)\n\n # merge and return\n exp_df = pd.merge(\n metrics_df, arms_df, on=key_components + [key_col], how=\"outer\"\n )\n return prep_return(df=exp_df, drop_col=key_col, sort_by=[\"arm_name\"])",
"def _optimize(self, objective):\n points = self._get_eval_points()\n\n if self.matrix_to_vector_transform is not None:\n # Transform the sampled matrix points in vectors\n points = np.array([self.matrix_to_vector_transform(points[i]) for i in range(self._nb_samples)])\n\n evaluations = objective(points)\n idx_best = np.argmin(evaluations, axis=0)\n\n return sc_opt.OptimizeResult(x=points[idx_best, :], success=True, fun=evaluations[idx_best, :],\n nfev=points.shape[0], message=\"OK\")",
"def _objective(self, trial, X, y, weights=None, split=None):\n\n # Generate even weights if none\n if weights is None:\n weights = pd.Series(np.ones(len(y)), index=y.index)\n else:\n weights = pd.Series(weights, index=y.index)\n\n # Execute trial function\n try:\n res = eval(self.function)\n except:\n raise RuntimeError(f\"Optuna execution error: {self.function}\")\n\n # If indicator result is tuple, select the one of interest\n if isinstance(res, tuple):\n res = res[self.idx]\n\n # Ensure result is a dataframe with same index as X\n res = pd.DataFrame(res, index=X.index)\n\n # If indicator result is dataframe, select the one of interest\n if len(res.columns) > 1:\n res = pd.DataFrame(res.iloc[:, self.idx])\n\n # y may be a subset of X, so reduce result to y and convert to series\n res_y = res.reindex(y.index).iloc[:, 0].replace([np.inf, -np.inf], np.nan)\n\n # Save all trial results for pruning and reporting\n # Only the best trial will eventually be saved to limit storage requirements\n self.res_y.append(res_y) # Save results\n\n # Indicator result may be all NANs based on parameter set\n # Return FALSE and alert\n if np.isnan(res_y).sum() / len(res_y) > .95: # Most or all NANs\n self.res_y_corr.append(np.zeros(len(y)))\n if split is not None:\n return tuple([False] * (len(split) - 1))\n else:\n return False\n\n # Obtain correlation for entire dataset\n if self.spearman:\n corr = _weighted_spearman(np.array(y), np.array(res_y), np.array(weights))\n else:\n corr = _weighted_pearson(np.array(y), np.array(res_y), np.array(weights))\n\n # Save correlation for res_y\n self.res_y_corr.append(corr)\n\n # Multi-objective optimization\n # Obtain correlation to target for each split for Optuna to maximize\n if split is not None:\n mo = []\n for i, e in enumerate(split):\n if i == 0:\n s = e\n continue\n\n # y could be a subset of X, use index of X to filter y\n idx = X[s:e].index\n\n # Filter y based on X split\n y_se = np.array(y[y.index.isin(idx)]).astype('float64')\n\n # Filter y predictions based on X split\n res_y_se = np.array(res_y[res_y.index.isin(idx)]).astype('float64')\n\n # Filter weights based on X split\n weights_se = np.array(weights[weights.index.isin(idx)]).astype('float64')\n\n if np.isnan(res_y_se).sum() / len(res_y_se) > .95:\n return tuple([False]*(len(split)-1))\n\n if self.spearman:\n mo.append(_weighted_spearman(y_se, res_y_se, weights_se))\n else:\n mo.append(_weighted_pearson(y_se, res_y_se, weights_se))\n s = e\n return tuple(mo)\n\n # Single objective optimization return corr for entire dataset\n else:\n return corr",
"def _min_max(study):\n\n # Iterate pareto-front trials storing mean correlation and std dev\n df = []\n for trial in study.best_trials:\n df.append([trial.number, np.mean(trial.values), np.std(trial.values)])\n\n # Sort dataframe ascending by mean correlation\n df = pd.DataFrame(df).sort_values(by=2, ascending=True)\n\n # Sort df with best trial in first row\n if len(df) > 1 and len(df.iloc[:, 1:3].drop_duplicates()) > 1:\n\n # Create second pareto to maximize correlation and minimize stddev\n # Epsilons define precision, ie dominance over other candidates\n # Dominance is defined as x percent of stddev of stddev\n try:\n nd = pareto.eps_sort([list(df.itertuples(False))], objectives=[1, 2],\n epsilons=[1e-09, np.std(df[1])*.5], maximize=[1])\n except:\n # Something went wrong, return df\n nd = df\n\n # Sort remaining candidates\n nd = pd.DataFrame(nd).sort_values(by=2, ascending=True)\n\n # Only 1st trial so return it\n else:\n nd = df\n\n # Return \"best\" trial index\n return nd.iloc[0, 0]",
"def test_run_experiment_locally(self) -> None:\n\n experiment = Experiment(\n name=\"torchx_booth_sequential_demo\",\n search_space=SearchSpace(parameters=self._parameters),\n optimization_config=OptimizationConfig(objective=self._objective),\n runner=self._runner,\n is_test=True,\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},\n )\n\n scheduler = Scheduler(\n experiment=experiment,\n generation_strategy=(\n choose_generation_strategy(\n search_space=experiment.search_space,\n )\n ),\n options=SchedulerOptions(),\n )\n\n try:\n for _ in range(3):\n scheduler.run_n_trials(max_trials=2)\n\n # TorchXMetric always returns trial index; hence the best experiment\n # for min objective will be the params for trial 0.\n scheduler.report_results()\n except FailureRateExceededError:\n pass # TODO(ehotaj): Figure out why this test fails in OSS.\n # Nothing to assert, just make sure experiment runs.",
"def get_objective(self, sampler=None):\n def objective(params):\n circuit = self.get_circuit(params)\n circuit.make_cache()\n return self.get_energy(circuit, sampler)\n\n def obj_expect(params):\n circuit = self.get_circuit(params)\n circuit.make_cache()\n return self.get_energy_sparse(circuit)\n\n if sampler is not None:\n return objective\n if self.sparse is None:\n self.make_sparse()\n return obj_expect",
"def optimizer() -> pd.DataFrame:\n global iteration, coefs_all\n\n X_train, X_test, y_train, y_test = train_test()\n\n # Number of observations in train & test set\n n_train = len(X_train)\n n_test = len(X_test)\n n_coefs = X_train.shape[1]\n\n # Initialize coefficients\n coefs_init = np.random.normal(\n loc=conf.coef_mu, \n scale=conf.coef_sigma, \n size=n_coefs\n )\n coefs_all = coefs_init.copy()\n\n # Coefficients shall be bounded\n bounds = [conf.coef_bounds] * n_coefs\n iteration = 0\n\n # Run optimization\n result = minimize( \n fun=lambda x: loss_func(x, X_train, y_train, n_train),\n callback=lambda x: callback_func(x, X_train, y_train, n_train),\n x0=coefs_init,\n bounds=bounds\n )\n # Collect optimal coefficients\n coefs = pd.Series(data=result.x, index=X_train.columns, name=\"coefs\")\n coefs_df = pd.DataFrame(data=coefs_all, columns=X_train.columns).T\n coefs_df.index.name = \"area\"\n\n rmse_train = result.fun\n rmse_test = loss_func(\n coefs=coefs, \n X=X_test, \n y=y_test, \n n=n_test, \n )\n print(\"\\n\")\n print (f\"Train RMSE: {rmse_train:.4f}\")\n print (f\"Test RMSE: {rmse_test:.4f}\")\n\n return coefs_df",
"def tpe_sampler_search(feature_matrix, x_train, y_train, x_val, y_val, config, data_path):\n\n # Init sampler and n_trials\n sampler = optuna.samplers.TPESampler()\n n_trials = config['number_of_trials']\n # Create study\n study = optuna.create_study(sampler=sampler, direction='maximize')\n # Disable output\n optuna.logging.disable_default_handler()\n # Optimize\n study.optimize(lambda trial: objective(trial, feature_matrix, x_train, y_train,\n x_val, y_val, config, data_path),\n n_trials=n_trials)\n # Init model with best parameters\n print(\"Best trial: \", study.best_trial.number)\n print(\"Best parameters: \", study.best_params)\n # Load the best model.\n with open(data_path + 'interim/trial_{}.pickle'.format(study.best_trial.number), 'rb') as f:\n clf = pickle.load(f)\n # Delete all trials\n for trial_num in range(config['number_of_trials']):\n if os.path.exists(data_path + 'interim/trial_{}.pickle'.format(trial_num)):\n os.remove(data_path + 'interim/trial_{}.pickle'.format(trial_num))\n print(\"***Train***\")\n output_report(x_train, y_train, clf)\n print(\"***Validation***\")\n output_report(x_val, y_val, clf)\n # Remove keys from dict\n best_params_model = remove_keys_from_dict(study.best_params, keys=['ratio', 'sampling_strategy'])\n best_clf = model_init(feature_matrix, best_params_model, config)\n return best_clf, study.trials_dataframe()",
"def objective(args: Namespace, trial: optuna.trial._trial.Trial) -> float:\n # Paramters (to tune)\n args.embedding_dim = trial.suggest_int(\"embedding_dim\", 128, 512)\n args.num_filters = trial.suggest_int(\"num_filters\", 128, 512)\n args.hidden_dim = trial.suggest_int(\"hidden_dim\", 128, 512)\n args.dropout_p = trial.suggest_uniform(\"dropout_p\", 0.3, 0.8)\n args.lr = trial.suggest_loguniform(\"lr\", 5e-5, 5e-4)\n\n # Train (can move some of these outside for efficiency)\n logger.info(f\"\\nTrial {trial.number}:\")\n logger.info(json.dumps(trial.params, indent=2))\n artifacts = run(args=args, trial=trial)\n\n # Set additional attributes\n args = artifacts[\"args\"]\n performance = artifacts[\"performance\"]\n logger.info(json.dumps(performance[\"overall\"], indent=2))\n trial.set_user_attr(\"threshold\", args.threshold)\n trial.set_user_attr(\"precision\", performance[\"overall\"][\"precision\"])\n trial.set_user_attr(\"recall\", performance[\"overall\"][\"recall\"])\n trial.set_user_attr(\"f1\", performance[\"overall\"][\"f1\"])\n\n return performance[\"overall\"][\"f1\"]",
"def exp_to_df(\n exp: Experiment,\n metrics: Optional[List[Metric]] = None,\n key_components: Optional[List[str]] = None,\n **kwargs: Any,\n) -> pd.DataFrame:\n key_components = key_components or [\"trial_index\", \"arm_name\"]\n\n # Accept Experiment and SimpleExperiment\n if isinstance(exp, MultiTypeExperiment):\n raise ValueError(\"Cannot transform MultiTypeExperiments to DataFrames.\")\n\n results = exp.fetch_data(metrics, **kwargs).df\n if len(results.index) == 0: # Handle empty case\n return results\n key_col = \"-\".join(key_components)\n key_vals = results[key_components[0]].astype(\"str\")\n for key in key_components[1:]:\n key_vals = key_vals + results[key].astype(\"str\")\n results[key_col] = key_vals\n\n metric_vals = results.pivot(\n index=key_col, columns=\"metric_name\", values=\"mean\"\n ).reset_index()\n metadata = results[key_components + [key_col]].drop_duplicates()\n metric_and_metadata = pd.merge(metric_vals, metadata, on=key_col)\n arm_names_and_params = pd.DataFrame(\n [{\"arm_name\": name, **arm.parameters} for name, arm in exp.arms_by_name.items()]\n )\n exp_df = pd.merge(metric_and_metadata, arm_names_and_params, on=\"arm_name\")\n # pyre-fixme[16]: `Optional` has no attribute `sort_values`.\n return exp_df.drop(key_col, axis=1).sort_values(key_components)",
"def explain(self, X_e, Y_e, objective='SPARSITY', n_explanations=1, max_features=999999, max_runtime=60): \r\n \r\n if self.data_type==\"BINARY\":\r\n X_e = complement_binary_dataframe(X_e) \r\n else:\r\n X_e = complement_continuous_dataframe(X_e) \r\n \r\n assert(len(np.setdiff1d(Y_e, [0,1]))==0), 'currently supports binary datasets'\r\n assert(len(X_e)==len(Y_e)), 'Mismatch between the number of observations and predictions'\r\n assert(type(X_e)==pd.DataFrame), 'Expected dataframe for X_e'\r\n assert(set(self.features) == set(X_e.columns.values))\r\n assert(objective in ['SPARSITY','SUPPORT']), 'Unknown objective passed to explain_local'\r\n assert(n_explanations>=1), 'n_explanations should be >= 1'\r\n assert(max_features>=1), 'max_features should be >= 1'\r\n \r\n res = []\r\n for i in range(len(X_e)):\r\n df_i = self.__explain_local__(X_e.iloc[i], Y_e[i], objective, n_explanations, max_features, max_runtime)\r\n df_i.insert(0, \"#Observation\",i)\r\n df_i.insert(1, \"#Explanation\",df_i.index.values)\r\n #df_i.index = map(lambda x:\"Obs. #%s, Exp. #%s:\"%(str(i),str(x)),df_i.index.values)\r\n res.append(df_i)\r\n return(pd.concat(res).reset_index(drop=True))",
"def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best",
"def evaluate(self, ind, filename=\"\"):\n start = dt.now()\n # rule_set = self._generator.create_rule_set(ind)\n rule_set, indicators = self._generator.create_rule_set(ind)\n data_selected = self._data[indicators]\n\n # Calculate the signals according to the fuzzy rule set\n decision = fuzzy.DecisionMaker(rule_set, data_selected)\n\n # signals = pd.DataFrame([0.5, -0.4, 0.2, 0.4, 0.1, -0.2, -0.3, 0.2, 0.3, 0.1], index=self._data.index,columns=['Signal'])\n\n signals = []\n # signals = decision.defuzzify(self._data) #signal is a dataframe\n for row_index, data_row in zip(range(len(data_selected)), data_selected.iterrows()):\n dictionary = dict(data_row[1])\n signal = decision.defuzzify(dictionary)\n signals.append(signal)\n\n self._data['Signal'] = signals\n print(\"::::: [evaluator] Calculate signals \", dt.now() - start, \":::::\")\n\n start = dt.now()\n # Calculate the fitness value according to the trading signals\n Hold = 0\n Money = 10000000\n Fortune = []\n for i, row in self._data.iterrows():\n Hold, Money, fortune = self.trade(row, Hold, Money)\n Fortune.append(fortune)\n self._data['Fortune'] = Fortune\n # self._data['Operation'] = 0\n # self._data.Operation[self._data.Signal > 0] = 1\n # self._data.Operation[self._data.Signal < 0] = -1\n fit_val = self._data.iloc[-1]['Fortune']\n\n print(\"::::: [evaluator] Calculate fitness value\", dt.now() - start, \":::::\")\n return fit_val",
"def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r",
"def execQ5():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n cheapest = frame.sort_values(by='Price', ascending=True).head(1)\n return cheapest",
"def single_with_trials(single_without_success, orionstate, storage):\n exp = experiment_builder.build(name=\"test_single_exp\", storage=storage)\n\n x = {\"name\": \"/x\", \"type\": \"real\", \"value\": 100}\n results = {\"name\": \"obj\", \"type\": \"objective\", \"value\": 0}\n trial = Trial(experiment=exp.id, params=[x], status=\"completed\", results=[results])\n orionstate.database.write(\"trials\", trial.to_dict())\n return exp.configuration",
"def repeatexp(n, d, grid_size, reps, tho_scale=0.1, is_classification=True, no_signal=True):\n \n datasetList = ['Train', 'Holdout', 'Test']\n colList = ['perm', 'performance', 'dataset']\n \n df_list_std = []\n df_list_tho = []\n \n for perm in tqdm(range(reps)):\n \n vals_std, vals_tho = fitModels_paramTuning(n, d, grid_size,\n is_classification=is_classification,\n tho_scale=tho_scale,\n no_signal=no_signal)\n for i, ds in enumerate(datasetList):\n df_list_std.append((perm, vals_std[i], ds))\n df_list_tho.append((perm, vals_tho[i], ds))\n\n df_std = pd.DataFrame(df_list_std, columns=colList)\n df_tho = pd.DataFrame(df_list_tho, columns=colList)\n return df_std, df_tho",
"def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()",
"def execQ6():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n expensive = frame.sort_values(by='Price', ascending=False).head(1)\n return expensive",
"def objective(trial):\n %time\n env = gym.make('Delivery-v0')\n alpha = trial.suggest_discrete_uniform('alpha', 0.3,0.9,0.3)\n gamma = trial.suggest_discrete_uniform('gamma', 0.6, 1,0.1)\n epsilon = trial.suggest_discrete_uniform('epsilon', 0.01, 0.11, 0.04)\n episodes = 1000000\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n last_reward = np.mean(rewards)\n # trial.report(-1 * last_reward)\n\n return -1 * last_reward",
"def hyperopt_func(model_dict, model_param_names, training_param_names, param_space, datasets, max_evals=30):\n tester = fitness(model_dict, model_param_names, training_param_names, datasets)\n trials = Trials()\n \n timer_start = timer()\n best = fmin(fn=tester.objective, \n space=param_space, \n algo=tpe.suggest, \n max_evals=max_evals, \n trials=trials, \n rstate=np.random.RandomState(50))\n timer_end = timer()\n print('Total training time (min):',(timer_end-timer_start)/60)\n results = sorted(trials.results, key = lambda x: x['loss'])\n return results",
"def DataFrameSolution(E, CP, M, CHI, MAbs, N=None):\n\n if N == None:\n solutions = [{'MeanEnergy' : E,\\\n 'SpecificHeat' : CP,\\\n 'MeanMagnetization' : M,\\\n 'Susceptibility' : CHI,\\\n 'MeanMagnetizationAbs' : MAbs}]\n\n dataframe = pd.DataFrame(solutions)\n else:\n solutions = [{'MCcycles' : N,\\\n 'MeanEnergy' : E,\\\n 'SpecificHeat' : CP,\\\n 'MeanMagnetization' : M,\\\n 'Susceptibility' : CHI,\\\n 'MeanMagnetizationAbs' : MAbs}]\n\n dataframe = pd.DataFrame(solutions)\n dataframe.set_index('MCcycles', inplace=True)\n\n return dataframe",
"def _exploit(self, trial_executor, trial, trial_to_clone):\n\n trial_state = self._trial_state[trial]\n new_state = self._trial_state[trial_to_clone]\n \n if not new_state.last_checkpoint:\n logger.info(\"[pbt]: no checkpoint for trial.\"\n \" Skip exploit for Trial {}\".format(trial))\n return\n \n # if we are at a new timestep, we dont want to penalise for trials still going\n if self.data['T'].max() > self.latest:\n self.current = None\n \n print(\"\\n\\n\\n\\n Copying: \\n{} \\n with:{} \\n\\n\".format(str(trial), str(trial_to_clone)))\n new_config, lengthscale, mindist, meandist, data = explore(self.data, self.bounds,\n self.current,\n trial_to_clone,\n trial,\n trial_to_clone.config,\n self._hyperparam_mutations,\n self._resample_probability)\n \n # important to replace the old values, since we are copying across\n self.data = data.copy()\n \n # if the current guy youre selecting is at a point youve already done, \n # then append the data to the \"current\" which is the points in the current batch\n \n new = []\n for key in self._hyperparam_mutations.keys():\n new.append(new_config[key])\n \n new = np.array(new)\n new = new.reshape(1, new.size)\n if self.data['T'].max() > self.latest:\n self.latest = self.data['T'].max()\n self.current = new.copy()\n else:\n self.current = np.concatenate((self.current, new), axis=0)\n print(\"\\n\\n\\n\\n\\n Currently Evaluating \\n\\n\\n\\n\\n\")\n print(self.current)\n print(\"\\n\\n\\n\\n\\n\")\n \n # log the lengthscale\n self.meta['timesteps'].append(self.data['T'].values[-1])\n self.meta['lengthscales'].append(lengthscale)\n self.meta['closest'].append(mindist)\n self.meta['meandist'].append(meandist)\n meta = pd.DataFrame({'timesteps': self.meta['timesteps'], \n 'lengthscales': self.meta['lengthscales'],\n 'closest': self.meta['closest'],\n 'meandist': self.meta['meandist']})\n meta.to_csv('meta_data.csv')\n \n logger.info(\"[exploit] transferring weights from trial \"\n \"{} (score {}) -> {} (score {})\".format(\n trial_to_clone, new_state.last_score, trial,\n trial_state.last_score))\n\n if self._log_config:\n self._log_config_on_step(trial_state, new_state, trial,\n trial_to_clone, new_config)\n\n new_tag = make_experiment_tag(trial_state.orig_tag, new_config,\n self._hyperparam_mutations)\n reset_successful = trial_executor.reset_trial(trial, new_config,\n new_tag)\n if reset_successful:\n trial_executor.restore(\n trial, Checkpoint.from_object(new_state.last_checkpoint))\n else:\n trial_executor.stop_trial(trial, stop_logger=False)\n trial.config = new_config\n trial.experiment_tag = new_tag\n trial_executor.start_trial(\n trial, Checkpoint.from_object(new_state.last_checkpoint))\n\n self._num_perturbations += 1\n # Transfer over the last perturbation time as well\n trial_state.last_perturbation_time = new_state.last_perturbation_time",
"def run(self) -> Tuple[Path, pd.DataFrame]:\n\n # Construct the data set\n dataset: Dataset = self.construct_dataset()\n\n assert (\n self.config.zoomopt is not None\n ), \"You need to set the 'zoomopt' field in the config to use Zoom optimizer.\"\n batch_transformed_space: np.ndarray = _suggest_samples(dataset=dataset, settings=self.config.zoomopt)\n\n # Transform the batch back to original space\n batch_original_space: pd.DataFrame = self.suggestions_to_original_space(\n dataset=dataset, new_samples=batch_transformed_space\n )\n\n # Save the batch to the disk and return it\n batch_original_space.to_csv(self.config.experiment_batch_path, index=False)\n # Save the inferred optimum\n optimum = evaluate_optimum(dataset)\n optimum.to_csv(self.config.results_dir / \"optima.csv\", index=False)\n return self.config.experiment_batch_path, batch_original_space",
"def get_results_df(fname, problem):\n t = '\\t'\n \n # Cols to add:\n val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n \n # Rename cols: c (temp) -> Searcher\n df.columns = ['c', 'Searcher']\n # Add new cols & reindex\n df = df.reindex(columns = df.columns.tolist() + val_cols)\n \n # Populate new cols according to row with search name:\n sr = df.loc[df.c == 'Searcher', 'Searcher'] \n for (idx, sr_row) in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n\n df.dropna(inplace=True)\n # Add a minute column:\n df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)\n \n # Replace values of 1st col with problem name & update col name:\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n \n return df, ''",
"def snaive_exp_smoothing_method_pred(training_data,HORIZON,METHOD=\"simple\",smoothing_level=.3,optimized=True,smoothing_slope=.05):\n \n \n exp_smoothing_type = METHOD #\"simple\"\n data_predictions = pd.DataFrame(index=training_data.index[-HORIZON:]+timedelta(days=HORIZON))\n data_predictions.astype(np.float)\n for i in range(7):\n for series_name in training_data.filter(regex='^series').columns:\n try:\n data_predictions[series_name].shape\n except:\n data_predictions[series_name]=0.0\n if exp_smoothing_type == \"holt\":\n model = Holt(training_data[series_name][training_data.index.dayofweek==i])\n elif exp_smoothing_type == \"simple\":\n model = SimpleExpSmoothing(training_data[series_name][training_data.index.dayofweek==i])\n \n model._index = training_data[training_data.index.dayofweek==i].index\n \n if exp_smoothing_type == \"holt\":\n if optimized:\n fit = model.fit(optimized=True)\n else:\n fit = model.fit(smoothing_level=smoothing_level, smoothing_slope=smoothing_slope)\n elif exp_smoothing_type == \"simple\":\n fit = model.fit(smoothing_level=smoothing_level)\n \n \n #pred = fit.forecast(HORIZON)\n data_predictions[series_name][data_predictions.index.dayofweek==i] = fit.forecast(HORIZON//7)\n \n return data_predictions",
"def run_and_evaluate():\n tsp_problems = read_all_problems()\n # Empty list of metrics\n results = []\n for problem in tqdm.tqdm(tsp_problems):\n # As random factors are involved repeat experiments a couple of times\n best_routes_base = []\n best_routes_af = []\n best_routes_ms = []\n base_times = []\n af_times = []\n ms_times = []\n for i in range(10):\n # Base solution\n start_time = timeit.default_timer()\n best_route_base = solve_tsp_basic(problem)\n base_time = timeit.default_timer() - start_time\n best_routes_base.append(Fitness(route=best_route_base).route_distance())\n base_times.append(base_time)\n\n # AF clustering solution\n start_time = timeit.default_timer()\n best_route_af = solve_tsp_affinity_propagation(problem)\n af_time = timeit.default_timer() - start_time\n best_routes_af.append(Fitness(route=best_route_af).route_distance())\n af_times.append(af_time)\n\n # MS solution\n start_time = timeit.default_timer()\n best_route_ms = solve_mean_shift(problem)\n ms_time = timeit.default_timer() - start_time\n best_routes_ms.append(Fitness(route=best_route_ms).route_distance())\n ms_times.append(ms_time)\n\n results.append(\n {\n \"problem name\": problem.name,\n \"optimal solution\": find_route_optimal_route_length(problem),\n \"baseline tour length\": mean(best_routes_base),\n \"af clustering tour length\": mean(best_routes_af),\n \"ms clustering tour length\": mean(best_routes_ms),\n \"baseline algorithm time\": mean(base_times),\n \"af clustering algorithm time\": mean(af_times),\n \"ms clustering algorithm time\": mean(ms_times),\n }\n )\n # Create dataframe and safe results\n df = pd.DataFrame(results)\n df.to_csv(\"results.csv\", index=False)\n return df"
] |
[
"0.6023675",
"0.57024705",
"0.549064",
"0.5453015",
"0.53470635",
"0.5274136",
"0.49488753",
"0.48567286",
"0.4852621",
"0.48308128",
"0.47868332",
"0.47639084",
"0.47044444",
"0.46960852",
"0.46759018",
"0.46727654",
"0.46523926",
"0.465156",
"0.4637326",
"0.46021733",
"0.45883653",
"0.457882",
"0.45651728",
"0.45585307",
"0.4538632",
"0.45369974",
"0.4526135",
"0.4518623",
"0.45167705",
"0.45141566"
] |
0.70668334
|
0
|
Display the form on GET; on submission, save the uploaded PDF to the "serial" directory and return the populated form along with the generated thumbnails.
|
def generate_thumbnails(self):
    form = Form(PDFUploadSchema(), buttons=("submit",))
    if "submit" in self.request.POST:
        log.info("submit: %s", self.request.POST)
        controls = self.request.POST.items()
        try:
            appstruct = form.validate(controls)
            rendered_form = form.render(appstruct)
            self.write_upload_files(appstruct)
            self.write_thumbnails(appstruct)
            return {"form": rendered_form, "appstruct": appstruct}
        except ValidationFailure as exc:
            log.info("Validation failure")
            return {'form': exc.render()}
    return {"form": form.render()}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_pdf(submission):\n # Get questions from sections\n fields = {}\n for section in submission.questions:\n for form in section[\"forms\"]:\n for field in form[\"fields\"]:\n fs = field.get(\"fields\", [field])\n for f in fs:\n fields[f[\"name\"]] = f\n\n # Pull out image and answers\n images = []\n docs = []\n answers = []\n for answer in submission.answers:\n answer, name = answer.get(\"answer\", \"\"), answer.get(\"name\", \"\")\n field = fields[name]\n if field[\"type\"] == \"FILE\":\n image_ids = []\n doc_ids = []\n for file in answer:\n if \"image\" in file:\n image_ids.append(file[\"id\"])\n elif \"file\" in file:\n doc_ids.append(file[\"id\"])\n\n if image_ids:\n images += [\n image_upload.image\n for image_upload in ImageUpload.objects.filter(\n pk__in=image_ids\n ).all()\n ]\n if doc_ids:\n docs += [\n file_upload.file\n for file_upload in FileUpload.objects.filter(pk__in=doc_ids).all()\n ]\n else:\n answers.append(\n {\n \"name\": name.lower().replace(\"_\", \" \").capitalize(),\n \"prompt\": field.get(\"prompt\", \"\"),\n \"answers\": answer if type(answer) is list else [answer],\n }\n )\n\n context = {\n \"submission\": submission,\n \"answers\": answers,\n \"images\": images,\n \"docs\": docs,\n }\n pdf_html_str = render_to_string(\"client-intake.html\", context=context)\n pdf_bytes = weasyprint.HTML(string=pdf_html_str).write_pdf()\n return pdf_bytes",
"def generate_pdf():\n uu_id = uuid.uuid4().hex\n current_app.logger.debug(uu_id)\n current_app.logger.debug(request.form)\n tmp_pdf_filename = \"{}_tmp.pdf\".format(uu_id)\n pdf_filename = \"{}.pdf\".format(uu_id)\n tmp_out_pdf_path = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], tmp_pdf_filename\n )\n out_pdf_path = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], pdf_filename\n )\n fill_template_from_input(\n request.form,\n current_app.config[\"PDF_TEMPLATE_PATH\"],\n tmp_out_pdf_path,\n INPUT_FORM_MAP\n )\n fdf_tmp = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], 'tmp.fdf'\n )\n os.system('pdftk ' + tmp_out_pdf_path + ' generate_fdf output ' + fdf_tmp)\n os.system(\n 'pdftk ' + tmp_out_pdf_path + ' fill_form ' + fdf_tmp +\n ' output ' + out_pdf_path + ' flatten'\n )\n os.remove(tmp_out_pdf_path)\n return pdf_filename",
"def form_valid(self, *args, **kwargs):\n\t\tform = kwargs['form']\n\n\t\tself.object = form.save(commit=False)\n\n\t\timport logging\n\t\tlogger = logging.getLogger(\"furnicloud\")\n\t\tlogger.debug(form.get_data_fields_dict())\n\n\t\t# save data fields to model instance\n\t\tdata_fields = form.cleaned_data\n\t\tdel data_fields['claim']\n\t\tdel data_fields['file']\n\t\tself.object.data_fields = json.dumps(data_fields)\n\n\t\t# generate PDF claim request file and save to model instance field\n\t\tpdf_template = self.get_pdf_template_name()\n\t\ttemplate = pdf.get_template(pdf_template)\n\t\tcontext = form.get_data_fields_dict()\n\t\twith TemporaryFile(mode=\"w+b\") as f: #open('', 'wb') as f:\n\t\t\tpdf_contents = template.render(context) \n\t\t\tf.write(pdf_contents)\n\t\t\t#f.write(json.dumps(context))\n\t\t\tf.seek(0) # go to beginning of file\n\t\t\t#reopen = open('/tmp/claim.pdf', 'rb')\n\t\t\tclaim_file = File(f)\n\t\t\tdate_str = datetime.now().strftime(\"%Y_%m_%d\")\n\t\t\tself.object.file.save(\"claim_request\" + date_str + \".pdf\", claim_file, save=True)\n\n\t\t# save order\n\t\tself.object.save()\n\n\t\t#response = HttpResponse(content_type='application/pdf')\n\t\t#response['Content-Disposition'] = \\\n\t\t#\t'attachment; filename=Natuzzi_service_request_form.pdf'\n\n\t\t#response.write(pdf_contents)\n\t\t#return response\n\n\t\treturn HttpResponseRedirect(self.get_success_url())",
"def book(request, slug):\n if request.method == 'POST':\n book = Book.objects.get(slug=slug)\n form = AddForms(request.POST, instance=book)\n if form.is_valid():\n form.save()\n messages.info(request, \"Info updated\")\n else:\n logger = logging.getLogger(__name__)\n logger.error(form.errors)\n messages.error(request, form.errors)\n return redirect(request.path)\n else:\n book = Book.objects.get(slug=slug)\n book_file = '/media/' + str(book.pdf)\n return render(request, 'book.html', {'book_file': book_file, 'book': book, 'url': book.url})",
"def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n filepath = 'static/images/{}/{}'.format(request.form['qquuid'], request.form['qqfilename'])\n session['img_upload_filepath'] = filepath\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})",
"def pdf_manager(self):\n\n s3ocr_root = self.s3ocr_etree() # get element s3xml\n\n # define font size\n titlefontsize = 18\n sectionfontsize = 15\n regularfontsize = 13\n hintfontsize = 10\n \n # etree labels\n ITEXT = \"label\"\n HINT = \"comment\"\n TYPE = \"type\"\n HASOPTIONS = \"has_options\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n\n #l10n\n l10n = self.l10n\n\n # get pdf title\n if self.pdftitle == None or self.pdftitle == \"\":\n try:\n pdftitle = self.manager.s3.crud_strings[\\\n self.tablename].subtitle_list.decode(\"utf-8\")\n except:\n pdftitle = self.resource.tablename\n else:\n pdftitle = self.pdftitle\n\n # prepare pdf\n form = Form()\n form.decorate()\n\n # set header\n form.canvas.setTitle(pdftitle) # set pdf meta title\n form.print_text([pdftitle,],\n fontsize=titlefontsize,\n style=\"center\") # set pdf header title\n\n form.print_text(\n [\n unicode(l10n.get(\"ocr_inst\").get(\"inst1\").decode(\"utf-8\")),\n unicode(l10n.get(\"ocr_inst\").get(\"inst2\").decode(\"utf-8\")),\n unicode(l10n.get(\"ocr_inst\").get(\"inst3\").decode(\"utf-8\"))\n ],\n fontsize=regularfontsize,\n gray=0)\n form.linespace(3)\n # printing the etree\n for eachresource in s3ocr_root:\n form.draw_line()\n form.print_text([\n eachresource.attrib.get(ITEXT,\n eachresource.attrib.get(\"name\"))\n ],\n fontsize=sectionfontsize)\n form.draw_line(nextline=1)\n form.linespace(12) # line spacing between each field\n for eachfield in eachresource.iterchildren():\n fieldlabel = eachfield.attrib.get(ITEXT)\n spacing = \" \" * 5\n fieldhint = self.__trim(eachfield.attrib.get(HINT))\n if fieldhint != \"\" and fieldhint != None:\n form.print_text([\"%s%s( %s )\" % \\\n (fieldlabel,\n spacing,\n fieldhint)],\n fontsize=regularfontsize)\n else:\n form.print_text([fieldlabel],\n fontsize=regularfontsize)\n\n if eachfield.attrib.get(\"readable\", \"False\") == \"True\" and \\\n eachfield.attrib.get(\"writable\", \"False\") == \"False\":\n # if it is a readonly field\n form.print_text(\n [eachfield.attrib.get(\"default\",\"No default Value\")],\n seek=10,\n )\n elif eachfield.attrib.get(HASOPTIONS) == \"True\":\n fieldtype = eachfield.attrib.get(TYPE)\n # if the field has to be shown with options\n if fieldtype == \"boolean\":\n form.nextline()\n form.resetx()\n bool_text = l10n.get(\"boolean\")\n form.print_text(\n [bool_text.get(\"yes\").decode(\"utf-8\")],\n continuetext=1,\n seek=3,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n form.print_text(\n [bool_text.get(\"no\").decode(\"utf-8\")],\n continuetext=1,\n seek=10,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n else:\n if fieldtype == \"multiselect\":\n option_hint = l10n.get(\"select\").get(\"multiselect\")\n else:\n option_hint = l10n.get(\"select\").get(\"singleselect\")\n form.print_text(\n [option_hint.decode(\"utf-8\")],\n fontsize=hintfontsize,\n gray=0.4,\n seek=3,\n )\n s3ocrselect = eachfield.getchildren()[0]\n form.nextline(regularfontsize)\n form.resetx() # move cursor to the front\n optionseek = 10\n # resting margin for options\n formmargin = form.marginsides\n form.marginsides = optionseek + formmargin\n for eachoption in s3ocrselect.iterchildren():\n form.print_text(\n [eachoption.text],\n continuetext=1,\n fontsize = regularfontsize,\n seek = 10,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n # restoring orginal 
margin\n form.marginsides = formmargin\n \n else:\n # if it is a text field\n fieldtype = eachfield.attrib.get(TYPE)\n BOXES_TYPES = [\"string\", \"textbox\", \"integer\",\n \"double\", \"date\", \"datetime\",]\n if fieldtype in BOXES_TYPES:\n if fieldtype in [\"string\", \"textbox\"]:\n form.linespace(3)\n num_lines = int(eachfield.attrib.get(\"lines\",\n 1))\n for eachline in xrange(num_lines):\n # TODO: Store positions\n form.draw_check_boxes(\n completeline=1,\n gray=0.9,\n seek=3,\n )\n elif fieldtype in [\"integer\", \"double\"]:\n num_boxes = int(eachfield.attrib.get(\"boxes\",\n 9))\n form.linespace(3)\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = num_boxes,\n gray=0.9,\n seek=3,\n )\n elif fieldtype in [\"date\", \"datetime\"]:\n # print hint\n hinttext = \\\n l10n.get(\"datetime_hint\").get(fieldtype).decode(\"utf-8\")\n form.print_text(\n [hinttext],\n fontsize=hintfontsize,\n gray=0.4,\n seek=3,\n )\n form.linespace(8)\n datetime_continuetext = 0\n datetime_seek = 3\n if fieldtype == \"datetime\":\n datetime_continuetext = 1\n datetime_seek = 6\n #HH\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n seek = 3,\n )\n #MM\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n # DD\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext = datetime_continuetext,\n seek = datetime_seek,\n )\n # MM\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n # YYYY\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 4,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n else:\n self.r.error(501, self.manager.PARSE_ERROR)\n print sys.stderr(\"%s :invalid field type: %s\" %\\\n (eachfield.attrib.get(\"name\"),\n fieldtype))\n return form.save()",
"def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)",
"def submit(request):\n if request.POST:\n form = CaptchaForm(request.POST, request.FILES)\n if form.is_valid():\n image = request.FILES['singleImage']\n extension = image.name.split('.')[1]\n hashname = random.getrandbits(128)\n with open(os.path.join(settings.STATIC_ROOT, \"tmp/%s.%s\" % (hashname, extension)), \"w+\") as imagePath:\n imagePath.write(image.read())\n\n ctx = RequestContext(request, {\"hash\":hashname, \"extension\":extension})\n template = loader.get_template(\"wainz/submission_details.html\")\n\n return HttpResponse(template.render(ctx))\n else:\n form = CaptchaForm()\n\n return render_to_response(\"wainz/submit.html\", dict(form=form), context_instance = RequestContext(request))",
"def document_upload():\n form = SourceTextForm()\n if form.validate_on_submit():\n user = current_user\n\n doc = {}\n doc[\"file\"] = form.filename.data\n doc[\"author\"] = form.author.data\n doc[\"title\"] = form.title.data\n doc[\"language\"] = form.language.data\n\n params = {}\n params[\"email\"] = user.email\n params[\"new_page\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"PAGE_LIMIT\"]\n params[\"line_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"LINE_SIZE\"]\n params[\"early_cutoff\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"EARLY_CUTOFF\"]\n params[\"batch_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"BATCH_SIZE\"]\n params[\"tokenizer\"] = current_app.config[\"TOKENIZER\"].select(doc[\"language\"])\n params[\"resource\"] = create_book\n doc_uploader = DocumentUploader(params)\n \n could_upload = True\n try:\n doc_uploader.upload(doc)\n except Exception as e:\n traceback.print_exc()\n could_upload = False\n error_msg = \"Error uploading document. Please try again.\"\n flash(error_msg)\n\n if could_upload:\n success_msg = \"Document successfully uploaded.\"\n flash(success_msg)\n\n return render_template('content_management/document_upload.html', form=form)",
"def index():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n alert('File not found.', 'danger')\n return redirect(request.url)\n\n file = request.files['file']\n\n # if user does not select file\n if file.filename == '':\n alert('No file selected.', 'warning')\n return redirect(request.url)\n\n #if a correct file is uploaded\n if file and allowed_file(file.filename):\n #Eliminate chance of injection attack\n filename = secure_filename(file.filename)\n\n #Save the file to the static/uploads folder\n if not os.path.exists(app.config['UPLOAD_FOLDER']): os.makedirs(app.config['UPLOAD_FOLDER'])\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n #Split the file and collect the text. Store the number of pages.\n num_pages = pf.split_PDF(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n #Delete the file once it's been split and parsed\n os.remove(os.path.join('static', 'uploads', filename))\n alert(f'Uploaded {num_pages} page(s) from {file.filename} successfully.', 'success')\n \n return render_template('index.html', alerts=alert_messages)\n\n return render_template('index.html', alerts=alert_messages)",
"def form(request):\n\n font_config = FontConfiguration()\n # Use the BCSans font as the default.\n css = CSS(string='''\n @font-face {\n font-family: 'BCSans';\n font-style: normal;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-Regular.woff') format('woff');\n }\n @font-face {\n font-family: 'BCSans';\n font-style: italic;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-Italic.woff') format('woff');\n }\n @font-face {\n font-family: 'BCSans';\n font-weight: 700;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-Bold.woff') format('woff');\n }\n @font-face {\n font-family: 'BCSans';\n font-style: italic;\n font-weight: 700;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-BoldItalic.woff') format('woff');\n }''', font_config=font_config)\n\n\n\n data = json.loads(request.body)\n name = request.GET['name']\n template = '{}.html'.format(name)\n\n template = get_template(template)\n html_content = template.render(data)\n\n pdf_content = render_pdf(html_content)\n\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\n\n response.write(pdf_content, stylesheet[css], font_config=font_config)\n\n return response",
"def createpdf():\n with app.app_context():\n # Get form data\n if request.form:\n data = request.form\n else:\n return 'no form'\n msg = {}\n msg['name'] = data['name']\n msg['role'] = data['role']\n msg['unit'] = data['unit']\n msg['unitdetail'] = data['unitdetail']\n msg['phone'] = data['phone']\n msg['email'] = data['email']\n msg['employmentdate'] = data['employmentdate']\n filename = 'default.png'\n if 'img' in request.files:\n file = request.files['img']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename).replace(\"_\",\"\")\n portraitFilePath = os.path.join(app.config['IMAGE_UPLOADS'], filename)\n file.save(portraitFilePath)\n if 'presentation' in data:\n msg['presentation'] = data['presentation']\n if 'edu-title' in data:\n msg['edu'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('edu-title'), request.form.getlist('edu-time'))]\n msg['edu'].sort(key = itemgetter('title'))\n msg['edu'].sort(key = itemgetter('time'), reverse=True)\n if 'emp-title' in data:\n msg['emp'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('emp-title'), request.form.getlist('emp-time'))]\n msg['emp'].sort(key = itemgetter('title'))\n msg['emp'].sort(key = itemgetter('time'), reverse=True)\n if 'cou-title' in data:\n msg['cou'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('cou-title'), request.form.getlist('cou-time'))]\n msg['cou'].sort(key = itemgetter('title'))\n msg['cou'].sort(key = itemgetter('time'), reverse=True)\n if 'ass-title' in data:\n msg['ass'] = [{'title': i, 'company': j, 'role': k, 'descr': l, 'time': m} for i,j,k,l,m in zip(request.form.getlist('ass-title'), request.form.getlist('ass-company'), request.form.getlist('ass-role'), request.form.getlist('ass-descr'), request.form.getlist('ass-time'))]\n msg['ass'].sort(key = itemgetter('title'))\n msg['ass'].sort(key = itemgetter('time'), reverse=True)\n\n cv = TEXTEMPLATE.render(msg = msg, portrait = 'img/' + filename)\n pdf = writeTex(cv, app.config[\"OUT_DIR\"], filename)\n deleteImgUpload(filename)\n return redirect(\"/getpdf/\" + pdf)",
"def upload():\n\n file = request.files['query']\n filepath = upload_filepath(secure_filename(file.filename))\n file.save(filepath)\n classification = classify(filepath)\n classification['filename'] = file.filename\n return render_template('index.html', classification=classification)",
"def clean_PDF(submission):\n src = submission.file_upload.file.name\n pdf1 = PdfFileReader(src)\n merger = PdfFileMerger(strict=False, )\n merger.append(pdf1, import_bookmarks=False)\n merger.addMetadata({'/Title': '',\n '/Author': '',\n '/Creator': '',\n '/Producer': ''})\n fd, temp_file = tempfile.mkstemp(suffix='.pdf')\n merger.write(temp_file)\n merger.close()\n os.close(fd)\n shutil.move(temp_file, src) # replace the original PDF on the server",
"def form_valid(self, form):\n file_in_memory = form.cleaned_data \n xml_text = forms.handle_upload(file_in_memory)\n data = parse_txt(xml_text)\n return render(self.request, 'esfviewer/output.html', {'data': data})",
"def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # 
fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()",
"def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')",
"def save(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n return self.cleaned_data\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n return self.cleaned_data\n\n if step_numeral == 3:\n pass # end-user is previewing",
"def index():\r\n # Generate the number the WB will use to come back to\r\n # their submission\r\n wb_number = randomizer.generate_tulip_receipt()\r\n\r\n # Perform a check to see if the client is using Tor\r\n anonymity = Anonymity.TorAccessCheck(request.client, request.env)\r\n\r\n # If a session has not been created yet, create one.\r\n if not session.wb_id:\r\n session.wb_id = randomizer.generate_wb_id()\r\n\r\n # -- follow a comment preserved since 'the age of the upload'\r\n #\r\n # Tor Browser Bundle has JS enabled by default!\r\n # Hurray! I love you all!!\r\n # Yeah, even *you* the anti-JS taliban hater!\r\n # As someone put it, if you think JS is evil remember\r\n # that the world is in technicolor and not in black and white.\r\n # Look up, the sun is shining, thanks to jQuery.\r\n\r\n # This is necessary because otherwise web2py will go crazy when\r\n # it sees {{ }}\r\n upload_template = jQueryHelper.upload_tmpl()\r\n\r\n download_template = jQueryHelper.download_tmpl()\r\n\r\n # Generate the material upload elements\r\n # JavaScript version\r\n material_js = TR('Material',\r\n DIV(_id='file-uploader'),\r\n _id='file-uploader-js')\r\n\r\n # .. and non JavaScript\r\n material_njs = DIV(DIV(LABEL(\"Material:\"),\r\n _class=\"w2p_fl\"),\r\n DIV(INPUT(_name='material', _type='file',\r\n _id='file-uploader-nonjs'),\r\n _class=\"w2p_fc\"),\r\n _id=\"file-uploader-nonjs\")\r\n\r\n # Use the web2py captcha setting to generate a Captcha\r\n # captcha = TR('Are you human?', auth.settings.captcha)\r\n\r\n # The default fields and labels\r\n form_fields = ['title', 'desc']\r\n form_labels = {'title': 'Title', 'desc': 'Description'}\r\n\r\n form_extras = []\r\n\r\n # Add to the fields to be displayed the ones inside of\r\n # the extrafields setting\r\n # for i in settings.extrafields.fields:\r\n # form_extras.append(str(i['name']))\r\n # form_fields.append(str(i['name']))\r\n # form_labels[str(i['name'])] = i['desc']\r\n\r\n if settings.extrafields.wizard:\r\n the_steps = settings.extrafields.gen_wizard()\r\n\r\n form = FormShaman(db.leak, steps=the_steps)\r\n # this is the only error handled at the moment, the fact that __init__\r\n # could return only None, maybe an issue when more errors might be managed\r\n if not hasattr(form, 'vars'):\r\n return dict(error='No receiver present in the default group', existing_files=[])\r\n\r\n else:\r\n form = SQLFORM(db.leak,\r\n fields=form_fields,\r\n labels=form_labels)\r\n\r\n # Check to see if some files have been loaded from a previous session\r\n existing_files = []\r\n if session.files:\r\n for f in session.files:\r\n existing_files.append(f)\r\n\r\n # Make the submission not spooled and set the timestamp\r\n form.vars.spooled = False\r\n form.vars.submission_timestamp = time.time()\r\n\r\n # Insert all the data into the db\r\n if form.accepts(request.vars):\r\n logger.debug(\"Submission %s\", request.vars)\r\n\r\n group_ids = [] # Will contain all the groups selected by the WB\r\n\r\n # XXX Since files are processed via AJAX, maybe this is unecessary?\r\n # if we want to keep it to allow legacy file upload, then the\r\n # file count should only be one.\r\n # File upload in a slightly smarter way\r\n # http://www.web2py.com/book/default/chapter/06#Manual-Uploads\r\n for var in request.vars:\r\n if var == \"material\":\r\n try:\r\n f = Storage()\r\n f.filename = request.vars.material.filename\r\n\r\n tmp_file = db.material.file.store(request.body, filename)\r\n logger.info(\"the tmp_file is [%s] with filename [%s]\",\r\n tmp_file, 
filename)\r\n\r\n f.ext = mutils.file_type(filename.split(\".\")[-1])\r\n\r\n tmp_fpath = os.path(os.path.join(request.folder,\r\n 'uploads',\r\n session.upload_dir,\r\n tmp_file + filename))\r\n\r\n f.size = os.path.getsize(tmp_fpath)\r\n files.append(f)\r\n\r\n dst_folder = os.path.join(request.folder,\r\n 'material',\r\n str(leak_id.id))\r\n if not os.path.isdir(dst_folder):\r\n os.mkdir(dst_folder)\r\n os.rename(os.path.join(request.folder,\r\n 'uploads',\r\n session.upload_dir,\r\n tmp_file),\r\n dst_folder + filename)\r\n # XXX define exception for this except\r\n except:\r\n logger.error(\"There was an error in processing the \"\r\n \"submission files.\")\r\n\r\n\r\n if var.startswith(\"target_\") and var.split(\"_\")[-1].isdigit():\r\n group_ids.append(var.split(\"_\")[-1])\r\n\r\n # The metadata associated with the file is stored inside\r\n # the session variable this should be safe to use this way.\r\n if not session.files:\r\n session.files = []\r\n\r\n # Add the default files\r\n default_material(form.vars)\r\n\r\n # XXX verify that this is safe\r\n pfile = json.dumps(session.files)\r\n\r\n # leak_id has been used in the previous code as this value,\r\n # I'm keeping to don't change the following lines\r\n leak_id = form.vars.id\r\n\r\n\r\n # XXX probably a better way to do this\r\n # Create a record in submission db associated with leak_id\r\n # used to keep track of sessions\r\n if not db(db.submission.session==session.wb_id).select():\r\n db.submission.insert(session=session.wb_id,\r\n leak_id=leak_id,\r\n dirname=session.dirname)\r\n\r\n # Instantiate the Leak object\r\n leak = Leak(leak_id)\r\n\r\n # Create the material entry for the submitted data\r\n leak.add_material(leak_id, None, \"localfs\", file=pfile)\r\n\r\n # Create the leak with the GlobaLeaks factory\r\n # (the data has actually already been added to db leak,\r\n # this just creates the tulips), the first is the whistleblower tulip\r\n gl.create_tulip(form.vars.id, 0, wb_number[1])\r\n\r\n # create the tulips for every receiver inside a basket\r\n\r\n # if len(group_ids):\r\n # fixme: we're not considering the selecred group, but *all*\r\n group_id = db().select(db.targetgroup.ALL).first().id\r\n leak.create_tulip_by_group(group_id)\r\n\r\n # Make the WB number be *** *** *****\r\n pretty_number = wb_number[0][:3] + \" \" + wb_number[0][3:6] + \\\r\n \" \" + wb_number[0][6:]\r\n\r\n session.wb_number = pretty_number\r\n # Clean up all sessions\r\n session.dirname = None\r\n session.wb_id = None\r\n session.files = None\r\n\r\n return dict(leak_id=leak_id, leaker_tulip=pretty_number, error=None,\r\n form=None, tulip_url=wb_number[1], jQuery_templates=None,\r\n existing_files=existing_files)\r\n\r\n elif form.errors:\r\n response.flash = 'form has errors'\r\n\r\n return dict(form=form,\r\n error=None,\r\n leak_id=None,\r\n tulip=None,\r\n tulips=None,\r\n anonymity=anonymity.result,\r\n jQuery_templates=(XML(upload_template),\r\n XML(download_template)),\r\n existing_files=existing_files)",
"def test_generate_pdf(self):\n with mock.patch.object(form_api.Client, 'wait') as wait_patched:\n template_id = 'tpl_000000000000000001' # str |\n\n response = self.client.generate_pdf(\n template_id, {\n 'data': {\n 'title': 'Test PDF',\n 'description': 'This PDF is great!'\n }\n })\n wait_patched.assert_called()\n self.assertEquals(response.status, 'success')\n submission = response.submission\n self.assertRegexpMatches(submission.id, '^sub_')\n self.assertEquals(submission.expired, False)\n self.assertEquals(submission.state, 'processed')",
"def ocr():\n return render_template('upload.html')",
"def display_form(self):\n\n result = self.client.get(\"/submit_image\")\n self.assertIn(b\"multipart/form-data\", result.data)",
"def OnSavePdf(self, event):\r\n dirname = ''\r\n d = wx.FileDialog(self, \"Save File\", dirname, \"\", \"*.pdf\", wx.SAVE)\r\n if d.ShowModal() == wx.ID_OK:\r\n self.filename = os.path.join(d.GetDirectory(), d.GetFilename())\r\n d.Destroy()\r\n return self.filename",
"def processing_image_upload_view(request):\n if request.method == 'POST':\n form = ImageProcForm(request.POST, request.FILES)\n\n if form.is_valid():\n # create and separated db object to provide ID before image saving\n db_object = ImageProc.objects.create()\n db_object.save()\n\n # saving image to new created db object\n db_object.image = request.FILES['image']\n db_object.user_field = request.user\n db_object.save()\n\n # saving image data in session\n request.session['image'] = db_object.image.url\n request.session['id'] = db_object.id\n\n return redirect('processing_app:processing')\n else:\n form = ImageProcForm()\n return render(request, 'processing_app/processing_image_upload.html', {'form': form})",
"def send_uploadform(pagename, request):\n _ = request.getText\n\n if not request.user.may.read(pagename):\n request.write('<p>%s</p>' % _('You are not allowed to view this page.'))\n return\n\n writeable = request.user.may.write(pagename)\n\n # First send out the upload new attachment form on top of everything else.\n # This avoids usability issues if you have to scroll down a lot to upload\n # a new file when the page already has lots of attachments:\n if writeable:\n request.write('<h2>' + _(\"New Attachment\") + '</h2>')\n request.write(\"\"\"\n<form action=\"%(url)s\" method=\"POST\" enctype=\"multipart/form-data\">\n<dl>\n<dt>%(upload_label_file)s</dt>\n<dd><input type=\"file\" name=\"file\" size=\"50\"></dd>\n<dt>%(upload_label_target)s</dt>\n<dd><input type=\"text\" name=\"target\" size=\"50\" value=\"%(target)s\"></dd>\n<dt>%(upload_label_overwrite)s</dt>\n<dd><input type=\"checkbox\" name=\"overwrite\" value=\"1\" %(overwrite_checked)s></dd>\n</dl>\n%(textcha)s\n<p>\n<input type=\"hidden\" name=\"action\" value=\"%(action_name)s\">\n<input type=\"hidden\" name=\"do\" value=\"upload\">\n<input type=\"hidden\" name=\"ticket\" value=\"%(ticket)s\">\n<input type=\"submit\" value=\"%(upload_button)s\">\n</p>\n</form>\n\"\"\" % {\n 'url': request.href(pagename),\n 'action_name': action_name,\n 'upload_label_file': _('File to upload'),\n 'upload_label_target': _('Rename to'),\n 'target': wikiutil.escape(request.values.get('target', ''), 1),\n 'upload_label_overwrite': _('Overwrite existing attachment of same name'),\n 'overwrite_checked': ('', 'checked')[request.form.get('overwrite', '0') == '1'],\n 'upload_button': _('Upload'),\n 'textcha': TextCha(request).render(),\n 'ticket': wikiutil.createTicket(request),\n})\n\n request.write('<h2>' + _(\"Attached Files\") + '</h2>')\n request.write(_get_filelist(request, pagename))\n\n if not writeable:\n request.write('<p>%s</p>' % _('You are not allowed to attach a file to this page.'))",
"def upload_submission(request, learner, trigger, no_thumbnail=True):\n base_dir_for_file_uploads = settings.MEDIA_ROOT\n thumbnail_file_name_django = ''\n entry_point = trigger.entry_point\n\n files = request.FILES.getlist('file_upload', None)\n if files is None:\n return None\n\n # Is the storage space reachable?\n deepest_dir = base_dir_for_file_uploads + 'uploads/{0}/tmp/'.format(\n entry_point.id)\n\n try:\n os.makedirs(deepest_dir)\n except OSError:\n if not os.path.isdir(deepest_dir):\n logger.error('Cannot create directory for upload: {0}'.format(\n deepest_dir))\n raise\n\n if len(files) == 1:\n filename = files[0].name\n extension = filename.split('.')[-1].lower()\n submitted_file_name_django = 'uploads/{0}/{1}'.format(entry_point.id,\n generate_random_token(token_length=16) + '.' + extension)\n full_path = base_dir_for_file_uploads + submitted_file_name_django\n with open(full_path, 'wb+') as dst:\n for chunk in files[0].chunks():\n dst.write(chunk)\n\n\n f_size = os.path.getsize(full_path)\n if f_size > trigger.max_file_upload_size_MB * 1024 * 1024:\n logger.warning('File too large {0}'.format(\n submitted_file_name_django))\n return None, ('File too large ({0} MB); it must be less than '\n '{1} MB.'.format(round(float(f_size/1024.0/1024.0), 1),\n trigger.max_file_upload_size_MB))\n\n\n else: #if trigger.allow_multiple_files: this is removed for now\n filename = ''\n extension = ''\n submitted_file_name_django = ''\n full_path = ''\n\n\n # Check that the file format is PDF, if that is required.\n strike1 = False\n if 'pdf' in trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'application/pdf' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PDF upload: {0} [{1}]'.format(mime,\n full_path))\n #return None, 'Invalid file uploaded. Uploaded file must be a PDF.'\n\n doc = PdfFileReader(full_path)\n if doc.isEncrypted:\n logger.debug('Encrypted PDF upload: {0}'.format(full_path))\n return None, ('An encrypted PDF cannot be uploaded. Please remove '\n 'the encryption and try again.')\n\n\n strike1 = False\n if (('jpeg' in trigger.accepted_file_types_comma_separated.lower()) or \\\n ('jpg' in trigger.accepted_file_types_comma_separated.lower())) and \\\n extension in ('jpg', 'jpeg'):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/jpeg' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid JPG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. Uploaded image should be a valid '\n 'and readable JPEG file.')\n\n\n strike1 = False\n if ('png' in trigger.accepted_file_types_comma_separated.lower()) and \\\n extension in ('png',):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/png' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PNG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. 
Uploaded image should be a valid '\n 'and readable PNG file.')\n\n\n strike2 = False\n if extension.lower() not in \\\n trigger.accepted_file_types_comma_separated.lower():\n logger.debug('Invalid file type upload: received \".{0}\"; [{1}]'.format(\\\n extension, full_path))\n return None, ('Invalid file uploaded. Uploaded file must be: {}'.format(\\\n trigger.accepted_file_types_comma_separated))\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n prior = Submission.objects.filter(status='S',\n submitted_by=learner,\n entry_point=entry_point,\n is_valid=True\n )\n else:\n prior_indiv = Q(status='S', submitted_by=learner, entry_point=entry_point,\n trigger=trigger, is_valid=True)\n\n # We need this here, but also for the code later in the next\n # if (trigger==entry_point) part\n\n # Default returned by this function is ``None`` if the user is not\n # enrolled in a group, or if this course simply does not use groups.\n group_submitted = is_group_submission(learner, entry_point)\n if is_group_submission(learner, entry_point):\n group_submitted = group_submitted.group\n\n prior_group = Q(status='S', group_submitted=group_submitted,\n entry_point=entry_point, trigger=trigger,\n is_valid=True)\n else:\n prior_group = Q()\n\n prior = Submission.objects.filter(prior_indiv | prior_group)\n\n\n for item in prior:\n logger.debug(('Setting prior submission to False: {0} and name '\n '\"{1}\"'.format(str(item), item.submitted_file_name)))\n item.is_valid = False\n item.save()\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n sub = Submission(submitted_by=learner,\n group_submitted=None,\n status='S',\n entry_point=entry_point,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n else:\n\n sub = Submission(submitted_by=learner,\n group_submitted=group_submitted,\n status='S',\n entry_point=entry_point,\n trigger=trigger,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n\n if 'pdf' in trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n clean_PDF(sub)\n\n return sub",
"def save_pdf(self, response):\n\n # get metadata\n file_type = \"__comprovante_de_acesso__\"\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n self.result.update({file_type: {\"file_id\": file_id}})",
"def fit():\n form = MedForm(request.form)\n if request.method == 'POST' and form.validate():\n\n zipcode = form.zipcode.data\n # Check the zipcode\n\n plan = form.plan.data\n medication = form.medication.data\n\n ip = str(request.environ.get('HTTP_X_REAL_IP', request.remote_addr))\n rq = Requests(**dict(user=current_user.id, ip = ip, zipcode = zipcode, plan = plan, drug = medication))\n rq.save()\n\n # Process either medicare or medicaid\n plan_type = form.plan_type.data\n try:\n if plan_type == 'medicare':\n table = get_medicare_plan(medication, plan, zipcode)\n else:\n table = get_medicaid_plan(medication, plan, zipcode, plan_type)\n\n except tools.BadPlanName as e:\n form.errors['plan_name'] = str(e)\n context = {'form': form}\n html = 'fit.html'\n\n except tools.BadLocation as e:\n form.errors['zipcode'] = str(e)\n context = {'form': form}\n html = 'fit.html'\n else:\n # You have to order the data in a list or it won't show right\n data = []\n for item in table['data']:\n row = [item[h] for h in table['heading']]\n data.append(row)\n\n context = {'data':data,\n 'head':table['heading'],\n 'drug':medication,\n 'pa': table['pa'],\n 'zipcode':zipcode,\n 'plan':plan,\n 'plan_type':form.plan_type.data,\n }\n html = 'table.html'\n\n # If its a GET see if parameters were passed\n else:\n if request.method == 'GET':\n form.zipcode.data = request.args.get('zipcode', \"\")\n form.plan.data = request.args.get('plan', \"\")\n form.medication.data = request.args.get('drug', \"\")\n form.plan_type.data = request.args.get('plan_type', \"medicare\")\n\n # a POST with errors\n elif form.errors:\n if 'plan_type' in form.errors:\n form.errors['plan_type'] = \"Please pick a Medicare, Medicaid, or Private plan\"\n\n context = {'form': form}\n html = 'fit.html'\n\n content = render_template(html, **context)\n return content",
"def make_pdf(self, net_id, request_id, request_date):\n with open(\"{0}/user_uploads/{1}/{2}/submission.json\".format(self.__APP_PATH__, net_id, request_id), mode=\"r\") as json_file:\n request_details = json.load(json_file)\n\n files_text = \"\"\n travel_text = None\n\n if request_details[\"request_type\"] == \"travel\":\n travel_text = \"\\n\\nTravel Details:\\n\" \\\n \"\\t\\t\\t\\tTravel from: {0} ({1})\\n\" \\\n \"\\t\\t\\t\\tTravel to: {2} ({3})\\n\" \\\n \"\\t\\t\\t\\tTravel Number: {4}\\n\" \\\n \"\\t\\t\\t\\tEvent Website: {5}\".format(request_details[\"travel_from\"],\n request_details[\"travel_from_date\"],\n request_details[\"travel_to\"],\n request_details[\"travel_to_date\"],\n request_details[\"travel_number\"],\n request_details.get(\"event_website\", \"N/A\"))\n for file in request_details[\"files\"]:\n amount_text = \"${0}\".format(file[\"dollar_amount\"]) if file[\"dollar_amount\"] > 0.0 else \"Auxiliary File\"\n files_text += \"\\t\\t\\t\\t{0} ({1})\\n\\t\\t\\t\\t\\t\\t\\t\\t\" \\\n \"{2}\\n\\t\\t\\t\\t\\t\\t\\t\\t{3}\\n\\n\".format(file[\"label\"], amount_text,\n file[\"name\"], file[\"description\"])\n\n if request_details[\"notes\"].strip():\n request_notes = \"\\nNotes:\\n{0}\".format(request_details[\"notes\"].strip())\n else:\n request_notes = \"\"\n\n pdf_title = \"({0}) {1:02d}/{2:02d}/{3:04d} - {4:02d}:{5:02d}:{6:02d}, Amount: ${7}\".format(\n request_details[\"request_date\"][\"weekday\"], request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"], request_details[\"request_date\"][\"year\"],\n request_details[\"request_date\"][\"hours\"], request_details[\"request_date\"][\"minutes\"],\n request_details[\"request_date\"][\"seconds\"], request_details[\"total_amount\"])\n\n if request_details[\"pay_to\"][\"id\"]:\n pay_to_details = \"{0} ({1}, {2})\".format(request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"id\"],\n request_details[\"pay_to\"][\"email\"])\n else:\n pay_to_details = \"{0} ({1})\".format(request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"])\n\n pdf_body = \"{0}{1}\\n\\nRequestee: \\n\\t\\t\\t\\tAccount:{2}\\n\\t\\t\\t\\tName: {3} {4} ({5})\\n\\t\\t\\t\\t\" \\\n \"Phone: {6}\\t|\\tNet ID: {7}\\t\\n\\nPay To:\\n\\t\\t\\t\\tName: {8}{9}\\n\\n\" \\\n \"Files:\\n{10}\".format(request_details[\"short_description\"], request_notes,\n request_details[\"account_number\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_details[\"requester\"][\"email\"],\n request_details[\"requester\"][\"phone_number\"],\n request_details[\"requester\"][\"net_id\"],\n pay_to_details,\n travel_text,\n files_text)\n try:\n logo_path = \"{0}/static/assets/main/uta_logo.png\".format(self.__APP_PATH__.split(\"/apps/\")[0])\n pdf = PDFMaker(**{\"title\": \"Reimbursement Request Report\"})\n\n pdf.set_margins(left=19.05, top=19.05, right=19.05)\n pdf.set_auto_page_break(auto=True, margin=19.05)\n pdf.set_author(\"MavApps - Reimbursement App\")\n pdf.print_page(pdf_title, pdf_body)\n pdf.image(logo_path, x=53, y=11, w=107, h=10, type=\"PNG\", link=\"https://uta.edu\")\n pdf.output(\"{0}/user_uploads/{1}/{2}/[{1}-{3}]_report.pdf\".format(self.__APP_PATH__, net_id, request_id, request_date), \"F\")\n except Exception as e:\n print(e)\n return False\n return True",
"def index(request):\n if request.method == \"POST\":\n form = ReferralForm(request.POST, request.FILES)\n\n if form.is_valid():\n referral = form.save()\n\n files = request.FILES.getlist(\"files\")\n for file in files:\n referral_attachment = ReferralAttachment(\n file=file, name=\"some name\", referral=referral\n )\n referral_attachment.save()\n\n # The form is valid and we saved the referral: confirm it to the user by email\n send_email_referral_saved(referral)\n\n # Redirect the user to the \"single referral\" view\n return HttpResponseRedirect(\n reverse(\"referral-received\", kwargs={\"pk\": referral.id})\n )\n\n else:\n return HttpResponse(form.errors.as_text())\n\n else:\n form = ReferralForm()\n\n return render(request, \"core/new_referral.html\", {\"form\": form})"
] |
[
"0.6528884",
"0.6424879",
"0.6307522",
"0.6126606",
"0.5985242",
"0.59249526",
"0.58687127",
"0.5807031",
"0.5754557",
"0.5734037",
"0.5708609",
"0.5695069",
"0.56741",
"0.56607753",
"0.56555784",
"0.56451267",
"0.56432295",
"0.56404954",
"0.5632161",
"0.55544764",
"0.55529344",
"0.5533621",
"0.55231214",
"0.5501939",
"0.5482163",
"0.54582113",
"0.54502344",
"0.5449634",
"0.5439633",
"0.5436213"
] |
0.7277283
|
0
|
Create the output filenames and generate the top and mosaic thumbnails and write them to disk.
|
def write_thumbnails(self, appstruct):
    # Derive a filesystem-safe slug from the serial in the appstruct and build
    # the per-submission paths for the source PDF and the two thumbnail images.
    slugser = slugify(appstruct["serial"])
    pdf_filename = "thumbnails/%s/uploaded.pdf" % slugser
    top_file = "thumbnails/%s/top.png" % slugser
    mos_file = "thumbnails/%s/mosaic.png" % slugser
    # Render both thumbnails from the uploaded PDF and persist them to disk.
    thumg = ThumbnailGenerator(pdf_filename)
    self.save_blob(thumg.top_thumbnail(), top_file)
    self.save_blob(thumg.mosaic_thumbnail(), mos_file)
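
# A minimal, self-contained sketch of the same naming scheme (illustrative
# names only; it assumes python-slugify and is not part of the original snippet).
import os
from slugify import slugify

def thumbnail_paths(serial):
    # Mirror the layout above: one directory per slugged serial, holding the
    # uploaded PDF plus the two generated thumbnails.
    base = os.path.join("thumbnails", slugify(serial))
    return (os.path.join(base, "uploaded.pdf"),
            os.path.join(base, "top.png"),
            os.path.join(base, "mosaic.png"))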
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mosaic(self):\n \n if len(self.tiles) > 1:\n hdflist = sorted(glob.glob(self.fullPath + '/*.hdf'))\n for i in range(0,len(hdflist),2):\n ms = pymodis.convertmodis_gdal.createMosaicGDAL(hdfnames = [hdflist[i], hdflist[i+1]], subset = self.subset, outformat = 'GTiff')\n ms.run(str(hdflist[i].split('.h')[0]) + 'mos.tif')\n ms.write_vrt(output = str(hdflist[i].split('.h')[0]), separate = True)\n mosaicCount = len(glob.glob(self.fullPath + '/*mos.tif'))\n logger.log('SUCCESS', 'Mosaic complete! MODIS tiles %s were successfully mosaicked into %d mosaic images.' % (str(self.tiles), mosaicCount))",
"def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)",
"def make_mosaic(target_im, saved_file_name):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n color_data_file = os.path.join(BASE_DIR, 'static/images/data/average_color.csv')\n color_data = materials_list_from_file(color_data_file)\n\n target_file = os.path.join(BASE_DIR, 'static/images/target/{}'.format(target_im))\n icon_im = image_process.open_image_RGB(target_file)\n icon_im_width, icon_im_height = icon_im.size\n mosaic_icon_im = Image.new('RGBA', (1600, 1600))\n\n for left in range(0, icon_im_width, DOT_AREA_ONE_SIDE):\n for top in range(0, icon_im_height, DOT_AREA_ONE_SIDE):\n average_color = calc.average_color_in_range(icon_im, left, top,\n left+DOT_AREA_ONE_SIDE, top+DOT_AREA_ONE_SIDE)\n if len(average_color) != 3:\n continue\n\n filename = similar_color_filename(average_color, color_data)\n # 距離最小のファイルを1600×1600の画像に貼り付け\n open_file = os.path.join(BASE_DIR, 'static/images/material/euph_part_icon/'+filename)\n area_im = Image.open(open_file)\n mosaic_icon_im.paste(area_im, (left//DOT_AREA_ONE_SIDE * THUMBNAIL_ONE_SIDE,\n top//DOT_AREA_ONE_SIDE * THUMBNAIL_ONE_SIDE))\n\n saved_file_path = 'static/images/ftnext/{}'.format(saved_file_name)\n saved_file = os.path.join(BASE_DIR, saved_file_path)\n mosaic_icon_im.save(saved_file)",
"def generate_thumbnail():\n import tempfile\n import glob\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n\n if not v:\n return\n\n # do not generate a thumbnail from a Repr\n if '@' in v.take_name:\n return\n\n task = v.task\n project = task.project\n # repo = project.repository\n imf = project.image_format\n width = int(imf.width * 0.5)\n height = int(imf.height * 0.5)\n\n temp_output = tempfile.mktemp()\n\n current_frame = pm.currentTime(q=1)\n output_file = pm.playblast(\n fmt='image',\n startTime=current_frame,\n endTime=current_frame,\n sequenceTime=1,\n forceOverwrite=1,\n filename=temp_output,\n clearCache=1,\n showOrnaments=1,\n percent=100,\n wh=(width, height),\n offScreen=1,\n viewer=0,\n compression='PNG',\n quality=70,\n framePadding=0\n )\n pm.currentTime(current_frame)\n\n output_file = output_file.replace('####', '*')\n found_output_file = glob.glob(output_file)\n if found_output_file:\n output_file = found_output_file[0]\n\n from anima.ui import utils\n utils.upload_thumbnail(task, output_file)\n\n return found_output_file",
"def write(self,vname,kmz='out.kmz'):\n\n imgs=[] # to store a list of all images created\n content=[] # the content of the main kml\n vstr='files/%s_%05i.png' # format specification for images (all stored in `files/' subdirectory)\n\n # create empty files subdirectory for output images\n try:\n shutil.rmtree('files')\n except:\n pass\n os.makedirs('files')\n\n # loop through all time slices and create the image data\n # appending to the kml content string for each image\n for i in xrange(0,self.nstep,1):\n kml=ncNWRC(self.filename,istep=i)\n img=vstr % (vname,i)\n imgs.append(img)\n content.append(kml.image2kml(vname,img))\n\n # create the main kml file\n kml=ncNWRC.kmlstr % \\\n {'content':'\\n'.join(content),\\\n 'prog':ncNWRC.progname}\n\n # create a zipfile to store all images + kml into a single compressed file\n z=zipfile.ZipFile(kmz,'w',compression=zipfile.ZIP_DEFLATED)\n z.writestr(kmz[:-3]+'kml',kml)\n for img in imgs:\n z.write(img)\n z.close()",
"def create_and_write_output(predictions_path,output_path,inpDir):\n \n filenames= sorted(os.listdir(predictions_path)) \n for filename in filenames:\n \n # read the 3 channel output image from the neural network\n image=cv2.imread(os.path.join(predictions_path,filename))\n \n # create binary image output using the create_binary function\n out_image=create_binary(image) \n \n # read and store the metadata from the input image\n with BioReader(os.path.join(inpDir,filename)) as br:\n metadata = br.metadata\n\n # Write the binary output consisting of the metadata using bfio.\n output_image_5channel=np.zeros((out_image.shape[0],out_image.shape[1],1,1,1),dtype=np.uint8)\n output_image_5channel[:,:,0,0,0]=out_image \n\n with BioWriter(os.path.join(output_path,filename), metadata=metadata) as bw:\n bw.dtype = output_image_5channel.dtype\n bw.write(output_image_5channel)",
"def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)",
"def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names",
"def thumbnail(self, fnameIn, fnameOut):\n cmd = \"convert -define jpeg:size=500x150 \"\n cmd += '\"%s\" ' % os.path.join(self.downloadFolder, fnameIn)\n cmd += \"-auto-orient -thumbnail 250x150 \"\n cmd += '\"%s\" ' % os.path.join(self.thumbnailFolder, fnameOut)\n self.log(\"creating thumbnail ...\")\n self.log(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()",
"def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))",
"def generate_metadata(self):\n if self.options.mbtiles:\n return\n if not os.path.exists(self.output):\n os.makedirs(self.output)\n\n if self.options.profile == 'mercator':\n\n south, west = self.mercator.MetersToLatLon( self.ominx, self.ominy)\n north, east = self.mercator.MetersToLatLon( self.omaxx, self.omaxy)\n south, west = max(-85.05112878, south), max(-180.0, west)\n north, east = min(85.05112878, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate googlemaps.html\n if self.options.webviewer in ('all','google') and self.options.profile == 'mercator':\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'googlemaps.html')):\n f = open(os.path.join(self.output, 'googlemaps.html'), 'w')\n f.write( self.generate_googlemaps() )\n f.close()\n\n # Generate openlayers.html\n if self.options.webviewer in ('all','openlayers'):\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'), 'w')\n f.write( self.generate_openlayers() )\n f.close()\n\n elif self.options.profile == 'geodetic':\n\n west, south = self.ominx, self.ominy\n east, north = self.omaxx, self.omaxy\n south, west = max(-90.0, south), max(-180.0, west)\n north, east = min(90.0, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all','openlayers'):\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'), 'w')\n f.write( self.generate_openlayers() )\n f.close()\n\n elif self.options.profile in ['raster','gearth','garmin']:\n\n west, south = self.ominx, self.ominy\n east, north = self.omaxx, self.omaxy\n\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all','openlayers'):\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'), 'w')\n f.write( self.generate_openlayers() )\n f.close()\n\n\n # Generate tilemapresource.xml.\n if (self.options.tile_format != 'hybrid' and self.options.profile != 'garmin'\n and (not self.options.resume or not os.path.exists(os.path.join(self.output, 'tilemapresource.xml')))):\n f = open(os.path.join(self.output, 'tilemapresource.xml'), 'w')\n f.write( self.generate_tilemapresource())\n f.close()",
"def combine_subinstances(output_dir, subinstance_list, subinstance_size, clickable_size, num_subinstances_to_combine, seed=42):\n try:\n os.mkdir(os.path.join(output_dir, \"mosaics\"))\n except OSError:\n print(os.path.join(output_dir, \"mosaics\") + \" exits... :(\")\n\n expand = (subinstance_size // clickable_size)\n num_subinstances = len(subinstance_list) // (expand * expand)\n np.random.seed(seed)\n perm = np.random.permutation(num_subinstances)\n \n num_result_files = int(np.ceil(float(num_subinstances) / float(num_subinstances_to_combine * num_subinstances_to_combine)))\n print(\"#(mosaics) =\", num_result_files)\n\n mosaic_img_list = [None] * num_result_files\n mosaic_loc_list = [None] * num_subinstances * expand * expand\n patch_i = 0\n for file_i in range(num_result_files):\n mosaic_img = np.zeros((subinstance_size * num_subinstances_to_combine, subinstance_size * num_subinstances_to_combine, 3))\n for i in range(num_subinstances_to_combine):\n for j in range(num_subinstances_to_combine):\n if patch_i < num_subinstances:\n for l in range(expand):\n for m in range(expand):\n mosaic_img[i * subinstance_size + l * clickable_size : i * subinstance_size + (l + 1) * clickable_size,\n j * subinstance_size + m * clickable_size : j * subinstance_size + (m + 1) * clickable_size, :] \\\n = subinstance_list[perm[patch_i] * expand * expand + l * expand + m]\n mosaic_loc_list[perm[patch_i] * expand * expand + l * expand + m] \\\n = (file_i, (subinstance_size * i + l * clickable_size, \n subinstance_size * j + m * clickable_size, \n clickable_size, \n clickable_size))\n patch_i += 1\n cv2.imwrite(os.path.join(output_dir, \"mosaics\", str(file_i)+\".jpg\"), mosaic_img)\n\n return mosaic_loc_list",
"def ImageOutput(name, out_ds, tile_size, resampling, init_dest, output_dir, verbose,mbtiles):\n\n resampler = Resampler(resampling)\n\n if name == \"hybrid\":\n return HybridImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose)\n\n if name == \"png\":\n image_format = \"PNG\"\n elif name == \"jpeg\":\n image_format = \"JPEG\"\n\n return SimpleImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose, [image_format],mbtiles)",
"def generate_figures():\r\n # create results directory if necessary\r\n try:\r\n makedirs(\"results\")\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n \r\n for b in benchmarks:\r\n generate_figure(model[b], b)",
"def organise_qa_output(metadata, base_dir, write_tag):\n filenames = metadata['FITSImageFilename']\n for i, fits_file in enumerate(filenames):\n kat_target = katpoint.Target(metadata['KatpointTargets'][i])\n\n # Move QA report and create metadata\n pb_filebase = os.path.splitext(fits_file)[0] + '_PB'\n qa_report = pb_filebase + '_continuum_validation_snr5.0_int'\n pb_dir = _productdir(metadata, base_dir, i, '_PB', write_tag)\n\n qa_dir = _productdir(metadata, base_dir, i, '_QA', write_tag)\n os.mkdir(qa_dir)\n os.rename(os.path.join(pb_dir, qa_report), qa_dir)\n make_report_metadata(metadata, qa_dir)\n\n # Move RMS image and create metadata\n rms_dir = _productdir(metadata, base_dir, i, '_RMS', write_tag)\n os.mkdir(rms_dir)\n rms_image = pb_filebase + '_aegean_rms'\n mean_pb_rms = _calc_rms(os.path.join(pb_dir, rms_image + FITS_EXT))\n\n make_image_metadata(metadata, '_PB', pb_dir, i,\n 'Continuum Image PB corrected',\n 'Continuum image PB corrected',\n mean_pb_rms)\n\n os.rename(os.path.join(pb_dir, rms_image + FITS_EXT),\n os.path.join(rms_dir, rms_image + FITS_EXT))\n _add_missing_axes(os.path.join(rms_dir, rms_image + FITS_EXT))\n _caption_pngs(rms_dir, rms_image, kat_target, 'RMS PB Corrected')\n make_image_metadata(metadata, '_PB_aegean_rms', rms_dir, i,\n 'Continuum PB Corrected RMS Image',\n 'Continuum PB Corrected RMS image',\n mean_pb_rms)\n\n # Move MEAN image and create metadata\n bkg_dir = _productdir(metadata, base_dir, i, '_BKG', write_tag)\n os.mkdir(bkg_dir)\n bkg_image = pb_filebase + '_aegean_bkg'\n os.rename(os.path.join(pb_dir, bkg_image + FITS_EXT),\n os.path.join(bkg_dir, bkg_image + FITS_EXT))\n _add_missing_axes(os.path.join(bkg_dir, bkg_image + FITS_EXT))\n _caption_pngs(bkg_dir, bkg_image, kat_target, 'MEAN PB Corrected')\n make_image_metadata(metadata, '_PB_aegean_bkg', bkg_dir, i,\n 'Continuum PB Corrected Mean Image',\n 'Continuum PB Corrected Mean image',\n mean_pb_rms)\n\n # Remove .writing tag\n dir_list = [pb_dir, qa_dir, rms_dir, bkg_dir]\n for product_dir in dir_list:\n os.rename(product_dir, os.path.splitext(product_dir)[0])",
"def rot_mosaic(source_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Processing/',\r\n output_dir='K:/IID_SaltonSea/Tasks/Soil mapping/PhotoDocumentation/Final/',\r\n file_pattern='IID201905*.jpg', sub_dir=False, k=1, replace=False): \r\n \r\n \r\n if sub_dir:\r\n mosaics = []\r\n for root, dirnames, filenames in os.walk(source_dir):\r\n for filename in fnmatch.filter(filenames, file_pattern):\r\n mosaics.append(os.path.join(root, filename))\r\n else:\r\n mosaics = glob.glob(source_dir + file_pattern) \r\n \r\n g = 0\r\n r = 0\r\n s = 0\r\n for m in mosaics:\r\n f = output_dir + os.path.basename(m)\r\n if not os.path.exists(f):\r\n img = improc.imops.imio.imread(m)\r\n img = np.rot90(img, k=k) \r\n improc.imops.imio.imsave(f, img)\r\n print('generated: %s' % f)\r\n print('')\r\n g+=1\r\n elif replace:\r\n img = improc.imops.imio.imread(m)\r\n img = np.rot90(img, k=k)\r\n improc.imops.imio.imsave(f, img)\r\n print('replaced: %s' % f)\r\n print('')\r\n r+=1\r\n else:\r\n print('skipping: %s' % m)\r\n print('')\r\n s+=1\r\n\r\n print('generated total of %i files' % g)\r\n print('replaced total of %i files' % r)\r\n print('skipped total of %i files' % s)",
"def create_test_set(self):\n test_files = os.listdir(self.image_folder_path)\n test_files = sorted_alphanumeric(test_files)\n delete_files(self.root_name, \"/VOC2021/ImageSets/Main\")\n write_txt(\"test.txt\", self.txt_path, test_files)",
"def create_overview_tiles(tile_job_info, output_folder, options):\n mem_driver = gdal.GetDriverByName('MEM')\n tile_driver = tile_job_info.tile_driver\n out_driver = gdal.GetDriverByName(tile_driver)\n\n tilebands = tile_job_info.nb_data_bands + 1\n\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n tcount += (1 + abs(tmaxx-tminx)) * (1 + abs(tmaxy-tminy))\n\n ti = 0\n\n if tcount == 0:\n return\n\n if not options.quiet:\n print(\"Generating Overview Tiles:\")\n\n progress_bar = ProgressBar(tcount)\n progress_bar.start()\n\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n for ty in range(tmaxy, tminy - 1, -1):\n for tx in range(tminx, tmaxx + 1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, options)\n tilefilename = os.path.join(output_folder,\n str(tz),\n #str(tx),\n #\"%s.%s\" % (ytile, tile_job_info.tile_extension))\n '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + tile_job_info.tile_extension)\n\n if options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if options.resume and os.path.exists(tilefilename):\n if options.verbose:\n print(\"Tile generation skipped because of --resume\")\n else:\n progress_bar.log_progress()\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,\n 2 * tile_job_info.tile_size, tilebands)\n # TODO: fill the null value\n dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,\n tilebands)\n\n # TODO: Implement more clever walking on the tiles with cache functionality\n # probably walk should start with reading of four tiles from top left corner\n # Hilbert curve\n\n children = []\n # Read the tiles and write them to query window\n for y in range(2 * ty, 2 * ty + 2):\n for x in range(2 * tx, 2 * tx + 2):\n minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]\n if x >= minx and x <= maxx and y >= miny and y <= maxy:\n ytile2 = GDAL2Tiles.getYtile(y, tz+1, options)\n dsquerytile = gdal.Open(\n os.path.join(output_folder, str(tz + 1),\n '{0:04d}'.format(x) + \"_\" + '{0:04d}'.format(ytile2) + \".\" + tile_job_info.tile_extension),\n #str(x), \"%s.%s\" % (ytile2, tile_job_info.tile_extension)),\n gdal.GA_ReadOnly)\n if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):\n tileposy = 0\n else:\n tileposy = tile_job_info.tile_size\n if tx:\n tileposx = x % (2 * tx) * tile_job_info.tile_size\n elif tx == 0 and x == 1:\n tileposx = tile_job_info.tile_size\n else:\n tileposx = 0\n dsquery.WriteRaster(\n tileposx, tileposy, tile_job_info.tile_size,\n tile_job_info.tile_size,\n dsquerytile.ReadRaster(0, 0,\n tile_job_info.tile_size,\n tile_job_info.tile_size),\n band_list=list(range(1, tilebands + 1)))\n children.append([x, y, tz + 1])\n\n scale_query_to_tile(dsquery, dstile, tile_driver, options,\n tilefilename=tilefilename)\n # Write a copy of tile to png/jpg\n if options.resampling != 'antialias':\n # Write a copy of tile to png/jpg\n out_driver.CreateCopy(tilefilename, dstile, strict=0)\n\n del dstile\n\n options.generatedFiles.append(tilefilename)\n # applyLegend(tilefilename, options.legendObj)\n\n if options.verbose:\n print(\"\\tbuild from zoom\", tz + 1,\n \" tiles:\", (2 * tx, 2 * ty), (2 * 
tx + 1, 2 * ty),\n (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))\n\n # # Create a KML file for this tile.\n # if tile_job_info.kml:\n # with open(os.path.join(\n # output_folder,\n # '%d/%d/%d.kml' % (tz, tx, ty)\n # ), 'wb') as f:\n # f.write(generate_kml(\n # tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,\n # get_tile_swne(tile_job_info, options), options, children\n # ).encode('utf-8'))\n\n if not options.verbose and not options.quiet:\n progress_bar.log_progress()",
"def output_files_as_file(output_files, output_type: str = \"svg\", debug=False):\n for counter, output_file in enumerate(output_files):\n plant_uml_command = 'java -Djava.awt.headless=true -jar \"{0}\" \"{1}\"'.format(plant_uml_jar, output_file)\n if debug:\n plant_uml_command = '{0} -v'.format(plant_uml_command)\n generate_svg = '{0} -t{1}'.format(plant_uml_command, output_type)\n try:\n logging.debug('Generating {3} diagram {1}/{2}: {0}'.format(\n generate_svg,\n counter + 1,\n len(output_files),\n output_type.upper()))\n os.system(generate_svg)\n except:\n logging.debug('Could not generate {0} diagram'.format(output_type))\n traceback.print_exc()",
"def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')",
"def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)",
"def save(images, output):\n for image, frame in images:\n image.save(output(frame))",
"def mbtiles(ctx, files, output, overwrite, title, description,\n layer_type, img_format, tile_size, zoom_levels, image_dump,\n num_workers, src_nodata, dst_nodata, resampling):\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputfile = files[0]\n\n logger = logging.getLogger('rio-mbtiles')\n\n with ctx.obj['env']:\n\n # Read metadata from the source dataset.\n with rasterio.open(inputfile) as src:\n\n validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))\n base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}\n\n if src_nodata is not None:\n base_kwds.update(nodata=src_nodata)\n\n if dst_nodata is not None:\n base_kwds.update(nodata=dst_nodata)\n\n # Name and description.\n title = title or os.path.basename(src.name)\n description = description or src.name\n\n # Compute the geographic bounding box of the dataset.\n (west, east), (south, north) = transform(\n src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])\n\n # Resolve the minimum and maximum zoom levels for export.\n if zoom_levels:\n minzoom, maxzoom = map(int, zoom_levels.split('..'))\n else:\n zw = int(round(math.log(360.0 / (east - west), 2.0)))\n zh = int(round(math.log(170.1022 / (north - south), 2.0)))\n minzoom = min(zw, zh)\n maxzoom = max(zw, zh)\n\n logger.debug(\"Zoom range: %d..%d\", minzoom, maxzoom)\n\n # Parameters for creation of tile images.\n base_kwds.update({\n 'driver': img_format.upper(),\n 'dtype': 'uint8',\n 'nodata': 0,\n 'height': tile_size,\n 'width': tile_size,\n 'count': 3,\n 'crs': TILES_CRS})\n\n img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'\n\n # Initialize the sqlite db.\n if os.path.exists(output):\n os.unlink(output)\n # workaround for bug here: https://bugs.python.org/issue27126\n sqlite3.connect(':memory:').close()\n\n conn = sqlite3.connect(output)\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE tiles \"\n \"(zoom_level integer, tile_column integer, \"\n \"tile_row integer, tile_data blob);\")\n cur.execute(\n \"CREATE TABLE metadata (name text, value text);\")\n\n # Insert mbtiles metadata into db.\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"name\", title))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"type\", layer_type))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"version\", \"1.1\"))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"description\", description))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"format\", img_ext))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"bounds\", \"%f,%f,%f,%f\" % (west, south, east, north)))\n\n conn.commit()\n\n # Create a pool of workers to process tile tasks.\n pool = Pool(num_workers, init_worker,\n (inputfile, base_kwds, resampling), 100)\n\n # Constrain bounds.\n EPS = 1.0e-10\n west = max(-180 + EPS, west)\n south = max(-85.051129, south)\n east = min(180 - EPS, east)\n north = min(85.051129, north)\n\n # Initialize iterator over output tiles.\n tiles = mercantile.tiles(\n west, south, east, north, range(minzoom, maxzoom + 1))\n\n for tile, contents in pool.imap_unordered(process_tile, tiles):\n\n if contents is None:\n logger.info(\"Tile %r is empty and will be skipped\", tile)\n continue\n\n # MBTiles has a different origin than Mercantile/tilebelt.\n tiley = int(math.pow(2, tile.z)) - tile.y - 1\n\n # Optional image dump.\n if image_dump:\n img_name = '%d-%d-%d.%s' % (\n tile.x, 
tiley, tile.z, img_ext)\n img_path = os.path.join(image_dump, img_name)\n with open(img_path, 'wb') as img:\n img.write(contents)\n\n # Insert tile into db.\n cur.execute(\n \"INSERT INTO tiles \"\n \"(zoom_level, tile_column, tile_row, tile_data) \"\n \"VALUES (?, ?, ?, ?);\",\n (tile.z, tile.x, tiley, buffer(contents)))\n\n conn.commit()\n\n conn.close()\n # Done!",
"def extract_write_to_file(self, num_extracted, write_dir, sub_h, sub_w, margin=10):\n\n file_seed = len(os.listdir(write_dir))\n\n for i in range(num_extracted):\n file_num = str(file_seed + i)\n write_path = os.path.join(write_dir, file_num + \".\" + 'jpg')\n\n print('extracting {}/{} images of dimension {}x{}'.format(i, num_extracted, sub_h, sub_w))\n print('writting to location: {}'.format(write_path))\n\n self.extract_single(sub_h, sub_w, write_path)",
"def generate(software, out_dir, suffix, dry_run):\n m3u_filename = software.name + (suffix if suffix else '') + '.m3u'\n\n if not dry_run:\n m3u_fd = open(os.path.join(out_dir, m3u_filename), 'w')\n\n for i in software.images():\n image_rel_path = os.path.relpath(i.path, out_dir)\n\n if not dry_run:\n m3u_fd.write((image_rel_path + '\\n'))\n\n if not dry_run:\n m3u_fd.close()\n logging.info('Created M3U file for %s (%i image files)', \n software.name, len(software.images()))",
"def create_png_images(self):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n\n check_dir_of = self.locations.check_dir_of\n check_dir_of(self.locations.HISTO_PNG_U)\n check_dir_of(self.locations.HISTO_PNG)\n check_dir_of(self.locations.SOURCE_PNG)\n\n\n\n fmap_img = ImageUtils.load_nifti_image(self.locations.HIST_FMAP) #loading subject nifti files\n volumes = []\n try:\n for s in self.locations.SOURCES:\n volumes.append(ImageUtils.load_nifti_image(s))\n except IOError as e:\n print Console.FAIL + 'There are errors loading nifi files for subject %s'%self.subject + Console.ENDC\n return False\n \n\n num_slices = volumes[0].shape[2] #use first volume to check expected number of slices\n\n self.locations.create_empty_dir(self.locations.IMAGES_DIR)\n\n print 'Creating input PNGs for %s'%self.subject\n for k, vol in enumerate(volumes):\n for i in range(num_slices):\n imslice = ImageUtils.data_to_bytescale_rgb(vol[:, :, i])\n im = Image.fromarray(imslice)\n im.save(self.locations.SOURCE_PNG % (self.locations.LABELS[k],i))\n\n \n print 'Creating histology PNGs for %s'%self.subject\n for i in range(num_slices):\n\n im_unscaled = ImageUtils.data_to_unscaled_rgb(fmap_img[:, :, i]); #keeps the original values\n im_unscaled = Image.fromarray(im_unscaled)\n im_unscaled = im_unscaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_unscaled.save(self.locations.HISTO_PNG_U % i)\n\n im_scaled = ImageUtils.data_to_bytescale_rgb(fmap_img[:,:,i]); # bytescaled histology\n im_scaled = Image.fromarray(im_scaled)\n im_scaled = im_scaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_scaled.save(self.locations.HISTO_PNG % i)\n\n print\n return True",
"def main():\n\n # Just grab all files - we'll use try/except to filter\n images = glob.glob(os.path.join(args.input_dir, '*.*'))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for img_file in images:\n print(img_file)\n try:\n np_img = plt.imread(img_file)\n print(np_img.shape)\n img_name = img_file.split(os.sep)[-1]\n new_img_file = os.path.join(args.output_dir, img_name)\n pad_image(np_img, new_img_file)\n except Exception as e:\n print('Warning: {}. Skpping file.'.format(e))\n continue",
"def generate_thumbnail(self, img_path):\n\n thumb_path = self.thumbnail_path(img_path)\n dirpath = os.path.dirname(thumb_path)\n try:\n os.makedirs(dirpath)\n except OSError: # path exists\n pass\n\n cmd = [\n '/usr/local/bin/gm',\n 'convert',\n '-thumbnail', '256x256>',\n '-background', 'transparent',\n '-gravity', 'center',\n '-extent', '256x256',\n img_path, thumb_path\n ]\n\n retcode = subprocess.call(cmd)\n\n if retcode:\n log.error('convert exited with %d : %s', retcode, img_path)\n return False\n\n log.debug('Wrote thumbnail for `%s` to `%s`.', img_path, thumb_path)\n\n return True",
"def createAllImageFiles(poly, name) :\n \n for i in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)",
"def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])"
] |
[
"0.6649731",
"0.6447098",
"0.6351097",
"0.63092995",
"0.6193053",
"0.61820424",
"0.61727613",
"0.6171277",
"0.6159675",
"0.61490023",
"0.6126385",
"0.610195",
"0.6096374",
"0.6060699",
"0.6001953",
"0.59990644",
"0.5988286",
"0.596851",
"0.59632564",
"0.59625936",
"0.59452736",
"0.5888819",
"0.5881254",
"0.5866567",
"0.5864131",
"0.5838219",
"0.5827237",
"0.5820963",
"0.5814987",
"0.5813457"
] |
0.7358759
|
0
|
Expect a binary blob of image data from wand and a filename. Write the binary blob to the file.
|
def save_blob(self, img_blob, filename):
    # Write the raw image bytes produced by Wand straight to disk; the context
    # manager guarantees the file is closed even if the write fails.
    with open(filename, "wb") as out_file:
        out_file.write(img_blob)
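
# Standalone sketch of how such a blob is typically produced with Wand
# (assumed context; page.pdf and top.png are placeholder names, and rendering
# a PDF page additionally requires Ghostscript to be installed).
from wand.image import Image

def pdf_page_to_png(pdf_path, png_path):
    # Rasterise the first PDF page, encode it as PNG bytes, and write the blob.
    with Image(filename=pdf_path + "[0]") as img:
        blob = img.make_blob("png")
    with open(png_path, "wb") as out_file:
        out_file.write(blob)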
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_img_to_fs(name, data):\n with open(name, \"wb\") as fout:\n fout.write(data)",
"def write(filename, data):\r\n with open(filename, \"wb\") as f:\r\n pic.dump(data, f)",
"def write_data(writer: UFOWriter, filename: str, data: bytes) -> None:\n writer.writeImage(filename, data)",
"def _save_binary(file_name, data):\n with open(file_name, \"wb\") as f:\n cp.dump(data, f)",
"def writeimage(self, fp):\n execfile = open(self.binpath, \"w\")\n databuf = fp.read(4096)\n while databuf:\n execfile.write(databuf)\n databuf = fp.read(4096)\n execfile.flush()\n execfile.close()\n os.chmod(self.binpath, stat.S_IRWXU)",
"def write_img_to_db():\n with lite.connect(\"test.db\") as con:\n cur = con.cursor()\n data = read_image_from_fs()\n binary = lite.Binary(data)\n cur.execute(\"INSERT INTO Images(Data) VALUES (?)\", (binary,))",
"def test_binary_contents(file, tmp_path):\n name = tmp_path / \"1px.gif\"\n ret = file.managed(name=str(name), contents=BINARY_FILE)\n assert ret.result is True",
"def write_image(fname, data):\n # Get the file extension for this file, assuming it is the last continuous string after the last period\n fmt = fname.split(\".\")[-1]\n return writers[fmt](fname, data)",
"def save(self, file=None, filename=None):\n if file is None and filename is None:\n raise TypeError('expected an argument')\n elif file is not None and filename is not None:\n raise TypeError('expected only one argument; but two passed')\n elif file is not None:\n if isinstance(file, types.FileType) and hasattr(libc, 'fdopen'):\n fd = libc.fdopen(file.fileno(), file.mode)\n r = library.MagickWriteImageFile(self.wand, fd)\n if not r:\n self.raise_exception()\n else:\n if not callable(getattr(file, 'write', None)):\n raise TypeError('file must be a writable file object, '\n 'but it does not have write() method: ' +\n repr(file))\n file.write(self.make_blob())\n else:\n if not isinstance(filename, basestring):\n raise TypeError('filename must be a string, not ' +\n repr(filename))\n r = library.MagickWriteImage(self.wand, filename)\n if not r:\n self.raise_exception()",
"def test_write_img(img_: Tensor, ext: str) -> None:\n with NamedTemporaryFile(\"w\") as f:\n path = f\"{f.name}{ext}\"\n write_img(img_, path)\n img = read_image(path)\n torch.testing.assert_allclose(img, img_)",
"def set_blob ( self, object_class_id, object_instance_id, attribute_name, blob_file, file_name ) :\n try :\n inputs = []\n inputs.append(open(blob_file, 'rb'))\n for input in inputs:\n binary_data = input.read()\n blobfile = self.oracle_cursor.var(cx_Oracle.BLOB)\n blobfile.setvalue(0, binary_data)\n self.oracle_cursor.callproc(\"sdb_interface_pck.setBlob\", [object_class_id, object_instance_id, attribute_name, file_name, blobfile ])\n except Exception, err:\n print \"Error storing BLOB: ERROR: \" + str(err)\n raise",
"def testWriteBinaryData(self):\n file_writer = writers.FileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteBinaryData(b'Binary data')\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n expected_output_data = b'Binary data'\n self.assertEqual(output_data, expected_output_data)",
"def write_image(image_base64: str, filepath: pathlib.Path):\n with open(filepath, \"wb\") as f:\n f.write(base64.b64decode(image_base64))",
"def tiffwrite(filename, im):\n tf.imwrite(filename, im)",
"def write_bytes_to_file(bytes, filename):\n try:\n with open(filename, mode=\"bx\") as file:\n file.write(bytes)\n except FileExistsError:\n os.remove(filename)\n ResourceHandler.write_bytes_to_file(bytes, filename)\n except Exception as e:\n print(e)",
"def _Write(buf, filename):\n with open(filename, 'wb') as f:\n f.write(buf)",
"def write_blob(blob, header_data, path, levels):\n if path is None:\n path = \"\"\n\n mkdir(os.path.join(\"file\", path))\n mkdir(os.path.join(\"raw\", path))\n\n if blob.is_binary:\n content = None\n highlighted = None\n rendered = None\n nlines = 0\n else:\n content = blob.data.decode()\n nlines = len(content.strip().split(\"\\n\"))\n\n lexer = pick_lexer(blob.name, content)\n highlighted = highlight(content, lexer)\n\n if isinstance(lexer, lexers.MarkdownLexer):\n rendered = markdown.markdown(\n content,\n extensions=[\n \"extra\",\n \"admonition\",\n \"codehilite\",\n \"legacy_attrs\",\n \"legacy_em\",\n \"meta\",\n # \"nl2br\",\n \"sane_lists\",\n \"smarty\",\n \"toc\",\n \"wikilinks\",\n ],\n )\n else:\n rendered = None\n\n data = {\n \"size\": blob.size,\n \"name\": blob.name,\n \"mode\": format_filemode(blob.filemode),\n \"full_name\": os.path.join(path, blob.name),\n \"binary\": blob.is_binary,\n \"nlines\": nlines,\n }\n\n write_output(\n template=\"file.html\",\n outfile=f\"file/{path}/{blob.name}.html\",\n content=highlighted,\n rendered=rendered,\n name=blob.name,\n header=header_data,\n rootpath=\"/\".join([\"..\"] * levels),\n file=data,\n )\n\n raw_path = os.path.join(\"raw\", path, blob.name)\n with open(raw_path, \"wb\") as out_fh:\n out_fh.write(blob.data)\n\n return data",
"def test_read_img(\n img_: Tensor, ext: str, write_image: Callable[..., Any]\n) -> None:\n with NamedTemporaryFile(\"w\") as f:\n path = f.name + ext\n write_image(img_, path)\n img = read_img(path)\n torch.testing.assert_allclose(img, img_)",
"def _save(self, data: PIL.Image) -> None:\n with self._fs.open(self._filepath, mode=\"wb\") as f:\n data.save(f)",
"def write(self, filename):\n f = open(filename, 'bw')\n\n # file header (14)\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14 + 40 + self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(14 + 40))\n\n # image header (40)\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n # pixel data\n for x in range(self.height):\n for y in range(self.width):\n f.write(self.pixels[x][y])\n f.close()",
"def save_image(data, file_path):\n with open(file_path, 'wb'):\n prefix = 'data:image/webp;base64,'\n data = data[len(prefix):]\n byte_data = base64.b64decode(data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n img.save(file_path)\n return True",
"def binary_write(iring, file_ext='out', *args, **kwargs):\n return BinaryFileWriteBlock(iring, file_ext, *args, **kwargs)",
"def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)",
"def write_bytes_to_image(self, file_path):\n data_manipulation.bytes_to_image(self.bytes, file_path)",
"def write(self, blob):\r\n return RecordIO.Writer.do_write(self._fp, blob, self._codec, sync=self._sync)",
"def image_to_fp(image, image_format):\n # type: (Any, str) -> IO[bytes]\n fp = io.BytesIO()\n image.save(fp, format=image_format) # save the content to fp\n fp.seek(0)\n return fp",
"def _write(self, stream):\n\n self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)",
"def test_upload_binary(self):\n uploadFile = os.path.join(testdatadir, \"upload.data.gz\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED",
"async def dump_blob(elem, elem_type=None):\n elem_is_blob = isinstance(elem, x.BlobType)\n data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem\n if data is None or len(data) == 0:\n return b''\n if isinstance(data, (bytes, bytearray, list)):\n return base64.b16encode(bytes(data))\n else:\n raise ValueError('Unknown blob type')",
"def write_image(self, image_name, image):\n raise NotImplementedError"
] |
[
"0.65800583",
"0.64400434",
"0.64122224",
"0.6264098",
"0.61690265",
"0.6132258",
"0.6069596",
"0.60462534",
"0.60354304",
"0.5993703",
"0.5823665",
"0.5802249",
"0.573362",
"0.5722561",
"0.56572026",
"0.563467",
"0.5620268",
"0.55981404",
"0.5575564",
"0.55676",
"0.5556569",
"0.5529774",
"0.5503394",
"0.54928756",
"0.54801375",
"0.5477435",
"0.5474554",
"0.5466866",
"0.54664165",
"0.54645693"
] |
0.66238785
|
0
|
Read from the file pointer, write an intermediate file, and then copy it to the final destination.
|
def single_file_write(self, file_pointer, filename):
temp_file = "resources/temp_file"
file_pointer.seek(0)
with open(temp_file, "wb") as output_file:
shutil.copyfileobj(file_pointer, output_file)
os.rename(temp_file, filename)
log.info("Saved file: %s", filename)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def copy_file(input, output):\n for f in input:\n while True:\n chunk = f.read(1024)\n if not chunk:\n break\n output.write(chunk)\n output.flush()",
"def copy_file(file, destination):\n with open(file, 'rb') as infile, open(destination, 'wb') as outfile:\n outfile.write(infile.read())",
"def extract_file(f, seekpt, size, dest_path):\n f.seek(seekpt)\n # Clear the file in case there are permission problems\n subprocess.call(['rm', '-f', dest_path])\n with open(dest_path, 'wb') as out:\n while size:\n bytes=min(size, 8192)\n out.write(f.read(bytes))\n size -= bytes",
"def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()",
"def copy_file_range(self, in_file, out_file):\n in_file.seek(self.range_from)\n # Add 1 because the range is inclusive\n bytes_to_copy = 1 + self.range_to - self.range_from\n buf_length = 64*1024\n bytes_copied = 0\n while bytes_copied < bytes_to_copy:\n read_buf = in_file.read(min(buf_length, bytes_to_copy-bytes_copied))\n if len(read_buf) == 0:\n break\n out_file.write(read_buf)\n bytes_copied += len(read_buf)\n return bytes_copied",
"def copy_file_range(self, in_file, out_file):\n in_file.seek(self.range_from)\n # Add 1 because the range is inclusive\n bytes_to_copy = 1 + self.range_to - self.range_from\n buf_length = 64*1024\n bytes_copied = 0\n while bytes_copied < bytes_to_copy:\n read_buf = in_file.read(\n min(buf_length, bytes_to_copy-bytes_copied))\n if len(read_buf) == 0:\n break\n out_file.write(read_buf)\n bytes_copied += len(read_buf)\n return bytes_copied",
"def copySubRangeOfFile(inputFile, fileStart, fileEnd, outputFileHandle):\n with open(inputFile, 'r') as fileHandle:\n fileHandle.seek(fileStart)\n data = fileHandle.read(fileEnd - fileStart)\n assert len(data) == fileEnd - fileStart\n outputFileHandle.write(data)",
"def __extract_file( self, data_offset: int, data_length: int, fp_out: IO[bytes] ) -> bool:\n\n if not self.opened:\n self.log( \"err\", \"file is closed\" )\n return False\n\n if not \"w\" in fp_out.mode:\n self.log( \"err\", \"invalid file-handle for output file\")\n return False\n\n self.fp.seek( data_offset )\n\n goal = data_offset + data_length\n\n try:\n while 1:\n rest = goal - self.fp.tell()\n\n if rest < 4096:\n fp_out.write( self.fp.read( rest ) )\n break\n else:\n fp_out.write( self.fp.read( 4096 ) )\n except:\n self.log( \"err\", \"Could not extract file! Are we out of space?\" )\n return False\n\n return True",
"def copyfile(source, dest, buffer_size=1024*1024):\n if not hasattr(source, 'read'):\n source = open(source, 'rb')\n if not hasattr(dest, 'write'):\n dest = open(dest, 'wb')\n while 1:\n copy_buffer = source.read(buffer_size)\n if copy_buffer:\n dest.write(copy_buffer)\n else:\n break\n source.close()\n dest.close()\n return True",
"def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)",
"def copy_file(file_name, new_file_name):\n\n import os\n\n if not os.path.exists(file_name):\n raise FileNotFoundError\n\n with open(str(file_name), 'rb') as infile:\n with open(str(new_file_name), 'wb') as outfile:\n while True:\n buff = infile.read(10240)\n if buff:\n outfile.write(buff)\n else:\n break\n\n return",
"def copy_file(fs, inpath, outpath):\n fs.copy(inpath, outpath)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def writefile(path, instream, start=None, end=None, append=False):",
"def write_to_file(original_path, new_path):\n print(f\"[INFO]: Transform data from binary to text file {new_path}\")\n with open(new_path, mode='wt', encoding='utf-8') as new_file:\n with open(original_path, mode='rb') as original_file:\n for line in original_file:\n new_file.write(line.decode())",
"def _copy_file ( self, source, dest ):\n return",
"def cp_to_file(fn0, fn):\n\n # keep rewriting attributes\n shutil.copyfile(fn0, fn)",
"def process_file(cmap, source, destination):\n line = source.readline()\n while line:\n destination.write(process_line(cmap, line))\n line = source.readline()\n\n source.close()\n destination.close()",
"def copy_file(self, dst, tmpdir=None):\n if tmpdir is None:\n tmpfn = sameDir\n else:\n tmpfn = lambda _: tmpdir._path\n assert isinstance(dst, Path)\n with open(self._path, 'rb') as src_fd:\n with safeopen(dst._path, 'wb', useDir=tmpfn) as dst_fd:\n copyfileobj(src_fd, dst_fd)",
"def copy_fd(self, src_fd, tmpdir=None):\n if tmpdir is None:\n tmpfn = sameDir\n else:\n tmpfn = lambda _: tmpdir._path\n mode = 'w'\n if 'b' in src_fd.mode:\n mode += 'b'\n with safeopen(self._path, mode, useDir=tmpfn) as dst_fd:\n copyfileobj(src_fd, dst_fd)",
"def writefile(name, instream, start=None, end=None, append=False):",
"def copy_file_out(self, path, callback=None):\n try:\n self.copy_volume(path, self.device, callback=callback)\n except IOError, e:\n logger.exception(\"copy_file_out failed with '%s'\" % e)\n raise ISCSICopyFailed()",
"def _copy(self):\n for d in self._current_chunk:\n self.out.write(d)",
"def copy(self, data, fobject_factory=tempfile.TemporaryFile):\n datastream = fobject_factory()\n self.writestream(data, datastream)\n datastream.seek(0)\n self.copystream(datastream)\n datastream.close()",
"def _copy_file(executor, s3_uploader, relative_path, filename):\n try:\n src = os.path.join(intermediate_path, relative_path, filename)\n dst = os.path.join(tmp_dir_path, relative_path, \"{}.{}\".format(_timestamp(), filename))\n shutil.copy2(src, dst)\n executor.submit(_upload_to_s3, s3_uploader, relative_path, dst, filename)\n except FileNotFoundError: # noqa ignore=F821\n # Broken link or deleted\n pass\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to copy file to the temporary directory.\")",
"def inout(input_, output_):\n while True:\n chunk = input_.read(1024)\n if not chunk:\n break\n output_.write(chunk)",
"def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):\n source_file_object = file_entry.GetFileObject(\n data_stream_name=data_stream_name)\n if not source_file_object:\n return\n\n try:\n with open(destination_file, 'wb') as destination_file_object:\n source_file_object.seek(0, os.SEEK_SET)\n\n data = source_file_object.read(self._COPY_BUFFER_SIZE)\n while data:\n destination_file_object.write(data)\n data = source_file_object.read(self._COPY_BUFFER_SIZE)\n\n finally:\n source_file_object.close()",
"def overwrite_file(self):\n\n new_file = open(self.temp_filename, 'r')\n file = open(self.filename, 'w')\n file.writelines(new_file.readlines())\n new_file.close()\n file.close()\n os.remove(self.temp_filename)"
] |
[
"0.6279301",
"0.6177359",
"0.58933413",
"0.58200485",
"0.5752162",
"0.57351536",
"0.56330127",
"0.5618136",
"0.5591773",
"0.5576094",
"0.5556444",
"0.5539393",
"0.5537127",
"0.5537127",
"0.5537127",
"0.5520664",
"0.5498421",
"0.5457351",
"0.5456319",
"0.5376576",
"0.53366977",
"0.5331048",
"0.5308449",
"0.5296879",
"0.5288732",
"0.52786833",
"0.5254625",
"0.5245353",
"0.5193113",
"0.5158772"
] |
0.642381
|
0
|
Calculates easting, northing and coordinate precision from the station grid reference. Returns the station dictionary with the additional keys.
|
def calcStationCoords(station, gridSquares):
# calculate coordinates and precision
gridRef = station["gridReference"]
gridCode = gridRef[:2]
station["precision"] = 10 ** (5 - len(gridRef[2:])/2) # Units: meters
station["easting"] = (
gridSquares[gridCode][0] + int(gridRef[2:len(gridRef[2:])/2 + 2]) *
station["precision"]
)
station["northing"] = (
gridSquares[gridCode][1] + int(gridRef[len(gridRef[2:])/2 + 2:]) *
station["precision"]
)
return station
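A short worked example; the gridSquares entry is an illustrative origin for OS grid square TQ (500 km easting, 100 km northing) and is an assumption, not part of the function above:

# Worked example with an illustrative grid-square lookup table.
gridSquares = {"TQ": (500000, 100000)}
station = {"gridReference": "TQ3899880402"}
calcStationCoords(station, gridSquares)
# 10 digits -> 5 per axis -> precision = 10 ** (5 - 5) = 1 m
# easting  = 500000 + 38998 * 1 = 538998
# northing = 100000 + 80402 * 1 = 180402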
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getStandardGeoProperties():\n geoproperties = {\n 'Hsin': {\n 'tl': {\n 'lat': 21.108,\n 'long': -158.584,\n },\n 'br': {\n 'lat': 21.9,\n 'long': -157.392,\n }\n },\n 'Hsout':{\n 'tl': {\n 'lat': 20.33,\n 'long': -159.87,\n },\n 'br': {\n 'lat': 22.7,\n 'long': -156.3,\n }\n },\n }\n return geoproperties",
"def grid_to_geodetic(self, northing, easting):\n\n ξ = (northing - self.fn) / (self.k0 * self.â)\n η = (easting - self.fe) / (self.k0 * self.â)\n\n ξ_prim = ξ -\\\n self.δ1 * math.sin(2 * ξ) * math.cosh(2 * η) -\\\n self.δ2 * math.sin(4 * ξ) * math.cosh(4 * η) -\\\n self.δ3 * math.sin(6 * ξ) * math.cosh(6 * η) -\\\n self.δ4 * math.sin(8 * ξ) * math.cosh(8 * η)\n\n η_prim = η -\\\n self.δ1 * math.cos(2 * ξ) * math.sinh(2 * η) -\\\n self.δ2 * math.cos(4 * ξ) * math.sinh(4 * η) -\\\n self.δ3 * math.cos(6 * ξ) * math.sinh(6 * η) -\\\n self.δ4 * math.cos(8 * ξ) * math.sinh(8 * η)\n\n φ_star = math.asin(math.sin(ξ_prim) / math.cosh(η_prim))\n δλ = math.atan(math.sinh(η_prim) / math.cos(ξ_prim))\n\n λ = self.λ0 + δλ\n φ = φ_star + math.sin(φ_star) * math.cos(φ_star) * (self.A_star +\n self.B_star * math.sin(φ_star) ** 2 +\n self.C_star * math.sin(φ_star) ** 4 +\n self.D_star * math.sin(φ_star) ** 6)\n\n return math.degrees(φ), math.degrees(λ)",
"def computeGC2(self, lon, lat, depth):\n # This just returns defaults of zero, which will hopefully behave\n # gracefully as used in GMPEs.\n dict = {\"rx\": np.zeros_like(lon),\n \"ry\": np.zeros_like(lon),\n \"ry0\": np.zeros_like(lon),\n \"U\": np.zeros_like(lon),\n \"T\": np.zeros_like(lon)\n }\n return dict",
"def british_national_grid_to_lonlat(eastings, northings):\n\n # E, N are the British national grid coordinates - eastings and northings\n\n # The Airy 180 semi-major and semi-minor axes used for OSGB36 (m)\n a, b = 6377563.396, 6356256.909\n # Scale factor on the central meridian\n F0 = 0.9996012717\n # Latitude of true origin (radians)\n lat0 = np.deg2rad(49)\n # Longtitude of true origin and central meridian (radians)\n lon0 = np.deg2rad(-2)\n # Northing & easting of true origin (m)\n N0, E0 = -100000, 400000\n # eccentricity squared\n e2 = 1 - (b*b)/(a*a)\n n = (a-b)/(a+b)\n\n # Iterate through the pairs of values in eastings and northings.\n lonlist, latlist = [], []\n for xy in zip(eastings, northings):\n\n E = xy[0]\n N = xy[1]\n\n # Initialise the iterative variables\n lat, M = lat0, 0\n\n while N - N0 - M >= 0.00001: # Accurate to 0.01mm\n lat = (N - N0 - M)/(a * F0) + lat\n M1 = (1 + n + (5./4) * n**2 + (5./4) * n**3) * (lat-lat0)\n M2 = (3*n + 3 * n**2 + (21./8)*n**3) * np.sin(lat-lat0) * \\\n np.cos(lat+lat0)\n M3 = ((15./8) * n**2 + (15./8)*n**3) * np.sin(2*(lat-lat0)) * \\\n np.cos(2 * (lat+lat0))\n M4 = (35./24)*n**3 * np.sin(3*(lat-lat0)) * np.cos(3*(lat+lat0))\n # meridional arc\n M = b * F0 * (M1 - M2 + M3 - M4)\n\n # transverse radius of curvature\n nu = a * F0 / np.sqrt(1-e2 * np.sin(lat)**2)\n\n # meridional radius of curvature\n rho = a * F0 * (1-e2) * (1-e2 * np.sin(lat)**2)**(-1.5)\n eta2 = nu / rho-1\n\n secLat = 1./np.cos(lat)\n VII = np.tan(lat) / (2 * rho * nu)\n VIII = np.tan(lat) / (24 * rho * nu**3) * (5 + 3 * np.tan(lat)**2 +\n eta2 - 9 * np.tan(lat)**2 * eta2)\n IX = np.tan(lat) / (720 * rho * nu**5) * (61 + 90 * np.tan(lat)**2 +\n 45 * np.tan(lat)**4)\n X = secLat / nu\n XI = secLat / (6 * nu**3) * (nu / rho + 2 * np.tan(lat)**2)\n XII = secLat / (120 * nu**5) * (5 + 28 * np.tan(lat)**2 + 24 *\n np.tan(lat)**4)\n XIIA = secLat / (5040 * nu**7) * (61 + 662 * np.tan(lat)**2 + 1320 *\n np.tan(lat)**4 + 720 * np.tan(lat)**6)\n dE = E-E0\n\n # These are on the wrong ellipsoid currently: Airy1830. 
(Denoted by _1)\n lat_1 = lat - VII * dE**2 + VIII * dE**4 - IX * dE**6\n lon_1 = lon0 + X * dE - XI * dE**3 + XII * dE**5 - XIIA * dE**7\n\n # Want to convert to the GRS80 ellipsoid.\n # First convert to cartesian from spherical polar coordinates\n H = 0 # Third spherical coord.\n x_1 = (nu / F0 + H) * np.cos(lat_1) * np.cos(lon_1)\n y_1 = (nu / F0 + H) * np.cos(lat_1) * np.sin(lon_1)\n z_1 = ((1-e2) * nu / F0 + H) * np.sin(lat_1)\n\n # Perform Helmut transform (to go between Airy 1830 (_1) and GRS80 (_2))\n s = -20.4894 * 10**-6 # The scale factor -1\n # The translations along x,y,z axes respectively\n tx, ty, tz = 446.448, -125.157, + 542.060\n # The rotations along x,y,z respectively, in seconds\n rxs, rys, rzs = 0.1502, 0.2470, 0.8421\n # And in radians\n rx = rxs * np.pi / (180 * 3600.)\n ry = rys * np.pi / (180 * 3600.)\n rz = rzs * np.pi / (180 * 3600.)\n\n x_2 = tx + (1 + s) * x_1 + (-rz) * y_1 + (ry) * z_1\n y_2 = ty + (rz) * x_1 + (1 + s) * y_1 + (-rx) * z_1\n z_2 = tz + (-ry) * x_1 + (rx) * y_1 + (1 + s) * z_1\n\n # Back to spherical polar coordinates from cartesian\n # Need some of the characteristics of the new ellipsoid\n\n # The GSR80 semi-major and semi-minor axes used for WGS84(m)\n a_2, b_2 = 6378137.000, 6356752.3141\n # The eccentricity of the GRS80 ellipsoid\n e2_2 = 1 - (b_2 * b_2) / (a_2 * a_2)\n p = np.sqrt(x_2**2 + y_2**2)\n\n # Lat is obtained by an iterative proceedure:\n lat = np.arctan2(z_2, (p * (1-e2_2))) # Initial value\n latold = 2 * np.pi\n while abs(lat - latold) > 10**-16:\n lat, latold = latold, lat\n nu_2 = a_2 / np.sqrt(1-e2_2 * np.sin(latold)**2)\n lat = np.arctan2(z_2 + e2_2 * nu_2 * np.sin(latold), p)\n\n # Lon and height are then pretty easy\n lon = np.arctan2(y_2, x_2)\n H = p / np.cos(lat) - nu_2\n\n # Convert to degrees\n latlist.append(np.rad2deg(lat))\n lonlist.append(np.rad2deg(lon))\n\n # Convert to NumPy arrays.\n lon = np.asarray(lonlist)\n lat = np.asarray(latlist)\n\n # Job's a good'n.\n return lon, lat",
"def grid_vals(grid):\n\tletters = list(grid)\n\t#print \"---------------------------------\\n-------------------\"\n\t#print letters\n\t#print \"----------------------------------\\n-------------------\"\n\tassert len(letters) == 81\n\ttempdict = zip(squares, letters)\n\treturn dict(tempdict)",
"def grid_values(self, grid):\n chars = [col for col in grid if col in self.digits or col in '0.']\n assert len(chars) == 81\n return dict(zip(self.squares, chars))",
"def test_under_86km():\n z = np.array([50000.0, 70000.0, 86000.0])\n h = util.geometric_to_geopotential(z)\n expected_h = np.array([49610.0, 69238., 84852.0])\n expected_T = np.array([270.65, 219.585, 186.87])\n expected_p = np.array([79.779, 5.2209, 0.37338])\n expected_rho = np.array([0.0010269, 0.000082829, 0.000006958])\n\n h, T, p, rho = coesa.table(h)\n \n assert_array_almost_equal(h, expected_h, decimal=0)\n assert_array_almost_equal(T, expected_T, decimal=2)\n assert_array_almost_equal(p, expected_p, decimal=3)\n assert_array_almost_equal(rho, expected_rho, decimal=7)",
"def toDict(self):\n\n aDict = {}\n\n # Required Keys\n try:\n aDict[self.E0_KEY] = self.e0.toDict()\n aDict[self.E1_KEY] = self.e1.toDict()\n aDict[self.E2_KEY] = self.e2.toDict()\n aDict[self.MAXIMUM_HORIZONTAL_KEY] = self.maximumHorizontalProjection\n aDict[self.MAXIMUM_VERTICAL_KEY] = self.maximumVerticalProjection\n aDict[self.EQUIVALENT_HORIZONTAL_KEY] = self.equivalentHorizontalRadius\n\n except (NameError, AttributeError) as e:\n print(\"Missing required data error: %s\" % e)\n\n return aDict",
"def get_bounds(self):\n\n northing=self.f.variables['y']\n easting=self.f.variables['x']\n\n lat1,lon1 = utm.to_latlon(np.min(easting),np.min(northing),11,northern=True)\n lat2,lon2 = utm.to_latlon(np.max(easting),np.max(northing),11,northern=True)\n\n return (lon1,lon2,lat1,lat2)",
"def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}",
"def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))",
"def getDictWells(self):\n #Method begins here\n #nx=self.__grid['nx'] #From the geometry in grid\n ny=self.__grid['ny']\n nz=self.__grid['nz']\n minx=self.__grid['ox']\n miny=self.__grid['oy']\n minz=self.__grid['oz']\n rx=self.__grid['dx']\n ry=self.__grid['dy']\n rz=self.__grid['dz']\n \n # well package\n # Remember to use zero-based layer, row, column indices!\n lcoordw=np.zeros((self.__nwells,3),dtype=np.int32)\n for i in range (self.__nwells):\n lcoordw[i,0]=floor((self.__dflst.iloc[i,3]-minx)/rx)\n #In MODFLOW y ans z coordinates are inverted\n lcoordw[i,1]=floor((miny+ry*ny-self.__dflst.iloc[i,4])/ry)\n lcoordw[i,2]=floor((minz+rz*nz-self.__dflst.iloc[i,5])/rz)\n \n nper=self.__df.getForcPer()\n wel_sp = {} \n for i in range(nper):\n lst=[]\n for j in range(self.__nwells):\n pumping_rate=self.__dfwells.iloc[i+1,j+1]\n lst.append( [lcoordw[j,2], lcoordw[j,1], lcoordw[j,0], pumping_rate] )\n wel_sp[i]=lst\n print(wel_sp)\n \n print('*--- Succesfull reading of wells ---*')\n \n return wel_sp",
"def getSearchSpaceCoords(self):\r\n needed = {}\r\n coords = self.graph.getNodesCoords()\r\n for vertex, neighbours in self.search_space[1:]:\r\n needed[vertex] = coords[vertex]\r\n for arc in neighbours:\r\n needed[arc] = coords[arc]\r\n return needed",
"def _cal_grid_coordinates(self, nc_handle):\n print(\"calculating grid coordinates\")\n #\n x = np.zeros(self._grid[\"counts\"][0], dtype=float)\n y = np.zeros(self._grid[\"counts\"][1], dtype=float)\n z = np.zeros(self._grid[\"counts\"][2], dtype=float)\n \n for i in range(self._grid[\"counts\"][0]):\n x[i] = self._grid[\"origin\"][0] + i*self._grid[\"d0\"][0]\n\n for j in range(self._grid[\"counts\"][1]):\n y[j] = self._grid[\"origin\"][1] + j*self._grid[\"d1\"][1]\n\n for k in range(self._grid[\"counts\"][2]):\n z[k] = self._grid[\"origin\"][2] + k*self._grid[\"d2\"][2]\n\n self._set_grid_key_value(\"x\", x)\n self._set_grid_key_value(\"y\", y)\n self._set_grid_key_value(\"z\", z)\n\n for key in [\"x\", \"y\", \"z\"]:\n self._write_to_nc(nc_handle, key, self._grid[key])\n return None",
"def _get_econt_info(self, out_log):\n f = open_general(out_log)\n tmptxt = f.readlines()\n f.close()\n econt = {}\n itmp = search_string('[read_energy] number of energy points', tmptxt)\n if itmp>=0: econt['Nepts'] = int(tmptxt.pop(itmp).split()[-1])\n itmp = search_string('energies and weights are:', tmptxt)\n if itmp>=0:\n tmp = []\n for ie in range(econt['Nepts']):\n tmpline = tmptxt[itmp+4+ie].split()[1:]\n tmp.append([float(tmpline[0]), float(tmpline[1]), float(tmpline[2]), float(tmpline[3])])\n tmp = array(tmp)\n econt['epts'] = tmp[:,:2]\n econt['weights'] = tmp[:,2:]\n econt['emin'] = tmp[0,0]\n return econt",
"def ep2dict(EP, area=1.0):\n areafactor = 1.0 / area\n eparen = areafactor * EP['EPpasoA']['ren']\n epanren = areafactor * EP['EPpasoA']['nren']\n epatotal = eparen + epanren\n eparer = eparen / epatotal if epatotal else 0.0\n\n epren = areafactor * EP['EP']['ren']\n epnren = areafactor * EP['EP']['nren']\n eptotal = epren + epnren\n eprer = epren / eptotal if eptotal else 0.0\n\n epdict = {\"EPAren\": eparen, \"EPAnren\": epanren, \"EPAtotal\": epatotal, \"EPArer\": eparer,\n \"EPren\": epren, \"EPnren\": epnren, \"EPtotal\": eptotal, \"EPrer\": eprer}\n\n return epdict",
"def test_under_11km():\n z = np.array([500.0, 2500.0, 6500.0, 9000.0, 11000.0])\n h = util.geometric_to_geopotential(z)\n expected_h = np.array([500.0, 2499.0, 6493.0, 8987.0, 10981.0])\n expected_T = np.array([284.900, 271.906, 245.943, 229.733, 216.774])\n expected_p = np.array([95461.0, 74691.0, 44075.0, 30800.0, 22699.0])\n expected_rho = np.array([1.1673, 0.95695, 0.62431, 0.46706, 0.36480])\n\n h, T, p, rho = coesa.table(h)\n \n assert_array_almost_equal(h, expected_h, decimal=0)\n assert_array_almost_equal(T, expected_T, decimal=3)\n assert_array_almost_equal(p, expected_p, decimal=0)\n assert_array_almost_equal(rho, expected_rho, decimal=4)",
"def nancay():\n return coord.EarthLocation(lat=47.376511*u.deg, lon=2.1924002*u.deg)",
"def output_grid_information():\n # translate = [-74.26, 40.50]\n # scale = [0.02, 0.02]\n # step = 1\n\n translate = [0, 0]\n scale = [1, 1]\n step = 0.02\n\n lon_limits = [(-74.26 - translate[0]) / scale[0], (-73.76 - translate[0]) / scale[0]]\n lat_limits = [(40.48 - translate[1]) / scale[1], (40.94 - translate[1]) / scale[1]]\n\n lons = np.arange(lon_limits[0], lon_limits[1] - step, step)\n lats = np.arange(lat_limits[0], lat_limits[1] - step, step)\n\n all_json = {\n \"type\": \"FeatureCollection\"\n }\n\n gr_id = 0\n grid_df = pd.DataFrame(columns=['gr_id', 'c_lat', 'c_lon', 's_lon', 'w_lat', 'n_lon', 'e_lat'])\n features = []\n\n for lat in lats:\n for lon in lons:\n w_lon = lon\n e_lon = lon + step\n s_lat = lat\n n_lat = lat + step\n\n c_lon = lon + step / 2\n c_lat = lat + step / 2\n\n grid_df = grid_df.append(pd.DataFrame({\"gr_id\": [gr_id],\n \"c_lon\": [c_lon], \"c_lat\": [c_lat],\n \"w_lon\": [w_lon], \"s_lat\": [s_lat],\n \"e_lon\": [e_lon], \"n_lat\": [n_lat]}))\n\n coor = [[[s_lat, w_lon], [n_lat, w_lon], [n_lat, e_lon],\n [s_lat, e_lon], [s_lat, w_lon]]]\n\n feature = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": coor\n },\n \"properties\": {\n \"id\": str(gr_id)\n }\n }\n\n features.append(feature)\n\n gr_id += 1\n\n all_json['features'] = features\n\n with open(BaseDir + '/grid.geojson', 'w') as f:\n json.dump(all_json, f)\n\n grid_df.to_csv(BaseDir + '/grid_locs.csv', index=False)",
"def _get_gedi2a_main_data_dict(self) -> dict:\n gedi_l2a_count_start = pd.to_datetime(\"2018-01-01T00:00:00Z\")\n data = {\n # General identifiable data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n \"absolute_time\": (\n gedi_l2a_count_start\n + pd.to_timedelta(list(self[\"delta_time\"]), unit=\"seconds\")\n ),\n # Quality data\n \"sensitivity\": self[\"sensitivity\"][:],\n \"quality_flag\": self[\"quality_flag\"][:],\n \"solar_elevation\": self[\"solar_elevation\"][:],\n \"solar_azimuth\": self[\"solar_elevation\"][:],\n \"energy_total\": self[\"energy_total\"][:],\n # DEM\n \"dem_tandemx\": self[\"digital_elevation_model\"][:],\n \"dem_srtm\": self[\"digital_elevation_model_srtm\"][:],\n # Processing data\n \"selected_algorithm\": self[\"selected_algorithm\"][:],\n \"selected_mode\": self[\"selected_mode\"][:],\n # Geolocation data\n \"lon_lowestmode\": self[\"lon_lowestmode\"][:],\n \"longitude_bin0_error\": self[\"longitude_bin0_error\"][:],\n \"lat_lowestmode\": self[\"lat_lowestmode\"][:],\n \"latitude_bin0_error\": self[\"latitude_bin0_error\"][:],\n \"elev_lowestmode\": self[\"elev_lowestmode\"][:],\n \"elevation_bin0_error\": self[\"elevation_bin0_error\"][:],\n \"lon_highestreturn\": self[\"lon_highestreturn\"][:],\n \"lat_highestreturn\": self[\"lat_highestreturn\"][:],\n \"elev_highestreturn\": self[\"elev_highestreturn\"][:],\n } | {f\"rh{i}\": self[\"rh\"][:, i] for i in range(101)}\n return data",
"def __init__(self, gridname=None, verbose=False):\n self.gridname = gridname\n g = re.match(r'(EASE2_[NST])([0-9\\.]+)km', gridname)\n if g is None:\n print(\"%s : error parsing gridname %s\" % (__name__, gridname),\n file=sys.stderr,\n flush=True)\n raise ValueError\n projection = g.group(1)\n resolution = g.group(2)\n\n # Check for typos in resolution\n if resolution not in resolutions:\n print(\"%s : unrecognized resolution %s\" % (__name__, resolution),\n file=sys.stderr,\n flush=True)\n raise ValueError\n\n # The geotransform information\n # is the set of GDAL affine transform parameters:\n # (map_UL_x, scale_x, b, map_UL_y, d, scale_y)\n if projection == \"EASE2_N\":\n # The geotransform is the set of GDAL affine transform parameters:\n # (map_UL_x, scale_x, b, map_UL_y, d, scale_y)\n self.proj4text = \"+proj=laea +lat_0=90 +lon_0=0 \" + \\\n \"+x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m\"\n self.map_UL_x = -9000000.\n self.map_UL_y = 9000000.\n self.b = 0.\n self.d = 0.\n self.scale_x = float(resolution) * m_per_km\n self.scale_y = -1 * float(resolution) * m_per_km\n\n elif projection == \"EASE2_S\":\n self.proj4text = \"+proj=laea +lat_0=-90 +lon_0=0 \" + \\\n \"+x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m\"\n self.map_UL_x = -9000000.\n self.map_UL_y = 9000000.\n self.b = 0.\n self.d = 0.\n self.scale_x = float(resolution) * m_per_km\n self.scale_y = -1 * float(resolution) * m_per_km\n\n elif projection == \"EASE2_T\":\n self.proj4text = \"+proj=cea +lat_0=0 +lon_0=0 +lat_ts=30 \" \\\n \"+x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m\"\n self.map_UL_x = -17367530.44\n self.map_UL_y = 6756820.20000\n self.b = 0.\n self.d = 0.\n base_resolution_m = 25025.26000\n factor = resolutions.index(resolution)\n self.scale_x = base_resolution_m / (2. ** factor)\n self.scale_y = -1 * base_resolution_m / (2. ** factor)\n\n else:\n print(\"%s : unrecognized projection %s\" % (__name__, projection),\n file=sys.stderr,\n flush=True)\n raise ValueError\n\n # Thanks to affine help pages at\n # https://github.com/sgillies/affine/blob/master/README.rst\n # http://www.perrygeo.com/python-affine-transforms.html\n geotransform = (self.map_UL_x + self.scale_x / 2.,\n self.scale_x,\n self.b,\n self.map_UL_y + self.scale_y / 2.,\n self.d,\n self.scale_y)\n self.fwd = Affine.from_gdal(*geotransform)\n\n # Initialize and save coordinate transformation\n # for this projection\n self.gridSpatialRef = osr.SpatialReference()\n self.gridSpatialRef.SetFromUserInput(self.proj4text)\n\n # Initialize and save coordinate transformation\n # for EPSG4326 (lat/lon)\n self.epsg4326SpatialRef = osr.SpatialReference()\n self.epsg4326SpatialRef.SetFromUserInput(self.epsg4326Proj4text)\n\n # Initialize and save the forward and reverse transformations\n self.projToGeog = osr.CoordinateTransformation(\n self.gridSpatialRef, self.epsg4326SpatialRef)\n self.geogToProj = osr.CoordinateTransformation(\n self.epsg4326SpatialRef, self.gridSpatialRef)\n\n if verbose:\n print(\"%s : initialized new Ease2Transform object\" % (__name__),\n file=sys.stderr,\n flush=True)",
"def npdict(self):\n\n d = {}\n\n # per profile\n d['cruise'] = self.cruise()\n d['day'] = self.day()\n d['latitude'] = self.latitude()\n d['latitude_unc'] = self.latitude_unc()\n d['longitude'] = self.longitude()\n d['longitude_unc'] = self.longitude_unc()\n d['month'] = self.month()\n d['n_levels'] = self.n_levels()\n d['primary_header_keys'] = self.primary_header_keys()\n d['probe_type'] = self.probe_type()\n d['time'] = self.time()\n d['uid'] = self.uid()\n d['year'] = self.year()\n d['PIs'] = self.PIs()\n d['originator_station'] = self.originator_station()\n d['originator_cruise'] = self.originator_cruise()\n d['originator_flag_type'] = self.originator_flag_type()\n d['t_metadata'] = self.t_metadata()\n d['s_metadata'] = self.s_metadata()\n # per level\n d['s'] = self.s()\n d['s_unc'] = self.s_unc()\n d['s_level_qc'] = self.s_level_qc()\n d['s_profile_qc'] = self.s_profile_qc()\n d['s_qc_mask'] = self.s_qc_mask()\n d['t'] = self.t()\n d['t_unc'] = self.t_unc()\n d['t_level_qc'] = self.t_level_qc()\n d['t_profile_qc'] = self.t_profile_qc()\n d['t_qc_mask'] = self.t_qc_mask()\n d['z'] = self.z()\n d['z_unc'] = self.z_unc()\n d['z_level_qc'] = self.z_level_qc()\n d['oxygen'] = self.oxygen()\n d['phosphate'] = self.phosphate()\n d['silicate'] = self.silicate()\n d['pH'] = self.pH()\n d['p'] = self.p()\n\n return d",
"def calcLatLon(northing, easting):\n from math import asin, atan2, cos, log, pow, sin, sqrt\n\n # CONSUS Albers variables (EPSG: 5070)\n RE_NAD83 = 6378137.0\n E_NAD83 = 0.0818187034 # Eccentricity\n D2R = 0.01745329251 # Pi/180\n standardParallel1 = 43.\n standardParallel2 = 47.\n centralMeridian = -114.\n originLat = 30\n originLon = 0\n\n m1 = cos(standardParallel1 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel1 * D2R)), 2.0))\n m2 = cos(standardParallel2 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel2 * D2R)), 2.0))\n\n def calcPhi(i):\n sinPhi = sin(i * D2R)\n return (1.0 - pow(E_NAD83, 2.0)) * \\\n ((sinPhi/(1.0 - pow((E_NAD83 * sinPhi), 2.0))) -\n 1.0/(2.0 * E_NAD83) *\n log((1.0 - E_NAD83 * sinPhi)/(1.0 + E_NAD83 * sinPhi)))\n\n q0 = calcPhi(originLat)\n q1 = calcPhi(standardParallel1)\n q2 = calcPhi(standardParallel2)\n nc = (pow(m1, 2.0) - pow(m2, 2.0)) / (q2 - q1)\n C = pow(m1, 2.0) + nc * q1\n rho0 = RE_NAD83 * sqrt(C - nc * q0) / nc\n rho = sqrt(pow(easting, 2.0) + pow((rho0 - northing), 2.0))\n q = (C - pow((rho * nc / RE_NAD83), 2.0)) / nc\n beta = asin(q / (1.0 - log((1.0 - E_NAD83) / (1.0 + E_NAD83)) *\n (1.0 - pow(E_NAD83, 2.0))/(2.0 * E_NAD83)))\n a = 1.0 / 3.0 * pow(E_NAD83, 2.0) + 31.0 / 180.0 * \\\n pow(E_NAD83, 4.0) + 517.0 / 5040.0 * pow(E_NAD83, 6.0)\n b = 23.0/360.0 * pow(E_NAD83, 4.0) + 251.0 / 3780.0 * pow(E_NAD83, 6.0)\n c = 761.0/45360.0 * pow(E_NAD83, 6.0)\n theta = atan2(easting, (rho0 - northing))\n\n lat = (beta + a * sin(2.0 * beta) + b * sin(4.0 * beta) +\n c * sin(6.0 * beta))/D2R\n lon = centralMeridian + (theta / D2R) / nc\n coords = [lat, lon]\n\n return coords",
"def get_geometry(self):\n rows, cols = self.get_gridspec().get_geometry()\n return rows, cols, self.num1, self.num2",
"def cminfo_compute():\n from hera_mc import cm_sysutils \n h = cm_sysutils.Handling()\n cminfo = h.get_cminfo_correlator()\n snap_to_ant = {}\n ant_to_snap = {}\n for antn, ant in enumerate(cminfo['antenna_numbers']):\n name = cminfo['antenna_names'][antn]\n for pol in cminfo['correlator_inputs'][antn]:\n if pol.startswith('e'):\n e_pol = pol\n if pol.startswith('n'):\n n_pol = pol\n ant_to_snap[ant] = {}\n if e_pol != 'None':\n snapi_e, channel_e = snap_part_to_host_input(cminfo['correlator_inputs'][antn][0])\n ant_to_snap[ant]['e'] = {'host': snapi_e, 'channel': channel_e}\n if snapi_e not in snap_to_ant.keys():\n snap_to_ant[snapi_e] = [None] * 6\n snap_to_ant[snapi_e][channel_e] = name + 'E'\n if n_pol != 'None':\n snapi_n, channel_n = snap_part_to_host_input(cminfo['correlator_inputs'][antn][1])\n ant_to_snap[ant]['n'] = {'host': snapi_n, 'channel': channel_n}\n if snapi_n not in snap_to_ant.keys():\n snap_to_ant[snapi_n] = [None] * 6\n snap_to_ant[snapi_n][channel_n] = name + 'N'\n return snap_to_ant, ant_to_snap",
"def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders",
"def as_bounds(self) -> Dict[str, float]:\n return {\n \"left\": self.x,\n \"top\": self.y,\n \"right\": self.x + self.width,\n \"bottom\": self.y + self.height,\n }",
"def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right",
"def _resolution(self):\n _, xres, _, _, _, yres = self.geotransform\n return xres, yres",
"def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:\n tempDict = {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }\n\n\n\n return {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }"
] |
[
"0.58611155",
"0.5576505",
"0.55065113",
"0.5425901",
"0.5412877",
"0.53959215",
"0.5370489",
"0.53406626",
"0.5338572",
"0.5295297",
"0.52770424",
"0.52559906",
"0.5180949",
"0.51474416",
"0.5129869",
"0.510701",
"0.5102792",
"0.50732666",
"0.5068528",
"0.5057706",
"0.5056513",
"0.5047802",
"0.5046625",
"0.50297576",
"0.5026859",
"0.5024809",
"0.50237733",
"0.5011277",
"0.50112534",
"0.49941158"
] |
0.6553498
|
0
|
Adds river ID to station dictionary.
|
def addStationRiverID(station, riverIDs):
stationID = station["id"]
riverID = riverIDs.get(stationID)
station["riverId"] = riverID
return station
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_person_to_the_station(self, line, station):\n\n if line in self.__stations_dict:\n if station in self.__stations_dict[line]:\n self.__stations_dict[line][station] += 1\n else:\n self.__stations_dict[line][station] = 1\n else:\n self.__stations_dict[line] = {station: 1}",
"def add_station(self, station_id=None, time=None, location=None):",
"def save_new_lid(self):\n region = 'world' if self.city is None else self.city\n id_ = str(hash(self.cids[0]))[:5]\n output = 'new_venue_id_{}_{}'.format(id_, region)\n p.save_var(output, set(self.new_venues))",
"def station_id(self, station_id: str):\n\n self._station_id = station_id",
"def add_station(self, station):\n self.__stations.append(station)",
"def station_details_for(self, station_id):\n if not self.enabled:\n return None\n station_id_upper = station_id.upper()\n if not self.is_loaded():\n self.reload()\n if not station_id_upper in self.known_stations:\n prism_station = self.prism_station_details(station_id_upper)\n self.known_stations[station_id_upper] = prism_station\n return self.known_stations[station_id_upper]",
"def set_up_trader_id(trader_dict):\n trader_id = 0\n for name, trader in trader_dict.items():\n trader.trader_id = str(trader_id)\n trader_id += 1",
"def resolve_seer_pick(g, id):\n info_dict = {'id': id, 'affiliation': g[id]}\n # super hacky\n if info_dict['affiliation'] == 'b':\n info_dict['affiliation'] = 'v'\n elif info_dict['affiliation'] == 'c': # chupa looks like villager\n info_dict['affiliation'] = 'v'\n\n game_state['seer_info'].append(info_dict)\n if info_dict['affiliation'] == 'w':\n game_state['s_found_w_prev_night'] = True\n game_state['s_prev_night_id'] = id",
"def __add_to_users_dict(self, update):\n\n message = update.message.text.lower().split(\" \")\n line_num = int(message[1])\n station_num = int(message[2])\n station = self.Station(line_num, station_num)\n user = self.User(update)\n user.add_station(station)\n if user.id in self.__users.keys():\n self.__users[user.id].add_station(station)\n else:\n self.__users[user.id] = user",
"def make_station_dict(self):\n self.station_dict = {}\n\n # interates over stations and puts the amount of connections in the dict\n for station in self.stations:\n length = len(self.stations[station].connections)\n self.station_dict[station] = length\n \n return self.station_dict",
"def _generate_voter_in_dict(id: bytes, timestamp: int, prep: 'Prep') -> dict:\n voter_in_dict = {\n \"id\": '0x' + bytes.hex(id),\n \"timestamp\": timestamp,\n \"address\": str(prep.address),\n \"name\": prep.name,\n \"amount\": prep.delegated\n }\n return voter_in_dict",
"def insert(self):\n\t\t# create utc-date for when bird is added\n\t\tself.added = datetime.utcnow().strftime(\"%Y-%m-%d\")\n\n\t\t# build our bird-dict\n\t\tbird = {\n\t\t\t\"name\": self.name, \n\t\t\t\"family\": self.family, \n\t\t\t\"continents\": self.continent, \n\t\t\t\"visible\": self.visible, \n\t\t\t\"added\": self.added\n\t\t}\n\n\t\t# insert bird\n\t\tid = self.M.insert(bird)\n\n\t\treturn id",
"def _add_rid_to_vrf_list(self, ri):\n if ri.ex_gw_port or ri.router.get('gw_port'):\n driver = self.driver_manager.get_driver(ri.id)\n vrf_name = driver._get_vrf_name(ri)\n if not vrf_name:\n return\n if not self._router_ids_by_vrf.get(vrf_name):\n LOG.debug(\"++ CREATING VRF %s\" % vrf_name)\n driver._do_create_vrf(vrf_name)\n self._router_ids_by_vrf.setdefault(vrf_name, set()).add(\n ri.router['id'])",
"def _encode_and_store_(self, latitude, longitude, ID):\n hash = geohash.encode(latitude=latitude, longitude=longitude)\n self.storage[hash] = ID\n self.points_by_id[ID] = (latitude, longitude)",
"def register_new_scanner(self, tarchive_info_dict, center_id):\n\n # create a new candidate for the scanner\n candidate = Candidate(self.verbose)\n new_cand_id = candidate.generate_cand_id(self.db)\n column_names = (\n 'CandID', 'PSCID', 'RegistrationCenterID', 'Date_active',\n 'UserID', 'Entity_type', 'Date_registered',\n )\n values = (\n new_cand_id, 'scanner', center_id, datetime.datetime.now(),\n 'imaging.py', 'Scanner', datetime.datetime.now()\n )\n self.db.insert(\n table_name = 'candidate',\n column_names = column_names,\n values = values\n )\n\n # create the new scanner ID\n manufacturer = tarchive_info_dict['ScannerManufacturer'],\n serial_number = tarchive_info_dict['ScannerSerialNumber'],\n software = tarchive_info_dict['ScannerSoftwareVersion'],\n model = tarchive_info_dict['ScannerModel'],\n scanner_id = self.db.insert(\n table_name = 'mri_scanner',\n column_names = ('Manufacturer', 'Model', 'Serial_number', 'Software', 'CandID'),\n values = (manufacturer, model, serial_number, software, new_cand_id),\n get_last_id = True\n )\n\n return scanner_id",
"def add_vertex(self, vertex_id):\n # just add new dict entry\n self.vertices[vertex_id] = set()\n\n pass # TODO",
"def add_room(self, data):\n room_id = data['room_id']\n x, y = literal_eval(data['coordinates'])\n room_data = {'id': data['room_id'],\n 'title': data['title'],\n 'description' : data['description'],\n 'coordinates': literal_eval(data['coordinates']),\n 'elevation': data['elevation'],\n 'terrain': data['terrain'],\n 'exits' : {direction: '?' for direction in data['exits']}\n }\n self.rooms.setdefault(room_id, room_data)",
"def add_location(self, latitude, longitude, ID):\n\n self._encode_and_store_(latitude=latitude, longitude=longitude,\n ID=ID)",
"def add_candidate_to_election(self, election_id: str, candidate_id: str) -> dict:",
"def add_router_vrf(session, router_id):\n with session.begin(subtransactions=True):\n # Get the highest VRF number in the DB\n new_vrf = session.query(\n func.max(models.VppRouterVrf.vrf_id)).scalar() or 0\n new_vrf += 1\n\n row = models.VppRouterVrf(router_id=router_id, vrf_id=new_vrf)\n session.add(row)\n\n return new_vrf",
"def extra_state_attributes(self) -> dict[str, int | str]:\n return {\n ATTR_STATION_ID: self._station_id,\n ATTR_STATION_NAME: self._get_station_name(),\n }",
"def associate_node_id(tr, node=\"\"):\n return {\"id\": tr.get_uml_id(name=node)}",
"def add_new_arrival(self):\n pass",
"def dentist_id(self, dentist_id: float):\n\n self._dentist_id = dentist_id",
"def __init__(self, api_key, station_id):\n self._station_id = station_id\n self._api_key = api_key\n self.data = {}",
"def store(self, idCust, idBook, flag, id):\n allR=self.__loadFromFile()\n\n rt=Rent( idBook,idCust, flag, id)\n if rt in allR:\n raise RepositoryExceptionRent(\"\\n Duplicated id \\n\".upper())\n\n\n allR.append(rt)\n self.__storeToFile(allR)",
"def _add_id(self, attrs):\n _id = {}\n _id['id'] = str(attrs.get('name', ''))\n _id['valid_from'] = (\n _get_date_from_string(attrs.get('validFrom', '')))\n _id['created'] = (\n _get_date_from_string(attrs.get('created', '')))\n _id['device'] = str(attrs.get('device', ''))\n self._ids[str(attrs.get('name', ''))] = _id",
"def river_region(rr_id):\n r = RiverRegionRenderer(request, rr_id, None)\n return r.render()",
"def station_id(self) -> str:\n return self._station_id",
"def _addTrack(self, t, id_num):\n # Add a new sequence\n self.id_seq.append([])\n self.id_seq[self._n_cell].append(id_num)\n\n # Include the time it was added\n self.t_appearance.append(t)\n\n # Insert it in control variables\n self.dict_track[t][id_num] = self._n_cell\n self._n_cell += 1"
] |
[
"0.64741087",
"0.58891344",
"0.5817344",
"0.58151716",
"0.56741",
"0.53069776",
"0.510854",
"0.5107141",
"0.5069008",
"0.50093204",
"0.49964875",
"0.49900323",
"0.49865165",
"0.49712417",
"0.49245796",
"0.49185914",
"0.48994073",
"0.48912543",
"0.4868128",
"0.4858781",
"0.4856736",
"0.48527145",
"0.4843468",
"0.48373282",
"0.47980854",
"0.47904372",
"0.47877285",
"0.4778334",
"0.47694474",
"0.4764038"
] |
0.84891737
|
0
|
Field gain is in T/A, so to get amps we need to divide the field value by the field gain.
|
def set_field(coil, fieldValue, fieldGain):
    current = (fieldValue / fieldGain) * 1e3  # convert the current from amps to milliamps
print(current)
coil.current(current)
return
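A quick sanity check of the unit arithmetic, using a stand-in coil object (illustrative numbers; a real instrument driver is assumed to expose the same current() setter in mA):

# Stand-in coil for illustration only; real code passes the instrument driver.
class _DummyCoil:
    def current(self, milliamps):
        print(f"coil current set to {milliamps} mA")

# 0.05 T with a gain of 10 T/A -> (0.05 / 10) * 1e3 = 5.0 mA
set_field(_DummyCoil(), fieldValue=0.05, fieldGain=10.0)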
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def dB2gain(dB):\n V = math.exp(dB/20)\n return V",
"def gain(self):\n return self[1]",
"def update_playback_gain(self, val):\n self.playbackGain = 10**(5.0*(val - self.speedDial.maximum()/2)/self.speedDial.maximum())",
"def microphone_transferfactor(sensitivity: float) -> float:\n a = db2amp(sensitivity)\n return a * 1000 # convert it to mV",
"def get_mod_gain_val(self):\n return self.mod_gain_table[self.tx_pwr_dbm];",
"def gain2dB(gain):\n dB = 20*math.log(gain)\n return dB",
"def opamp_gain(R1, Rf):\n R1, Rf = map(_normalizevalue, (R1, Rf))\n gain = 1 + (Rf/R1)\n return gain",
"def field_strength_to_power_flux(field: float) -> float:\n\n power = np.float_power(np.abs(field), 2)\n power *= (0.5 * speed_of_light * epsilon_0)\n\n return power",
"def microphone_sensitivity(transferfactor: float) -> float:\n return amp2db(transferfactor/1000.)",
"def get_gain(self, channel_data):\n values = getattr(channel_data, self.field, None)\n if values is None:\n raise ValueError(f\"Channel group {channel_data} does not contain \"\n f\"{self.field} field.\")\n return values.astype(float)",
"def front_column_model_p_gain():",
"def gaindb(self, value):\n self._logger.debug(\"setting gain: %7.2f\", value)\n self._gaindb = value\n self._update()",
"def gain(self, value: int):\n self._gain = value",
"def gain(self) -> int:\n return self._gain",
"def get_gain(self):\n gain = float(self._driver.Gain.Value)\n return gain",
"def opamp_inverting_gain(R1, Rf):\n R1, Rf = map(_normalizevalue, (R1, Rf))\n gain = -(Rf/R1)\n return gain",
"def lms_gain(self):\n return self._lms_gain",
"def tGain(self, dt, r):\n\t return 1./(10.**6)*r#*precip.r",
"def digital_gain():\n def r(x):\n return x/512.\n\n def w(x):\n return int(x*512)\n return r, w",
"def get_mod_gain_output(self, mod_gain_input):\n\n output = ((( (mod_gain_input*self.tx_rate_decim_dict[self.dec_filt_rate]) & 0x7FFFFF) >> 11)*\\\n (self.mod_gain_table[self.tx_pwr_dbm])) >> 11\n return output",
"def velocity_p_gain(self):\n return self._read(MX_VELOCITY_P_GAIN)",
"def normalize_gains(self, gain=None):\n if gain is None:\n gain = super().get_gains(validate=True)\n average_gain = self.normalize_gains(gain)\n super().set_gains(gain, flag_normalized=False)\n return average_gain\n\n discard_flags = self.skip_flags & ~self.gain_flag\n average_gain = self.channel_group.get_typical_gain_magnitude(\n gain, discard_flag=discard_flags)\n\n if average_gain == 1: # pragma: no cover\n return 1.0\n\n # Gain updated in-place\n gain /= average_gain\n return average_gain",
"def _amp_ ( self , x ) :\n v = self.amplitude ( x )\n #\n return complex( v.real () , v.imag () )",
"def velocity_p_gain(self, value):\n self._write(MX_VELOCITY_P_GAIN, value)",
"def _gain(self):\n return None",
"def set_gain(self, channel_data, gain):\n value = np.asarray(gain, dtype=float)\n if isinstance(value, units.Quantity): # pragma: no cover\n # not normally reachable after np.asarray call\n if value.unit == units.dimensionless_unscaled:\n value = value.value\n\n if value.size != channel_data.size:\n raise ValueError(\"Gain size does not match channel size.\")\n if not hasattr(channel_data, self.field):\n raise ValueError(f\"{channel_data} does not have a \"\n f\"{self.field} field.\")\n setattr(channel_data, self.field, value)",
"def calc_q_gain(Tfl, Tabs, q_rad_Whperm2, DT, Tin, Tout, aperture_area_m2, c1, c2, Mfl, delts, Cp_waterglycol, C_eff, Te):\n\n xgain = 1\n xgainmax = 100\n exit = False\n while exit == False:\n qgain_Whperm2 = q_rad_Whperm2 - c1 * (DT[1]) - c2 * abs(DT[1]) * DT[1] # heat production from solar collector, eq.(5)\n\n if Mfl > 0:\n Tout = ((Mfl * Cp_waterglycol * Tin) / aperture_area_m2 - (C_eff * Tin) / (2 * delts) + qgain_Whperm2 + (\n C_eff * Tfl[1]) / delts) / (Mfl * Cp_waterglycol / aperture_area_m2 + C_eff / (2 * delts)) # eq.(6)\n Tfl[2] = (Tin + Tout) / 2\n DT[2] = Tfl[2] - Te\n qdiff = Mfl / aperture_area_m2 * Cp_waterglycol * 2 * (DT[2] - DT[1])\n else:\n Tout = Tfl[1] + (qgain_Whperm2 * delts) / C_eff # eq.(8)\n Tfl[2] = Tout\n DT[2] = Tfl[2] - Te\n qdiff = 5 * (DT[2] - DT[1])\n\n if abs(qdiff < 0.1):\n DT[1] = DT[2]\n exit = True\n else:\n if xgain > 40:\n DT[1] = (DT[1] + DT[2]) / 2\n if xgain == xgainmax:\n exit = True\n else:\n DT[1] = DT[2]\n xgain += 1\n\n # FIXME: redundant...\n # qout = Mfl * Cp_waterglycol * (Tout - Tin) / aperture_area\n # qmtherm = (Tfl[2] - Tfl[1]) * C_eff / delts\n # qbal = qgain - qout - qmtherm\n # if abs(qbal) > 1:\n # qbal = qbal\n return qgain_Whperm2",
"def feedforward_1st_gain(self):\n return self._read(MX_FEEDFORWARD_1ST_GAIN)",
"def feedback_gain(self):\n return self._feedback_gain",
"def velocity_i_gain(self):\n return self._read(MX_VELOCITY_I_GAIN)"
] |
[
"0.68036616",
"0.67524546",
"0.660555",
"0.6549612",
"0.643078",
"0.6416478",
"0.6324724",
"0.627394",
"0.6268708",
"0.61913145",
"0.6164041",
"0.6145486",
"0.6114966",
"0.611201",
"0.6110434",
"0.610212",
"0.60916513",
"0.6076527",
"0.60728437",
"0.5995432",
"0.596725",
"0.59490263",
"0.59475315",
"0.593421",
"0.5871074",
"0.5852642",
"0.5842004",
"0.58106816",
"0.5802426",
"0.5797872"
] |
0.70014805
|
0
|
Adds a validation schema for a required object property.
|
def with_required_property(self, name, typ, *rules):
        self.properties = self.properties if self.properties is not None else []
schema = PropertySchema(name, typ)
schema.rules = rules
schema.make_required()
return self.with_property(schema)
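A hedged usage sketch of the builder call above; the ObjectSchema class name and the chaining behaviour of with_property are assumptions, not confirmed API:

# Hypothetical usage; assumes with_property returns the builder so calls chain.
schema = (
    ObjectSchema()
    .with_required_property("id", str)
    .with_required_property("age", int)
)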
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate_property_schema(self, schema):\n json_schema_path = os.path.join(_ROOT,\n 'data',\n 'property_json_schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def test_jsonschema_validation_with_schema_object(self):\n check_value = {\"a\": 1}\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\n \"type\": \"integer\",\n \"const\": 1\n }\n }\n }\n jsonschema_validation(check_value, schema)",
"def test_swagger_field_is_required():\n raw_schema = RawSchemaFactory()\n raw_schema.pop('swagger', None)\n\n assert 'swagger' not in raw_schema\n\n with pytest.raises(ValidationError) as err:\n swagger_schema_validator(raw_schema)\n\n assert_message_in_errors(\n MESSAGES['required']['required'],\n err.value.detail,\n 'required.swagger',\n )",
"def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')",
"def test_extra_with_required():\n schema = Schema({Required('toaster'): str, Extra: object})\n r = schema({'toaster': 'blue', 'another_valid_key': 'another_valid_value'})\n assert_equal(\n r, {'toaster': 'blue', 'another_valid_key': 'another_valid_value'})",
"def _validate_object(\n instance: typing.Dict[str, typing.Any],\n schema: typing.Dict[str, typing.Any],\n path: typing.List[str],\n allow_disabled_languages: bool = False,\n strict: bool = False\n) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n errors = []\n\n properties_with_unfulfilled_conditions = []\n for property_name, property_schema in schema['properties'].items():\n if not are_conditions_fulfilled(property_schema.get('conditions'), instance):\n properties_with_unfulfilled_conditions.append(property_name)\n if property_name in instance or (property_name == 'name' and not path):\n errors.append(ValidationError('conditions for property \"{}\" not fulfilled'.format(property_name), path + [property_name]))\n\n if 'required' in schema:\n for property_name in schema['required']:\n if property_name in properties_with_unfulfilled_conditions:\n # this property must not be included, as its conditions are not fulfilled\n continue\n if property_name not in instance:\n errors.append(ValidationError('missing required property \"{}\"'.format(property_name), path + [property_name]))\n for property_name, property_value in instance.items():\n try:\n if property_name not in schema['properties']:\n raise ValidationError('unknown property \"{}\"'.format(property_name), path + [property_name])\n else:\n validate(property_value, schema['properties'][property_name], path + [property_name], allow_disabled_languages=allow_disabled_languages, strict=strict)\n except ValidationError as e:\n errors.append(e)\n if len(errors) == 1:\n raise errors[0]\n elif len(errors) > 1:\n raise ValidationMultiError(errors)",
"def validate_json_schema(self, json_schema):\n cls = validators.validator_for(json_schema)\n cls.check_schema(json_schema)",
"def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)",
"def validate_schema(*,\n jsonschema: dict,\n data: Any\n ) -> None:\n # from typing import TYPE_CHECKING\n # if not TYPE_CHECKING:\n # otherwise mypy raises error\n # return\n\n _errors: defaultdict = defaultdict(list)\n\n def set_nested_item(data_dict, path, key, val): # type: ignore\n for _key in path:\n data_dict.setdefault(_key, {})\n data_dict = data_dict[_key]\n\n data_dict.setdefault(key, list())\n data_dict[key].append(val)\n\n for err in Draft7Validator(schema=jsonschema).iter_errors(instance=data):\n path = err.schema_path\n\n if \"properties\" in path:\n path.remove(\"properties\")\n key = path.popleft()\n\n if \"required\" in path or key == \"required\":\n key = err.message.split(\"'\")[1]\n elif err.relative_path:\n key = err.relative_path.pop()\n\n set_nested_item(_errors, err.relative_path, key, err.message)\n\n if _errors:\n raise app_exceptions.ValidateDataError(dict(_errors))",
"def validation_required(self, validation_required):\n self._validation_required = validation_required",
"def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid",
"def test_required():\n schema = Schema({Required('q'): 1})\n # Can't use nose's raises (because we need to access the raised\n # exception, nor assert_raises which fails with Python 2.6.9.\n try:\n schema({})\n except Invalid as e:\n assert_equal(str(e), \"required key not provided @ data['q']\")\n else:\n assert False, \"Did not raise Invalid\"",
"def test_schema_strict():\n path = os.path.join(extensiondir, 'release-schema.json')\n if os.path.isfile(path):\n with open(path) as f:\n data = json.load(f)\n\n original = deepcopy(data)\n add_validation_properties(data)\n\n assert data == original, f'{path} is missing validation properties, run: ocdskit schema-strict {path}'",
"def validate_validation_field(self, schema):\n if \"$validation\" in schema:\n if 'properties' not in schema[\"$validation\"]:\n raise KeyError('properties not in $validation field')\n else:\n # validate the json schema\n self.validate_json_schema(schema[\"$validation\"])\n properties = schema[\"$validation\"][\"properties\"].keys()\n # find all parents of the class\n paths = nx.all_simple_paths(self.schema_nx,\n source='http://schema.org/Thing',\n target=schema[\"@id\"])\n parent_classes = set()\n for _path in paths:\n for _item in _path:\n parent_classes.add(_item)\n # loop through all properties and check if the value of\n # domainIncludes belong to one of the parent_classes\n for _property in properties:\n matched = False\n for _record in self.all_schemas:\n if _record[\"rdfs:label\"] == _property:\n domainincludes_value = dict2list(_record[\"http://schema.org/domainIncludes\"])\n for record in domainincludes_value:\n if record[\"@id\"] in parent_classes:\n matched = True\n if not matched:\n raise ValueError('field {} in $validation is not correctly documented'.format(_property))\n else:\n pass",
"def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)",
"def _validate_required_field(field_name, field_value, prefix='', **kwargs):\n if prefix:\n field_name = prefix + '__' + field_name\n\n if not field_value:\n raise AssertionError(\n \"Missing required Job Definition field: {0}\".format(field_name)\n )",
"def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False",
"def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))",
"def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)",
"def validate(obj, schema=PROCESSING_SERVER_CONFIG_SCHEMA):\n return JsonValidator.validate(obj, schema)",
"def test_validate_json(self):\n # Lifted directly from the python-jsonschema docs\n test_schema = {\"type\": \"object\",\n \"properties\": {\n \"price\": {\"type\": \"number\"},\n \"name\": {\"type\": \"string\"},\n }}\n valid = {\"name\": \"Eggs\", \"price\": 34.99}\n invalid = {\"name\": \"Eggs\", \"price\": \"Invalid\"}\n\n test_model = RecordSchema(schema=test_schema)\n\n self.assertIsNone(test_model.validate_json(valid))\n\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n test_model.validate_json(invalid)",
"def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))",
"def check_for_schema(cls):\n if not hasattr(cls, \"Schema\") or cls.Schema is None:\n raise PillowtalkError(\"Schema not found. @add_schema may not have been added to class definition.\")",
"def schema_validation(schema):\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n data = {}\n if request.method in ['POST', 'PATCH', 'PUT']:\n data = request.get_json(force=True)\n elif request.method in ['GET', 'DELETE']:\n data = request.args.to_dict()\n\n v = Validator(schema)\n v.allow_unknown = True\n if v.validate(data):\n return function(*args, **kwargs)\n else:\n return jsonify({'errors': v.errors}), 400\n\n return wrapper\n return decorator",
"def validate(self, path, schema, value, results):\n name = path if path != None else \"value\"\n found = []\n\n for prop in self._properties:\n property_value = ObjectReader.get_property(value, prop)\n if property_value != None:\n found.append(prop)\n\n if len(found) == 0:\n results.append(\n ValidationResult(\n path,\n ValidationResultType.Error,\n \"VALUE_NULL\",\n name + \" must have at least one property from \" + str(self._properties),\n self._properties,\n None\n )\n )",
"def add_required_properties(self, p: str):\n # TODO: Deprecate\n for k in p.keys():\n try:\n self._properties[k].set_required(True)\n except KeyError:\n self._properties.define_property(name=k, supported=False, required=True)",
"def validate_schema(schema):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n try:\n validate(request.json, schema)\n except:\n return bad_request()\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator",
"def with_property(self, schema):\n self.properties = self.properties if self.properties != None else []\n self.properties.append(schema)\n return self",
"def _validate_property(mapping: Mapping[str, Any], ref: str,\n types: Set[str]) -> Optional[SchemaError]:\n return _validate_type_recursively(\n mapping=mapping, ref=ref, types=types, depth=0)"
] |
[
"0.6920623",
"0.6739063",
"0.6722102",
"0.6159456",
"0.6094306",
"0.6013235",
"0.597917",
"0.5977781",
"0.5967081",
"0.5964867",
"0.59267306",
"0.59114385",
"0.5868049",
"0.5863857",
"0.58553565",
"0.5825143",
"0.58220434",
"0.5815082",
"0.58138317",
"0.5802128",
"0.57447124",
"0.5735047",
"0.5729248",
"0.5686122",
"0.56623906",
"0.5648789",
"0.55987084",
"0.55469614",
"0.55428636",
"0.55408204"
] |
0.6751664
|
1
|
Convert address to ip and prefix.
|
def address_to_ip_prefix(address):
return address.split('/')
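
A minimal usage sketch, assuming addresses arrive in CIDR notation such as '10.0.0.1/24'; a bare address without a '/' yields a single-element list, so two-value unpacking would fail in that case:

# split a CIDR string into its address and prefix parts
ip, prefix = address_to_ip_prefix('10.0.0.1/24')
assert ip == '10.0.0.1' and prefix == '24'

# a bare address has no prefix part, so only one element comes back
parts = address_to_ip_prefix('192.168.1.5')
assert parts == ['192.168.1.5']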
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def format_ip(addr):\n return \\\n str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))",
"def morseToPubIP(self, address):\n ip_from_morse = address[0];\n port_from_morse = address[1];\n \n ip_from_str = \"0.0.\";\n ip_from_str += str(ord(ip_from_morse[0])) + \".\" + str(ord(ip_from_morse[1]));\n port_from_str = str(ord(port_from_morse));\n \n return ip_from_str, port_from_str;",
"def parse_addr(addr):\n\ttry:\n\t\tnew_addr = socket.inet_aton(addr)\n\texcept:\n\t\taddr = socket.gethostbyname(addr)\n\t\ttry:\n\t\t\tnew_addr = socket.inet_aton(addr)\n\t\texcept ValueError:\n\t\t\tlogging.exception('Error:')\n\t\t\traise ValueError, 'Invalid address: %s' % addr\n\n\treturn new_addr",
"def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'",
"def defangIPaddr(address):\n address_as_list = list(address)\n length_of_address = len(address_as_list)\n for i in range(length_of_address):\n if address_as_list[i] == \".\":\n address_as_list[i] = \"[.]\"\n return \"\".join(address_as_list)",
"def format_url_address(address):\n try:\n addr = netaddr.IPAddress(address)\n if addr.version == constants.IPV6_FAMILY:\n return \"[%s]\" % address\n else:\n return str(address)\n except netaddr.AddrFormatError:\n return address",
"def convert_from_address(cls, x_ns):\n if x_ns % 4 != 0:\n raise ValueError(\"From address must be divisible by 4 ({} given)\".format(x_ns))\n if not(cls.JT_MIN_FROM_ADDR <= x_ns // 4 <= cls.JT_MAX_FROM_ADDR):\n raise ValueError(\"From address must be {}<x<{}, ({} given)\".format(\n cls.JT_MIN_FROM_ADDR*4, cls.JT_MAX_FROM_ADDR*4, x_ns\n ))\n return x_ns // 4 + cls.JT_FROM_ADDR_OFFSET",
"def convert_address(self, addr_obj):\n return addr_obj.mailbox.decode() + '@' + addr_obj.host.decode()",
"def normalize_address(address: str):\n return Web3.toChecksumAddress(address.lower())",
"def generateIPAddress(base, subnet, host, mask):\n\n addr = str(base)+'.'+str(subnet)+'.' + str(host)\n if mask != None:\n addr = addr + '/' + str(mask)\n return addr",
"def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])",
"def AioNodeToIpAddressString(node):\n ip = aio_node_to_ip_address.AioNodeToIpAddress(node)\n return '%d.%d.%d.%d' % (ip.a, ip.b, ip.c, ip.d)",
"def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])",
"def ip_address(addr):\n parts = addr.split('.')\n if len(parts) != 4:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n for part in parts:\n try:\n num = int(part)\n if num < 0 or num > 255:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n except ValueError:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n return addr",
"def new_ip(address):\n return ipaddress.IPv4Address(address)",
"def safe_addr(ip_addr):\n return '.'.join(ip_addr.split('.')[:2] + ['xxx', 'xxx'])",
"def _parse_addr(self, addr_str):\n addr = [int(i) for i in addr_str.split('.')]\n if len(addr) != 4 or any([i < 0 for i in addr]) or any([i > 255 for i in addr]):\n raise ValueError('Invalid IP address: %s' % addr_str)\n val = 0\n for i in addr:\n val *= 255\n val += i\n return val",
"def _get_addr(self, protocol, address):\n if address:\n return address[0]\n else:\n return protocol.transport.getPeer().host",
"def normalize_address(addr: str) -> str:\n # bitcoin hrps\n hrps = {net[\"bech32\"] + \"1\" for net in NETWORKS.values()}\n # liquid hrps\n # Blech32 addresses are intended for confidential assets\n hrps = hrps.union(\n {net[\"blech32\"] + \"1\" for net in NETWORKS.values() if \"blech32\" in net}\n )\n if addr.lower().startswith(tuple(hrps)):\n return addr.lower()\n return addr",
"def convert_to_address(cls, x_ns):\n if x_ns % 4 != 0:\n raise ValueError(\"To address must be divisible by 4 ({} given)\".format(x_ns))\n if not(cls.JT_MIN_TO_ADDR <= x_ns // 4 <= cls.JT_MAX_TO_ADDR):\n raise ValueError(\"To address must be {}<x<{}, ({} given)\".format(\n cls.JT_MIN_TO_ADDR*4, cls.JT_MAX_TO_ADDR*4, x_ns\n ))\n return x_ns // 4",
"def _format_address(self,address):\n address = int(address)\n if address >=1 and address <= 250:\n address = hex(int(address)) #Convert address if between 0-250.\n if len(address) == 3: #Take the last char and append a zero.\n address = str(address[-1]).rjust(2,'0')\n elif len(address) == 4:\n address = address[-2:] #Take the last two char. \n return address\n elif address == 0:\n address = '00'\n return address\n else:\n return False",
"def iplst_to_ipaddr(iplst):\n return \".\".join([str(o) for o in iplst])",
"def toAddr(self, addressString: unicode) -> ghidra.program.model.address.Address:\n ...",
"def ipwrap(address: Any) -> str:\n try:\n if not isinstance(address, int):\n ipaddress.IPv6Address(address)\n return f\"[{address}]\"\n except ValueError:\n pass\n\n return str(address)",
"def IP(address):\n for klass in (V4Address, V6Address):\n try:\n ip = klass(address)\n except ValueError, e:\n error = e\n else:\n return ip\n\n raise error",
"def split_address(address):\n if '://' in address:\n protocol, address = address.split('://')\n else:\n protocol = 'http'\n\n if ':' in address:\n address, port = address.split(':')\n else:\n port = 443 if protocol == 'https' else 8000\n\n return protocol, address, int(port)",
"def format_ip(ip_addr):\n ip = ip_addr.split(\"/\")[0]\n cidr = ip_network(ip_addr, strict=False).prefixlen\n result = \"{}/{}\".format(ip, cidr)\n log.debug(\"Converted '%s' to CIDR notation '%s'.\", ip_addr, result)\n return result",
"def _format_ip(val: Any, input_format: str, output_format: str, errors: str) -> Any:\n # pylint: disable=too-many-branches\n address, status = _check_ip(val, input_format, True)\n\n if status == \"null\":\n return np.nan, 0\n if status == \"unknown\":\n if errors == \"raise\":\n raise ValueError(f\"Unable to parse value {val}\")\n return val if errors == \"ignore\" else np.nan, 1\n\n # compressed version without the leading zeros (for ipv6 double colon for zeros)\n if output_format == \"compressed\":\n result = address.compressed\n\n # Converts the integer repesentation of the ip address to its hexadecimal\n # form. Does not contain any dots or colons.\n elif output_format == \"hexa\":\n result = hex(int(address))\n\n # converts the ip address to its binary representation\n elif output_format == \"binary\":\n if address.version == 4:\n result = \"{0:032b}\".format(int(address))\n else:\n result = \"{0:0128b}\".format(int(address))\n\n # converts to integer format\n elif output_format == \"integer\":\n result = int(address)\n\n # converts to packed binary format (big-endian)\n elif output_format == \"packed\":\n result = address.packed\n\n # convert to full representation\n else:\n dlm = \".\" if address.version == 4 else \":\" # delimiter\n result = dlm.join(f\"{'0' * (4 - len(x))}{x}\" for x in address.exploded.split(dlm))\n\n return result, 2 if result != val else 3",
"def format_address(value):\n if type(value) in (tuple, list):\n return ', '.join([format_address(v) for v in value])\n name, addr = parseaddr(value)\n return formataddr((encode_header(name), addr.encode('ascii')))",
"def _make_addr_resolve(self, addr: 'str | bytes', htype: 'int') -> 'bytes':\n _addr = addr.encode() if isinstance(addr, str) else addr\n\n if htype == Enum_Hardware.Ethernet:\n if PAT_MAC_ADDR.fullmatch(_addr) is not None:\n return _addr.replace(b':', b'').replace(b'-', b'')\n raise ProtocolError(f'Invalid MAC address: {addr!r}')\n return _addr"
] |
[
"0.682455",
"0.67066574",
"0.669897",
"0.65752935",
"0.6555434",
"0.6473585",
"0.6432157",
"0.63691694",
"0.6363774",
"0.6297581",
"0.6280675",
"0.6266919",
"0.6254692",
"0.62492925",
"0.62355924",
"0.6225235",
"0.6209515",
"0.6200921",
"0.61859864",
"0.61740637",
"0.61513656",
"0.6143373",
"0.61421365",
"0.61245126",
"0.6118698",
"0.6107221",
"0.60759264",
"0.60738164",
"0.6072522",
"0.6058804"
] |
0.7921652
|
0
|
Set ip addresses of all interfaces.
|
def set_ip_adresses(self):
    # split each interface's 'address' entry into ip and prefix parts
    for interface, details in self.interfaces.items():
        ip = None
        for k, v in details.items():
            if k == 'address':
                ip, prefix = address_to_ip_prefix(v)
                self.interfaces[interface]['ip_address'] = ip
                self.interfaces[interface]['ip_prefix'] = prefix
                break
        if ip is None:
            # no 'address' entry on this interface, nothing to record
            continue
        if interface == 'wan':
            self.ip_address = ip
        if interface == 'ha_sync':
            self.ha_sync_ip_address = ip
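
A small illustration of the interface dictionary this method expects, assuming address_to_ip_prefix and set_ip_adresses from the snippets above live in the same module; the _Node class and the 'wan'/'ha_sync' entries are made up for the example:

class _Node:
    set_ip_adresses = set_ip_adresses      # reuse the function above as a method

node = _Node()
node.interfaces = {
    'wan': {'address': '203.0.113.10/24'},
    'ha_sync': {'address': '10.0.0.2/30'},
}
node.set_ip_adresses()
print(node.interfaces['wan']['ip_address'], node.interfaces['wan']['ip_prefix'])  # 203.0.113.10 24
print(node.ip_address, node.ha_sync_ip_address)                                   # 203.0.113.10 10.0.0.2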
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ips(self, ips):\n\n self._ips = ips",
"def set_all(self, host_names, ip_address):\n for host_name in host_names:\n self.set_one(host_name, ip_address)",
"def ipv4_interface_setup(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current interfaces with IP addresses\n current_ints = VPPUtil.get_int_ip(node)\n if current_ints != {}:\n print(\"\\nThese are the current interfaces with IP addresses:\")\n for items in sorted(current_ints.items()):\n name = items[0]\n value = items[1]\n if \"address\" not in value:\n address = \"Not Set\"\n else:\n address = value[\"address\"]\n print(\"{:30} {:20} {:10}\".format(name, address, value[\"state\"]))\n question = \"\\nWould you like to keep this configuration \" \"[Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n else:\n print(\"\\nThere are currently no interfaces with IP \" \"addresses.\")\n\n # Create a script that add the ip addresses to the interfaces\n # and brings the interfaces up\n ints_with_addrs = self._ipv4_interface_setup_questions(node)\n content = \"\"\n for ints in ints_with_addrs:\n name = ints[\"name\"]\n addr = ints[\"addr\"]\n setipstr = \"set int ip address {} {}\\n\".format(name, addr)\n setintupstr = \"set int state {} up\\n\".format(name)\n content += setipstr + setintupstr\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/set_int_ipv4_and_up\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))",
"def ip_addresses_list(self, ip_addresses_list):\n\n self._ip_addresses_list = ip_addresses_list",
"def update_interfaces(self, interfaces):\n for i in interfaces:\n self.update_interface(i)",
"def _update_addresses(self, real_ifname, interface, old_interface):\n\n def _gen_cmd(cmd, address):\n \"\"\"\n Generates an `ip addr (add|del) <cidr> dev <ifname>` command.\n \"\"\"\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args\n\n add = functools.partial(_gen_cmd, 'add')\n delete = functools.partial(_gen_cmd, 'del')\n mutator = lambda a: (a.ip, a.prefixlen)\n\n self._update_set(real_ifname, interface, old_interface,\n 'all_addresses', add, delete, mutator)",
"def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()",
"def test_ipam_ip_addresses_update(self):\n pass",
"def ifaces(self, ifaces):\n \n self._ifaces = ifaces",
"def setIP(self, idx, ip):\n self.ip[int(idx)-1] = ip",
"def delete_ipaddr(self, ifaces=None):\n if not ifaces:\n ifaces = self.iface_ip\n for iface in ifaces:\n self._lhost.ui.modify_ports([iface], ipAddr=None)\n self.iface_ip = []",
"def addIpAddressesToIpSet(set_list_name, ip_addresses):\n for ip_address in ip_addresses:\n result = subprocess.Popen(\"/usr/sbin/ipset -A %s %s 2>&1\" % (set_list_name, ip_address), shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not add ip address %s to ipset %s. Error: %s.\" % (ip_address, set_list_name, result))",
"def set_ip(self, ip: str, host_addr: str) -> None:\n self.config[\"linkIp\"] = ip\n self.config[\"ngapIp\"] = ip\n self.config[\"gtpIp\"] = ip",
"def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces",
"def update_interfaces_config(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n all_devices = devices[\"other_devices\"]\n all_devices.update(devices[\"dpdk_devices\"])\n all_devices.update(devices[\"kernel_devices\"])\n\n current_ifcs = {}\n interfaces = {}\n if \"interfaces\" in node:\n current_ifcs = node[\"interfaces\"]\n if current_ifcs:\n for ifc in current_ifcs.values():\n dvid = ifc[\"pci_address\"]\n if dvid in all_devices:\n VppPCIUtil.vpp_create_interface(\n interfaces, dvid, all_devices[dvid]\n )\n node[\"interfaces\"] = interfaces\n\n self.updateconfig()",
"def setAddress(self, addresses):\n self.addr = addresses\n for i in xrange(len(self.addr)):\n if not ':' in self.addr[i]: \n self.addr[i] += ':' + self.port \n self.addr[i] += '/' + self.service",
"def set_net_addresses(self, hNetAddressesList):\n\t\tcall_sdk_function('PrlVmDevNet_SetNetAddresses', self.handle, conv_handle_arg(hNetAddressesList))",
"def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None",
"def _config_interfaces(self):\n self.interfaces['loopback'] = \"127.0.0.1\"\n self.interfaces['internal'] = \"127.0.0.1\"\n self.interfaces['external'] = \"0.0.0.0\"\n self.interfaces[\"any\"] = \"0.0.0.0\"\n self.interfaces[\"localhost\"] = \"127.0.0.1\"",
"def update_endpoints(self, iface_hosts):\n self._ep_hosts.update(iface_hosts)",
"def set_all_pins(self, ap):\n self.allpins = ap",
"def test_ipam_ip_addresses_list(self):\n pass",
"def setIP( self, intf, ip, prefixLen=8 ):\n ipSub = '%s/%d' % ( ip, prefixLen )\n result = self.cmd( 'ifconfig', intf, ipSub, 'up' )\n self.ips[ intf ] = ip\n return result",
"def __init__(self):\n self.networks = [\n ipaddress.ip_network(address)\n for address in self.addresses\n ]",
"def clear_ipv4_addresses(self, net_interface):\n ip_info = self.get_ipv4_addresses(net_interface)\n\n for address, _ in ip_info:\n self.remove_ipv4_address(net_interface, address)",
"def __call__(self, parser, namespace, values, option_string=None):\n ip_split = values.split(\",\")\n [ip_address(ip) for ip in ip_split]\n setattr(namespace, self.dest, ip_split)",
"def add_ip(self, inf, ip):\n self.interfaces[inf]['ip'] = ip",
"def set_blacklist(self):\n\n for name in self.__ipset:\n if self.verbose:\n print(\"Start create: \" + self.__ipset[name]['ipset-name'])\n\n # create ipset\n self.__process(name, self.__parser.create(name))\n\n if self.verbose:\n print('Done')",
"def _update_ips(self):\n self.ip_others = []\n ips = self.mesh.ipaddr()\n self.rloc16 = self.mesh.rloc()\n for line in ips:\n if line.startswith('fd'):\n # Mesh-Local unicast IPv6\n try:\n addr = int(line.split(':')[-1], 16)\n except Exception:\n continue\n if addr == self.rloc16:\n # found RLOC\n # RLOC IPv6 has x:x:x:x:0:ff:fe00:RLOC16\n self.rloc = line\n elif ':0:ff:fe00:' not in line:\n # found Mesh-Local EID\n self.ip_eid = line\n elif line.startswith('fe80'):\n # Link-Local\n self.ip_link = line\n else:\n self.ip_others.append(line)",
"def add_ips(self, ip_addresses: Iterable[str], **kwargs):\n _, ip_entities = _GEO_LITE.lookup_ip(ip_addr_list=ip_addresses)\n self.add_ip_cluster(ip_entities=ip_entities, **kwargs)"
] |
[
"0.72794324",
"0.661401",
"0.65779203",
"0.6340851",
"0.6324031",
"0.63150054",
"0.61869067",
"0.617959",
"0.61534643",
"0.6090856",
"0.6058395",
"0.600056",
"0.5943",
"0.5909638",
"0.5908889",
"0.5886598",
"0.58724004",
"0.58520234",
"0.58423156",
"0.5806767",
"0.5795594",
"0.57883006",
"0.57666504",
"0.5755564",
"0.57040685",
"0.5697731",
"0.5692529",
"0.5643865",
"0.564056",
"0.5566882"
] |
0.74871445
|
0
|
Retrieve pci addresses for network interfaces.
|
def retrieve_pci_addresses(self):
debug('Retrieve PCI addresses...')
try:
lshw_json = self.run_ssh('lshw -json').stdout
except SSHError:
fatal('Cannot connect to node:', self.ip_address)
lshw = json.loads(lshw_json)
pci_addresses = []
for component in lshw["children"][0]["children"]:
if component["class"] == "bridge":
for subsystem in component["children"]:
if subsystem["class"] == "network":
index = int(subsystem["id"].split(':')[1])
pci_addresses.append((index, subsystem["businfo"]))
pci_addresses = [v.strip('pci@') for k, v in sorted(pci_addresses)]
    # iterate over interfaces and set pci address; zip stops at the shorter sequence
    for interface, pci_address in zip(self.interfaces, pci_addresses):
        self.interfaces[interface]['pci_address'] = pci_address
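
A rough offline illustration of the parsing step against a canned lshw fragment; the JSON structure below is an assumption about typical `lshw -json` output, not captured from a real node:

lshw = {
    "children": [{
        "children": [{
            "class": "bridge",
            "children": [
                {"class": "network", "id": "network:1", "businfo": "pci@0000:00:1f.6"},
                {"class": "network", "id": "network:0", "businfo": "pci@0000:02:00.0"},
            ],
        }],
    }],
}

pci_addresses = []
for component in lshw["children"][0]["children"]:
    if component["class"] == "bridge":
        for subsystem in component["children"]:
            if subsystem["class"] == "network":
                index = int(subsystem["id"].split(':')[1])
                pci_addresses.append((index, subsystem["businfo"]))

# sort by the numeric suffix of the lshw id, then drop the leading 'pci@' characters
print([v.strip('pci@') for k, v in sorted(pci_addresses)])
# ['0000:02:00.0', '0000:00:1f.6']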
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def network_interfaces():\n try:\n command = which('ipadm')\n args = ('show-addr', '-p', '-o', 'STATE,ADDR')\n pattern = r'ok:(\\d+\\.\\d+\\.\\d+\\.\\d+)'\n except CommandMissing:\n # Fall back to old command on old solaris releases.\n command = which('/usr/sbin/ifconfig')\n args = ('-a')\n pattern = r'inet (\\d+\\.\\d+\\.\\d+\\.\\d+)'\n addrs = []\n output = sh(command, *args)\n for line in output:\n match = re.match(pattern, line)\n if match:\n addr = match.group(1)\n if not addr.startswith(\"127.\"):\n addrs.append(addr)\n return addrs",
"def _get_pci_devices(self):\n\n system = self._get_host_details()\n if ('links' in system['Oem']['Hp'] and\n 'PCIDevices' in system['Oem']['Hp']['links']):\n # Get the PCI URI and Settings\n pci_uri = system['Oem']['Hp']['links']['PCIDevices']['href']\n status, headers, pci_device_list = self._rest_get(pci_uri)\n\n if status >= 300:\n msg = self._get_extended_error(pci_device_list)\n raise exception.IloError(msg)\n\n return pci_device_list\n\n else:\n msg = ('links/PCIDevices section in ComputerSystem/Oem/Hp'\n ' does not exist')\n raise exception.IloCommandNotSupportedError(msg)",
"def get_net_addresses(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetNetAddresses', self.handle))",
"def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()",
"def nics() -> List[str]:\n output = []\n\n if sys.platform == 'linux':\n try:\n # first we try ip addr command\n out = subprocess.Popen([\"ip\", \"addr\"],\n stdout=subprocess.PIPE)\n stdout, stderr = out.communicate()\n output = stdout.decode('utf-8').split(\"\\n\")\n except FileNotFoundError:\n # ip addr command failed so lets try ifconfig\n out = subprocess.Popen(\"ifconfig\",\n stdout=subprocess.PIPE)\n stdout, stderr = out.communicate()\n output = stdout.decode('utf-8').split(\"\\n\")\n elif sys.platform == 'darwin':\n return subprocess.call('ifconfig')\n elif sys.platform == 'win32':\n return subprocess.call('ipconfig')\n\n return output",
"def __get_network_interface_info(self):\n iface_list = []\n for i in netifaces.interfaces():\n addr = netifaces.ifaddresses(i)\n\n\n # clumsy way to filter which interfaces get added to list. If these elements raise KeyErrors, we skip\n try:\n iface_list.append( {i : { \n 'ip_address' : addr[netifaces.AF_INET][0]['addr'],\n 'mac' : addr[netifaces.AF_LINK][0]['addr']\n }})\n except KeyError,e:\n\t pass\n self.print_debug(\"Key not found - _get_network_interface_info - {0}\".format(addr))\n\n return iface_list",
"def interfaces():\n network_interfaces = SCNetworkInterfaceCopyAll()\n interfaces = {}\n for interface in network_interfaces:\n interfaces[SCNetworkInterfaceGetLocalizedDisplayName(interface)] = (\n SCNetworkInterfaceGetBSDName(interface),\n SCNetworkInterfaceGetHardwareAddressString(interface),\n )\n return interfaces",
"def pci_address(self):\n return self._pci_address",
"def get_interfaces_ip(self):\n\n interfaces_ip = dict()\n command = '/ip address print terse'\n\n ip_address_output_v4 = self._send_command(command)\n\n ip_addresses = parse_terse_output(ip_address_output_v4)\n\n for ip_address in ip_addresses:\n interface = ip_address.get('interface')\n address, mask = ip_address.get('address').split('/')\n\n interfaces_ip.setdefault(interface, {}) \\\n .setdefault('ipv4', {}) \\\n .setdefault(cast_ip(address), {}) \\\n .setdefault('prefix_length', int(mask))\n\n return interfaces_ip",
"def get_net_addresses(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetNetAddresses', self.handle))",
"def getmacaddrs():\n # Unpack just for the sake of being meaningful.\n ifaddrs, sockaddr_dl, sockaddr = PLATFORM_LOOKUP[PLATFORM]\n ptr = c_void_p(None)\n result = LIBC.getifaddrs(pointer(ptr))\n if result != 0:\n return {}\n ifa = ifaddrs.from_address(ptr.value)\n result = {}\n\n while True:\n name = ifa.ifa_name\n if name not in result:\n result[name] = []\n # Some interface (such as a TUN virtual network) doesn't give us\n # ifa_addr at all and we can usually skip them because they're hardly\n # relevant for our usage case.\n if ifa.ifa_addr:\n sa = sockaddr.from_address(ifa.ifa_addr)\n if sa.sa_family == AF_LINK:\n si = sockaddr_dl.from_address(ifa.ifa_addr)\n addr = \"%s\" % si\n if addr:\n result[name].append(addr)\n if ifa.ifa_next:\n ifa = ifaddrs.from_address(ifa.ifa_next)\n else:\n break\n\n LIBC.freeifaddrs(ptr)\n return result",
"def list():\n\n\treturn netifaces.interfaces()",
"def net_if_addrs():\n ret = []\n for items in cext.net_if_addrs():\n items = list(items)\n items[0] = py2_strencode(items[0])\n ret.append(items)\n return ret",
"def get_net_interfaces():\n import netifaces\n return netifaces.interfaces()",
"def getLocalInterfaces():\n SIOCGIFCONF = 0x8912\n MAXBYTES = 8096\n \n var1 = 32\n var2 = 32\n \n sock = socket(AF_INET, SOCK_DGRAM)\n names = array('B', '\\0' * MAXBYTES)\n outbytes = unpack('iL', ioctl(sock.fileno(), SIOCGIFCONF, pack('iL', MAXBYTES, names.buffer_info()[0]) ))[0]\n \n namestr = names.tostring()\n \n return [(namestr[i:i+var1].split('\\0', 1)[0], inet_ntoa(namestr[i+20:i+24])) for i in xrange(0, outbytes, var2)]",
"def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:\n return pulumi.get(self, \"network_interfaces\")",
"def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips",
"def get_all_interfaces():\n global all_interfaces\n if all_interfaces:\n return all_interfaces\n\n f = open('/proc/net/dev','r')\n ifacelist = f.read().split('\\n')\n f.close()\n\n # remove 2 lines header\n ifacelist.pop(0)\n ifacelist.pop(0)\n\n all_interfaces = {}\n # loop to check each line\n for line in ifacelist:\n\n ifacedata = line.replace(' ','').split(':')\n\n # check the data have 2 elements\n if len(ifacedata) == 2:\n all_interfaces[ifacedata[0]] = get_interface_ip(ifacedata[0])\n\n return all_interfaces",
"def fact():\n result = []\n\n interfaces = [SCNetworkInterfaceGetBSDName(i) for i in SCNetworkInterfaceCopyAll()]\n\n for i in interfaces:\n try:\n active = subprocess.check_output(\n [\"/usr/sbin/ipconfig\", \"getifaddr\", i]\n ).strip()\n if active:\n result.append(i)\n except subprocess.CalledProcessError:\n continue\n\n return {factoid: result}",
"def all_interfaces():\n max_possible = 128 # arbitrary. raise if needed.\n number_of_bytes = max_possible * 32\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n names = array.array('B', '\\0' * number_of_bytes)\n outbytes = struct.unpack('iL', fcntl.ioctl(\n s.fileno(),\n 0x8912, # SIOCGIFCONF\n struct.pack('iL', number_of_bytes, names.buffer_info()[0])\n ))[0]\n namestr = names.tostring()\n interfaces = {}\n\n for i in range(0, outbytes, 40):\n name = namestr[i:i+16].split('\\0', 1)[0]\n ip = namestr[i+20:i+24]\n interfaces[name] = format_ip(ip)\n return interfaces",
"def ipv6_addresses(self) -> Dict[str, List[IPv6Address]]:\n log.debug(\"Host %s: ipv6 addresses of the devices interfaces %s.\", self.host, self._get_ipv6_addresses(\"self\"))\n return self._get_ipv6_addresses(\"self\")",
"def find_nic():\n result = subprocess.run([\"iw\", \"dev\"], capture_output=True).stdout.decode()\n network_interface_controllers = wlan_code.findall(result)\n return network_interface_controllers",
"def test_get_pci_device_list(self):\n pass",
"def get_ipv4_addresses(self, net_interface):\n results = self._runner.run('ip addr show dev %s' % net_interface)\n lines = results.stdout.splitlines()\n\n # Example stdout:\n # 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000\n # link/ether 48:0f:cf:3c:9d:89 brd ff:ff:ff:ff:ff:ff\n # inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n # valid_lft forever preferred_lft forever\n # inet6 2620:0:1000:1500:a968:a776:2d80:a8b3/64 scope global temporary dynamic\n # valid_lft 599919sec preferred_lft 80919sec\n\n for line in lines:\n line = line.strip()\n match = re.search('inet (?P<address>[^\\s]*) brd (?P<bcast>[^\\s]*)',\n line)\n if match:\n d = match.groupdict()\n address = ipaddress.IPv4Interface(d['address'])\n bcast = ipaddress.IPv4Address(d['bcast'])\n yield (address, bcast)\n\n match = re.search('inet (?P<address>[^\\s]*)', line)\n if match:\n d = match.groupdict()\n address = ipaddress.IPv4Interface(d['address'])\n yield (address, None)",
"def app_network_interface_address_list(self, interface_name=\"\", **kwargs):\n data = {\"iface\": interface_name}\n return self._post(\n _name=APINames.Application,\n _method=\"networkInterfaceAddressList\",\n data=data,\n response_class=NetworkInterfaceAddressList,\n **kwargs\n )",
"def get_host_interfaces(self, context, host_uuid):\n result = {}\n interfaces = self._get_cgtsclient().iinterface.list(host_uuid)\n for interface in interfaces:\n if interface.networktype != \"data\":\n continue\n providernets = interface.providernetworks\n result[interface.uuid] = {'uuid': interface.uuid,\n 'mtu': interface.imtu,\n 'vlans': '',\n 'network_type': interface.networktype,\n 'providernets': providernets}\n return result",
"def possible_mac_addresses(interface):\n\n mac_addrs = []\n\n # In case of VLANs, just grab the parent interface\n if interface.interface_type == 'vlan':\n interface = interface.parent\n\n # Bonding/bridge: append the MACs of the physical interfaces\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n for slave in interface.all_slaves():\n if slave.mac and (slave.interface_type != \"public\" or slave.bootable):\n mac_addrs.append(slave.mac)\n\n # Handle physical interfaces, and bonding with a dedicated MAC\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n if interface.mac and (interface.interface_type != \"public\" or interface.bootable):\n mac_addrs.append(interface.mac)\n\n return mac_addrs",
"def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]]:\n return pulumi.get(self, \"network_interfaces\")",
"def get_port_interfaces(self, oid):\n path = '/servers/%s/os-interface' % oid\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('List port interfaces for server %s: %s' % \n (oid, truncate(res)))\n nets = res[0]['interfaceAttachments']\n for item in nets:\n item[u'name'] = None\n return nets",
"def ip_addresses(self) -> pulumi.Output[Sequence['outputs.IpMappingResponse']]:\n return pulumi.get(self, \"ip_addresses\")"
] |
[
"0.6476282",
"0.63909584",
"0.6255508",
"0.6090838",
"0.6077904",
"0.6034055",
"0.60326415",
"0.5983117",
"0.5981854",
"0.5968475",
"0.5956649",
"0.5947739",
"0.593454",
"0.59312534",
"0.590775",
"0.5905652",
"0.5843624",
"0.5813561",
"0.58115387",
"0.57801414",
"0.57743907",
"0.57644457",
"0.5753107",
"0.5735497",
"0.5724121",
"0.56946164",
"0.56818044",
"0.5654854",
"0.56436825",
"0.56357884"
] |
0.76167774
|
0
|
Generates a slope raster from the input DEM raster.
|
def generate_slope_raster(in_path, out_path):
cmd = "gdaldem slope -alg ZevenbergenThorne {} {}".format(in_path, out_path)
os.system(cmd)
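
For contexts where shell interpolation of paths is a concern, the same gdaldem invocation can be expressed with an explicit argument list; this is a sketch assuming the gdaldem binary is on PATH, not part of the original snippet:

import subprocess

def generate_slope_raster_subprocess(in_path, out_path):
    # same gdaldem slope call as above, without going through a shell string
    subprocess.run(
        ["gdaldem", "slope", "-alg", "ZevenbergenThorne", in_path, out_path],
        check=True,
    )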
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_slope(self):\n\n # assign variables\n slope = 'slope'\n aspect = 'aspect'\n dx = 'dx'\n dy = 'dy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_dx = 'grow_dx'\n grow_dy = 'grow_dy'\n\n # compute slope and partial derivatives\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n dx=dx,\n dy=dy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dx,\n value=grow_dx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dx}={grow_dx}\".format(\n dx=dx,\n grow_dx=grow_dx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dy,\n value=grow_dy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dy}={grow_dy}\".format(\n dy=dy,\n grow_dy=grow_dy),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['grow_slope',\n 'grow_dx',\n 'grow_dy'],\n flags='f')\n\n return slope, dx, dy",
"def Slope(InputFilePath,OutputFilePath): # perform a slope raster onto a DEM and return\r\n try:\r\n print(\"\"\"\r\nProcessing Slope Layer...\r\n \"\"\") \r\n \r\n \r\n arcpy.gp.Slope_sa(InputFilePath, OutputFilePath, \"DEGREE\", \"1\") \r\n print(\"Complete\")\r\n \r\n except Exception, err: # an error occurred (probably in arcGIS)\r\n raise RuntimeError(\"** Error: Slope Failed (\"+str(err)+\")\")",
"def slope(slope:float, offset=0., bounds: tuple[float, float] = None) -> core.Slope:\n return core.Slope(slope, offset, bounds=bounds)",
"def _get_slope(x, y):\n slope = linregress(x, y)\n return slope",
"def _regression_slope_metric(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.slope",
"def fit_slope_1d_residue(X,Y):\n X = np.array(X)\n Y = np.array(Y)\n slope, alpha = fit_slope_1d(X,Y)\n return slope*X + alpha - Y",
"def get_slope(self) -> str:\n return self.query('slope,?')",
"def fit_slope_1d(X,Y):\n Sx = np.sum(X)\n Sy = np.sum(Y)\n Sxx = np.sum(np.power(X,2))\n Sxy = np.sum(X*Y)\n Syy = np.sum(np.power(Y,2)) \n n = len(X)*1.\n slope = (n*Sxy - Sx*Sy)/(n*Sxx-Sx**2)\n alpha = Sy/n - slope*Sx/n\n return slope, alpha",
"def get_slope(x, y, deg=1, err=[]):\n inverse_error = []\n for i in err:\n inv = 1/i\n inverse_error.append(i)\n\n if len(err)>0:\n z = np.polyfit(x, y, deg, w=inverse_error)\n else:\n z = np.polyfit(x, y, deg)\n\n m, b = z\n p = np.poly1d(z)\n\n return m, b, p",
"def slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)",
"def slope_alphas(dm):\n\n # Calculate the alphas for the displacement map. We could have just as easily read this in from the image.\n for x in range(image_size - 1):\n for y in range(image_size - 1):\n h = dm[x, y]\n # Start with the assumption that this is straight sand\n a = 0\n # If we're above the water, then the alpha value will equal the height above the water (* 2), meaning that\n # areas 128 units above water-level will be pure grass\n if h > water_height:\n a = (h - water_height) * 2\n if a > 255:\n a = 255\n\n # Get the angle of the slope here\n slope_angle = math.degrees(dm.get_slope((x, y)))\n\n # Add a sharp decrease in alpha around 45 degrees, so that unclimbable areas are visible\n if slope_angle > 30 and slope_angle < 60:\n a -= (slope_angle - 30) * 7\n elif slope_angle >= 60:\n a -= (slope_angle - 60) * 2 + 210\n\n if a < 0:\n a = 0\n # Finally, we set the actual value to what we've determined\n dm.source_alphas[x, y] = a",
"def slope(x1, y1, x2, y2):\r\n delta_y = y2-y1\r\n delta_x = x2-x1\r\n return delta_y / delta_x",
"def slope_from_origin(self):\n\n return self.y / self.x",
"def calc_slope(self, left, right):\n return (left[1] - right[1]) / (left[0] - right[0])",
"def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])",
"def slope_from_origin(self):\n\n return (self.y / self.x)",
"def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b",
"def fit_slope_with_zero_intercept_residue(X,Y):\n X = np.array(X)\n Y = np.array(Y)\n slope = np.sum(Y*X)/np.sum(np.power(X,2))\n return slope*X - Y",
"def slopemap(inr,insp,dims): \n slope,intercept = np.polyfit(inr,insp, 1)\n slopemap = slope.reshape(dims)\n\n return slopemap",
"def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")",
"def slope_lines(self,image):\r\n img_copy = image.copy()\r\n \r\n left_lines,right_lines=self.makeLeftRightline()\r\n left_line = np.mean(left_lines, axis=0)\r\n right_line = np.mean(right_lines, axis=0)\r\n\r\n poly_vertices = []\r\n order = [0,1,3,2]\r\n\r\n for slope, intercept in [left_line, right_line]:\r\n #getting height of image in y1\r\n rows, cols = image.shape[:2]\r\n y1= int(rows) \r\n #taking y2 upto 68% of y1\r\n y2= int(rows*0.68) \r\n #y=mx +c can be written as x=(y-c)/m\r\n x1=int((y1-intercept)/slope)\r\n x2=int((y2-intercept)/slope)\r\n poly_vertices.append((x1, y1))\r\n poly_vertices.append((x2, y2))\r\n\r\n # DRAWING LINES AND PATH ON THE IMAGE\r\n thickness_of_line=9\r\n color_of_line=[20, 255, 20]\r\n lines=np.array([[[x1,y1,x2,y2]]])\r\n for i in lines:\r\n for x1,y1,x2,y2 in i:\r\n cv2.line(img_copy, (x1, y1), (x2, y2), color_of_line, thickness_of_line)\r\n poly_vertices = [poly_vertices[i] for i in order]\r\n #filling polygon color\r\n cv2.fillPoly(img_copy, pts = np.array([poly_vertices],'int32'), color = (200,20,20))\r\n final_out=cv2.addWeighted(image,0.7,img_copy,0.4,0.)\r\n return final_out",
"def slope_data(self, grptime=2.77504, diff_only=False):\n assert self.ngroups > 1\n # TODO: Very inefficient and does not calculate ERR or DQ arrays.\n output = np.zeros([self.nints, self.rows, self.columns],\n dtype=self.data.dtype)\n \n if diff_only:\n # Quick and dirty estimate which subtracts the last\n # ramp from the first. timediff should never be zero because\n # self.ngroups is forced to be > 1.\n timediff = grptime * (self.ngroups - 1)\n output = (self.data[:,-1,:,:] - self.data[:,0,:,:]) / float(timediff)\n else:\n # Full straight line fit\n timearray = grptime * np.array( list(range(0, self.ngroups)) )\n for intg in range(0, self.nints):\n for row in range(0, self.rows):\n for column in range(0, self.columns):\n # Ouch! Slope calculated one (row,column) at a time.\n # Can the efficiency be improved?\n (slope, ic) = linear_regression( timearray,\n self.data[intg,:,row,column] )\n output[intg,row,column] = slope\n return output",
"def find_slopes(x, y):\n slopes = np.zeros((len(x) - 1))\n for i in range(len(x) - 1):\n # m = (y2 - y1) / (x2 - x1)\n delta_x = x[i + 1] - x[i]\n delta_y = y[i + 1] - y[i]\n slopes[i] = delta_y / delta_x\n return slopes",
"def find_slope(lat1,lon1,lat2,lon2):\n return (lon2-lon1)/(lat2-lat1)",
"def rslope(x,y,window):\n \n import numpy as np\n \n # Check that x and y are the same length\n if len(x) != len(y): \n print (\"Error: x and y must be the same length\")\n return 0 \n \n N = len(x) # Number of points in the dataset\n slopes = np.ones(N) # Make array for slopes\n \n # Pad data with window number of points NaN on either side\n x_padded = np.empty(2*window+N)\n x_padded[0:window] = 0\n x_padded[window:N+window] = x\n x_padded[N+window:2*window+N] = 0\n \n y_padded = np.empty(2*window+N)\n y_padded[0:window] = 0\n y_padded[window:N+window] = y\n y_padded[N+window:2*window+N] = 0\n \n sum_x = np.sum(x_padded[0:2*window+1])\n sum_y = np.sum(y_padded[0:2*window+1])\n sum_x_sq = np.sum(x_padded[0:2*window+1]*x_padded[0:2*window+1])\n sum_xy = np.sum(x_padded[0:2*window+1]*y_padded[0:2*window+1])\n\n n = np.empty(N)\n n[0:window] = np.arange(window+1,2*window+1)\n n[window:N-window] = window*2+1\n n[N-window:N] = np.arange(2*window,window,-1)\n \n slopes[0] = (sum_xy - (sum_x*sum_y/n[0]))/(sum_x_sq - (sum_x*sum_x/n[0]))\n \n for i in range(1,N):\n sum_x = sum_x - x_padded[i-1] + x_padded[2*window+i]\n sum_y = sum_y - y_padded[i-1] + y_padded[2*window+i]\n sum_x_sq = sum_x_sq - x_padded[i-1]*x_padded[i-1] + \\\n x_padded[2*window+i]*x_padded[2*window+i]\n sum_xy = sum_xy - x_padded[i-1]*y_padded[i-1] +\\\n x_padded[2*window+i]*y_padded[2*window+i]\n slopes[i] = (sum_xy - (sum_x*sum_y/n[i]))/(sum_x_sq - (sum_x*sum_x/n[i]))\n return slopes",
"def offset_slope(self):\n foc_um_slope = self.focus_slope * self.pix_size\n offset_slope = 0.5 * foc_um_slope / np.tan(self.convergence_angle)\n return offset_slope",
"def slope(Ser, n):\n slopes = [i*0 for i in range(n-1)]\n for i in range(n, len(Ser)+1):\n y = Ser[i-n:i]\n x = np.array(range(n))\n y_scl = (y - y.min())/ (y.max() - y.min())\n x_scl = (x - x.min())/ (x.max() - x.min())\n x_scl = sm.add_constant(x_scl)\n model = sm.OLS(y_scl, x_scl)\n result= model.fit()\n slopes.append(result.params[-1])\n slope_angl= (np.rad2deg(np.arctan(np.array(slopes))))\n return np.array(slope_angl)",
"def obj_slope(X, Y, lbd, beta):\n n = X.shape[0]\n return np.sum((Y - X@beta)**2)/n + np.sum(lbd * np.sort(abs(beta))[::-1])",
"def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx",
"def linear_slope(self, dim=\"time\", nan_policy=\"none\"):\n return linear_slope(self._obj, dim=dim, nan_policy=nan_policy)"
] |
[
"0.67087334",
"0.6488468",
"0.64282715",
"0.6219489",
"0.61622566",
"0.6151169",
"0.59784114",
"0.5951957",
"0.59396815",
"0.5916147",
"0.5889869",
"0.58412445",
"0.58203536",
"0.57782197",
"0.5762054",
"0.5753399",
"0.57412916",
"0.57289064",
"0.5714653",
"0.57070047",
"0.5694895",
"0.5683525",
"0.5677043",
"0.5659065",
"0.56469125",
"0.5642158",
"0.56332386",
"0.56286174",
"0.56219923",
"0.5619652"
] |
0.76822
|
0
|
Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster. Replaces values in those pixels with the no-data value (-99999.0)
|
def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0):
    ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray())
    # compute statistics over valid cells only, excluding existing no-data pixels
    std = ground_np[ground_np != ignore_value].std()
    mean = ground_np[ground_np != ignore_value].mean()
    threshold_value = mean + 1.5 * std
    # cells well above the mean (roofs, treetops, ...) are marked as no-data
    ground_np[ground_np >= threshold_value] = ignore_value
    save_array_as_geotif(ground_np, ground_dem_path, out_path)
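
The thresholding step in isolation, on a toy numpy array (values are illustrative only; the gdal I/O and save_array_as_geotif parts are left out):

import numpy as np

dem = np.array([-99999.0, 101.0, 102.0, 103.0, 150.0])   # 150.0 is a roof-like outlier
valid = dem[dem != -99999.0]
threshold = valid.mean() + 1.5 * valid.std()
dem[dem >= threshold] = -99999.0
print(dem)   # [-99999.    101.    102.    103. -99999.] -- the outlier becomes no-data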
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)",
"def fix_nan(image, replace=0.):\n h = pyfits.open(image, mode='update')\n imgdata = h[0].data\n imgdata = np.where(np.isnan(imgdata), replace, imgdata)\n h[0].data = imgdata\n h.flush()\n h.close()",
"def remove_dark(self):\r\n self.decimate(numpy.isfinite(self.z))",
"def zero_to_nodata(base_raster):\n target_raster = base_raster.copy()\n target_raster[target_raster == 0] = _IC_NODATA\n return target_raster",
"def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):\n # TODO: gbataille - Seems that I forgot tests there\n if nodata_values != []:\n temp_file = gettempfilename('-gdal2tiles.vrt')\n warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)\n with open(temp_file, 'r') as f:\n vrt_string = f.read()\n\n vrt_string = add_gdal_warp_options_to_string(\n vrt_string, {\"INIT_DEST\": \"NO_DATA\", \"UNIFIED_SRC_NODATA\": \"YES\"})\n\n# TODO: gbataille - check the need for this replacement. Seems to work without\n# # replace BandMapping tag for NODATA bands....\n# for i in range(len(nodata_values)):\n# s = s.replace(\n# '<BandMapping src=\"%i\" dst=\"%i\"/>' % ((i+1), (i+1)),\n# \"\"\"\n# <BandMapping src=\"%i\" dst=\"%i\">\n# <SrcNoDataReal>%i</SrcNoDataReal>\n# <SrcNoDataImag>0</SrcNoDataImag>\n# <DstNoDataReal>%i</DstNoDataReal>\n# <DstNoDataImag>0</DstNoDataImag>\n# </BandMapping>\n# \"\"\" % ((i+1), (i+1), nodata_values[i], nodata_values[i]))\n\n # save the corrected VRT\n with open(temp_file, 'w') as f:\n f.write(vrt_string)\n\n corrected_dataset = gdal.Open(temp_file)\n os.unlink(temp_file)\n\n # set NODATA_VALUE metadata\n corrected_dataset.SetMetadataItem(\n 'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))\n\n if options and options.verbose:\n print(\"Modified warping result saved into 'tiles1.vrt'\")\n # TODO: gbataille - test replacing that with a gdal write of the dataset (more\n # accurately what's used, even if should be the same\n with open(\"tiles1.vrt\", \"w\") as f:\n f.write(vrt_string)\n\n return corrected_dataset",
"def reset_noise(self):\n self.advantage_hidden_layer.reset_noise()\n self.advantage_layer.reset_noise()\n self.value_hidden_layer.reset_noise()\n self.value_layer.reset_noise()",
"def sky_noise(sky_file_name):\n fits_file = fits.open(sky_file_name)\n image_data = fits_file[0].data\n return image_data",
"def replace_value(cls, data, nodata):\n data = data.astype('float64')\n mask = data != nodata\n if hasattr(data, 'where'):\n return data.where(mask, np.NaN)\n return np.where(mask, data, np.NaN)",
"def remove_noise(self):\n kernel = np.ones((5, 5), np.uint8)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE, kernel)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_OPEN, kernel)",
"def unmasked_data(self):\n return numpy.ma.filled(self.data.astype(numpy.float_),\n fill_value=numpy.nan)",
"def remove_blank_pixels(self,pixels,non_zero=None):\n self.uni2pix= np.unique(pixels).astype(int)\n self.pix2uni = {u:k for k,u in enumerate(self.uni2pix)}\n\n gb, gl = hp.pix2ang(self.nside, self.uni2pix)\n\n self.npix = self.uni2pix.size\n if isinstance(non_zero,type(None)):\n non_zero = np.where(self.wei != 0)[0]\n\n self.output = self.output[self.uni2pix]\n self.sigwei = self.sigwei[self.uni2pix]\n self.wei = self.wei[self.uni2pix]\n\n print('SIZE CHECK', self.wei.size, self.npix)",
"def remove_noise(emg):\n def butter_bandstop_filter(data, lowcut, highcut, fs, order=2):\n def butter_bandstop(lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='bandstop')\n return b, a\n \n b, a = butter_bandstop(lowcut, highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y\n \n # Remove noise from signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] = butter_bandstop_filter(emg[channel], 49., 51., EMG_F_SAMPLE, order=2)\n return emg",
"def white_noise():\n return random.randint(-32767, 32767)",
"def remove_noise(image):\n filtered = cv2.absdiff(image.astype(np.uint8), 255,\n cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)\n kernel = np.ones((1, 1), np.uint8)\n opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n\n img = image_smoothening(image)\n transform = cv2.bitwise_or(img, closing)\n return transform",
"def mask_nodata(self):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask_nodata()\n return ds_out",
"def noisy(self, drawer):\n cov = self._noise_coverage if 0 <= self._noise_coverage < 1 else 0.5\n for width_pixel in xrange(self._width):\n for height_pixel in xrange(self._height):\n if random.random() > cov:\n continue\n drawer.point(\n (width_pixel, height_pixel),\n fill=self.randcolor(64, 255)\n )",
"def noiseRemoval(array, minSize, classes):\n img=array.astype('int')\n for i in range(classes):\n B=(img!=i) # return a bool array\n B = morphology.remove_small_objects(B, min_size=minSize, connectivity=1) \n img[B==False]=i\n \n return img",
"def sentinel2_(image):\n nubes = image.select(\"QA60\")\n opaque = tools.compute_bits_client(nubes, 10, 10, \"opaque\")\n cirrus = tools.compute_bits_client(nubes, 11, 11, \"cirrus\")\n mask = opaque.Or(cirrus)\n result = image.updateMask(mask.Not())\n return result",
"def _suppress_bg_dc(self):\n # mask for suppressing background/don't care classes\n suppress_mask = 1 - (self.classification_mask[0] + self.classification_mask[1])\n # Suppress bounding box mask\n for i in range(self.num_coords):\n self.bbox_mask[i] = np.multiply(self.bbox_mask[i], suppress_mask)\n # Suppress for depth mask\n self.depth_mask = np.multiply(self.depth_mask, suppress_mask)\n return suppress_mask",
"def remove_background1(img):\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img",
"def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan",
"def noiseReduction(self):\n pass",
"def remove_rain_norain_discontinuity(R):\n R = R.copy()\n zerovalue = np.nanmin(R)\n threshold = np.nanmin(R[R > zerovalue])\n R[R > zerovalue] -= threshold - zerovalue\n R -= np.nanmin(R)\n\n return R",
"def add_noise(self, data):",
"def addWhiteNoise(map,rmsArcmin):\n noisyMap = map.copy()\n if rmsArcmin == 0.0:\n pass\n else:\n radToMin = 180/numpy.pi*60\n pixArea = radToMin**2 * map.pixScaleX*map.pixScaleY\n rms = rmsArcmin/numpy.sqrt(pixArea)\n \n noise = numpy.random.normal( scale = rms, size = map.data.shape )\n \n noisyMap.data[:] += noise[:]\n\n \n return noisyMap",
"def ice_unmasked(res='4x5', debug=False):\n # Create a np.ma mask\n m = np.logical_not((land_unmasked(res)*ocean_unmasked(res)))\n if debug:\n print((mask, mask.shape))\n return m",
"def remove_background(img):\n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n \n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img",
"def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))",
"def remove_data(ds, nh_lim, sh_lim, time_max, lat_name='lat', time_name='time'):\n return xr.where((ds[lat_name] < nh_lim) &\n (ds[lat_name] > sh_lim) &\n (ds[time_name] < pd.to_datetime([time_max]).values),\n np.nan,\n ds)",
"def remove_filler(dgm, val=np.inf):\r\n inds = (dgm[:,0] != val)\r\n return dgm[inds,:]"
] |
[
"0.64070904",
"0.6257984",
"0.61852384",
"0.61520267",
"0.5998266",
"0.5968071",
"0.5939426",
"0.5876657",
"0.58729666",
"0.5850251",
"0.58171725",
"0.5785496",
"0.57801366",
"0.5749277",
"0.5748041",
"0.57474446",
"0.5727457",
"0.57212025",
"0.5697683",
"0.5676271",
"0.5666042",
"0.5663809",
"0.5626706",
"0.5619493",
"0.55990136",
"0.556061",
"0.554822",
"0.55343115",
"0.5529535",
"0.55223596"
] |
0.7533694
|
0
|
Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB
|
def replace_values(
rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98
):
cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format(
rasterA_path,
no_data_value,
rasterB_path,
out_path,
no_data_value,
threshold,
threshold,
)
os.system(cmd)
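
A numpy sketch of the per-pixel arithmetic encoded in the --calc expression (the arrays below are illustrative, not real raster data):

import numpy as np

A = np.array([10.0, 20.0, 30.0])      # values from rasterA
B = np.array([0.50, 0.99, 0.97])      # values from rasterB
no_data_value, threshold = -99999.0, 0.98

# keep A where B < threshold, write the no-data value where B >= threshold
result = no_data_value * (B >= threshold) + A * (B < threshold)
assert np.array_equal(result, np.where(B >= threshold, no_data_value, A))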
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def subtract(raster1, raster2):\n valid_mask = (\n (raster1 != nodata) &\n (raster2 != nodata))\n result = numpy.empty(raster1.shape, dtype=numpy.float32)\n result[:] = nodata\n result[valid_mask] = raster1[valid_mask] - raster2[valid_mask]\n return result",
"def apply_threshold(heatmap, threshold):\n heatmap[heatmap <= threshold] = 0\n\n return heatmap",
"def zero_to_nodata(base_raster):\n target_raster = base_raster.copy()\n target_raster[target_raster == 0] = _IC_NODATA\n return target_raster",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap",
"def apply_threshold(da, threshold=1.):\n with np.errstate(all='ignore'):\n result = xr.where(da < threshold, np.nan, da)\n result.attrs = da.attrs\n return result",
"def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):\n # TODO: gbataille - Seems that I forgot tests there\n if nodata_values != []:\n temp_file = gettempfilename('-gdal2tiles.vrt')\n warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)\n with open(temp_file, 'r') as f:\n vrt_string = f.read()\n\n vrt_string = add_gdal_warp_options_to_string(\n vrt_string, {\"INIT_DEST\": \"NO_DATA\", \"UNIFIED_SRC_NODATA\": \"YES\"})\n\n# TODO: gbataille - check the need for this replacement. Seems to work without\n# # replace BandMapping tag for NODATA bands....\n# for i in range(len(nodata_values)):\n# s = s.replace(\n# '<BandMapping src=\"%i\" dst=\"%i\"/>' % ((i+1), (i+1)),\n# \"\"\"\n# <BandMapping src=\"%i\" dst=\"%i\">\n# <SrcNoDataReal>%i</SrcNoDataReal>\n# <SrcNoDataImag>0</SrcNoDataImag>\n# <DstNoDataReal>%i</DstNoDataReal>\n# <DstNoDataImag>0</DstNoDataImag>\n# </BandMapping>\n# \"\"\" % ((i+1), (i+1), nodata_values[i], nodata_values[i]))\n\n # save the corrected VRT\n with open(temp_file, 'w') as f:\n f.write(vrt_string)\n\n corrected_dataset = gdal.Open(temp_file)\n os.unlink(temp_file)\n\n # set NODATA_VALUE metadata\n corrected_dataset.SetMetadataItem(\n 'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))\n\n if options and options.verbose:\n print(\"Modified warping result saved into 'tiles1.vrt'\")\n # TODO: gbataille - test replacing that with a gdal write of the dataset (more\n # accurately what's used, even if should be the same\n with open(\"tiles1.vrt\", \"w\") as f:\n f.write(vrt_string)\n\n return corrected_dataset",
"def mask_nodata(img_patch: Union[str, Path], gt_patch: Union[str, Path], nodata_val: int, mask_val: int = 255) -> None:\n image_ds = gdal.Open(str(img_patch), gdalconst.GA_ReadOnly)\n image_arr = image_ds.ReadAsArray()\n nodata_mask = image_arr != nodata_val\n nodata_mask_flat = np.sum(nodata_mask, axis=0) != 0\n\n if nodata_mask_flat.min() == 1:\n image_ds = None\n return\n\n gt_patch_ds = gdal.Open(str(gt_patch), gdalconst.GA_Update)\n gt_patch_arr = gt_patch_ds.ReadAsArray()\n masked_gt_arr = np.where(nodata_mask_flat == 1, gt_patch_arr, mask_val)\n gt_patch_ds.GetRasterBand(1).WriteArray(masked_gt_arr)\n gt_patch_ds = None\n image_ds = None",
"def cloud_filter(array, bqa):\n array_dest = array.copy()\n array_dest[np.where((bqa != 2720) & (bqa != 2724) & (bqa != 2728) & (bqa != 2732)) ] = 'nan'\n return array_dest",
"def replace_value(cls, data, nodata):\n data = data.astype('float64')\n mask = data != nodata\n if hasattr(data, 'where'):\n return data.where(mask, np.NaN)\n return np.where(mask, data, np.NaN)",
"def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n thresh_heatmap = np.copy(heatmap)\n thresh_heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return thresh_heatmap",
"def expand_holes_in_raster(\n in_path, search_window=7, no_data_value=-99999.0, threshold=50\n):\n np_raster = np.array(gdal.Open(in_path).ReadAsArray())\n height, width = np_raster.shape[0], np_raster.shape[1]\n for i in range(int((search_window - 1) / 2), width, 1):\n for j in range(int((search_window - 1) / 2), height, 1):\n window = np_raster[\n int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2)\n + search_window,\n int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2)\n + search_window,\n ]\n if (\n np.count_nonzero(window == no_data_value)\n >= (threshold * search_window ** 2) / 100\n ):\n try:\n np_raster[i, j] = no_data_value\n except:\n pass\n return np_raster",
"def set_range_null(rastlist, above, below, NoData_Value):\n\n # sanitize filelist input\n rastlist = enf_rastlist(rastlist)\n\n # iterate through each file in the filelist and set nodata values\n for rastname in filelist:\n #load raster as numpy array and save spatial referencing.\n raster, meta = to_numpy(rastname)\n\n if above and below:\n raster[raster <= below and raster >= above] = NoData_Value\n elif above:\n raster[raster >= above] = NoData_Value\n elif below:\n raster[raster <= below] = NoData_Value\n \n raster.from_numpy(raster, meta, filename)\n arcpy.SetRasterProperties_management(rastname, data_type=\"#\",statistics=\"#\",\n stats_file=\"#\",nodata=\"1 \" + str(NoData_Value))\n \n print(\"Set NoData values in {0}\".format(rastname))\n \n return",
"def apply_threshold(heatmap, threshold):\n heatmap_thresh = np.copy(heatmap)\n ind = np.where(np.logical_and(heatmap_thresh>1, heatmap_thresh<=threshold))\n heatmap_thresh[ind] = 0\n #heatmap_thresh[(heatmap_thresh <= threshold)] = 0\n return heatmap_thresh",
"def applymask(self,mask):\n self.spec[mask==0]=np.nan",
"def filterMissings(self, threshold, data):\n\n #replace NAs by 0 for counting\n data.fillna(0).astype(bool).sum(axis=1)\n\n filtered_columns = data.columns\n\n\n #find out threshold, i.e. minimum number of non-zero in real numbers\n rowNumber = data.shape[0]\n min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))\n\n zero_counts = data.astype(bool).sum(axis=0)\n\n for columnID, nonZeros in zero_counts.items():\n if nonZeros <= min_nonZeros:\n filtered_columns = filtered_columns.drop(columnID)\n\n\n return data[filtered_columns]",
"def _mult_raster_op(array_a, array_b, nodata_a, nodata_b, target_nodata):\r\n result = numpy.empty(array_a.shape, dtype=numpy.float32)\r\n result[:] = target_nodata\r\n valid_mask = (array_a != nodata_a) & (array_b != nodata_b)\r\n result[valid_mask] = array_a[valid_mask] * array_b[valid_mask]\r\n return result",
"def threshold_mask(mask, threshold=0.5):\n mask[np.where(mask >= threshold)] = 1.\n mask[np.where(mask < threshold)] = 0.\n return mask",
"def apply_new_threshold(self, T):\n E = self.R1 - T * self.R2\n E[E < 0.0] = 0.0\n return E",
"def threshold_mask(image, threshold):\n image = image.copy()\n if threshold == None:\n threshold = skimage.filters.threshold_isodata(image)\n image[image > threshold] = 255\n image[image <= threshold] = 0\n return image",
"def remove_data(ds, nh_lim, sh_lim, time_max, lat_name='lat', time_name='time'):\n return xr.where((ds[lat_name] < nh_lim) &\n (ds[lat_name] > sh_lim) &\n (ds[time_name] < pd.to_datetime([time_max]).values),\n np.nan,\n ds)",
"def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]",
"def mask_sparse(self, threshold=10):\n self.MaskPrefix = 's' + self.MaskPrefix\n print('Masking pixels that do not have at least {0} coherent values'.format(threshold))\n # each pixel assigned an integer corresponding to # of igrams where coherent\n # NOTE: save coverage map if it doesn't exist already\n coverage = self.get_coverage()\n sparse = ma.masked_less(coverage, threshold)\n for ig in self.Set:\n igram = self.load_ma(ig)\n igram[sparse.mask] = ma.masked\n self.save_ma(ig, igram)\n print('Done')",
"def process_pain(x, lb, ub):\n x = x.abs()\n x.loc[(x > ub)] = 8\n x.loc[(x < lb) | (x > ub)] = np.nan\n return x",
"def test_tile_read_wrong_nodata():\n # non-boundless tile covering the nodata part\n with rasterio.open(S3_NODATA_PATH) as src_dst:\n arr, mask = reader.tile(\n src_dst, 438217, 801835, 21, tilesize=256, indexes=(1, 2, 3), nodata=1000\n )\n assert arr.shape == (3, 256, 256)\n assert mask.all()\n\n # Mask boundless values\n arr, mask = reader.tile(\n src_dst, 109554, 200458, 19, tilesize=256, indexes=(1, 2, 3), nodata=1000\n )\n assert arr.shape == (3, 256, 256)\n assert not mask.all()",
"def threshold_select_raster(\r\n base_raster_path, select_raster_path, threshold_val, target_path):\r\n base_nodata = pygeoprocessing.get_raster_info(\r\n base_raster_path)['nodata'][0]\r\n target_nodata = -9999.\r\n\r\n def threshold_select_op(\r\n base_array, select_array, threshold_val, base_nodata,\r\n target_nodata):\r\n result = numpy.empty(select_array.shape, dtype=numpy.float32)\r\n result[:] = target_nodata\r\n valid_mask = (base_array != base_nodata) & (select_array == 1)\r\n result[valid_mask] = numpy.interp(\r\n base_array[valid_mask], [0, threshold_val], [0.0, 1.0], 0, 1)\r\n return result\r\n\r\n pygeoprocessing.raster_calculator(\r\n [(base_raster_path, 1), (select_raster_path, 1),\r\n (threshold_val, 'raw'), (base_nodata, 'raw'),\r\n (target_nodata, 'raw')], threshold_select_op,\r\n target_path, gdal.GDT_Float32, target_nodata, gtiff_creation_options=(\r\n 'TILED=YES', 'BIGTIFF=YES', 'COMPRESS=DEFLATE',\r\n 'PREDICTOR=2', 'BLOCKXSIZE=256', 'BLOCKYSIZE=256',\r\n 'NUM_THREADS=2'))",
"def set_threshold_data(self,threshold_data):\n for roi, roi_thresh in zip(self.rois,threshold_data):\n roi.set_threshold_data(roi_thresh)",
"def replace_Nones(array1, array2):",
"def nodata_vec_mask(raster: rasterio.DatasetReader, nodata_val: int = None) -> ogr.DataSource | None:\n if nodata_val is None:\n nodata_val = raster.nodata\n if not isinstance(nodata_val, int | float):\n return None\n\n # Get original CRS and transform:\n crs_wkt = raster.crs.to_wkt()\n crs_gt = raster.transform\n\n # Read the data and calculate a nodata mask:\n image_arr = raster.read()\n nodata_mask = image_arr != nodata_val\n nodata_mask_flat = np.sum(nodata_mask, axis=0) != 0\n nodata_mask_flat = nodata_mask_flat.astype('uint8')\n\n raster_drv = gdal.GetDriverByName(\"MEM\")\n dst = raster_drv.Create(\"/vsimem/raster\", int(nodata_mask_flat.shape[1]),\n int(nodata_mask_flat.shape[0]), 1, gdal.GDT_Byte)\n\n gdal_src_gt = [crs_gt[2], crs_gt[0], crs_gt[1], crs_gt[5], crs_gt[3], crs_gt[4]]\n dst.SetGeoTransform(gdal_src_gt)\n dst.SetProjection(crs_wkt)\n dst.GetRasterBand(1).WriteArray(nodata_mask_flat)\n dst.GetRasterBand(1).SetNoDataValue(0)\n src_band = dst.GetRasterBand(1)\n\n # Create vector datasource in memory:\n drv = ogr.GetDriverByName(\"MEMORY\")\n vec_ds = drv.CreateDataSource('memdata')\n\n # Initialize projection:\n spatial_ref = osr.SpatialReference()\n spatial_ref.ImportFromWkt(crs_wkt)\n layer = vec_ds.CreateLayer('0', spatial_ref, geom_type=ogr.wkbPolygon)\n\n # Vectorize the raster nodata mask:\n gdal.Polygonize(src_band, src_band, layer, -1, [], callback=None)\n\n return vec_ds"
] |
[
"0.6061164",
"0.5870934",
"0.5798967",
"0.5786439",
"0.5786439",
"0.5786439",
"0.5763308",
"0.5708147",
"0.56289405",
"0.56241614",
"0.5601261",
"0.558359",
"0.5527729",
"0.5523799",
"0.54995984",
"0.5403832",
"0.5399435",
"0.5385514",
"0.5356004",
"0.5315001",
"0.52941465",
"0.52737594",
"0.52730274",
"0.52405274",
"0.5234353",
"0.52212167",
"0.52122027",
"0.5209701",
"0.5187831",
"0.51602507"
] |
0.6994987
|
0
|
Expands holes (cells with no_data_value) in the input raster.
|
def expand_holes_in_raster(
    in_path, search_window=7, no_data_value=-99999.0, threshold=50
):
    # Read the raster into a NumPy array; rows are the first axis, columns the second.
    np_raster = np.array(gdal.Open(in_path).ReadAsArray())
    height, width = np_raster.shape[0], np_raster.shape[1]
    half = int((search_window - 1) / 2)
    # Iterate rows (i) and columns (j) so indices match NumPy's [row, col] order.
    # The array is modified in place, so no-data cells written earlier in the pass
    # can feed into later windows and let the expansion cascade outward.
    for i in range(half, height, 1):
        for j in range(half, width, 1):
            # search_window x search_window neighbourhood centred on (i, j);
            # slicing clips automatically at the raster edges.
            window = np_raster[
                i - half : i - half + search_window,
                j - half : j - half + search_window,
            ]
            # If at least `threshold` percent of the window is already no-data,
            # mark the centre cell as no-data too (this is what expands the hole).
            if (
                np.count_nonzero(window == no_data_value)
                >= (threshold * search_window ** 2) / 100
            ):
                np_raster[i, j] = no_data_value
    return np_raster
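# --- Illustrative usage sketch (added for clarity; not part of the original record) ---
# Assumptions: `import numpy as np` and `from osgeo import gdal` are in scope and
# "dem_with_holes.tif" is a hypothetical placeholder path. The function returns the
# modified array; writing it back to disk would need a separate GDAL/NumPy step.
expanded = expand_holes_in_raster(
    in_path="dem_with_holes.tif",  # hypothetical input raster
    search_window=7,               # odd window size, here 7x7 cells
    no_data_value=-99999.0,
    threshold=50,                  # percent of window cells that must already be no-data
)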
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fill_sat_holes (data_mask, mask_value):\n\n value_sat = mask_value['saturated']\n value_satcon = mask_value['saturated-connected']\n mask_satcon = ((data_mask & value_sat == value_sat) |\n (data_mask & value_satcon == value_satcon))\n struct = np.ones((3,3), dtype=bool)\n mask_satcon = ndimage.binary_closing(mask_satcon, structure=struct)\n mask_satcon = ndimage.binary_fill_holes(mask_satcon, structure=struct)\n mask_satcon2add = (mask_satcon & (data_mask==0))\n data_mask[mask_satcon2add] = value_satcon",
"def fill():\n # Switch in edit mode\n bpy.ops.object.mode_set(mode = 'EDIT')\n \n # Fill hole\n bpy.ops.mesh.fill()",
"def fill_blind_pores(im):\n holes = find_disconnected_voxels(im)\n im[holes] = False\n return im",
"def fill_holes(img):\n neg = 1 - img\n s = ndimage.generate_binary_structure(3,1) # iterate structure\n labeled_array, numpatches = ndimage.label(neg,s) # labeling\n sizes = ndimage.sum(neg,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n max_size = sizes_list[-1]\n max_label = np.where(sizes == max_size)[0] + 1\n component = labeled_array == max_label\n return 1 - component",
"def fill_gaps(image, closing_radius=0, min_hole_size=0, median_radius=0.6):\n closing_structure = _disk(closing_radius)\n median_structure = _disk(median_radius)\n\n out = morphology.binary_closing(image, closing_structure)\n out = morphology.remove_small_holes(out, min_size=min_hole_size)\n out = filters.median(out, selem=median_structure)\n\n return(out)",
"def trim_floating_solid(im):\n holes = find_disconnected_voxels(~im)\n im[holes] = True\n return im",
"def infill_small_regions(I):\n n_tiles = 4 # ntiles horizontally.\n assert I.shape[0] == I.shape[1]\n tile_size = I.shape[0] // (n_tiles - 1)\n tile_delta = tile_size // 2\n\n k = 0\n I_stack = np.ones(I.shape + (2, 2)) * np.nan\n for j in range(n_tiles * 2 - 1):\n for i in range(n_tiles * 2 - 1):\n dy = slice(tile_delta * j, tile_delta * (j + 2))\n dx = slice(tile_delta * i, tile_delta * (i + 2))\n S = I[dy, dx]\n M = ndimage.binary_dilation(np.isnan(S), iterations=2)\n image_inpainted = inpaint.inpaint_biharmonic(S, M, multichannel=False)\n I_stack[dy, dx, j % 2, i % 2] = image_inpainted\n k += 1\n return np.nanmean(np.nanmean(I_stack, axis=2), axis=2)",
"def _expand_raster(raster, distance = (4, 2)):\n try:\n from skimage import draw, morphology\n except:\n raise ImportError('The fill function requires the module '\n '\"scikit-image\" to operate. Please retry '\n 'after installing scikit-image:\\n\\n'\n '$ pip install --upgrade scikit-image')\n if distance[0] <= 0.5 and distance[1] <= 0.5: return raster\n\n num_pixels = np.array(np.ceil(distance), dtype = int)\n neighborhood = np.zeros((num_pixels[1]*2 + 1, num_pixels[0]*2 + 1),\n dtype = np.bool)\n rr, cc = draw.ellipse(num_pixels[1], num_pixels[0],\n distance[1]+0.5, distance[0]+0.5)\n neighborhood[rr, cc] = 1\n\n return morphology.binary_dilation(image = raster, selem = neighborhood)",
"def findHoles(image):\n local_minima=find_local_minima(image)\n mask_gaussians=where_are_gaussians(image)\n im_holes=show_holes_on_img(np.logical_and(local_minima,~mask_gaussians),image)\n local_minima = np.logical_and(local_minima,~mask_gaussians)\n local_minima = local_minima.astype(np.uint8)\n return local_minima,im_holes",
"def create_bad_pixel_mask(\n data: np.ndarray,\n grow: bool = False,\n iterations: int = 1,\n diagonal: bool = False,\n):\n\n bad_pixel_mask = ~np.isfinite(data) | np.isnan(data)\n\n if grow:\n bad_pixel_mask = grow_mask(\n mask=bad_pixel_mask, iterations=iterations, diagonal=diagonal\n )\n\n if bad_pixel_mask.any():\n logging.warning(\"Bad pixels detected.\")\n return bad_pixel_mask, True\n\n else:\n return bad_pixel_mask, False",
"def fillHoles(img):\n out,contour,hierarchy = cv2.findContours(img,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)\n i=0\n for cnt in contour:\n cv2.drawContours(img,contour,i,255,-1)\n i+=1\n return img",
"def local_useless_fill(node):\r\n if node.op == T.fill:\r\n r, v = node.inputs\r\n if v.type == node.outputs[0].type:\r\n # this is a useless fill, erase it.\r\n return [v]",
"def blank_copy (self):\n try:\n d = copy.deepcopy (self)\n except:\n print ('ERROR: raster copy error')\n \n d.ras[:,:] = np.nan\n return d",
"def simulate_source_mask(binary, n_holes, hole_radius_arcmin):\n\n mask = binary.copy()\n if binary.pixel == \"HEALPIX\":\n idx = np.where(binary.data == 1)\n for i in range(n_holes):\n random_index1 = np.random.choice(idx[0])\n vec = hp.pixelfunc.pix2vec(binary.nside, random_index1)\n disc = hp.query_disc(binary.nside, vec, hole_radius_arcmin / (60.0 * 180) * np.pi)\n mask.data[disc] = 0\n\n if binary.pixel == \"CAR\":\n random_index1 = np.random.randint(0, binary.data.shape[0], size=n_holes)\n random_index2 = np.random.randint(0, binary.data.shape[1], size=n_holes)\n mask.data[random_index1, random_index2] = 0\n dist = enmap.distance_transform(mask.data)\n mask.data[dist * 60 * 180 / np.pi < hole_radius_arcmin] = 0\n\n return mask",
"def fix_nan(image, replace=0.):\n h = pyfits.open(image, mode='update')\n imgdata = h[0].data\n imgdata = np.where(np.isnan(imgdata), replace, imgdata)\n h[0].data = imgdata\n h.flush()\n h.close()",
"def shrink_mask(self):\n m = self._mask\n if m.ndim and not m.any():\n self._mask = nomask\n return self",
"def floodFill(c,r,mask):\n # cells already filled\n filled = set()\n # cells to fill\n fill = set()\n fill.add((c,r))\n width = mask.shape[1]-1\n height = mask.shape[0]-1\n # Our output inundation array\n flood = np.zeros_like(mask, dtype=np.int8)\n # Loop through and modify the cells which\n # need to be checked.\n while fill:\n # Grab a cell\n x,y = fill.pop()\n if y == height or x == width or x < 0 or y < 0:\n # Don't fill\n continue\n if mask[y][x] == 1:\n # Do fill\n flood[y][x]=1\n filled.add((x,y))\n # Check neighbors for 1 values\n west =(x-1,y)\n east = (x+1,y)\n north = (x,y-1)\n south = (x,y+1)\n if not west in filled:\n fill.add(west)\n if not east in filled: \n fill.add(east)\n if not north in filled: \n fill.add(north)\n if not south in filled: \n fill.add(south)\n return flood",
"def grow_mask(mask: np.ndarray, iterations: int, diagonal: bool):\n\n if diagonal:\n struct = ndimage.generate_binary_structure(2, 2)\n\n else:\n struct = ndimage.generate_binary_structure(2, 1)\n\n mask_grown = ndimage.binary_dilation(\n input=mask, structure=struct, iterations=iterations\n )\n\n return mask_grown",
"def flood_fill(c, r, mask):\n # cells already filled\n filled = set()\n\n # cells to fill\n fill = set()\n fill.add((c, r))\n width = mask.shape[1] - 1\n height = mask.shape[0] - 1\n\n # Our output inundation array\n flood = np.zeros_like(mask, dtype=np.int8)\n\n # Loop through and modify the cells which need to be checked.\n while fill:\n # Grab a cell\n x, y = fill.pop()\n if y == height or x == width or x < 0 or y < 0:\n # Don't fill\n continue\n if mask[y][x] == 1:\n # Do fill\n flood[y][x] = 1\n filled.add((x, y))\n\n # Check neighbors for 1 values\n west = (x - 1, y)\n east = (x + 1, y)\n north = (x, y - 1)\n south = (x, y + 1)\n if not west in filled:\n fill.add(west)\n if not east in filled:\n fill.add(east)\n if not north in filled:\n fill.add(north)\n if not south in filled:\n fill.add(south)\n return flood",
"def mask_nodata(self, fill_value=np.nan):\n _da = self._obj\n if self.nodata is not None and self.nodata != fill_value:\n mask = _da.notnull() if np.isnan(self.nodata) else _da != self.nodata\n _da = _da.where(mask, fill_value)\n _da.raster.set_nodata(fill_value)\n return _da",
"def fill(img, sigma=1, erosion=2):\n img = img.copy()\n img = skimage.img_as_float(img)\n h, w, d = img.shape\n assert d == 4, \"image must be RGBA\"\n raw_mask = (img[:, :, 3] != 0)\n if raw_mask.sum() == 0:\n return img\n mask = morphology.binary_erosion(raw_mask, selem=morphology.disk(erosion))\n img[mask == 0] = 0\n invmask = ~mask\n while invmask.sum():\n denom = filters.gaussian(mask.astype(float), sigma=sigma)[invmask] + 1e-6\n for i in range(3):\n img[invmask, i] = filters.gaussian(img[:, :, i], sigma=sigma)[invmask] / denom\n mask = morphology.binary_dilation(mask, selem=morphology.disk(sigma))\n invmask = ~mask\n img[invmask] = 0\n sigma *= 2\n\n img[:, :, 3] = 1.0\n return img",
"def local_fill_cut(node):\r\n\r\n # this optimization is essentially for getting broadcasting to\r\n # replace fill. This is always possible when using a Compound\r\n # Elemwise operation, but it is not always possible without one\r\n # (consider filling a large matrix with a scalar, and then adding\r\n # another scalar. The only numbers that count are the two\r\n # scalars, but we can't ignore the large matrix because it gives\r\n # the shape of the result.\r\n\r\n if node.op != T.Elemwise:\r\n return False\r\n\r\n output = node.outputs[0]\r\n try:\r\n #reference is some input with the same type as the output but\r\n #that is not produced by a fill\r\n reference = [input\r\n for input in node.inputs\r\n if input.type == output.type and\r\n (not input.owner or input.owner.op != T.fill)][0]\r\n except IndexError:\r\n return False\r\n\r\n new_inputs = []\r\n new = False\r\n for input in node.inputs:\r\n if input.owner and input.owner.op == T.fill:\r\n model, filling = input.owner.inputs\r\n if encompasses_broadcastable(reference.type.broadcastable,\r\n filling.type.broadcastable):\r\n new_inputs.append(filling)\r\n new = True\r\n continue\r\n new_inputs.append(input)\r\n\r\n if not new:\r\n return False\r\n\r\n rval = node.op(*new_inputs)\r\n if isinstance(rval, gof.Variable):\r\n return rval.owner.outputs\r\n else:\r\n return rval[0].owner.outputs",
"def infill_large_regions(I, npixels=10000, precision=1000):\n assert I.shape[0] == I.shape[1]\n xgrid, ygrid = np.meshgrid(np.arange(I.shape[1]),\n np.arange(I.shape[0]))\n\n I_ = I.copy()\n\n # Exclude two pixels on the border during infilling.\n bad_regions, n_bad_regions = ndimage.label(\n ndimage.binary_dilation(np.isnan(I), iterations=2))\n\n # Use 5 pixel regions surrounding each hole.\n surround = ndimage.grey_dilation(bad_regions, size=5)\n counts, _ = np.histogram(bad_regions, np.arange(n_bad_regions + 1) - .5)\n\n for i in range(1, n_bad_regions):\n if counts[i] > npixels:\n # This is a big region, infill using the GPR method.\n surround_data = (surround == i) & (bad_regions == 0)\n xgrid_s, ygrid_s = xgrid[surround_data], ygrid[surround_data]\n\n # Take N_points points at random, fit a Gaussian process.\n subs = np.random.permutation(np.arange(len(xgrid_s)))[:precision]\n gp_kernel = Matern(length_scale=1,\n length_scale_bounds=(.01, 100), nu=1.5)\n gpr = GaussianProcessRegressor(kernel=gp_kernel, normalize_y=True)\n\n X = np.concatenate((xgrid_s.reshape(-1, 1),\n ygrid_s.reshape(-1, 1)), axis=1)\n gpr.fit(X[subs, :], I[surround_data][subs])\n xgrid_s, ygrid_s = xgrid[bad_regions == i], ygrid[bad_regions == i]\n X_ = np.concatenate((xgrid_s.reshape(-1, 1),\n ygrid_s.reshape(-1, 1)), axis=1)\n y_ = gpr.predict(X_)\n I_[bad_regions == i] = y_\n return I_",
"def update_holes(self, holes):\n for instr in self.instructions:\n if instr.is_hole:\n for h in holes:\n if h.id == instr.id:\n instr.hole_declaration = h\n elif instr.contains_blocks():\n for ib in instr.get_instruction_blocks():\n ib.update_holes(holes)",
"def show_holes_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n radius=9\n out_image = img.copy()\n out_image = cv2.cvtColor(out_image, cv2.COLOR_GRAY2RGB)\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n y_center = (dy.start + dy.stop - 1)/2 \n center=(x_center,y_center)\n cv2.circle(out_image, center, radius,(111,17,108),thickness=2)\n\n plt.figure()\n plt.imshow(out_image)\n plt.autoscale(False)\n return out_image",
"def fill_outside_mask_borders(data, passes=1):\n # Data must be 2-dimensional\n if data.ndim != 2:\n raise ValueError('data must be 2-dimensional')\n # If all values are NaNs, raise a warning\n if np.all(np.isnan(data)):\n warnings.warn('All values of data are NaN, returning original array')\n return data\n # If data is already a masked array, then make sure to return a masked array. If not,\n # return just the data portion\n try:\n data.mask\n is_masked = True\n except AttributeError as e:\n data = np.ma.masked_invalid(data)\n is_masked = False\n for _ in range(passes):\n for shift in (-1, 1):\n for axis in (0, 1):\n data_shifted = np.roll(data, shift=shift, axis=axis)\n idx = ~data_shifted.mask * data.mask\n data[idx] = data_shifted[idx]\n if is_masked:\n return data\n else:\n return data.data",
"def RemovePolygonHoles_management(in_fc, threshold=0.0):\n desc = arcpy.Describe(in_fc)\n if desc.dataType != \"FeatureClass\" and desc.dataType != \"ShapeFile\":\n print(\"Invalid data type. The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n else:\n if desc.shapeType != \"Polygon\":\n print(\"The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n if threshold < 0.0:\n threshold = 0.0\n with arcpy.da.UpdateCursor(in_fc, [\"SHAPE@\"]) as updateCursor:\n for updateRow in updateCursor:\n shape = updateRow[0]\n new_shape = arcpy.Array()\n for part in shape:\n new_part = arcpy.Array()\n if threshold > 0:\n # find None point in shape part\n # in arcpy module, a None point is used to seperate exterior and interior vertices\n null_point_index = []\n for i in range(len(part)):\n if part[i] is None:\n null_point_index.append(i)\n # if interior vertices exist, create polygons and compare polygon shape area to given threshold\n # if larger, keep vertices, else, dismiss them\n if len(null_point_index) > 0:\n for k in range(0, null_point_index[0]):\n new_part.add(part[k])\n for i in range(len(null_point_index)):\n pointArray = arcpy.Array()\n # determine if the None point is the last one\n if i+1 < len(null_point_index):\n for j in range(null_point_index[i] + 1, null_point_index[i+1]):\n pointArray.add(part[j])\n else:\n for j in range(null_point_index[i] + 1, len(part)):\n pointArray.add(part[j])\n # create a polygon to check shape area against the given threshold\n inner_poly = arcpy.Polygon(pointArray)\n # if larger than threshold, then add to the new part Array\n if inner_poly.area > threshold:\n if i+1 < len(null_point_index):\n for k in range(null_point_index[i], null_point_index[i+1]):\n new_part.add(part[k])\n else:\n for k in range(null_point_index[i], len(part)):\n new_part.add(part[k])\n new_shape.add(new_part)\n # if interior does not exist, add the whole part\n else:\n new_shape.add(part)\n else:\n # get the first None point index\n first_null_point_index = 0\n for i in range(len(part)):\n if part[i] is None:\n first_null_point_index = i\n break\n if first_null_point_index == 0:\n new_shape.add(part)\n else:\n for j in range(first_null_point_index):\n new_part.add(part[j])\n new_shape.add(new_part)\n if len(new_shape) > 0:\n new_poly = arcpy.Polygon(new_shape)\n updateRow[0] = new_poly\n updateCursor.updateRow(updateRow)",
"def fill_nan(x):\n (n_rows, wdw) = x.shape\n new_x = np.zeros((n_rows,wdw)); new_x[:] = np.nan\n for i in range(n_rows):\n indMissing = np.where(np.isnan(x[i,:]))[0]\n l = len(x[i,indMissing]) #number of MVs\n if l < 4*wdw/5: #20% available values otherwise discarded\n new_x[i,:] = x[i,:]\n if l > 0 and indMissing[0] == 0: #missing value at index 0 \n c = 0\n while c + 1 < len(indMissing) and indMissing[c+1] == indMissing[c] + 1:\n c += 1\n new_x[i,:c+1] = x[i,c+1] #first nans replaced by first non nan value\n indMissing = np.where(np.isnan(new_x[i,:]))[0]\n l = len(new_x[i,indMissing])\n if l > 0 and indMissing[0] > 0:\n new_x[i,:] = interpolate1d(new_x[i,:]) #interpolate intermediate nans\n ind = np.where(~np.isnan(new_x).all(axis=1))[0]\n new_x = new_x[ind] #remove NaNs \n \n return new_x, ind",
"def morphological_dilation(masked_image, n): #n=3\r\n\tmask = np.isnan(masked_image)\r\n\ts = ndimage.morphology.generate_binary_structure(2, 1)\r\n\textended_mask = ndimage.binary_dilation(mask, structure=s, iterations=3).astype(mask.dtype)\r\n\treturn extended_mask\r\n\t#mask = np.isnan(masked_image)\r\n\t#idx = np.flatnonzero(mask)\r\n\t#expanded_idx = idx[:,None] + np.arange(1, n)\r\n\t#np.put(mask, expanded_idx, True, 'clip')\r\n\t#return mask\r",
"def fill_region(image,mask,value=1):\n\tim = image.copy().ravel()\n\tif image.ndim > 2:\n\t\tim_h, im_w, im_ch = image.shape\n\telse:\n\t\tim_ch = 1\n\t\tim_h, im_w = self.image.shape\n\t# linear indices of masked pixels\n\tind = masked_indices(mask)\n\tfor i in ind:\n\t\tfor ch in range(im_ch):\n\t\t\tim.data[i*im_ch+ch] = value\n\treturn im.reshape(image.shape)"
] |
[
"0.6416809",
"0.6038697",
"0.6009069",
"0.59721184",
"0.5879863",
"0.58542347",
"0.57309407",
"0.5600623",
"0.5584068",
"0.5473977",
"0.5460883",
"0.5431665",
"0.54253507",
"0.53936225",
"0.53427863",
"0.53318036",
"0.5318617",
"0.52430534",
"0.52396625",
"0.5229975",
"0.5221606",
"0.5163744",
"0.51469076",
"0.51223755",
"0.5115906",
"0.5104137",
"0.5062132",
"0.5051294",
"0.5026239",
"0.5025642"
] |
0.655959
|
0
|
Returns the CRS (Coordinate Reference System) of the raster
|
def get_raster_crs(raster_path):
    # Open the raster with rasterio and return its coordinate reference system;
    # the context manager closes the file handle, and the CRS object stays valid.
    with rasterio.open(raster_path) as raster:
        return raster.crs
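# --- Illustrative usage sketch (added for clarity; not part of the original record) ---
# Assumptions: `import rasterio` is in scope and "scene.tif" is a hypothetical path.
# rasterio returns a rasterio.crs.CRS object; str() and to_epsg() are its standard
# accessors.
crs = get_raster_crs("scene.tif")
print(crs)            # e.g. EPSG:32633
print(crs.to_epsg())  # e.g. 32633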
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def crs(self):\n return self._crs",
"def _get_raw_crs(self) -> riocrs.CRS:\n # Open metadata\n root, _ = self.read_mtd()\n\n # Get CRS\n crs_name = root.findtext(\".//MapProjection\")\n\n if not crs_name:\n crs_name = vectors.WGS84\n\n return riocrs.CRS.from_string(crs_name)",
"def crs(self):\n if 'crs' in self._data_array.coords:\n return self._data_array.coords['crs'].item()\n raise AttributeError('crs attribute is not available due to lack of crs coordinate.')",
"def getCRS(shp):\r\n if not isinstance(shp, geopandas.geodataframe.GeoDataFrame):\r\n shp = geopandas.read_file(shp)\r\n return shp.crs['init'][5:]",
"def crs(self):\n return self.dataframe.crs",
"def crs(self) -> CRS:\n # return horizontal crs by default to avoid errors downstream\n # with reproject / rasterize etc.\n if \"crs_wkt\" not in self.attrs:\n self.set_crs()\n if \"crs_wkt\" in self.attrs:\n crs = pyproj.CRS.from_user_input(self.attrs[\"crs_wkt\"])\n return crs",
"def map_crs(self):\n\n crs = self.canvas.mapSettings().destinationCrs().authid()\n return crs",
"def get_crds_nc(self, axes=None, shaped=False):\n return self._src_crds.get_crds_nc(axes=axes, shaped=shaped)",
"def crs(self) -> riocrs.CRS:\n raw_crs = self._get_raw_crs()\n\n if raw_crs.is_projected:\n utm = raw_crs\n else:\n # Open metadata\n root, _ = self.read_mtd()\n\n # Get the mean lon lat\n lon = float(root.findtext(\".//CenterLongitude\"))\n lat = float(root.findtext(\".//CenterLatitude\"))\n\n # Compute UTM crs from center long/lat\n utm = vectors.corresponding_utm_projection(lon, lat)\n utm = riocrs.CRS.from_string(utm)\n\n return utm",
"def cartopy_crs(self):\n return self.crs.to_cartopy()",
"def get_crds_cc(self, axes=None, shaped=False):\n return self._src_crds.get_crds_cc(axes=axes, shaped=shaped)",
"def get_crd_nc(self, axis, shaped=False):\n return self._src_crds.get_nc(axis, shaped=shaped)",
"def craster(self):\n r = self.iraster()\n if r:\n if len(r) == 1:\n return r\n head = self.data['IHDR']\n size = head['width']*head['height']\n r = pack('!%si'%size,*r)\n return compress(r)\n else:\n return None",
"def grouping_crs(self):\n return self._get_srid_name(\n self._engine.execute(select([FOOTPRINT_SRID_EXPRESSION])).scalar()\n )",
"def get_crds_ec(self, axes=None, shaped=False):\n return self._src_crds.get_crds_ec(axes=axes, shaped=shaped)",
"def CRS(site):\n return np.dot(CR(np.pi/2**(site)),SWAP)",
"def get_crd_cc(self, axis, shaped=False):\n return self._src_crds.get_cc(axis, shaped=shaped)",
"def lonlat2cr_for_geotif(path):\n old_cs, new_cs, gta, local_vars = _create_xform(path)\n transform = osr.CoordinateTransformation(new_cs, old_cs)\n\n def composite(lon, lat):\n \"\"\"xform from (lon, lat) to (c, r)\"\"\"\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return (~gta * transform.TransformPoint(lat, lon)[:2])\n \n return composite",
"def get_crds(self, name):\n\n coords = name.split('_')\n lat = float(coords[1])\n lon = float(coords[2])\n\n return [lon, lat]",
"def get_crs(url, layer_name):\n # get list of acceptable CRS' for the layer\n wms = WebMapService(url, version='1.3.0')\n crs_list = wms[layer_name].crsOptions\n return crs_list",
"def circpol(self):\n return self._circpol",
"def get_crds(self,ind):\n try:\n ind = na.array(ind)\n nx = self.dim[0]\n ny = self.dim[1]\n nxny = self.dim[0]*self.dim[1]\n crd = na.transpose(na.array([ (ind % nx), (ind / nx)%ny,ind / nxny]))\n return crd\n except Exception as error:\n print(\"failed in get_crds \", error)\n return -1",
"def srid(self):\n crs = self.crs\n if crs is not None:\n srid = crss.parseEPSGCode(crs,\n (crss.fromURL, crss.fromURN, crss.fromShortCode)\n )\n if srid is None and not crss.is_image_crs(crs):\n raise InvalidSubsettingCrsException(\n \"Could not parse EPSG code from URI '%s'\" % crs\n )\n return srid\n return None",
"def gecos(self):\n\t\treturn self.__gecos",
"def get_cartesian_coords(self):\n r = 1\n dec = self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]",
"def getCL(self):\r\n return self.cL;",
"def get_crd_ec(self, axis, shaped=False):\n return self._src_crds.get_ec(axis, shaped=shaped)",
"def crd(self):\r\n return self.__trajectory[0]",
"def cs(self):\n return self._cs",
"def CL(self):\n return self.__CL"
] |
[
"0.7451072",
"0.7410187",
"0.7228984",
"0.71266246",
"0.6917967",
"0.66791284",
"0.6674292",
"0.6614727",
"0.65814567",
"0.64536196",
"0.6270467",
"0.6167712",
"0.6128253",
"0.5984987",
"0.59417385",
"0.58857507",
"0.58390284",
"0.582477",
"0.581725",
"0.57973796",
"0.5796361",
"0.5722783",
"0.5711851",
"0.57008475",
"0.5670699",
"0.5662905",
"0.5602822",
"0.5598467",
"0.55921096",
"0.5578145"
] |
0.7941894
|
0
|
Get the position of the missed_value in all square neighbors
|
def position_surroundings(self, neighbour_pos, missed_value):
    # Collect the position of `missed_value` in every neighbouring square;
    # squares that do not contain the value return a falsy result and are skipped.
    pos = []
    for x, y in neighbour_pos:
        position = self._square_matrix[x][y].get_pos_from_number(missed_value)
        if position:
            pos.append(position)
    return pos
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn",
"def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours",
"def get_0_pos(grid):\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 0:\n return i, j\n return -1, -1",
"def check_neighbours(coordinates):\n x_coord = coordinates[0]\n y_coord = coordinates[1]\n coordinates_value = 0\n for x_move in [-1, 0, 1]:\n x = x_coord + x_move\n for y_move in [-1, 0, 1]:\n y = y_coord + y_move\n try:\n value = grid[(x,y)]\n coordinates_value += value\n except KeyError:\n pass\n\n grid[coordinates] = coordinates_value\n # print(coordinates_value)\n return coordinates_value",
"def find_excited_locations(self):\n return np.asarray(np.where(self._grid == 8)).T",
"def get_neighbour_squares_idx(self, pos):\n if pos:\n possible_values = {0, 1, 2}\n col_variation = zip( [pos[0], pos[0]], possible_values - {pos[1]} )\n row_variation = zip( possible_values - {pos[0]}, [pos[1], pos[1]] )\n return list(col_variation), list(row_variation)",
"def find_position(self, val):\n edges = np.array(self.cell_edges)\n if val in edges:\n index = np.searchsorted(edges, val)\n return index, index\n else:\n edges -= val\n if edges[0] > 0:\n return -1, 0\n if edges[-1] < 0:\n return 0, -1\n index = 0\n for i, e in enumerate(edges):\n if e > 0:\n index = i\n break\n return index - 1, index",
"def get_played_positions(board):\n return np.argwhere(board.state != -1)",
"def get_neighb_coords(self, i, ci):\n j = self.conn[i][ci]\n rj = self.xyz[j].copy()\n if self.periodic:\n if self.use_pconn:\n img = self.pconn[i][ci]\n rj += np.dot(img, self.cell)\n else:\n all_rj = rj + self.images_cellvec\n all_r = all_rj - self.xyz[i]\n all_d = np.sqrt(np.add.reduce(all_r*all_r,1))\n closest = np.argsort(all_d)[0]\n return all_rj[closest]\n return rj",
"def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours",
"def find_empty(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)",
"def __get_position(self, value, state):\n coords = np.argwhere(state == value).flatten()\n return coords",
"def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours",
"def get_unknown_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if cell.state == None ]",
"def _get_value(self, i, j):\n m = len(self.data)\n n = len(self.data[0])\n if i >= m or j >= n:\n return None\n if self.data[i][j] is None:\n indices = [(i+di, j+dj) for di, dj in [[0, -1], [0,1], [-1,0], [1,0]]]\n values = [self.data[x][y] for x,y in indices if 0<=x<m and 0<=y<n]\n if any(v is None for v in values):\n raise Exception(\"ERROR: Input contains adjacent missing values\")\n return sum(values)/float(len(values))\n else:\n return self.data[i][j]",
"def find_blank_square(self, state):\n\n return state.index(0)",
"def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)",
"def GetPosition(board):\n\tfor i in range(len(board.matrix)):\n\t\tfor j in range(len(board.matrix[i])):\n\t\t\tif board.matrix[i][j]==\"X\":\n\t\t\t\treturn i,j",
"def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]",
"def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)",
"def neighbours(self, i, j):\n nearest = []\n for x_offset, y_offset in [(0, -1), (0, 1), (1, 0), (-1, 0)]:\n try:\n nearest.append(self.as_list[checkNonNegIndex(i + x_offset)][checkNonNegIndex(j + y_offset)])\n except IndexError:\n continue\n except TypeError:\n continue\n return nearest",
"def heuristic_misplaced(self):\n misplaced = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if self.position[i][j] != self.PUZZLE_END_POSITION[i][j]:\n misplaced += 1\n\n return misplaced",
"def compute_neighbours(index, matrix):\n row, col = decode_to_matrix_cell(index, matrix)\n n1 = index + 1\n if n1 >= matrix.size or col == matrix.cols - 1:\n n1 = None\n\n n2 = index + matrix.cols\n if n2 >= matrix.size or row == matrix.rows - 1:\n n2 = None\n return n1, n2,",
"def find_blank_cell(self, board: list):\n cells = {}\n for i in range(9): # Iterate over rows\n for j in range(9): # Iterate over columns\n if board[i][j] == 0:\n cells[str(i) + ' ' + str(j)] = self.count_numbers(board, j, i)\n m = max(cells.values())\n for k in cells:\n if cells[k] == m:\n s = k.split()\n x, y = int(s[1]), int(s[0])\n return x, y",
"def find_coordinates(self, value):\n cells = {v.coordinates for v in self.cells if v.value == value}\n return cells if len(cells) > 0 else None",
"def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys",
"def findEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return [x,y]",
"def get_nr_of_misplaced_tiles(board):\n result = 0\n\n for idx, val in enumerate(board):\n if idx != val:\n result += 1\n\n return result",
"def find_los_neighbors(seats, occupied_self, i, j):\n values = []\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n values.append(\n find_nearest_los_seat(seats, occupied_seats, i, j, dx, dy)\n )\n return values",
"def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours"
] |
[
"0.7205239",
"0.6487098",
"0.62197286",
"0.621028",
"0.6183383",
"0.6171833",
"0.6132711",
"0.60654384",
"0.6031553",
"0.60310304",
"0.60268855",
"0.60259557",
"0.5989147",
"0.59822845",
"0.5950342",
"0.5935506",
"0.5918549",
"0.5903765",
"0.5888374",
"0.58830476",
"0.5880375",
"0.58799595",
"0.58712",
"0.5863049",
"0.58620584",
"0.5861769",
"0.58615965",
"0.5842851",
"0.58420295",
"0.5829846"
] |
0.74833155
|
0
|
Zero out (but remember) the weight on this node
|
def clear(self):
self.weight = 0
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def zero_weight():\n return Weight(kg=0)",
"def reset_weights(self):\n self.head.reset_weights()",
"def clear(self):\n for i in range(0, len(self.weights)):\n self.weights[i] = 0",
"def initializeWeightsToZero(self):\n\t\t## YOUR CODE BELOW\n\t\t\n\t\tutil.raiseNotDefined()\n\t\treturn",
"def reset_weights(self):\r\n self._weights = deepcopy(self._tmp_weights)\r\n self._tmp_weights = None",
"def reset(self):\n self._weights.clear()",
"def zero_negative_weights(self):\n for k in range(len(self)):\n self[k] *= 0 if self[k] < 0 else 1\n self.finalized = True\n return self",
"def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)",
"def reset_weight_zero(self):\n self.node_embedding = np.random.uniform(low=-0.5, high=0.5, size=(self.vocab_size, self.layer1_size)).astype(\n np.float32)\n self.context_embedding = np.zeros((self.vocab_size, self.layer1_size), dtype=np.float32)\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)\n log.info(\"reset communities data| k: {}\".format(self.k))",
"def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0",
"def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()",
"def init_weights(self) -> None:\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def resetWeights(T):\n T.children = [(t,0) for t in T.children]\n for t,w in T.children:\n resetWeights(t)",
"def zero_grad(self):\n self.optimizer.zero_grad()",
"def zero_grad(self):\r\n self._optimizer.zero_grad()",
"def zero_grad(self):\n self.grad.zero_()",
"def test_node_weight_init(self):\n n = Node(value=2.0)\n self.assertEqual(n.weights, None)",
"def init_weights(self):\n with torch.no_grad():\n self._init_weights()",
"def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1",
"def init_weights(self):\r\n self.embedding.weight.data.uniform_(-0.1, 0.1)\r\n self.fc.bias.data.fill_(0)\r\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def reset(self):\n\t\tself.pos = self.start\n\n\t\tself.weighted_n_left = 0.0\n\t\tself.weighted_n_right = self.weighted_n_node_samples\n\n\t\tself.label_count_left \t= np.zeros(self.n_classes)\n\t\tself.label_count_right \t= np.copy(self.label_count_total)",
"def weights(self):\r\n\t\treturn None",
"def normalize_weight(self, Z):\n self.weight /= Z",
"def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.word.bias.data.fill_(0)\n self.word.weight.data.uniform_(-0.1, 0.1)",
"def empty(self):\n self.items = []\n self.totalWeight = 0",
"def clear(self):\n self.xi[:] = 0\n self.meanlogr[:] = 0\n self.weight[:] = 0\n self.npairs[:] = 0",
"def zero(self):\n return self.State.zero()"
] |
[
"0.77741355",
"0.73818123",
"0.7308508",
"0.71057844",
"0.706317",
"0.705924",
"0.6891523",
"0.68373996",
"0.67115164",
"0.66507685",
"0.66476434",
"0.6619531",
"0.6595727",
"0.64887065",
"0.64618355",
"0.64608634",
"0.6412731",
"0.6372227",
"0.63027966",
"0.6295642",
"0.6287023",
"0.6287023",
"0.6287023",
"0.6285075",
"0.62416077",
"0.623444",
"0.6224191",
"0.6189935",
"0.6164773",
"0.6155649"
] |
0.8046396
|
0
|
Dijkstra's algorithm to compute the shortest distance to all vertices vs from a given source vertex s by traveling the edges e
|
def dijkstra(vs, es, s, stop=None):
    # Set every vertex's tentative distance (_ss_d) to infinity and the source's to 0.
    initialize_single_source(vs, es, s)
    # Intended min-first ordering key for the priority queue (note: not actually
    # passed to pq() in this snippet).
    key = lambda x: -1 * x._ss_d
    Q = pq(vs)
    # Index the undirected edges by both endpoints for quick neighbour lookup.
    edict = defaultdict(set)
    for e in es:
        edict[e.v1].add(e)
        edict[e.v2].add(e)
    for i in range(len(vs)):
        # min path to u is determined at end of loop
        u = Q.pop()
        for e in edict[u]:
            v = e.v1 if e.v1 != u else e.v2
            relax(u, v, e, Q)
        # Optional early exit once the target's distance is final (returns None;
        # callers such as min_path then read the _ss_d / _ss_edge attributes).
        if u == stop:
            return
    result = [v._ss_d for v in vs]
    return result
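# --- Self-contained reference sketch (added for clarity; not part of the original record) ---
# The function above relies on helpers not shown in this record (initialize_single_source,
# relax, the `pq` priority queue, and vertex/edge objects carrying _ss_d / _ss_edge), so it
# cannot run on its own. The independent sketch below shows the same algorithm on a plain
# adjacency dict using heapq; it is an illustrative alternative, not the author's code.
import heapq

def dijkstra_ref(adj, s):
    # adj maps each vertex to a list of (neighbour, edge_weight) pairs.
    dist = {u: float("inf") for u in adj}
    dist[s] = 0
    heap = [(0, s)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue  # stale queue entry; a shorter path to u was already settled
        for v, w in adj[u]:
            if d + w < dist[v]:
                dist[v] = d + w
                heapq.heappush(heap, (dist[v], v))
    return dist

# Example: dijkstra_ref({"a": [("b", 2), ("c", 5)], "b": [("c", 1)], "c": []}, "a")
# returns {"a": 0, "b": 2, "c": 3}.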
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def min_path(vs, es, source, target):\n dijkstra(vs, es, source, stop = target)\n test = target\n result = []\n while test != source:\n e = test._ss_edge\n result.append(e)\n test = e.v1 if e.v1 != test else e.v2\n assert test == source and test._ss_edge is None\n return result[::-1]",
"def dijkstra(self, source=None, destination=None):\n for vertex in self.vertices():\n vertex.d = sys.maxint\n if not source:\n source = self.vertices()[0]\n q = simply_python.data_structures.FIFO_dict()\n source.d = 0\n q.append(source)\n while not q.isempty():\n source = q.pop()\n print source\n print source.d\n d = source.d\n for out_vertex in self.out_vertices(source):\n if out_vertex.d == sys.maxint:\n out_vertex.d = d + 1\n q.append(out_vertex)\n if out_vertex == destination:\n return out_vertex.d\n return d",
"def shortest_route(self, src, dest):\n\n # Dijkstra with unusual start condition to prevent src -> src == 0 distance\n x_in = set()\n a = defaultdict(lambda: float('inf'))\n v = self.V.copy()\n\n for node, cost in self.G[src].items():\n a[node] = cost\n x_in.add(node)\n v.remove(node)\n\n while x_in != self.V:\n mn = float('inf')\n new = None\n for x in x_in:\n for node, cost in self.G[x].items():\n if node in v:\n if (a[x] + cost) < mn: # optimize large/dense G with pri. q\n mn = a[x] + cost\n new = (x, node, cost)\n if new is None:\n break\n x, node, cost = new\n x_in.add(node)\n v.remove(node)\n a[node] = a[x] + cost\n return a[dest]",
"def dijkstra(self, s, t):\n l = []\n dist = {}\n prev = {}\n discovered = {}\n for v in self.get_vertices():\n dist[v] = float('inf')\n prev[v] = None\n discovered[v] = False\n dist[s] = 0\n l.append((s, dist[s]))\n while len(l) != 0:\n u = min(l, key=lambda x: x[1])\n l.remove(u)\n u = u[0]\n discovered[u] = True\n if u == t:\n break\n for v in self.get_adjacent_vertices_by_vertex(u):\n if not discovered[v]:\n alt = dist[u] + self.get_edge((u, v)).attr[\"WEIGHT\"]\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n l.append((v, dist[v]))\n # Create a graph according to visited nodes store in prev array\n u = t\n g = Graph(attr={DIRECTED: True})\n while u is not None:\n g.add_vertex(vertex.Vertex(u, {\"WEIGHT\": dist[u]}))\n if prev[u] is not None:\n g.add_vertex(vertex.Vertex(prev[u], {\"WEIGHT\": dist[prev[u]]}))\n g.add_edge(edge.Edge(prev[u], u))\n u = prev[u]\n else:\n break\n return g",
"def __dikjstra(self, start_node):\n visited = []\n unvisited = [x for x in self.__node]\n shortest_dist_from_start_node = 0\n current_node = start_node\n\n current_node.setShortestDist(shortest_dist_from_start_node)\n\n while current_node:\n #check unvisited neighbor\n for neighbor_node, distance in current_node.getNeighbors().items():\n #print(neighbor_node.getId(), distance) troubleshoot je ni\n if neighbor_node in visited:\n continue\n\n #add up shortest_dist_from_start_node with distance from neighbor distance\n calc_dist = shortest_dist_from_start_node + distance\n\n if calc_dist < neighbor_node.getShortestDist():\n neighbor_node.setShortestDist(calc_dist)\n neighbor_node.setPrevNode(current_node)\n\n # add current node to visited array\n visited.append(current_node)\n unvisited.remove(current_node)\n \n #update next node and next shortest distance\n next_shortest_dist_from_start_node = inf\n next_node = None\n\n for unvisited_node in unvisited:\n if unvisited_node.getShortestDist() < next_shortest_dist_from_start_node:\n next_shortest_dist_from_start_node = unvisited_node.getShortestDist()\n next_node = unvisited_node\n\n # update current node and shortest distance from start vertex\n if next_node:\n current_node = next_node\n shortest_dist_from_start_node = next_shortest_dist_from_start_node\n #if there are left over unvisited node\n else: \n if unvisited:\n current_node = unvisited[0]\n else:\n current_node = None",
"def getShortestPath(self, src, dest):\n vertices = self.floorGraph.getVertList()\n unvisitedQueue = []\n srcPath = Path()\n srcPath.addNode(src)\n srcPath.pathValue = 0\n unvisitedQueue.append(srcPath)\n connections = self.floorGraph.getVertex(src).getConnections()\n #initialisez distances\n for vertex in vertices:\n newPath = Path()\n newPath.nodeList = list(srcPath.nodeList)\n newPath.addNode(vertex)\n if self.floorGraph.getVertex(vertex) in connections:\n newPath.pathValue = self.floorGraph.getVertex(src).getWeight(self.floorGraph.getVertex(vertex))\n unvisitedQueue.append(newPath)\n else:\n newPath.pathValue = math.inf\n self.shortestDistanceMap[src+vertex] = newPath\n # updates distances as per shorter routes\n while len(unvisitedQueue) is not 0:\n unvisitedQueue = sorted(unvisitedQueue, key=functools.cmp_to_key(compareNodes))\n chkPath = unvisitedQueue.pop(0)\n chkNode = chkPath.nodeList[len(chkPath.nodeList)-1]\n for vertex in vertices:\n if(self.floorGraph.getVertex(vertex) in self.floorGraph.getVertex(chkNode).getConnections()):\n newWeight = chkPath.pathValue + self.floorGraph.getVertex(chkNode).getWeight(self.floorGraph.getVertex(vertex))\n if(newWeight < self.shortestDistanceMap[src+vertex].pathValue):\n self.shortestDistanceMap[src+vertex].pathValue = newWeight\n self.shortestDistanceMap[src+vertex].nodeList = list(chkPath.nodeList)\n self.shortestDistanceMap[src+vertex].nodeList.append(vertex)\n newPath = Path()\n newPath.nodeList = list(self.shortestDistanceMap[src+vertex].nodeList)\n newPath.pathValue = newWeight\n unvisitedQueue.append(newPath)\n print(self.shortestDistanceMap[src+dest].nodeList)\n print(self.shortestDistanceMap[src+dest].pathValue)",
"def single_dijkstra(graph, start, edge_weight_name):\r\n distances = []\r\n for x in start:\r\n try:\r\n value_set = nx.single_source_dijkstra_path_length(graph, source=x, weight=edge_weight_name)\r\n except nx.NetworkXNoPath:\r\n pass\r\n for key in value_set:\r\n\r\n distances.append([x,key,value_set[key]])\r\n return distances",
"def dijkstra(adj, cost, s, t):\n\n V = range(len(adj)) # set of nodes, sequentially numbered\n # Note!!: this is not entirely general - there is no quarantee that\n # the graph node list is sequentially numbered from 0 to n-1\n\n # for all u∈V:\n # dist[u] ← ∞, prev[u] ← nil\n # dist[v] will be an upper bound on the actual distance from s to v.\n dist = [approxInf for u in V] # initialize dist to completely unknown for all u∈V\n prev = [None for u in V]\n # visited = [False for u in V] # this is represented as dist[u] = infinite\n\n dist[s] = 0 # zero distance to start node\n\n\n # H ← MakeQueue(V ) {dist-values as keys} # this is the Unknown region, not(R)\n # the set of unknown (unvisited, or not fully visited) vertices\n H = make_queue(V) #, dist)\n\n while len(H) > 0: # H, set of unknown vertices is not empty:\n # On each iteration we take a vertex outside of R (in H) with the minimal dist-value,\n # add it to R, and relax all its outgoing edges.\n u = extract_min(H, dist) # [u, d] = extract_min(H)\n # Lemma: When a node u is selected via ExtractMin, dist[u] = d(S,u), actual minimum distance.\n # First node to be extracted will be the source s (since dist[s]==0)\n # Should we stop early if min node u == t (t is moved to known set R before unknown H is exhausted)?\n for i in range(len(adj[u])): # for all (u,v) ∈ E: Relax(u,v) # relax all _outgoing_ edges from u\n # edge relaxation procedure for an edge (u,v) just checks whether\n # going from s to v through u improves the current value of dist[v].\n v = adj[u][i] # v in adj[u]\n if dist[v] > (dist[u] + cost[u][i]): # + w(u,v):\n dist[v] = dist[u] + cost[u][i] # update the distance\n prev[v] = u # update the predecessor node\n # ChangePriority(H , v , dist[v]) # rather than priority queue, update dist and scan array for min dist\n\n return dist[t]",
"def dijkstra(self, src):\n unvisited = MinHeap() \n visited = set()\n dist = []\n for v_id in range(0, self.v):\n if v_id != src:\n dist.append(sys.maxsize)\n unvisited.insert((sys.maxsize, v_id))\n else:\n unvisited.insert((0, src))\n dist.append(0)\n while unvisited.queue:\n min_node = unvisited.extract_min()\n visited.add(min_node)\n for nbr,w in self.graph[min_node]:\n new_dist = w + dist[min_node]\n if new_dist < dist[nbr]:\n unvisited.decrease_key((dist[nbr], nbr), new_dist)\n dist[nbr] = new_dist \n return dist",
"def dijkstra(G, s):\n G.init_ss(s)\n S = [] # Vertices whose final shortest path weight has been determined\n\n # In OOP version, 0th element of each list is the Vertex (not ListVertex)\n V = [G.new_adj[v][0] for v in range(0, len(G.new_adj))] # Add all vertices to queue initially\n print(V)\n\n # Heap uses distance estimates of vertices sa the key for \"extract_root\"\n # values: vertices, keys: distance estimates\n pq = hs.PriorityQueue(V) # Priority Queue/Heap - keyed by `d` distance estimate\n pq.build_heap(\"min\") # Applies only to the keys `G.d`\n # pq.print_heap() # 1-indexed\n # pq.print_heap_idx() # 1-indexed\n \n while pq.heap_size is not 0:\n u = pq.extract_root(\"min\") # Min-heap Extract-Min with lowest distance estimate\n # print(f\"u: {u.u}, min_d: {u.d}\")\n # print(f\"G.d[u]: {G.d[u]}\")\n S.append(u.u)\n # print(f\"---------{S}---------\")\n for v_idx in range(1, len(G.new_adj[u.u])): # Begin at 1 since our Vertex `u` is at idx 0 in the same list as ListVertices `v`\n # print(G.new_adj[u.u][v_idx])\n G.relax(u.u, G.new_adj[u.u][v_idx].v) # w array is contained with the Graph class\n\n return",
"def getPath(\n self,\n source,\n dest,\n as_nodes=False,\n ):\n\n self.dist = {} # A map from nodes to their labels (float)\n self.predecessor = {} # A map from a node to a node\n\n # Initialize the distance labels to \"infinity\"\n\n vertices = self.g.nodes()\n for vertex in vertices:\n self.dist[vertex] = self.inf\n self.predecessor[vertex] = source\n\n # Further set up the distance from the source to itself and\n # to all one hops away.\n\n self.dist[source] = 0.0\n if self.g.is_directed():\n outEdges = self.g.out_edges([source])\n else:\n outEdges = self.g.edges([source])\n for edge in outEdges:\n self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt]\n\n s = set(vertices)\n s.remove(source)\n currentMin = self._findMinNode(s)\n if currentMin == None:\n return None\n s.remove(currentMin)\n while currentMin != dest and len(s) != 0 and currentMin != None:\n if self.g.is_directed():\n outEdges = self.g.out_edges([currentMin])\n else:\n outEdges = self.g.edges([currentMin])\n for edge in outEdges:\n opposite = edge[1]\n if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] \\\n < self.dist[opposite]:\n self.dist[opposite] = self.dist[currentMin] \\\n + self.g[edge[0]][edge[1]][self.wt]\n self.predecessor[opposite] = currentMin\n s.add(opposite)\n\n currentMin = self._findMinNode(s)\n\n # print \"Current min node {}, s = {}\".format(currentMin, s)\n\n if currentMin == None:\n return None\n s.remove(currentMin)\n\n # Compute the path as a list of edges\n\n currentNode = dest\n predNode = self.predecessor.get(dest)\n node_list = [dest]\n done = False\n path = []\n while not done:\n path.append((predNode, currentNode))\n currentNode = predNode\n predNode = self.predecessor[predNode]\n node_list.append(currentNode)\n done = currentNode == source\n node_list.reverse()\n if as_nodes:\n return node_list\n else:\n return path",
"def djikstra(self, source, target):\r\n dist = {}\r\n prev = {}\r\n set_q = {}\r\n for vertex in self.vertices.keys():\r\n dist[vertex] = sys.maxsize\r\n prev[vertex] = None\r\n set_q[vertex] = dist[vertex]\r\n dist[source] = 0\r\n set_q[source] = 0\r\n while set_q:\r\n vertex_u = min(set_q, key=set_q.get)\r\n if vertex_u == target:\r\n break\r\n set_q.pop(vertex_u)\r\n for edge in self.edges[vertex_u]:\r\n alt = dist[vertex_u] + edge.distance\r\n if alt < dist[edge.destination]:\r\n dist[edge.destination] = alt\r\n set_q[edge.destination] = dist[edge.destination]\r\n prev[edge.destination] = vertex_u\r\n path = []\r\n vertex_u = target\r\n while prev[vertex_u]:\r\n path.insert(0, vertex_u)\r\n vertex_u = prev[vertex_u]\r\n path.insert(0, vertex_u)\r\n return path",
"def dijkstras(G,s,g,cost=(lambda v,w:1),verbose=1):\n if not callable(g):\n gtest = lambda x,goal=g: x==g\n else:\n gtest = g\n d = dict((v,float('inf')) for v in G.nodes())\n p = dict((v,None) for v in G.nodes())\n d[s] = 0\n Q = [(0,s)] #each element is a tuple (c,v) with c=cost from start, v=vertex\n nnodes = 0\n while len(Q) > 0:\n c,v = heapq.heappop(Q) #get the element in the queue with the least value of c\n nnodes += 1\n if gtest(v):\n #found a path\n if verbose: print(\"Dijkstra's succeeded in\",nnodes,\"iterations\")\n return predecessor_traverse(p,s,v),d,p\n for w in G.neighbors(v):\n dcand = d[v] + cost(v,w) #this is the cost of going through v to w\n if dcand < d[w]:\n #going through v is optimal\n #if the predecessor of w is not None, then we'll have to adjust the heap\n if p[w] is not None:\n Q = [(c,x) for (c,x) in Q if x is not w]\n heapq.heapify(Q)\n d[w] = dcand\n p[w] = v\n #put w on the queue\n heapq.heappush(Q,(dcand,w))\n #no path found\n if verbose: print(\"Dijkstra's failed in\",nnodes,\"iterations\")\n return None,d,p",
"def djikstra(nodes,links,source,dest):\n route = []\n vertexes = []\n for v in nodes:\n v.set_dist(float(\"inf\"))\n v.set_prev(None)\n heappush(vertexes, v)\n source.set_dist(0)\n heapify(vertexes)\n while vertexes:\n unsorted = False\n u = heappop(vertexes)\n if u == dest:\n break #because we found the destination no need to look further\n for v in u.get_links():\n if v.get_enabled():\n alt = u.get_dist() + 1\n target = v.get_target()\n if alt < target.get_dist():\n target.set_dist(alt)\n target.set_prev(u)\n unsorted = True #just a variable that help check if changes were made to the objects inside the heap\n if unsorted: #because i updated the variables but the heap wasn't maintained, i just heapify it again\n heapify(vertexes) \n #this is the part that saves the distance and route \n if dest.get_dist() == float(\"inf\"): #if there is no route then we just return None\n return None\n u = dest\n while u.get_prev() != None:\n v = u.get_prev()\n route.insert(0, v.get_specific_link(u)) \n u = v\n return route",
"def shortest_path_lengths(self, g, src):\n d = {} # d[v] is upper bound from s to v\n cloud = {} # map reachable v to its d[v] value\n pq = AdaptableHeapPriorityQueue() # vertex v will have key d[v]\n pqlocator = {} # map from vertex to its pq locator\n\n # for each vertex v of the graph, add an entry to the priority queue, with\n # the source having distance 0 and all others having infinite distance\n for v in g.vertices():\n if v is src:\n d[v] = 0\n else:\n d[v] = float('inf') # syntax for positive infinity\n pqlocator[v] = pq.add(d[v], v) # save locator for future updates\n\n while not pq.is_empty():\n key, u = pq.remove_min()\n cloud[u] = key # its correct d[u] value\n del pqlocator[u] # u is no longer in pq\n for e in g.incident_edges(u): # outgoing edges (u,v)\n v = e.opposite(u)\n if v not in cloud:\n # perform relaxation step on edge (u,v)\n wgt = e.element()\n if d[u] + wgt < d[v]: # better path to v?\n d[v] = d[u] + wgt # update the distance\n pq.update(pqlocator[v], d[v], v) # update the pq entry\n\n return cloud # only includes reachable vertices",
"def dijkstra_tree(self, s):\n l = []\n dist = {}\n prev = {}\n discovered = {}\n g = Graph(attr={DIRECTED: True})\n g.add_vertex(vertex.Vertex(s, {\"WEIGHT\": 0}))\n for v in self.get_vertices():\n dist[v] = float('inf')\n prev[v] = None\n discovered[v] = False\n dist[s] = 0\n l.append((s, dist[s]))\n while len(l) != 0:\n u = min(l, key=lambda x: x[1])\n l.remove(u)\n u = u[0]\n discovered[u] = True\n for v in self.get_adjacent_vertices_by_vertex(u):\n if not discovered[v]:\n alt = dist[u] + self.get_edge((u, v)).attr[\"WEIGHT\"]\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n l.append((v, dist[v]))\n g.add_vertex(vertex.Vertex(v, {\"WEIGHT\": dist[v]}))\n g.add_edge(edge.Edge(u, v, {\"WEIGHT\": dist[v]}))\n\n return g",
"def shortest_path(graph, src, dest, modifiers):\r\n # Distances to source node\r\n distances = {vertex: float(\"inf\") for vertex in range(graph.num_vertices)}\r\n # Previous node in optimal path\r\n previous = {vertex: -1 for vertex in range(graph.num_vertices)}\r\n # Shortest path from source to source is 0\r\n distances[src] = 0\r\n # Initialize priority queue and vertex set\r\n pqueue = [(distances[src], src)]\r\n vertex_set = {src}\r\n\r\n while len(pqueue) != 0:\r\n vertex_added = False\r\n curr = heappop(pqueue)[1]\r\n vertex_set.remove(curr)\r\n for neighbor in graph.outgoing(curr):\r\n alt = distances[curr] + weight(neighbor, modifiers)\r\n other = neighbor.other(curr) # Opposite vertex\r\n if alt < distances[other]:\r\n distances[other] = alt\r\n previous[other] = curr\r\n if other not in vertex_set:\r\n vertex_added = True\r\n pqueue.append((alt, other))\r\n vertex_set.add(other)\r\n if vertex_added:\r\n heapify(pqueue)\r\n\r\n # Shortest path\r\n shortest_path = []\r\n shortest_path_distance = distances[dest]\r\n\r\n # Traverse previous[] to look for shortest path to target\r\n current_node = dest\r\n while previous[current_node] != -1:\r\n shortest_path.append(current_node)\r\n current_node = previous[current_node]\r\n if len(shortest_path) != 0:\r\n shortest_path.append(current_node)\r\n shortest_path.reverse()\r\n\r\n return shortest_path, shortest_path_distance",
"def shortestPath(self, source, target):\n dist = {}\n prev = {}\n q = []\n for y,a in enumerate(self.sm):\n for x,b in enumerate(self.sm[y]):\n dist[(x,y)] = sys.maxint\n prev[(x,y)] = None\n q.append((x,y))\n dist[source] = 0\n\n while len(q) is not 0:\n # find the node with minimum value (u)\n d = deepcopy(dist)\n while True:\n b = dict(map(lambda item: (item[1],item[0]), d.items()))\n u = b[min(b.keys())]\n if u not in q:\n d.pop(u)\n else:\n break\n\n if dist[u] == sys.maxint: # remaining nodes are inaccessible\n break\n\n q.remove(u)\n\n\n if u == target: # target found\n break\n\n for v in self.getNeighbors(u):\n alt = dist[u] + 1\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n\n s = []\n u = target\n while prev[u] is not None:\n s.append(u)\n u = prev[u]\n s.reverse()\n\n return s",
"def lazy_dijkstra_short_path_value(g, n, s, e):\n vis = [0] * n\n prev = [None] * n\n dist = [inf] * n\n dist[0] = 0\n pq = PriorityQueue()\n # note that priority queue sorts by the first value of a tuple by default\n pq.put((0, s))\n\n while pq.qsize() != 0:\n minValue, index = pq.get()\n vis[index] = 1\n # Optimization to ignore stale (dist, index) pairs\n if dist[index] < minValue:\n continue\n for edge in g[index]:\n # if we have visited that vertex then don't relax the edge\n if vis[edge.to]:\n continue\n newDist = dist[index] + edge.cost\n # update if we find a better solution\n if newDist < dist[edge.to]:\n prev[edge.to] = index\n dist[edge.to] = newDist\n pq.put((newDist, edge.to))\n # Significant optimization given that Dijsktra's algorithm is\n # a greedy algorithm. The value will not change as we visit future nodes.\n if index == e:\n return dist[e]\n return inf",
"def dijkstra_shortest_path(graph_object: Graph, start_node: int) -> List[float]:\n\n def init_distance(g: Graph, s: int) -> List[float]:\n d = [inf] * len(g) # type: List[float]\n d[s] = 0.0\n return d\n\n # Assign a key value to all vertices in the input graph.\n # Initial value is inf for all but the first one\n distance = init_distance(graph_object, start_node)\n priority_queue = MinHeap.build([(i, priority) for i, priority in enumerate(distance)])\n\n while priority_queue: # Priority queue has O(V) elements\n min_index = priority_queue.pop() # O(log(V))\n for edge in graph_object.graph[min_index]: # A graph has at most 2E Edges in adjacency list, so O(E)\n source, destination, edge_weight = edge\n if priority_queue.contains_item(destination) and distance[source] + edge_weight < distance[destination]:\n distance[destination] = distance[source] + edge_weight\n priority_queue.push(item=destination, priority=edge_weight) # O(log(V))\n\n return distance",
"def bellman_fords_shortest_path(graph: Graph[T], source_vertex_data: T) -> \\\n Tuple[bool, Dict[Vertex[T], int], Dict[Vertex[T], Vertex[T]]]:\n\n vertex_distance_mapping: Dict[Vertex[T], int] = defaultdict(lambda: maxsize) # vertex_weight_mapping\n vertex_parent_mapping: Dict[Vertex[T], Vertex[T]] = dict()\n source_vertex: Vertex[T] = graph.get_vertex(source_vertex_data)\n\n vertex_distance_mapping[source_vertex] = 0\n vertex_parent_mapping[source_vertex] = None\n\n # Relax all the edges (V-1)th time.\n # Why (V-1) times? - https://www.youtube.com/watch?v=-mOEd_3gTK0&feature=youtu.be&list=PLrmLmBdmIlpu2f2g8ltqaaCZiq6GJvl1j&t=785\n for i in range(0, len(graph.vertices)-1): # run it (V-1) times... for i=0: i<(V-1); i++\n relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping)\n\n # Relax all the edges for one more time(Vth time) to check if there is any -ve weight cycle present.\n has_negative_weight_cycle: bool = relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping,\n check_negative_weight_cycle=True)\n if has_negative_weight_cycle:\n return has_negative_weight_cycle, dict(), dict()\n\n return has_negative_weight_cycle, vertex_distance_mapping, vertex_parent_mapping",
"def dijkstra(start, vertex_list, line_list, vertex_labels, polygons):\n # create stack Q with all vertices including the arbitrary starting point\n Q = {**vertex_labels}\n Q[0] = start\n vertex_labels_with_start = {**Q}\n dist = {}\n prev = {}\n for key, val in Q.items():\n dist[key] = 1e10\n prev[key] = None\n # start has zero distance to itself\n dist[0] = 0\n while Q:\n min_ = 1e10\n curr_vertex = None\n # simulates priority queue (min heap) with for loop\n for v in Q.keys():\n if dist[v] < min_:\n curr_vertex = v\n min_ = dist[v]\n # curr_vertex = min(dist, key=dist.get)\n if curr_vertex is None:\n print(\"Target cannot be reached!\")\n break\n Q.pop(curr_vertex)\n invalid_point = False\n for poly in polygons:\n if inside_polygon(vertex_labels_with_start[curr_vertex], poly):\n invalid_point = True\n break\n if invalid_point:\n continue\n if curr_vertex == len(vertex_list):\n break\n _, vis_labels = visibility_graph(vertex_labels_with_start[curr_vertex], vertex_list, line_list)\n # Just implement dijkstra - need a way to mark vertices with labels\n for elem in vis_labels:\n if elem in Q:\n alt = dist[curr_vertex] + np.sqrt(len2((diff_(vertex_labels_with_start[curr_vertex],\n vertex_labels_with_start[elem]))))\n if alt < dist[elem]:\n dist[elem] = alt\n prev[elem] = curr_vertex\n return dist, prev",
"def dijkstra(edges):\n\n # Initializing heap\n minimizingCandidateEdges = Heap(1 * len(edges), 4)\n\n # No vertices in candidate edges heap initially\n verticesInCurrentHeap = []\n\n # Initiaizing vertices in current heap with potential candidates from\n # source vertex initially\n for directedEdge in edges:\n # From conquered(start vertex) to unconquered territory\n # (all expect start vertex)\n if (directedEdge.startVertex == startVertex) and (directedEdge.endVertex != startVertex):\n verticesInCurrentHeap.append(directedEdge.endVertex)\n dijkstraCriterion = distances[ directedEdge.startVertex ] + directedEdge.edgeWeight\n directedEdge.setDijkstraCriterion(dijkstraCriterion)\n minimizingCandidateEdges.insertElement( directedEdge, minimizingCandidateEdges.getI() )\n else:\n continue\n\n # Variable to keep track of last removed vertex\n lastRemovedVertex = None\n\n # While all vertices are not conquered\n while len(visitedVertices) != noOfVertices:\n\n # Getting minimum candidate edge from heap\n minimumDijkstraCriterionEdge = minimizingCandidateEdges.extractMinimum()\n # Updating minimum distance in distances answer accordingly\n distances[ minimumDijkstraCriterionEdge.endVertex ] = minimumDijkstraCriterionEdge.dijkstraCriterion\n # Removing that edge\n minimizingCandidateEdges.removeMinimum()\n\n # Keeping track of indices to remove due to that edge\n indicesToRemove = []\n for i in range(minimizingCandidateEdges.i):\n edge = minimizingCandidateEdges.heap[i]\n # Continue if \"NaN\"\n if type(edge) == str:\n continue\n # Remove if this condition holds true\n if edge.endVertex == minimumDijkstraCriterionEdge.endVertex:\n indicesToRemove.append(i)\n\n # Removing all those edges from heap\n for i in indicesToRemove:\n minimizingCandidateEdges.removeMinimum(i)\n\n # Updating last removed vertex, conquered territory, unconquered\n # territory and vertices in current heap\n lastRemovedVertex = minimumDijkstraCriterionEdge.endVertex\n visitedVertices.append(lastRemovedVertex)\n notVisitedVertices.remove(lastRemovedVertex)\n verticesInCurrentHeap.remove(lastRemovedVertex)\n\n # Updating heap edges and adding new potential edges\n for directedEdge in edges:\n\n # From last removed vertex to unconquered territory\n if (directedEdge.startVertex == lastRemovedVertex) and (directedEdge.endVertex in notVisitedVertices):\n\n # Required variables\n flag = True\n earlierDijkstraCriterion = None\n newDijkstraCriterion = None\n\n # If not in heap, then add edge\n if (directedEdge.endVertex not in verticesInCurrentHeap):\n\n verticesInCurrentHeap.append(directedEdge.endVertex)\n dijkstraCriterion = distances[ directedEdge.startVertex ] + directedEdge.edgeWeight\n directedEdge.setDijkstraCriterion(dijkstraCriterion)\n minimizingCandidateEdges.insertElement( directedEdge, minimizingCandidateEdges.getI() )\n\n # Else if present in heap, check for current dijkstra\n # criterion and new dijkstra criterion, update if\n # required\n else:\n\n # New possible dijkstra criterion\n newDijkstraCriterion = distances[ directedEdge.startVertex ] + directedEdge.edgeWeight\n removeIndex = None\n\n # Getting earlier dijkstra edge and its older criterion\n for i in range(minimizingCandidateEdges.i):\n edge = minimizingCandidateEdges.heap[i]\n if type(edge) == str:\n continue\n if edge.endVertex == directedEdge.endVertex:\n earlierDijkstraCriterion = edge.dijkstraCriterion\n removeIndex = i\n break\n\n # If found such an index\n if removeIndex != None:\n\n # Update if new dijkstra criterion is lesser than\n # older one\n if 
newDijkstraCriterion < earlierDijkstraCriterion:\n minimizingCandidateEdges.removeMinimum(removeIndex)\n directedEdge.setDijkstraCriterion(newDijkstraCriterion)\n minimizingCandidateEdges.insertElement(directedEdge, minimizingCandidateEdges.getI())\n\n # Else if not found such an index\n else:\n # If earlier dijkstra criterion is none, that is the\n # edge doesn't exist in heap, then add the edge\n if earlierDijkstraCriterion == None:\n directedEdge.setDijkstraCriterion(newDijkstraCriterion)\n minimizingCandidateEdges.insertElement(directedEdge, minimizingCandidateEdges.getI())\n\n # If earlier dijkstra criterion is smaller and the\n # edge doesn't exist in heap, then add the edge\n elif newDijkstraCriterion > earlierDijkstraCriterion:\n directedEdge.setDijkstraCriterion(earlierDijkstraCriterion)\n minimizingCandidateEdges.insertElement(directedEdge, minimizingCandidateEdges.getI())",
"def dijkstra(graph, source):\n\n if not isinstance(graph, WeightedGraph):\n raise TypeError('dijkstra_heap(graph, source): graph must be a WeightedGraph object')\n if source not in graph.Keys:\n raise ValueError('dijkstra_heap(graph, source): source must be a key of the graph but {} is not a key of the graph'.format(source))\n \n # initialize framework\n init_sssp(graph)\n graph.Dictionary[source].distance = 0\n\n H = prepare_heap(graph, vertice_order)\n\n while not H.is_empty():\n vertice_min = H.remove_min()\n for _, vertice, weight in vertice_min.adj_list:\n relax(H, vertice_min, graph.Dictionary[vertice], weight)",
"def dijkstra(self,G, a, z):\n assert a in G\n assert z in G\n \n # Definicion de infinito como un valor mayor \n # al doble de suma de todos los pesos\n Inf = 0\n for u in G:\n for v, w in G[u]:\n Inf += w\n \n # Inicializacion de estructuras auxiliares:\n # L: diccionario vertice -> etiqueta\n # S: conjunto de vertices con etiquetas temporales\n # A: vertice -> vertice previo (en camino longitud minima)\n L = dict([(u, Inf) for u in G]) #py3: L = {u:Inf for u in G}\n L[a] = 0\n S = set([u for u in G]) #py3: S = {u for u in G}\n A = { }\n \n # Funcion auxiliar, dado un vertice retorna su etiqueta\n # se utiliza para encontrar el vertice the etiqueta minima\n def W(v):\n return L[v]\n # Iteracion principal del algoritmo de Dijkstra\n while z in S:\n u = min(S, key=W)\n S.discard(u)\n for v, w in G[u]:\n if v in S:\n if L[u] + w < L[v]:\n L[v] = L[u] + w\n A[v] = u\n \n # Reconstruccion del camino de longitud minima\n P = []\n u = z\n while u != a:\n P.append(u)\n u = A[u]\n P.append('a')\n P.reverse()\n \n # retorna longitud minima y camino de longitud minima\n return L[z], P",
"def dijkstra(graph, src, dest):\n\n distances = {}\n predecessors = {}\n Q = {}\n\n # a few sanity checks\n if src not in graph:\n raise TypeError(\"The root of the shortest path tree cannot be found\")\n if dest not in graph:\n raise TypeError(\"The target of the shortest path cannot be found\")\n for v in graph:\n distances[v] = float(\"inf\")\n predecessors[v] = None\n Q[v] = float(\"inf\")\n\n distances[src] = 0\n Q[src] = 0\n\n while Q:\n u = min(Q, key=Q.get)\n\n del Q[u]\n\n for neighbor in graph[u]:\n\n if neighbor in Q:\n\n new_distance = distances[u] + graph[u][neighbor]\n if new_distance < distances.get(neighbor):\n distances[neighbor] = new_distance\n Q[neighbor] = new_distance\n predecessors[neighbor] = u\n return (distances, predecessors)",
"def cost_distance(e):\n # Make sure we have a proper edge with two vertices\n if len(e) != 2:\n raise ValueError\n\n a = V_coord[e[0]]\n b = V_coord[e[1]]\n\n # Return the distance between two points\n return distance(a, b)",
"def dijkstra(graph, start):\n unvisited = []\n weight = {}\n prev = {}\n time = {}\n imp = {}\n cost = {}\n dist = {}\n for node in graph.keys():\n # add all nodes to 'unvisited' with no previous node and a weight of infinity\n unvisited.append(node)\n weight[node] = float('inf')\n time[node] = float('inf')\n imp[node] = float('inf')\n cost[node] = float('inf')\n dist[node] = float('inf')\n prev[node] = None\n\n # set the starting distance to be 0\n weight[start] = 0\n time[start] = 0\n imp[start] = 0\n cost[start] = 0\n dist[start] = 0\n\n # iterate until no node is left unvisited\n while len(unvisited) > 0:\n # get the lowest distance that has not yet been visited\n curr_node = min(weight.viewkeys() & unvisited, key=weight.get)\n # mark the node as visited\n unvisited.remove(curr_node)\n # iterate through each neighbor of the current node\n for neighbor in graph[curr_node]:\n # calculate distance to that node from this node\n tmp_weight = weight[curr_node] + neighbor[WEIGHT]\n tmp_time = time[curr_node] + neighbor[TIME]\n tmp_imp= imp[curr_node] + neighbor[IMP]\n tmp_cost = cost[curr_node] + neighbor[COST]\n tmp_dist = dist[curr_node] + neighbor[DISTANCE]\n # if this distance is less than the one already stored at that node\n if tmp_weight < weight[neighbor[NEXT_NODE]]:\n # we store this distance as its distance,\n weight[neighbor[NEXT_NODE]] = tmp_weight\n time[neighbor[NEXT_NODE]] = tmp_time\n imp[neighbor[NEXT_NODE]] = tmp_imp\n cost[neighbor[NEXT_NODE]] = tmp_cost\n dist[neighbor[NEXT_NODE]] = tmp_dist\n # and this node as its previous node\n prev[neighbor[NEXT_NODE]] = curr_node\n\n return weight, prev, time, imp, cost, dist",
"def dijkstra(graph,start,goal):\n shortest_distance = {}\n predecessor = {}\n unseenNodes = graph\n path = []\n for node in unseenNodes:\n shortest_distance[node] = infinity\n shortest_distance[start] = 0\n \n while unseenNodes:\n minNode = None\n for node in unseenNodes:\n if minNode is None:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n \n for childNode, weight in graph[minNode].items():\n \n if weight + shortest_distance[minNode] < shortest_distance[childNode]:\n shortest_distance[childNode] = weight + shortest_distance[minNode]\n predecessor[childNode] = minNode\n unseenNodes.pop(minNode)\n \n currentNode = goal\n while currentNode != start:\n try:\n path.insert(0,currentNode)\n currentNode = predecessor[currentNode]\n except KeyError:\n print('Path not reachable')\n break\n path.insert(0,start)\n if shortest_distance[goal] != infinity:\n return path",
"def dijkstra(self,start):\n path_weight = {i : float('inf') for i in range(self.n)}\n path_weight[start] = 0\n previous = {i : float('nan') for i in range(self.n)}\n remaining = PriorityQueue()\n for node,priority in path_weight.items():\n remaining.put((priority,node))\n\n while not remaining.empty():\n priority,node = remaining.get()\n for tgt,weight in self.edges[node].items():\n possibleNewWeight = path_weight[node] + weight\n if (possibleNewWeight < path_weight[tgt]):\n path_weight[tgt] = possibleNewWeight\n previous[tgt] = node\n \n return path_weight, previous"
] |
[
"0.737509",
"0.7104303",
"0.70790184",
"0.7037781",
"0.70306695",
"0.70016694",
"0.6955521",
"0.69549",
"0.6942236",
"0.693058",
"0.6905992",
"0.6871426",
"0.68237275",
"0.67962337",
"0.6783549",
"0.67635256",
"0.67245734",
"0.67188764",
"0.6700067",
"0.6671806",
"0.6660965",
"0.6622939",
"0.65865195",
"0.6563195",
"0.65603614",
"0.6530048",
"0.65239525",
"0.6522652",
"0.65118754",
"0.6492369"
] |
0.73207426
|
1
|
Return the min path from source to target on the graph described by the vertices vs and edges es
|
def min_path(vs, es, source, target):
    # Run single-source Dijkstra, stopping early once `target` is settled.
    # It is expected to leave the shortest-path-tree edge used to reach each
    # vertex in that vertex's `_ss_edge` attribute (None for the source).
    dijkstra(vs, es, source, stop=target)
    # Walk the tree backwards from target to source, collecting the edges.
    test = target
    result = []
    while test != source:
        e = test._ss_edge
        result.append(e)
        test = e.v1 if e.v1 != test else e.v2
    assert test == source and test._ss_edge is None
    # Edges were collected target-to-source; reverse to get source-to-target order.
    return result[::-1]
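A brief usage sketch follows; it is not part of the original function. Here vs, es, and dijkstra() are assumed to come from the surrounding module (dijkstra() populating _ss_edge as described above), and the per-edge weight attribute is an assumption used only for illustration.

# Hypothetical call site: dijkstra() must have set _ss_edge on every vertex
# reachable from `source` for the traceback in min_path() to reach it.
path_edges = min_path(vs, es, source, target)
# path_edges is ordered source -> target; 'weight' is an assumed edge field.
total_cost = sum(e.weight for e in path_edges)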
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def shortestPath(self, source, target):\n dist = {}\n prev = {}\n q = []\n for y,a in enumerate(self.sm):\n for x,b in enumerate(self.sm[y]):\n dist[(x,y)] = sys.maxint\n prev[(x,y)] = None\n q.append((x,y))\n dist[source] = 0\n\n while len(q) is not 0:\n # find the node with minimum value (u)\n d = deepcopy(dist)\n while True:\n b = dict(map(lambda item: (item[1],item[0]), d.items()))\n u = b[min(b.keys())]\n if u not in q:\n d.pop(u)\n else:\n break\n\n if dist[u] == sys.maxint: # remaining nodes are inaccessible\n break\n\n q.remove(u)\n\n\n if u == target: # target found\n break\n\n for v in self.getNeighbors(u):\n alt = dist[u] + 1\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n\n s = []\n u = target\n while prev[u] is not None:\n s.append(u)\n u = prev[u]\n s.reverse()\n\n return s",
"def shortest_path(self, source, target):\r\n key = self.d.keys()\r\n #check that endpoints are in graph\r\n if source not in key or target not in key:\r\n raise KeyError(str(source) + \" and \" + str(target) + \" must be in graph\")\r\n #initialize V,Q and M\r\n V = []\r\n vis = dict()\r\n Q = deque()\r\n Q.append(source)\r\n M = set(source)\r\n #while target has not been visited\r\n while target not in M:\r\n #take first element of Q\r\n current = Q.popleft()\r\n #add element to visited\r\n V.append(current)\r\n neighbors = self.d[current]\r\n #for each neighbor of element\r\n for n in neighbors:\r\n #if element has not been checked, add it to queue\r\n #also save traveled edge in visited\r\n if n not in M:\r\n Q.append(n)\r\n vis.update({n:current})\r\n M.add(n)\r\n L = [target]\r\n #reverse the order of the traveled edges\r\n while L[-1] in vis.keys():\r\n L.append(vis[L[-1]])\r\n return L[::-1]",
"def shortest_path(graph, source, target):\n return shortest_path_recursive(graph, source, target, set())",
"def getPath(\n self,\n source,\n dest,\n as_nodes=False,\n ):\n\n self.dist = {} # A map from nodes to their labels (float)\n self.predecessor = {} # A map from a node to a node\n\n # Initialize the distance labels to \"infinity\"\n\n vertices = self.g.nodes()\n for vertex in vertices:\n self.dist[vertex] = self.inf\n self.predecessor[vertex] = source\n\n # Further set up the distance from the source to itself and\n # to all one hops away.\n\n self.dist[source] = 0.0\n if self.g.is_directed():\n outEdges = self.g.out_edges([source])\n else:\n outEdges = self.g.edges([source])\n for edge in outEdges:\n self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt]\n\n s = set(vertices)\n s.remove(source)\n currentMin = self._findMinNode(s)\n if currentMin == None:\n return None\n s.remove(currentMin)\n while currentMin != dest and len(s) != 0 and currentMin != None:\n if self.g.is_directed():\n outEdges = self.g.out_edges([currentMin])\n else:\n outEdges = self.g.edges([currentMin])\n for edge in outEdges:\n opposite = edge[1]\n if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] \\\n < self.dist[opposite]:\n self.dist[opposite] = self.dist[currentMin] \\\n + self.g[edge[0]][edge[1]][self.wt]\n self.predecessor[opposite] = currentMin\n s.add(opposite)\n\n currentMin = self._findMinNode(s)\n\n # print \"Current min node {}, s = {}\".format(currentMin, s)\n\n if currentMin == None:\n return None\n s.remove(currentMin)\n\n # Compute the path as a list of edges\n\n currentNode = dest\n predNode = self.predecessor.get(dest)\n node_list = [dest]\n done = False\n path = []\n while not done:\n path.append((predNode, currentNode))\n currentNode = predNode\n predNode = self.predecessor[predNode]\n node_list.append(currentNode)\n done = currentNode == source\n node_list.reverse()\n if as_nodes:\n return node_list\n else:\n return path",
"def shortest_path(graph, src, dest, modifiers):\r\n # Distances to source node\r\n distances = {vertex: float(\"inf\") for vertex in range(graph.num_vertices)}\r\n # Previous node in optimal path\r\n previous = {vertex: -1 for vertex in range(graph.num_vertices)}\r\n # Shortest path from source to source is 0\r\n distances[src] = 0\r\n # Initialize priority queue and vertex set\r\n pqueue = [(distances[src], src)]\r\n vertex_set = {src}\r\n\r\n while len(pqueue) != 0:\r\n vertex_added = False\r\n curr = heappop(pqueue)[1]\r\n vertex_set.remove(curr)\r\n for neighbor in graph.outgoing(curr):\r\n alt = distances[curr] + weight(neighbor, modifiers)\r\n other = neighbor.other(curr) # Opposite vertex\r\n if alt < distances[other]:\r\n distances[other] = alt\r\n previous[other] = curr\r\n if other not in vertex_set:\r\n vertex_added = True\r\n pqueue.append((alt, other))\r\n vertex_set.add(other)\r\n if vertex_added:\r\n heapify(pqueue)\r\n\r\n # Shortest path\r\n shortest_path = []\r\n shortest_path_distance = distances[dest]\r\n\r\n # Traverse previous[] to look for shortest path to target\r\n current_node = dest\r\n while previous[current_node] != -1:\r\n shortest_path.append(current_node)\r\n current_node = previous[current_node]\r\n if len(shortest_path) != 0:\r\n shortest_path.append(current_node)\r\n shortest_path.reverse()\r\n\r\n return shortest_path, shortest_path_distance",
"def findPath(self, source, destination, vertices, edges):\n connectedNodes = [vertices[index] for index, edge in enumerate(edges[source['index']]) if edge == 1]\n for node in connectedNodes:\n childConnectedNodes = [vertices[index] for index, edge in enumerate(edges[node['index']]) if edge == 1]\n for childNode in childConnectedNodes:\n if childNode['value'] == destination['value']:\n return node\n return None",
"def bellman_fords_shortest_path(graph: Graph[T], source_vertex_data: T) -> \\\n Tuple[bool, Dict[Vertex[T], int], Dict[Vertex[T], Vertex[T]]]:\n\n vertex_distance_mapping: Dict[Vertex[T], int] = defaultdict(lambda: maxsize) # vertex_weight_mapping\n vertex_parent_mapping: Dict[Vertex[T], Vertex[T]] = dict()\n source_vertex: Vertex[T] = graph.get_vertex(source_vertex_data)\n\n vertex_distance_mapping[source_vertex] = 0\n vertex_parent_mapping[source_vertex] = None\n\n # Relax all the edges (V-1)th time.\n # Why (V-1) times? - https://www.youtube.com/watch?v=-mOEd_3gTK0&feature=youtu.be&list=PLrmLmBdmIlpu2f2g8ltqaaCZiq6GJvl1j&t=785\n for i in range(0, len(graph.vertices)-1): # run it (V-1) times... for i=0: i<(V-1); i++\n relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping)\n\n # Relax all the edges for one more time(Vth time) to check if there is any -ve weight cycle present.\n has_negative_weight_cycle: bool = relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping,\n check_negative_weight_cycle=True)\n if has_negative_weight_cycle:\n return has_negative_weight_cycle, dict(), dict()\n\n return has_negative_weight_cycle, vertex_distance_mapping, vertex_parent_mapping",
"def djikstra(self, source, target):\r\n dist = {}\r\n prev = {}\r\n set_q = {}\r\n for vertex in self.vertices.keys():\r\n dist[vertex] = sys.maxsize\r\n prev[vertex] = None\r\n set_q[vertex] = dist[vertex]\r\n dist[source] = 0\r\n set_q[source] = 0\r\n while set_q:\r\n vertex_u = min(set_q, key=set_q.get)\r\n if vertex_u == target:\r\n break\r\n set_q.pop(vertex_u)\r\n for edge in self.edges[vertex_u]:\r\n alt = dist[vertex_u] + edge.distance\r\n if alt < dist[edge.destination]:\r\n dist[edge.destination] = alt\r\n set_q[edge.destination] = dist[edge.destination]\r\n prev[edge.destination] = vertex_u\r\n path = []\r\n vertex_u = target\r\n while prev[vertex_u]:\r\n path.insert(0, vertex_u)\r\n vertex_u = prev[vertex_u]\r\n path.insert(0, vertex_u)\r\n return path",
"def constructShortestPath(self):\r\n sp = []\r\n v = self.t\r\n while self.preds[v]: # is not None\r\n sp.append(v)\r\n v = self.preds[v]\r\n sp.append(self.s) # source\r\n sp.reverse() # to have the path from source to dest and not t to s\r\n return sp, self.graph.getCoords(sp)",
"def shortest_path(source, target):\n #although lecture checks for goal when a node is popped off the frontier, efficiency of search can be improved\n #by checking for a goal as nodes are ADDED. If goal detected, don't add it to frontier, just return the solution\n #immediately\n\n #create start point\n start = Node(state = source, parent = None, action = None)\n frontier = QueueFrontier()\n frontier.add(start)\n\n #create explored set\n explored = set()\n\n while True:\n #if nothing left in frontier, no path exists\n if frontier.empty():\n return None\n\n #choose a node from the frontier\n node = frontier.remove()\n #if node is goal, we have solution\n\n #add neighbors 2 frontier using function THATS ALR THERE DUMMY\n for (movie, star) in neighbors_for_person(node.state):\n newNode = Node(state = star, parent = node, action=movie)\n if not frontier.contains_state(newNode) and newNode.state not in explored:\n if newNode.state == target:\n #reverse the solution\n solution = []\n while newNode.parent is not None:\n actionTuple = (newNode.action, newNode.state)\n solution.append(actionTuple)\n newNode = newNode.parent\n solution.reverse()\n return solution\n else: frontier.add(newNode)\n\n #mark state as explored\n explored.add(node.state)",
"def getShortestPath(self, src, dest):\n vertices = self.floorGraph.getVertList()\n unvisitedQueue = []\n srcPath = Path()\n srcPath.addNode(src)\n srcPath.pathValue = 0\n unvisitedQueue.append(srcPath)\n connections = self.floorGraph.getVertex(src).getConnections()\n #initialisez distances\n for vertex in vertices:\n newPath = Path()\n newPath.nodeList = list(srcPath.nodeList)\n newPath.addNode(vertex)\n if self.floorGraph.getVertex(vertex) in connections:\n newPath.pathValue = self.floorGraph.getVertex(src).getWeight(self.floorGraph.getVertex(vertex))\n unvisitedQueue.append(newPath)\n else:\n newPath.pathValue = math.inf\n self.shortestDistanceMap[src+vertex] = newPath\n # updates distances as per shorter routes\n while len(unvisitedQueue) is not 0:\n unvisitedQueue = sorted(unvisitedQueue, key=functools.cmp_to_key(compareNodes))\n chkPath = unvisitedQueue.pop(0)\n chkNode = chkPath.nodeList[len(chkPath.nodeList)-1]\n for vertex in vertices:\n if(self.floorGraph.getVertex(vertex) in self.floorGraph.getVertex(chkNode).getConnections()):\n newWeight = chkPath.pathValue + self.floorGraph.getVertex(chkNode).getWeight(self.floorGraph.getVertex(vertex))\n if(newWeight < self.shortestDistanceMap[src+vertex].pathValue):\n self.shortestDistanceMap[src+vertex].pathValue = newWeight\n self.shortestDistanceMap[src+vertex].nodeList = list(chkPath.nodeList)\n self.shortestDistanceMap[src+vertex].nodeList.append(vertex)\n newPath = Path()\n newPath.nodeList = list(self.shortestDistanceMap[src+vertex].nodeList)\n newPath.pathValue = newWeight\n unvisitedQueue.append(newPath)\n print(self.shortestDistanceMap[src+dest].nodeList)\n print(self.shortestDistanceMap[src+dest].pathValue)",
"def shortest_path(self, source, target, via=None, weight='length', bbox=None):\n\n if self._graph_backend == 'networkx':\n return networkx.shortest_path(self._graph, source, target, weight=weight)\n else:\n if isinstance(via, list):\n return self._pgr.get_route(source, target, via_nodes=via, bbox_nodes=bbox)\n else:\n return self._pgr.get_route(source, target)",
"def path(most_important_up, most_important_down, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return source_to_source(most_important_up, most_important_down), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]",
"def test_find_shortest_path():\n g = Graph()\n node_1 = Node({'A':['B','C']})\n g.add(node_1)\n node_2 = Node({'B':['C','D']})\n g.add(node_2)\n node_3 = Node({'C':['D']})\n g.add(node_3)\n node_4 = Node({'D':['C']})\n g.add(node_4)\n node_5 = Node({'E':['C']})\n g.add(node_5)\n\n # zero path between node_1 and node_5\n path_0 = g.find_shortest_path(node_1, node_5)\n assert path_0 == None\n # only one path between node_5 and node_4\n path_1 = g.find_shortest_path(node_5, node_4)\n assert [ node.name for node in path_1 ] == [ node_5.name, node_3.name, node_4.name ]\n # three paths between node_1 and node_3, verify the shortest one is returned\n path_3 = g.find_shortest_path(node_1, node_3)\n assert [ node.name for node in path_3 ] == [ node_1.name, node_3.name ]",
"def shortest_route(self, src, dest):\n\n # Dijkstra with unusual start condition to prevent src -> src == 0 distance\n x_in = set()\n a = defaultdict(lambda: float('inf'))\n v = self.V.copy()\n\n for node, cost in self.G[src].items():\n a[node] = cost\n x_in.add(node)\n v.remove(node)\n\n while x_in != self.V:\n mn = float('inf')\n new = None\n for x in x_in:\n for node, cost in self.G[x].items():\n if node in v:\n if (a[x] + cost) < mn: # optimize large/dense G with pri. q\n mn = a[x] + cost\n new = (x, node, cost)\n if new is None:\n break\n x, node, cost = new\n x_in.add(node)\n v.remove(node)\n a[node] = a[x] + cost\n return a[dest]",
"def path_to_actor(self, source, target):\r\n #shortest path\r\n path = nx.shortest_path(self.graph, source, target)\r\n #shortest path length....\r\n len_path = nx.shortest_path_length(self.graph, source, target)\r\n return path, len_path/2",
"def least_cost_path(G, start, dest, cost):\n\n # Create a priority queue\n todo = pqueue.PQueue()\n todo.update(start, 0);\n\n # v in visited when the vertex v's least cost from start has been determined\n visited = set()\n\n # parent[v] is the vertex that just precedes v in the path from start to v\n parent = {}\n\n while todo and (dest not in visited):\n\n # priority queue operation\n # remove smallest estimated cost vertex from todo list\n (cur, c) = todo.pop_smallest()\n\n # it is now visited, and will never have a smaller cost\n visited.add(cur)\n\n for n in G.adj_to(cur):\n if n in visited: continue\n if todo.update(n, c+cost((cur,n))):\n parent[n] = cur\n\n # now, if there is a path, extract it. The graph may be disconnected\n # so in that case return None\n if dest not in visited:\n return None\n\n path = [dest]\n cur = dest\n while start not in path:\n cur = parent[cur]\n path.append(cur)\n\n path.reverse()\n return path",
"def shortest_path(self, source, destination, parameter=None):\n paths = []\n for path in self.graph.shortest_paths(source, destination, parameter):\n paths.append({'hops': path})\n return jsonify({'paths': paths})",
"def search_shortest_paths(\n self,\n src_nodes: list[TN],\n dst_node: TN,\n operation_src: str,\n operation_dest: str,\n domain: str,\n limit_dest_schemes: list[str],\n *,\n session: \"Session\",\n ) -> dict[TN, list[dict[str, Any]]]:\n\n for rse in itertools.chain(src_nodes, [dst_node], self._multihop_nodes):\n rse.ensure_loaded(load_attributes=True, load_info=True, session=session)\n self.ensure_edges_loaded(session=session)\n\n if self._multihop_nodes:\n # Filter out island source RSEs\n nodes_to_find = {node for node in src_nodes if node.out_edges}\n else:\n nodes_to_find = set(src_nodes)\n\n class _NodeStateProvider:\n _hop_penalty = self._hop_penalty\n\n def __init__(self, node: TN):\n self.enabled: bool = True\n self.cost: _Number = 0\n if node != dst_node:\n try:\n self.cost = int(node.attributes.get('hop_penalty', self._hop_penalty))\n except ValueError:\n self.cost = self._hop_penalty\n\n scheme_missmatch_found = {}\n\n class _EdgeStateProvider:\n def __init__(self, edge: TE):\n self.edge = edge\n self.chosen_scheme = {}\n\n @property\n def cost(self) -> _Number:\n return self.edge.cost\n\n @property\n def enabled(self) -> bool:\n try:\n matching_scheme = rsemgr.find_matching_scheme(\n rse_settings_src=self.edge.src_node.info,\n rse_settings_dest=self.edge.dst_node.info,\n operation_src=operation_src,\n operation_dest=operation_dest,\n domain=domain,\n scheme=limit_dest_schemes if self.edge.dst_node == dst_node and limit_dest_schemes else None,\n )\n self.chosen_scheme = {\n 'source_scheme': matching_scheme[1],\n 'dest_scheme': matching_scheme[0],\n 'source_scheme_priority': matching_scheme[3],\n 'dest_scheme_priority': matching_scheme[2],\n }\n return True\n except RSEProtocolNotSupported:\n scheme_missmatch_found[self.edge.src_node] = True\n return False\n\n paths = {dst_node: []}\n for node, distance, _, edge_to_next_hop, edge_state in self.dijkstra_spf(dst_node=dst_node,\n nodes_to_find=nodes_to_find,\n node_state_provider=_NodeStateProvider,\n edge_state_provider=_EdgeStateProvider):\n nh_node = edge_to_next_hop.dst_node\n edge_state = cast(_EdgeStateProvider, edge_state)\n hop = {\n 'source_rse': node,\n 'dest_rse': nh_node,\n 'hop_distance': edge_state.cost,\n 'cumulated_distance': distance,\n **edge_state.chosen_scheme,\n }\n paths[node] = [hop] + paths[nh_node]\n\n nodes_to_find.discard(node)\n if not nodes_to_find:\n # We found the shortest paths to all desired nodes\n break\n\n result = {}\n for node in src_nodes:\n path = paths.get(node)\n if path is not None:\n result[node] = path\n elif scheme_missmatch_found.get(node):\n result[node] = []\n return result",
"def shortest_path(edges, start, end):\n visitedNodes = []\n queue = [[start]]\n if start == end:\n return [start]\n \n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node not in visitedNodes:\n neighbors = get_neighbors(edges, node)\n for neighbor in neighbors:\n newPath = list(path)\n newPath.append(neighbor)\n queue.append(newPath)\n if neighbor == end:\n return fix_format(edges, newPath)\n visitedNodes.append(node)\n return None",
"def shortest(self, from_node, to_node):\n print \"Shortest path from {} to {}\".format(from_node.name, to_node.name)\n current = from_node\n solution = {current.name: 0}\n visited = []\n if from_node.name == to_node.name:\n return \"No route necessary\"\n\n while current:\n if current.name == to_node.name:\n return \"Solution {}\".format(solution.get(to_node.name))\n\n for edge in current.edges:\n # look at routes from this node\n if edge.from_node.name != current.name:\n continue\n weight = (solution.get(edge.from_node.name) or 0) + edge.weight\n if not solution.get(edge.to_node.name):\n solution.update({edge.to_node.name: weight})\n elif solution.get(edge.to_node.name) > weight:\n solution.update({edge.to_node.name: weight})\n\n # find the lowest weight, go to that node next\n lowest = None\n next_node = None\n for node_name, weight in solution.iteritems():\n if node_name in visited:\n continue\n if lowest is None or weight < lowest:\n lowest = weight\n next_node = self.graph.nodes.get(node_name)\n visited.append(current.name)\n current = next_node\n return \"No solution\"",
"def shortest_path__dijkstra__priority_queue(self, source, target=None):\n dist = {} # best distances to `v` from `source`\n prev = {} # predecessors of `v`\n Q = PriorityQueue()\n\n dist[source] = 0\n Q.add_with_priority(source, 0)\n\n for v in self.vertices:\n if v != source:\n dist[v] = self.INFINITY # unknown distance from source to `v`\n prev[v] = None # predecessor of `v`\n\n # the main loop\n reached_target = False\n while not Q.is_empty and not reached_target:\n priority, u = Q.extract_min() # remove and return best vertex\n\n # go through all `v` neighbors of `u`\n for v, edge_weight in self.neighbors_of(u):\n alt = dist[u] + edge_weight\n if alt < dist[v]:\n # current known shortest path to `v` is...\n dist[v] = alt # with distance `alt`\n prev[v] = u # through vertex `u`\n\n if not Q.contains(v):\n Q.add_with_priority(v, alt)\n\n if target is not None and u == target:\n # break as soon as `target` is reached\n # no need to calculate shortest path between every pair of vertices\n reached_target = True\n\n if target is not None and reached_target:\n S = [] # holds the shortest path, or empty if None\n u = target\n if u in prev or u == source:\n while u is not None:\n S.append(u)\n u = prev.get(u)\n\n path = S[::-1]\n distance = sum([v.weight for v in S])\n else:\n path = None\n distance = None\n\n return path, distance, dist",
"def build(self, source, target):\n return self.shortestPath(self.graph, source, target)",
"def shortest_path(graph, start, end):\n nodes_to_visit = {start}\n visited_nodes = set()\n # Distance from start to start is 0\n distance_from_start = {start: 0}\n predecessors = {} # Store previous node for shortest route for each node\n\n while nodes_to_visit:\n # Get node with smallest weight\n current = min(\n [(distance_from_start[node], node) for node in nodes_to_visit]\n )[1]\n\n # If the end is reached, quit\n if current == end:\n break\n\n nodes_to_visit.discard(current)\n visited_nodes.add(current)\n\n edges = graph[current]\n unvisited_neighbours = set(edges).difference(visited_nodes)\n for neighbour in unvisited_neighbours:\n neighbour_distance = distance_from_start[current] + \\\n edges[neighbour]\n if neighbour_distance < distance_from_start.get(neighbour,\n float('inf')):\n distance_from_start[neighbour] = neighbour_distance\n predecessors[neighbour] = current\n nodes_to_visit.add(neighbour)\n\n return _deconstruct_path(predecessors, end)",
"def min_path(self, start, end, maxD=1e309):\n tdist, preceding_node = self.dijkstra(start, maxD)\n dist = tdist[end]\n backpath = [end]\n try:\n while end != start:\n end = preceding_node[end]\n backpath.append(end)\n path = list(reversed(backpath))\n except KeyError:\n path = None\n\n return dist, path",
"def pathTo(self, v): # O(# edges returned)\n if self.hasNegativeCycle():\n raise Exception(\"Negative cost cycle exists\")\n if not self.hasPathTo(v): return None\n path = [] # new Stack<DirectedEdge>()\n e = self._edgeTo[v]\n while e is not None: \n path.append(e) # push(e)\n e = self._edgeTo[e.get_from()]\n return path",
"def shortestPath( self, source, target, weight = None ):\n if weight == None:\n return nx.shortest_path(self._G, source, target)\n else:\n return nx.shortest_path(self._G, source, target, weight = weight)",
"def dfs(self, starting_vertex, destination_vertex):\n # create an empty stack \n stack = Stack()\n #push the starting vertex ID as list\n stack.push([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n path = stack.pop()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n stack.push(new_path)",
"def one_way_path(most_important, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(most_important), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]",
"def find_shortest_path(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if not graph.has_key(start):\n return None\n shortest = None\n for node in graph[start]:\n if node not in path:\n newpath = find_shortest_path(graph, node, end, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest"
] |
[
"0.7420686",
"0.73137337",
"0.7145276",
"0.7083855",
"0.6987937",
"0.68091285",
"0.68024665",
"0.6783143",
"0.67707956",
"0.67509186",
"0.66613495",
"0.66289914",
"0.65742147",
"0.6509377",
"0.6435959",
"0.64237475",
"0.64171004",
"0.6385649",
"0.6310933",
"0.62928414",
"0.628684",
"0.6282392",
"0.627839",
"0.6268327",
"0.6267424",
"0.62542343",
"0.62336576",
"0.6176571",
"0.6155427",
"0.6145223"
] |
0.8422774
|
0
|
Require that the user is a designated study admin or site admin.
|
def _requireStudyAdmin(self, user):
    """Raise AccessException unless the user is a Study Administrator or a site admin."""
    studyAdminsGroup = self.model('group').findOne({'name': 'Study Administrators'})
    # Site admins (the 'admin' flag) are allowed even when the group is missing
    # or the user is not a member of it.
    if not studyAdminsGroup or studyAdminsGroup['_id'] not in user['groups']:
        if not user.get('admin', False):
            raise AccessException(
                'Only members of the Study Administrators group can create or modify studies.')
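A minimal usage sketch, assuming a Girder-style Resource class where getCurrentUser() is available; the endpoint name and body below are hypothetical.

def createStudy(self, params):
    # Hypothetical endpoint: reject unauthorized users before doing any work.
    user = self.getCurrentUser()  # assumed Girder Resource helper
    self._requireStudyAdmin(user)
    # ... create the study for the authorized user ...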
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)",
"def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)",
"def validate_admin(self, request):\n\n self.validate_login(request)\n\n if request.session['id'] not in self.admins:\n handler.logHelper.log_it_visit(request, __name__ + '.validate_admin', authorized=False)\n raise PermissionDenied('You need to be an admin to access this page.')",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None:\n requester = await auth.get_user_by_req(request)\n await assert_user_is_admin(auth, requester)",
"def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True",
"def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")",
"def admin_required(func):\n\n @functools.wraps(func)\n def __wrapper(request, *args, **kwds):\n \"\"\"Makes it possible for admin_required to be used as a decorator.\"\"\"\n if request.user_is_admin:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be an administrator to view this page.')\n\n return __wrapper",
"def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)",
"def require_project_administrator(project):\n if not test_project_administrator(project):\n raise cherrypy.HTTPError(403)",
"def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')",
"def test_admin_required(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/required')\n self.assertEqual(b'required', rv.data)",
"def admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session['user']['user_type'] != \"admin\":\n return abort(403)\n return f(*args, **kwargs)\n return decorated_function",
"def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))",
"def is_admin(self):\n return False",
"def admin(request):\n if not request.user.is_staff:\n return render_to_response('error.htm', {\n 'error': \"Sorry, you are not staff... (user permissions 'is_staff')\",\n })\n return render_to_response('admin.htm', {\n 'username': request.user,\n })",
"def is_not_admin(user):\n return not user.is_superuser",
"def is_user_admin(request):\n return request.user.is_superuser",
"def is_staff(self):\r\n return self.is_admin",
"def require_server_administrator():\n if not test_server_administrator():\n raise cherrypy.HTTPError(403)",
"def admin_required(f): # pragma: no cover\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if current_user.admin:\r\n return f(*args, **kwargs)\r\n else:\r\n return abort(403)\r\n return decorated_function",
"def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)",
"def admin_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n if request.user != config['server']['admin_username']:\n return web.json_response({'status': 'error', 'message': 'admin rights required'}, status=403)\n return func(request)\n return wrapper",
"def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))",
"def user_is_subjectadmin(userobj):\n from .subject import Subject\n return user_is_basenodeadmin(userobj, Subject)",
"def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)",
"def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False",
"def CAN_ASSIGN(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)",
"def is_admin(self, user):\n return user.name in self.admins"
] |
[
"0.69412476",
"0.68771887",
"0.6837426",
"0.6823092",
"0.6823092",
"0.6561765",
"0.6527417",
"0.65214103",
"0.6505473",
"0.649122",
"0.6457141",
"0.64561474",
"0.6453968",
"0.64320403",
"0.6393753",
"0.63883823",
"0.6383777",
"0.63770217",
"0.6367018",
"0.6363484",
"0.63585603",
"0.63579905",
"0.6329531",
"0.6323954",
"0.6315146",
"0.6308256",
"0.6302435",
"0.62755543",
"0.62704635",
"0.6262351"
] |
0.8191438
|
0
|
Method to retrieve the current values of the RAMSTKSurvivalData data model attributes.
|
def get_attributes(self):
    """Return the current values of the RAMSTKSurvivalData attributes as a tuple."""
    _attributes = (self.survival_id, self.record_id, self.name,
                   self.source_id, self.failure_date, self.left_interval,
                   self.right_interval, self.status_id, self.quantity,
                   self.tbf, self.mode_type_id, self.nevada_chart,
                   self.ship_date, self.number_shipped, self.return_date,
                   self.number_returned, self.user_float_1,
                   self.user_float_2, self.user_float_3,
                   self.user_integer_1, self.user_integer_2,
                   self.user_integer_3, self.user_string_1,
                   self.user_string_2, self.user_string_3)
    return _attributes
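A short round-trip sketch, assuming the companion set_attributes() that appears among the negatives below: it expects the 23 values that follow survival_id and record_id in the tuple returned here. The record and other_record names are hypothetical.

# Hedged sketch: copy one record's editable values onto another record.
_attributes = record.get_attributes()
# Drop survival_id and record_id; set_attributes() starts at 'name'.
_error_code, _msg = other_record.set_attributes(_attributes[2:])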
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_attributes(self, attributes):\n\n _error_code = 0\n _msg = \"RAMSTK SUCCESS: Updating RAMSTKSurvivalData {0:d} attributes.\". \\\n format(self.record_id)\n\n try:\n self.name = str(none_to_default(attributes[0], ''))\n self.source_id = int(none_to_default(attributes[1], 0))\n self.failure_date = none_to_default(attributes[2], date.today())\n self.left_interval = float(none_to_default(attributes[3], 0.0))\n self.right_interval = float(none_to_default(attributes[4], 0.0))\n self.status_id = int(none_to_default(attributes[5], 0))\n self.quantity = int(none_to_default(attributes[6], 0))\n self.tbf = float(none_to_default(attributes[7], 0.0))\n self.mode_type_id = int(none_to_default(attributes[8], 0))\n self.nevada_chart = int(none_to_default(attributes[9], 0))\n self.ship_date = none_to_default(attributes[10], date.today())\n self.number_shipped = int(none_to_default(attributes[11], 0))\n self.return_date = none_to_default(attributes[12], date.today())\n self.number_returned = int(none_to_default(attributes[13], 0))\n self.user_float_1 = float(none_to_default(attributes[14], 0.0))\n self.user_float_2 = float(none_to_default(attributes[15], 0.0))\n self.user_float_3 = float(none_to_default(attributes[16], 0.0))\n self.user_integer_1 = int(none_to_default(attributes[17], 0))\n self.user_integer_2 = int(none_to_default(attributes[18], 0))\n self.user_integer_3 = int(none_to_default(attributes[19], 0))\n self.user_string_1 = str(none_to_default(attributes[20], ''))\n self.user_string_2 = str(none_to_default(attributes[21], ''))\n self.user_string_3 = str(none_to_default(attributes[22], ''))\n except IndexError as _err:\n _error_code = error_handler(_err.args)\n _msg = \"RAMSTK ERROR: Insufficient number of input values to \" \\\n \"RAMSTKSurvivalData.set_attributes().\"\n except (TypeError, ValueError) as _err:\n _error_code = error_handler(_err.args)\n _msg = \"RAMSTK ERROR: Incorrect data type when converting one or \" \\\n \"more RAMSTKSurvivalData attributes.\"\n\n return _error_code, _msg",
"def data(self):\n\t\treturn vars(self)",
"def valuerefs(self):\r\n return self.data.values()",
"def get_data(self):\n return self._beta",
"def tlm_vals(self):\n raise NotImplementedError",
"def get_data(self, data):\n self.data = {}\n self.data[ATTR_PM1] = data['current']['values'][0]['value']\n self.data[ATTR_PM25] = data['current']['values'][1]['value']\n self.data[ATTR_PM25_LIMIT] = data['current']['standards'][0]['limit']\n self.data[ATTR_PM25_PERCENT] = (data['current']['standards'][0]\n ['percent'])\n self.data[ATTR_PM10] = data['current']['values'][2]['value']\n self.data[ATTR_PM10_LIMIT] = data['current']['standards'][1]['limit']\n self.data[ATTR_PM10_PERCENT] = (data['current']['standards'][1]\n ['percent'])\n self.data[ATTR_PRESSURE] = data['current']['values'][3]['value']\n self.data[ATTR_HUMIDITY] = data['current']['values'][4]['value']\n self.data[ATTR_TEMPERATURE] = data['current']['values'][5]['value']\n self.data[ATTR_CAQI] = data['current']['indexes'][0]['value']\n self.data[ATTR_CAQI_LEVEL] = (data['current']['indexes'][0]\n ['level'].lower().replace('_', ' '))",
"def valg(self):\n return self.ssp_list[self.count]",
"def device_state_attributes(self):\n # TODO: convert RH from Elk to AH ?\n #if self.current_humidity > 0:\n # humidity = self.current_humidity\n data = {\n 'hidden': self._hidden,\n 'temp_unit' : self.temperature_unit,\n }\n if self._device.temp_outside is not None and self._device.temp_outside > -460:\n data['temp_outside'] = self._device.temp_outside\n if self._device.temp_3 is not None and self._device.temp_3 > -460:\n data['temp_3'] = self._device.temp_3\n if self._device.temp_4 is not None and self._device.temp_4 > -460:\n data['temp_4'] = self._device.temp_4\n return data",
"def values(self):\n\t\treturn self.myVals",
"def get_time_step_values(self):\n return GravGradReader.get_time_step_values(self)",
"def risk_measures(self) -> Tuple[RiskMeasure, ...]:\n return self.__risk_measures",
"def get_regression(self):\n return self.regression",
"def get_data(self): # TODO: add smooth possibility\n return self.data",
"def get_time_step_values(self):\n return GravObsReader.get_time_step_values(self)",
"def _values(self):\n return self.__values",
"def get_data(self):\n return self.X_train, self.X_test, self.y_train, self.y_test",
"def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr",
"def getMyInfoAsDict(self):\n list = ['name', 'version', 'systemSize', 'xMax', \n 'yMax', 'currentRound', 'currentHoursLeft']\n d = self.getSelectedAttr(list)\n return d",
"def getCurentData(self):\n if not self.labExperiment:\n super().getCurentData()\n else:\n return np.array(self.connection.query('get_actuator_data'))",
"def getPvals(self):\n pObj = self.getPref()\n self.cv['Vm'] = pObj.Vm\n self.cv['Va'] = pObj.Va",
"def t(self):\n index = self.var_index()\n return self.var_data(index)",
"def getstate(self):\r\n return Model.getstate(self) + [self.X,\r\n self.num_data,\r\n self.input_dim,\r\n self.kern,\r\n self.likelihood,\r\n self.output_dim,\r\n self._Xoffset,\r\n self._Xscale]",
"def state(self):\n return self.probe.get_data(self.variable)",
"def state(self):\n return self.probe.get_data(self.variable)",
"def get_temp_data(self):\n return self.data",
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;",
"def getDataDict(self):\n # Used to compare data in MATLAB\n d = {'Vm': self.r_Vm,\n 'Va': self.r_Va,\n 'BusName': self.Busnam,\n 'BusNum': self.Extnum,\n }\n return d",
"def info(self):\n return {\n \"learning_rate\": self.learning_rate,\n \"learning_rate_decay\": self.learning_rate_decay,\n \"training_epochs\": self.training_epochs,\n \"batch_size\": self.batch_size,\n \"training_history\": self.training_history,\n \"iteration\": self.iteration,\n \"features\": self.featureset.as_dict()\n }",
"def Values(self):\r\n\t\treturn self._get_attribute('values')",
"def get(self):\r\n return self.x, self.f, self.evals, self.x_geno"
] |
[
"0.6175949",
"0.57532144",
"0.5705181",
"0.5604731",
"0.5507315",
"0.55062205",
"0.5477761",
"0.5460437",
"0.54542017",
"0.5453412",
"0.54475814",
"0.54407895",
"0.543573",
"0.54245627",
"0.5412884",
"0.54101944",
"0.5404969",
"0.53737324",
"0.5372602",
"0.53625053",
"0.53558695",
"0.533855",
"0.53264624",
"0.53264624",
"0.5326234",
"0.53219795",
"0.53209144",
"0.5318065",
"0.53112495",
"0.5304459"
] |
0.60048467
|
1
|
Method to set the RAMSTKSurvivalData data model attributes.
|
def set_attributes(self, attributes):
_error_code = 0
_msg = "RAMSTK SUCCESS: Updating RAMSTKSurvivalData {0:d} attributes.". \
format(self.record_id)
try:
self.name = str(none_to_default(attributes[0], ''))
self.source_id = int(none_to_default(attributes[1], 0))
self.failure_date = none_to_default(attributes[2], date.today())
self.left_interval = float(none_to_default(attributes[3], 0.0))
self.right_interval = float(none_to_default(attributes[4], 0.0))
self.status_id = int(none_to_default(attributes[5], 0))
self.quantity = int(none_to_default(attributes[6], 0))
self.tbf = float(none_to_default(attributes[7], 0.0))
self.mode_type_id = int(none_to_default(attributes[8], 0))
self.nevada_chart = int(none_to_default(attributes[9], 0))
self.ship_date = none_to_default(attributes[10], date.today())
self.number_shipped = int(none_to_default(attributes[11], 0))
self.return_date = none_to_default(attributes[12], date.today())
self.number_returned = int(none_to_default(attributes[13], 0))
self.user_float_1 = float(none_to_default(attributes[14], 0.0))
self.user_float_2 = float(none_to_default(attributes[15], 0.0))
self.user_float_3 = float(none_to_default(attributes[16], 0.0))
self.user_integer_1 = int(none_to_default(attributes[17], 0))
self.user_integer_2 = int(none_to_default(attributes[18], 0))
self.user_integer_3 = int(none_to_default(attributes[19], 0))
self.user_string_1 = str(none_to_default(attributes[20], ''))
self.user_string_2 = str(none_to_default(attributes[21], ''))
self.user_string_3 = str(none_to_default(attributes[22], ''))
except IndexError as _err:
_error_code = error_handler(_err.args)
_msg = "RAMSTK ERROR: Insufficient number of input values to " \
"RAMSTKSurvivalData.set_attributes()."
except (TypeError, ValueError) as _err:
_error_code = error_handler(_err.args)
_msg = "RAMSTK ERROR: Incorrect data type when converting one or " \
"more RAMSTKSurvivalData attributes."
return _error_code, _msg
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_data(self, df):\r\n # Check data is correct.\r\n cols = df.shape[1]\r\n conditions = [cols > 2,\r\n df.index.name == 'r',\r\n df.columns[0] == 't']\r\n if False in conditions:\r\n raise ValueError(f'{self} wrong data set.')\r\n\r\n # Set attributes and log\r\n self.data = df\r\n self._set_rate()\r\n logger.debug(f'{self} set data')",
"def set_attributes(self, attributes):\n _error_code = 0\n _msg = \"RAMSTK SUCCESS: Updating RAMSTKMode {0:d} attributes.\". \\\n format(self.hardware_id)\n\n try:\n self.critical_item = int(\n none_to_default(attributes['critical_item'], 0))\n self.description = str(\n none_to_default(attributes['description'],\n 'Failure Mode Description'))\n self.design_provisions = str(\n none_to_default(attributes['design_provisions'], ''))\n self.detection_method = str(\n none_to_default(attributes['detection_method'], ''))\n self.effect_end = str(\n none_to_default(attributes['effect_end'], 'End Effect'))\n self.effect_local = str(\n none_to_default(attributes['effect_local'], 'Local Effect'))\n self.effect_next = str(\n none_to_default(attributes['effect_next'], 'Next Effect'))\n self.effect_probability = float(\n none_to_default(attributes['effect_probability'], 0.0))\n self.hazard_rate_source = str(\n none_to_default(attributes['hazard_rate_source'], ''))\n self.isolation_method = str(\n none_to_default(attributes['isolation_method'], ''))\n self.mission = str(none_to_default(attributes['mission'], ''))\n self.mission_phase = str(\n none_to_default(attributes['mission_phase'], ''))\n self.mode_criticality = float(\n none_to_default(attributes['mode_criticality'], 0.0))\n self.mode_hazard_rate = float(\n none_to_default(attributes['mode_hazard_rate'], 0.0))\n self.mode_op_time = float(\n none_to_default(attributes['mode_op_time'], 0.0))\n self.mode_probability = str(\n none_to_default(attributes['mode_probability'], ''))\n self.mode_ratio = float(\n none_to_default(attributes['mode_ratio'], 0.0))\n self.operator_actions = str(\n none_to_default(attributes['operator_actions'], ''))\n self.other_indications = str(\n none_to_default(attributes['other_indications'], ''))\n self.remarks = str(none_to_default(attributes['remarks'], ''))\n self.rpn_severity = int(\n none_to_default(attributes['rpn_severity'], 1))\n self.rpn_severity_new = int(\n none_to_default(attributes['rpn_severity_new'], 1))\n self.severity_class = str(\n none_to_default(attributes['severity_class'], ''))\n self.single_point = int(\n none_to_default(attributes['single_point'], 0))\n self.type_id = int(none_to_default(attributes['type_id'], 0))\n except KeyError as _err:\n _error_code = 40\n _msg = \"RAMSTK ERROR: Missing attribute {0:s} in attribute \" \\\n \"dictionary passed to \" \\\n \"RAMSTKMode.set_attributes().\".format(_err)\n\n return _error_code, _msg",
"def SetData(self, data_):\n return _hypre.HypreParVector_SetData(self, data_)",
"def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n\n s('oself.coef_ = oself.model.coef_')\n s('oself.intercept_ = oself.model.intercept_')\n\n self.time_prepare = None\n s('oself.time_prepare = oself.model.time_prepare')\n self.time_upload_data = None\n s('oself.time_upload_data = oself.model.time_upload_data')\n self.time_fitonly = None\n s('oself.time_fitonly = oself.model.time_fitonly')",
"def set_snapshot(self, data):\n self.sim.t = data[\"t\"]\n self.time = data[\"time\"]\n self.sim.vehicles = data[\"vehicles\"]\n self.sim.stations = data[\"stations\"]\n self.state = data[\"state\"]\n self.is_done = data[\"done\"]\n self.sim._add_base_stations_to_vehicles()\n self.sim.set_max_target(self.sim.max_target)",
"def set_data(self, data):\n self._model.set_data(data)\n self.__refresh()",
"def setData(self,newdata):\n self.record(inspect.currentframe())\n if np.shape(newdata) == np.shape(self.data):\n self.data = np.copy(newdata)",
"def _set_siscrate_attrs(self):\n self.attrs.update(\n {\n \"Created date\": np.bytes_(\"8/21/2012 12:26:06 PM\"),\n \"Description\": np.bytes_(\n \"SIS Crate of Digitizers:\\n\\n\"\n \"4 type 3302 boards: 8 channels per board, 100MHz per \"\n \"channel, 16 bit vertical resolution, \"\n \"1MSamp/channel\\n\\n\"\n \"2 type 3305 boards: 8 channels per board, 1.25GHz \"\n \"per channel, 10 bit vertical resolution. 2GB memory \"\n \"per board.\\n\\n\"\n \"Each 3305 board can be switched to be 4 channels at \"\n \"2.5 GHz, or 2 channels at 5.0GHz.\\n\"\n \"This module also provides access to the clock \"\n \"distributor board.\"\n ),\n \"Device name\": np.bytes_(\"SIS crate\"),\n \"Module IP address\": np.bytes_(\"192.168.7.3\"),\n \"Module VI path\": np.bytes_(\"Modules\\SIS crate\\SIS crate.vi\"),\n \"Type\": np.bytes_(\"Data acquisition\"),\n }\n )",
"def set_data(self,pdata):\n self.uid.data=pdata[0]\n self.pid.data=pdata[1]\n self.pName.data=pdata[2]\n self.pAge.data=pdata[3]\n self.dateOfSubmission.data=pdata[4]\n self.bedType.data=pdata[5]\n self.address.data=pdata[6]\n self.city.data=pdata[7]\n self.state.data=pdata[8]\n self.status.data=pdata[9]",
"def setDataRate(self, DataRate):\n \n self.DataRate = DataRate",
"def set_bios_settings(self, data=None):\n\n if not data:\n raise exception.SDFlexError(\"Could not apply settings with\"\n \" empty data\")\n sushy_system = self._get_sushy_system()\n\n try:\n for key in data.keys():\n sushy_system.bios.set_attribute(key, data[key])\n except sushy.exceptions.SushyError as e:\n message_extended_info = e.body.get('@Message.ExtendedInfo')\n error_message = message_extended_info[0]['Message']\n\n msg = (self._(\"Setting the value of Bios attribute \"\n \"'%(atrribute)s' is not succesfull. \"\n \"Error: %(error)s\") %\n {'error': str(error_message), 'atrribute': key})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def set_data(self, data):\n self.data = data",
"def set_data(self, data):\n\n pass",
"def SetData(self, data):\r\n\r\n self._data = data",
"def set_data(self, new_data):\n self.data = new_data",
"def set_stratum_data(self) -> None:\n if not self.stratum_factory:\n return\n\n stratum_stats = self.stratum_factory.get_stats()\n completed_jobs = 0\n blocks_found = 0\n estimated_hash_rate = 0.0\n for stats in stratum_stats:\n completed_jobs += stats.completed_jobs\n blocks_found += stats.blocks_found\n estimated_hash_rate = sum_weights(estimated_hash_rate, stats.estimated_hash_rate)\n\n self.completed_jobs = completed_jobs\n self.blocks_found = blocks_found\n self.estimated_hash_rate = estimated_hash_rate",
"def loadData(self, **kwargs):\n \n Simulation.loadData(self, **kwargs)\n if crp_flag:\n self.data['T'] = self.data.D*crp.kpc/crp.c_light/3600/24/365.25/1e6\n else: \n self.data['T'] = self.data.D*siu.kpc/siu.c_light/3600/24/365.25/1e6",
"def set_temp_data(self, data):\n self.data = data",
"def set_data_frame(self, data_frame: DataFrame):\n self.data_frame = data_frame",
"def setDataset(self,dataset):\n self.__dataSet = dataset",
"def assign_values(self, data):\n\n for key in self.__dict__.keys():\n if key in data.keys():\n setattr(self, key, data[key]) # handy built-in function",
"def set_data(self, df):\n self.df = df",
"def set_data(self, data):\n self.closeContext()\n self.clear()\n self.clear_messages()\n\n self.data = data\n if data is not None:\n n_instances = len(data)\n n_attrs = len(data.domain.attributes)\n self.infoLabel.setText(\"%i instances on input\\n%i attributes\" % (\n n_instances, n_attrs))\n\n self.graph_variables = [var for var in data.domain.attributes\n if var.is_continuous]\n if len(self.graph_variables) < 1:\n self.Information.not_enough_attrs()\n else:\n groupvars = [var for var in data.domain.variables +\n data.domain.metas if var.is_discrete]\n\n if len(groupvars) > 0:\n self.cb_attr.addItems([str(var) for var in groupvars])\n self.group_var = str(groupvars[0])\n self.group_variables = groupvars\n self.update_group_var()\n else:\n self._setup_plot()\n\n self.selection = []\n self.openContext(data)\n self.select_data_instances()\n self.commit()",
"def setData(self, data):\n self.data = data",
"def setData(self, data):\n self.data = data",
"def spindle_attributes(self):\n try:\n self.channels\n except AttributeError:\n # create if doesn't exist\n self.channels = [x[0] for x in self.data.columns]\n\n dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute\n [setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]\n self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])\n self.spindle_events = {}\n self.spindle_rejects = {}",
"def setData(self, data):\n self._data = data",
"def __init__(self, data_frame, mins_set):\n # super(FeaturePrevDelays, self).__init__()\n self.df = data_frame.copy()\n self.mins_set = mins_set",
"def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration",
"def set_data(self, data):\n\n self._data = data"
] |
[
"0.5710973",
"0.55717885",
"0.549497",
"0.54572797",
"0.54424727",
"0.53505176",
"0.5346848",
"0.5270459",
"0.525862",
"0.52214503",
"0.52036566",
"0.51715165",
"0.5162624",
"0.5158768",
"0.5143201",
"0.5112687",
"0.51016325",
"0.5100083",
"0.50965685",
"0.5088955",
"0.5062179",
"0.5044884",
"0.50448",
"0.50405675",
"0.50405675",
"0.50321573",
"0.5011032",
"0.50075513",
"0.499133",
"0.49794957"
] |
0.68005687
|
0
|
Load dataset from files in a given folder. This function looks for files train_x.npy, train_y.npy, val_x.npy, val_y.npy, test_x.npy and test_y.npy. Returns a DataSet instance
|
def load_from_path(name, folder):
tx_file = "{0:s}/train_x.npy".format(folder)
if not os.path.isfile(tx_file):
raise DataSetException("Training file not found.")
ty_file = "{0:s}/train_y.npy".format(folder)
if not os.path.isfile(ty_file):
ty_file = None
vx_file = "{0:s}/val_x.npy".format(folder)
if not os.path.isfile(vx_file):
vx_file = None
vy_file = "{0:s}/val_y.npy".format(folder)
if not os.path.isfile(vy_file):
vy_file = None
sx_file = "{0:s}/test_x.npy".format(folder)
if not os.path.isfile(sx_file):
sx_file = None
sy_file = "{0:s}/test_y.npy".format(folder)
if not os.path.isfile(sy_file):
sy_file = None
return DataSet(name=name, trainx=tx_file, trainy=ty_file, validationx=vx_file, validationy=vy_file,
testx=sx_file, testy=sy_file)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)",
"def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val",
"def load_dataset(data_dir, img_size):\n global input_set\n global test_set\n\n imgs = []\n img_files = os.listdir(data_dir)\n for img in img_files:\n # try:\n tmp = scipy.misc.imread(data_dir + \"/\" + img)\n x, y, z = tmp.shape # shape : width * length * chanel\n coords_x = int(x / img_size) # 坐标\n coords_y = int(y / img_size) #\n coords = [(q, r) for q in range(coords_x) for r in range(coords_y)] # 列表 x * y\n for coord in coords:\n imgs.append((data_dir + \"/\" + img, coord)) # 为列表添加文件目录\n # except BaseException:\n # print(\"oops\")\n test_size = min(10, int(len(imgs) * 0.2))\n random.shuffle(imgs)\n test_set = imgs[:test_size]\n train_set_X = imgs[test_size:][:200]\n train_set = imgs[test_size:][200:400]\n return",
"def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' % (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets",
"def load_dataset(train_dir, shuffle_buffer=SHUFFLE_BUFFER, num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE):\n # Dataset creation from images (target is in filename)\n filenames = tf.constant(list(str(file) for file in train_dir.glob('*.png')))\n labels = list(map(_target_from_filename, train_dir.glob('*.png')))\n labels = tf.constant(labels)\n dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n dataset = dataset.map(lambda filename, label: _parse_single(filename, label))\n dataset = dataset.shuffle(shuffle_buffer).repeat(num_epochs).batch(batch_size)\n # dataset = dataset.apply(tf.contrib.data.prefetch_to_device('/gpu:0'))\n return dataset",
"def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_test = np.load(\"data/X_test.npy\")\n\t\t\t\tY_test = np.load(\"data/Y_test.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tX_test = np.zeros((10000,64,64,3))\n\t\t\t\tY_test = []\n\n\t\t\t\t\n\t\t\t\twith open(path, 'rb') as fo:\n\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\tY_test.extend(temp_element[b'labels'])\n\n\t\t\t\tfor j in range(10000):\n\t\t\t\t\tX_test[j] = self._reshape(temp_data[j])\n\n\t\t\t\tY_test = np.eye(10)[np.array(Y_test)]\n\t\t\t\t\n\t\t\t\tnp.save(\"./data/X_test\", X_test)\n\t\t\t\tnp.save(\"./data/Y_test\", Y_test)\n\n\t\t\t\tbreak\n\n\n\t\treturn X_test, Y_test",
"def load_datasets(self):\n if self.processed_extension == '.npz':\n logger.info(f'Loading sets from npz:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = sparse.load_npz(self.train_path)\n\n logger.info(f'val: {self.val_path}')\n self.val_data = sparse.load_npz(self.val_path)\n\n logger.info(f'test: {self.test_path}')\n self.test_data = sparse.load_npz(self.test_path)\n \n # Split x and y\n self.train_data = [sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,-1])]\n \n self.val_data = [sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,-1])]\n \n self.test_data = [sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,-1])]\n \n elif self.processed_extension == '.csv':\n logger.info(f'Loading sets from csv:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = pd.read_csv(self.train_path)\n train_cols = self.train_data.columns\n self.train_data = [self.train_data[train_cols.difference(['TARGET'])],\n self.train_data['TARGET']]\n \n logger.info(f'val: {self.val_path}')\n self.val_data = pd.read_csv(self.val_path)\n self.val_data = [self.val_data[train_cols.difference(['TARGET'])],\n self.val_data['TARGET']]\n \n logger.info(f'test: {self.test_path}')\n self.test_data = pd.read_csv(self.test_path)\n self.test_data = [self.test_data[train_cols.difference(['TARGET'])],\n self.test_data['TARGET']]\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n self.n_train = self.train_data[0].shape[0]\n self.n_val = self.val_data[0].shape[0]\n self.n_test = self.test_data[0].shape[0]\n self.input_size = self.train_data[0].shape[1]\n self.n_examples = self.n_train + self.n_val + self.n_test\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')",
"def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))",
"def load_datasets(data_dir: str) -> Tuple[List[Annotation], List[Annotation], List[Annotation]]:\n train_data = annotations_from_jsonl(os.path.join(data_dir, 'train.jsonl'))\n val_data = annotations_from_jsonl(os.path.join(data_dir, 'val.jsonl'))\n test_data = annotations_from_jsonl(os.path.join(data_dir, 'test.jsonl'))\n return train_data, val_data, test_data",
"def load_dataset(self, testPrefix = 'cv9', root = 'datasets', classes = [ 'pos', 'neg' ]):\n\n\t\tfor senti_class in classes:\n\n\t\t\tdirname = os.path.join(root, senti_class)\n\n\t\t\tfor filename in os.listdir(dirname):\n\n\t\t\t\twith open(os.path.join(dirname, filename)) as file:\n\n\t\t\t\t\tcontent = file.read()\n\n\t\t\t\t\tif filename.startswith(testPrefix):\n\t\t\t\t\t\t# Testing data\n\t\t\t\t\t\tself.testing_set.append(content)\n\t\t\t\t\t\tself.testing_labels.append(senti_class)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Training data\n\t\t\t\t\t\tself.training_set.append(content)\n\t\t\t\t\t\tself.training_labels.append(senti_class)\n\n\t\tself._vectorize(self.vectorizer)",
"def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))",
"def load_dataset_from_dir(dataset_dir):\n\n if not os.path.isdir(dataset_dir):\n raise FileNotFoundError(\"Dataset directory (%s) not found\" % dataset_dir)\n\n train_data = load_kg_file(os.path.join(dataset_dir, \"train.txt.gz\"))\n valid_data = load_kg_file(os.path.join(dataset_dir, \"valid.txt.gz\"))\n test_data = load_kg_file(os.path.join(dataset_dir, \"test.txt.gz\"))\n\n dataset = KgDataset()\n dataset.load_triples(train_data, tag=\"train\")\n dataset.load_triples(valid_data, tag=\"valid\")\n dataset.load_triples(test_data, tag=\"test\")\n return dataset",
"def load_training_dataset(data_dir):\n\tball_images = load_ball_images_to_memory(data_dir)\n\tgen = functools.partial(data_generator, data_dir, ball_images)\n\treturn tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))",
"def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels",
"def loadSets(self, indir=\"\"):\n\n if indir==\"\":\n print(\"specify folder\")\n return -1\n\n self.train = pd.read_pickle(\"{}/train.pkl\".format(indir))\n self.valid = pd.read_pickle(\"{}/valid.pkl\".format(indir))\n self.test = pd.read_pickle(\"{}/test.pkl\".format(indir))\n\n print(\"sets loaded\")",
"def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch",
"def get_data_set(train=True):\n\n # 1\n train_or_test = \"train\" if train == True else \"test\"\n data_path = os.path.join(data_dir, \"aclImdb\",train_or_test)\n\n # 2\n pos_glob_pattern = os.path.join(data_path, \"pos\", \"*.txt\")\n neg_glob_pattern = os.path.join(data_path, \"neg\", \"*.txt\")\n pos_file_path_seq = glob.glob(pos_glob_pattern)\n neg_file_path_seq = glob.glob(neg_glob_pattern)\n\n # 3\n pos_dataset = [text_to_one_line(path) for path in pos_file_path_seq]\n neg_dataset = [text_to_one_line(path) for path in neg_file_path_seq]\n x = pos_dataset + neg_dataset\n y = [1.0] * len(pos_dataset) + [0.0] * len(neg_dataset)\n\n return x, y",
"def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary",
"def load_train_data():\r\n X_train = np.load('data/train/X_train.npy')\r\n scaling_train = np.load('data/train/scaling_train.npy')\r\n ids_train = np.load('data/train/ids_train.npy')\r\n y_train = np.load('data/train/y_train.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_train)\r\n\r\n return X_train, scaling_train, ids_train, y_train",
"def load_eval_dataset(data_dir):\n\tball_images = load_ball_images_to_memory(data_dir)\n\tgen = functools.partial(data_generator, data_dir, ball_images)\n\treturn tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))",
"def load_data(path_to_dir):\n train_pos = []\n train_neg = []\n test_pos = []\n test_neg = []\n with open(path_to_dir+\"train-pos.txt\", \"r\") as f:\n for i,line in enumerate(f):\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_pos.append(words)\n with open(path_to_dir+\"train-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_neg.append(words)\n with open(path_to_dir+\"test-pos.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_pos.append(words)\n with open(path_to_dir+\"test-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_neg.append(words)\n\n return train_pos, train_neg, test_pos, test_neg",
"def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet",
"def load_dataset(data_dir='flowers'):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n # Apply transformations on training set, leave alone validation and testing sets:\n data_transforms = {\n \"training\" : transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n # For validation and tesing sets, since they are the \"unseen\" data that used to measure the model performance, so they should not be applied by any transformations, however, resizing is stil needed.\n \"validation\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n \"testing\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n }\n \n # Load datasets with ImageFolder:\n image_datasets = {\n \"training\" : datasets.ImageFolder(train_dir, transform = data_transforms[\"training\"]),\n \"validation\" : datasets.ImageFolder(valid_dir, transform = data_transforms[\"validation\"]),\n \"testing\" : datasets.ImageFolder(test_dir, transform = data_transforms[\"testing\"])\n }\n \n # Using the image datasets and the trainforms, define the dataloaders: \n dataloaders = {\n \"training\" : torch.utils.data.DataLoader(image_datasets[\"training\"], batch_size = 64, shuffle = True),\n \"validation\" : torch.utils.data.DataLoader(image_datasets[\"validation\"], batch_size = 64),\n \"testing\" : torch.utils.data.DataLoader(image_datasets[\"testing\"], batch_size = 64)\n }\n \n return (dataloaders['training'],\n dataloaders['validation'],\n dataloaders['testing'],\n image_datasets['training'],\n image_datasets['validation'],\n image_datasets['testing'])",
"def load_dataset(split_ratio, save_root_dir):\n # Set the processed data directories\n train_ct, train_label_map, test_ct, test_label_map = train_test_split(split_ratio=split_ratio,\n save_root_dir=save_root_dir)\n\n train_dataset = train_label_map, train_ct\n test_dataset = test_label_map, test_ct\n return train_dataset, test_dataset",
"def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)",
"def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)",
"def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader",
"def load_datasets(path_sets, path_images):\n dataset_files = tuple(path_set_file.name \n for path_set_file in path_sets.glob('*.csv'))\n\n set_names = [dataset_file[: dataset_file.find('_')]\n for dataset_file in dataset_files]\n \n if len(dataset_files) == 3:\n name_order = ['training', 'validation', 'test']\n set_order = tuple(dataset_files.index(f'{name}_set.csv')\n for name in name_order)\n num_sets = 3\n else:\n training_index = dataset_files.index('training_set.csv')\n set_order = (training_index, 1 - training_index)\n num_sets = 2\n\n images_and_labels = [None] * num_sets * 2\n \n for k in range(num_sets):\n path_dataset_file = path_sets.joinpath(dataset_files[set_order[k]])\n\n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n dataset = list(csv_reader)\n\n path_dataset_images = [path_images.joinpath(f'label_{row[1]}', row[0])\n for row in dataset]\n\n images_and_labels[k] = np.array([np.fromfile(path_image, np.float64)\n for path_image\n in path_dataset_images])\n\n images_and_labels[k+num_sets] = [row[1] for row in dataset]\n\n return images_and_labels",
"def train_from_dir(self, path, cat):\n dirfiles = glob.glob(os.path.join(path, '*'))\n total = len(dirfiles)\n count = 0\n for infile in dirfiles:\n f = open(infile, \"r\")\n text = f.read()\n self.train(text, cat)",
"def dtrain(directory):\n return dataset(directory, 'train-images-idx3-ubyte',\n 'train-labels-idx1-ubyte')"
] |
[
"0.73315185",
"0.7232152",
"0.71712154",
"0.7141634",
"0.71336424",
"0.70842564",
"0.70469034",
"0.6936982",
"0.69206196",
"0.6883277",
"0.68169415",
"0.680937",
"0.6802778",
"0.6749382",
"0.67437637",
"0.66981876",
"0.6645925",
"0.6645303",
"0.6644778",
"0.6613466",
"0.6588453",
"0.6585984",
"0.6568393",
"0.65683913",
"0.6531245",
"0.6507077",
"0.6504531",
"0.6499851",
"0.64934534",
"0.6484419"
] |
0.7468724
|
0
|
Read the vapor pressure parameters file and fill in the parameters needed for the vapor pressure curve
|
def readVP(self,species):
f = open('VPparams.txt', 'rU')
lines = f.readlines()
f.close()
parsing = False
for i in np.arange(len(lines)):
if lines[i].startswith(species):
parsing = True
else:
parsing = False
if parsing:
data = lines[i].split()
lnC, L0, Rv, da, db = data[1:len(data)]
self.lnC, self.L0, self.Rv, self.da, self.db = \
float(lnC), float(L0), float(Rv), float(da), float(db)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pressure(self, values):\n self._part = float(values.get('art', self._part))\n self._pven = float(values.get('ven', self._pven))",
"def pressure(self, values):\n self._part = float(values.get('art', self._part))\n self._pven = float(values.get('ven', self._pven))",
"def pressure(self, values):\n self._part = float(values.get('art', self._part))\n self._pven = float(values.get('ven', self._pven))",
"def ReadParameterFile(pf):\n f = open(pf, \"r\")\n pf_dict = SetDefaultParameterValues()\n for line in f:\n if not line.split(): \n continue\n if line.split()[0][0] == \"#\": \n continue\n \n # This will prevent crashes if there is not a blank line at the end of the parameter file\n if line[-1] != '\\n': \n line += '\\n'\n \n # Cleave off end-of-line comments.\n line = line[:line.rfind(\"#\")].strip()\n \n # Read in the parameter name and the parameter value(s).\n parname, eq, parval = line.partition(\"=\")\n \n # Else, actually read in the parameter \n try: \n parval = float(parval)\n except ValueError:\n if re.search('/', parval): # For directory with more than one level\n parval = str(parval.strip())\n elif parval.strip().isalnum(): \n parval = str(parval.strip())\n elif parval.replace('_', '').strip().isalnum():\n parval = parval.strip()\n elif parval.partition('.')[-1] in ['dat', 'hdf5', 'h5', 'txt']:\n parval = str(parval.strip())\n else:\n parval = parval.strip().split(\",\")\n tmp = [] \n if parval[0][0] == '(':\n for element in parval: \n if element.strip(\" (,)\").isdigit(): \n tmp.append(float(element.strip(\"(,)\")))\n else: \n tmp.append(element.strip(\" (,)\"))\n parval = tuple(tmp) \n elif parval[0][0] == '[':\n for element in parval: \n tmp.append(float(element.strip(\"[,]\")))\n parval = list(tmp)\n else:\n print(parname, parval)\n raise ValueError('The format of this parameter is not understood.')\n \n pf_dict[parname.strip()] = parval\n \n return pf_dict",
"def load_params_from_file(self, input_file):\n\n ### FILL IN ###",
"def importParameterBoundaryFile(paramfilename):\n try:\n infile = open(paramfilename, \"r\")\n except IOError:\n\t print \"Unable to open file %s\" % (paramfilename)\n\t raise IOError(\"Unable to open parameter boundary file %s\" % (paramfilename))\n lines = infile.readlines()\n infile.close()\n\n # Parse\n paramdict = {}\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n continue\n elif line[0] == '#':\n continue\n else:\n terms = line.split()\n name = terms[0]\n value = float(terms[1])\n parmin = float(terms[2])\n parmax = float(terms[3])\n stepsize = float(terms[4])\n \n paramdict[name] = [value, parmin, parmax, stepsize]\n # ENDIF\n # ENDFOR\n\n return paramdict",
"def get_params(path = 'INPUT/conv_params'):\n\n # cd to Input, read the conv_params file in and pass each line to file reader\n list = file_reader(path)\n\n Ecuts = list[0] # first element returned from filereader is the energies\n start = int(Ecuts[0][0]) # the first element of this is the lower energy to start from. convert to integer for maths\n multiplier = int(Ecuts[1][0]) # middle element is the step size\n end = int(Ecuts[2][0]) # last element is upper bound on energy\n E_range = (end - start)//multiplier +1 # the number of energies you will create\n Es = [i*multiplier for i in range(E_range)] # take steps in the E_range of step size multiplier\n Ecuts = [[str(i+start)] for i in Es] # add the start energy to all these steps to shift them to correct energies\n # convert the numbers to strings for ease of file writing later\n\n kpts = list[1] # kpoints list is first element returned\n def_E = list[2] # default energy\n def_k = list[3] # default kpoints\n params = Settings(Ecuts, kpts, def_E, def_k) # create the settings object\n\n return params # return the object",
"def get_params(self, paramFile):\n\n with open(paramFile, 'r') as f:\n titleLine = next(f)\n\n for line in f:\n p, i, v = line.split(\",\")\n\n self.params.update(p, v, i)",
"def ppt_ratio_parameters(config_path):\n # Hardcoded HRU field formats for now\n ppt_field_format = 'PPT_{:02d}'\n ratio_field_format = 'PPT_RT_{:02d}'\n\n # Initialize hru_parameters class\n hru = support.HRUParameters(config_path)\n\n # Open input parameter config file\n inputs_cfg = ConfigParser.ConfigParser()\n try:\n inputs_cfg.readfp(open(config_path))\n except Exception as e:\n logging.error(\n '\\nERROR: Config file could not be read, '\n 'is not an input file, or does not exist\\n'\n ' config_file = {}\\n'\n ' Exception: {}\\n'.format(config_path, e))\n sys.exit()\n\n # Log DEBUG to file\n log_file_name = 'ppt_ratio_parameters_log.txt'\n log_console = logging.FileHandler(\n filename=os.path.join(hru.log_ws, log_file_name), mode='w')\n log_console.setLevel(logging.DEBUG)\n log_console.setFormatter(logging.Formatter('%(message)s'))\n logging.getLogger('').addHandler(log_console)\n logging.info('\\nGSFLOW Precipitation Ratio Parameters')\n\n # Units\n ppt_obs_units = support.get_param(\n 'ppt_obs_units', 'mm', inputs_cfg).lower()\n ppt_units_list = ['mm', 'cm', 'm', 'in', 'ft']\n # Compare against the lower case of the values in the list\n # but don't modify the acceptable units list\n if ppt_obs_units not in ppt_units_list:\n logging.error(\n '\\nERROR: Invalid observed precipitation units ({})\\n '\n 'Valid units are: {}'.format(\n ppt_obs_units, ', '.join(ppt_units_list)))\n sys.exit()\n\n # Convert units while reading obs values\n if ppt_obs_units == 'mm':\n units_factor = 1\n elif ppt_obs_units == 'cm':\n units_factor = 10\n elif ppt_obs_units == 'm':\n units_factor = 1000\n elif ppt_obs_units == 'in':\n units_factor = 25.4\n elif ppt_obs_units == 'ft':\n units_factor = 304.8\n else:\n units_factor = 1\n\n # Check input paths\n if not arcpy.Exists(hru.polygon_path):\n logging.error(\n '\\nERROR: Fishnet ({}) does not exist'.format(\n hru.polygon_path))\n sys.exit()\n\n # PPT Zones\n set_ppt_zones_flag = inputs_cfg.getboolean('INPUTS', 'set_ppt_zones_flag')\n if set_ppt_zones_flag:\n ppt_zone_orig_path = inputs_cfg.get('INPUTS', 'ppt_zone_path')\n try:\n ppt_zone_field = inputs_cfg.get('INPUTS', 'ppt_zone_field')\n except:\n logging.error(\n '\\nERROR: ppt_zone_field must be set in INI to apply '\n 'zone specific ppt ratios\\n')\n sys.exit()\n try:\n ppt_hru_id_field = inputs_cfg.get('INPUTS', 'ppt_hru_id_field')\n except:\n ppt_hru_id_field = None\n logging.warning(\n ' ppt_hru_id_field was not set in the INI file\\n'\n ' PPT ratios will not be adjusted to match station '\n 'values'.format(ppt_zone_field, hru.ppt_zone_id_field))\n\n # Field name for PSTA hard coded, but could be changed to be read from\n # config file like ppt_zone\n hru_psta_field = 'HRU_PSTA'\n\n try:\n ppt_obs_field_format = inputs_cfg.get(\n 'INPUTS', 'ppt_obs_field_format')\n except:\n ppt_obs_field_format = 'PPT_{:02d}'\n logging.info(' Defaulting ppt_obs_field_format = {}'.format(\n ppt_obs_field_format))\n\n if not arcpy.Exists(ppt_zone_orig_path):\n logging.error(\n '\\nERROR: PPT Zone ({}) does not exist'.format(\n ppt_zone_orig_path))\n sys.exit()\n # ppt_zone_path must be a polygon shapefile\n if arcpy.Describe(ppt_zone_orig_path).datasetType != 'FeatureClass':\n logging.error(\n '\\nERROR: ppt_zone_path must be a polygon shapefile')\n sys.exit()\n\n # Check ppt_zone_field\n if ppt_zone_field.upper() in ['FID', 'OID']:\n ppt_zone_field = arcpy.Describe(ppt_zone_orig_path).OIDFieldName\n logging.warning(\n '\\n NOTE: Using {} to set {}\\n'.format(\n ppt_zone_field, hru.ppt_zone_id_field))\n elif not 
arcpy.ListFields(ppt_zone_orig_path, ppt_zone_field):\n logging.error(\n '\\nERROR: ppt_zone_field field {} does not exist\\n'.format(\n ppt_zone_field))\n sys.exit()\n # Need to check that field is an int type\n # Only check active cells (HRU_TYPE >0)?!\n elif not [f.type for f in arcpy.Describe(ppt_zone_orig_path).fields\n if (f.name == ppt_zone_field and\n f.type in ['SmallInteger', 'Integer'])]:\n logging.error(\n '\\nERROR: ppt_zone_field field {} must be an integer type\\n'.format(\n ppt_zone_field))\n sys.exit()\n # Need to check that field values are all positive\n # Only check active cells (HRU_TYPE >0)?!\n elif min([row[0] for row in arcpy.da.SearchCursor(\n ppt_zone_orig_path, [ppt_zone_field])]) <= 0:\n logging.error(\n '\\nERROR: ppt_zone_field values must be positive\\n'.format(\n ppt_zone_field))\n sys.exit()\n\n # Check hru_psta_field\n if not arcpy.ListFields(ppt_zone_orig_path, hru_psta_field):\n logging.error(\n '\\nERROR: hru_psta_field field {} does not exist\\n'.format(\n hru_psta_field))\n sys.exit()\n # Need to check that field is an int type\n # Should we only check active cells (HRU_TYPE > 0)?\n elif not [f.type for f in arcpy.Describe(ppt_zone_orig_path).fields\n if (f.name == hru_psta_field and\n f.type in ['SmallInteger', 'Integer'])]:\n logging.error(\n '\\nERROR: hru_psta_field field {} must be an integer type\\n'.format(\n hru_psta_field))\n sys.exit()\n # Need to check that field values are all positive\n # Should we only check active cells (HRU_TYPE > 0)?\n elif min([row[0] for row in arcpy.da.SearchCursor(\n ppt_zone_orig_path, [hru_psta_field])]) <= 0:\n logging.error(\n '\\nERROR: hru_psta_field values must be positive\\n'.format(\n hru_psta_field))\n sys.exit()\n\n # Check ppt_hru_id_field\n # ppt_hru_id values are checked later\n if ppt_hru_id_field is not None:\n if not arcpy.ListFields(ppt_zone_orig_path, ppt_hru_id_field):\n logging.error(\n '\\nERROR: ppt_hru_id_field field {} does not exist\\n'.format(\n ppt_hru_id_field))\n sys.exit()\n # Need to check that field is an int type\n elif not [f.type for f in arcpy.Describe(ppt_zone_orig_path).fields\n if (f.name == ppt_hru_id_field and\n f.type in ['SmallInteger', 'Integer'])]:\n logging.error(\n '\\nERROR: ppt_hru_id_field field {} must be an integer type\\n'.format(\n ppt_hru_id_field))\n sys.exit()\n # Need to check that field values are not negative (0 is okay)\n elif min([row[0] for row in arcpy.da.SearchCursor(\n ppt_zone_orig_path, [ppt_hru_id_field])]) < 0:\n logging.error(\n '\\nERROR: ppt_hru_id_field values cannot be negative\\n'.format(\n ppt_hru_id_field))\n sys.exit()\n else:\n # If a zone shapefile is not used, PPT must be set manually\n ppt_obs_list = inputs_cfg.get('INPUTS', 'ppt_obs_list')\n\n # Check that values are floats\n try:\n ppt_obs_list = map(float, ppt_obs_list.split(','))\n except ValueError:\n logging.error(\n '\\nERROR: ppt_obs_list (mean monthly precipitation) '\n 'values could not be parsed as floats\\n')\n sys.exit()\n\n # Check that there are 12 values\n if len(ppt_obs_list) != 12:\n logging.error(\n '\\nERROR: There must be exactly 12 mean monthly '\n 'observed precipitation values based to ppt_obs_list\\n')\n sys.exit()\n logging.info(\n ' Observed Mean Monthly PPT ({}):\\n {}\\n'\n ' (Script will assume these are listed in month order, '\n 'i.e. 
Jan, Feb, ...)'.format(\n ppt_obs_units, ', '.join(map(str, ppt_obs_list))))\n\n # Check if all the values are 0\n if ppt_obs_list == ([0.0] * 12):\n logging.error(\n '\\nERROR: The observed precipitation values are all 0.\\n'\n ' To compute PPT ratios, please set the ppt_obs_list '\n 'parameter in the INI with\\n observed mean monthly PPT '\n 'values (i.e. from a weather station)')\n sys.exit()\n\n # Get the PPT HRU ID\n try:\n ppt_hru_id = inputs_cfg.getint('INPUTS', 'ppt_hru_id')\n except:\n ppt_hru_id = 0\n\n # Check that the ppt_hru_id is a valid cell hru_id\n # If ppt_hru_id is 0, PPT ratios will not be adjusted\n if ppt_hru_id > 0:\n # Check that HRU_ID is valid\n logging.info(' PPT HRU_ID: {}'.format(ppt_hru_id))\n arcpy.MakeTableView_management(\n hru.polygon_path, \"layer\",\n \"{} = {}\".format(hru.id_field, ppt_hru_id))\n if (ppt_hru_id != 0 and\n int(arcpy.GetCount_management(\"layer\").getOutput(0)) == 0):\n logging.error(\n '\\nERROR: ppt_hru_id {0} is not a valid cell hru_id'\n '\\nERROR: ppt_ratios will NOT be forced to 1'\n ' at cell {0}\\n'.format(ppt_hru_id))\n ppt_hru_id = 0\n arcpy.Delete_management(\"layer\")\n else:\n logging.info(\n ' PPT ratios will not be adjusted to match station values\\n'\n ' (ppt_hru_id = 0)')\n\n # Could add a second check that HRU_PSTA has values >0\n\n # Build output folders if necessary\n ppt_ratio_temp_ws = os.path.join(hru.param_ws, 'ppt_ratio')\n if not os.path.isdir(ppt_ratio_temp_ws):\n os.mkdir(ppt_ratio_temp_ws)\n ppt_zone_path = os.path.join(ppt_ratio_temp_ws, 'ppt_zone.shp')\n # ppt_zone_clip_path = os.path.join(ppt_ratio_temp_ws, 'ppt_zone_clip.shp')\n\n\n # Set ArcGIS environment variables\n arcpy.CheckOutExtension('Spatial')\n env.overwriteOutput = True\n # env.pyramid = 'PYRAMIDS -1'\n env.pyramid = 'PYRAMIDS 0'\n env.workspace = hru.param_ws\n env.scratchWorkspace = hru.scratch_ws\n\n # Set month list based on flags\n month_list = range(1, 13)\n ppt_field_list = [ppt_field_format.format(m) for m in month_list]\n ratio_field_list = [ratio_field_format.format(m) for m in month_list]\n\n # Check fields\n logging.info('\\nAdding PPT ratio fields if necessary')\n # PPT zone fields\n support.add_field_func(\n hru.polygon_path, hru.ppt_zone_id_field, 'LONG')\n # PPT ratio fields\n for ratio_field in ratio_field_list:\n support.add_field_func(hru.polygon_path, ratio_field, 'DOUBLE')\n\n # Calculate PPT zone ID\n if set_ppt_zones_flag:\n logging.info('\\nCalculating cell HRU Precipitation Zone ID')\n ppt_zone_desc = arcpy.Describe(ppt_zone_orig_path)\n ppt_zone_sr = ppt_zone_desc.spatialReference\n logging.debug(' Zones: {}'.format(ppt_zone_orig_path))\n logging.debug(' Projection: {}'.format(ppt_zone_sr.name))\n logging.debug(' GCS: {}'.format(ppt_zone_sr.GCS.name))\n\n # Reset PPT_ZONE_ID\n # if set_ppt_zones_flag:\n logging.info(' Resetting {} to 0'.format(hru.ppt_zone_id_field))\n arcpy.CalculateField_management(\n hru.polygon_path, hru.ppt_zone_id_field, 0, 'PYTHON')\n\n # If ppt_zone spat_ref doesn't match hru_param spat_ref\n # Project ppt_zone to hru_param spat ref\n # Otherwise, read ppt_zone directly\n if hru.sr.name != ppt_zone_sr.name:\n logging.info(' Projecting precipitation zones...')\n # Set preferred transforms\n transform_str = support.transform_func(\n hru.sr, ppt_zone_sr)\n logging.debug(' Transform: {}'.format(transform_str))\n # Project ppt_zone shapefile\n arcpy.Project_management(\n ppt_zone_orig_path, ppt_zone_path, hru.sr,\n transform_str, ppt_zone_sr)\n del transform_str\n else:\n 
arcpy.Copy_management(ppt_zone_orig_path, ppt_zone_path)\n\n # # Remove all unnecessary fields\n # for field in arcpy.ListFields(ppt_zone_path):\n # skip_field_list = ppt_obs_field_list + [ppt_zone_field, 'Shape']\n # if field.name not in skip_field_list:\n # try:\n # arcpy.DeleteField_management(ppt_zone_path, field.name)\n # except:\n # pass\n\n # Set ppt zone ID\n logging.info(' Setting {}'.format(hru.ppt_zone_id_field))\n support.zone_by_centroid_func(\n ppt_zone_path, hru.ppt_zone_id_field, ppt_zone_field,\n hru.polygon_path, hru.point_path, hru)\n # support.zone_by_area_func(\n # ppt_zone_layer, hru.ppt_zone_id_field, ppt_zone_field,\n # hru.polygon_path, hru, hru_area_field, None, 50)\n\n # Set HRU_PSTA\n logging.info(' Setting {}'.format(hru.hru_psta_field))\n support.zone_by_centroid_func(\n ppt_zone_path, hru.hru_psta_field, hru_psta_field,\n hru.polygon_path, hru.point_path, hru)\n\n del ppt_zone_desc, ppt_zone_sr\n else:\n # Set all cells to zone 1\n arcpy.CalculateField_management(\n hru.polygon_path, hru.ppt_zone_id_field, 1, 'PYTHON')\n\n # Calculate ratios\n logging.info('\\nCalculating mean monthly PPT ratios')\n if set_ppt_zones_flag:\n # Read mean monthly values for each zone\n ppt_obs_dict = dict()\n ppt_obs_field_list = [\n ppt_obs_field_format.format(m) for m in month_list]\n fields = [ppt_zone_field] + ppt_obs_field_list\n logging.debug(' Obs. Fields: {}'.format(', '.join(fields)))\n\n with arcpy.da.SearchCursor(ppt_zone_path, fields) as s_cursor:\n for row in s_cursor:\n ppt_obs_dict[int(row[0])] = map(float, row[1:13])\n\n # Convert values to mm if necessary to match PRISM\n if units_factor != 1:\n ppt_obs_dict = {z: p * units_factor for z, p in ppt_obs_dict}\n\n ppt_zone_list = sorted(ppt_obs_dict.keys())\n logging.debug(' PPT Zones: {}'.format(ppt_zone_list))\n\n # Print the observed PPT values\n logging.debug(' Observed PPT')\n for zone, ppt_obs in ppt_obs_dict.items():\n logging.debug(' {}: {}'.format(\n zone, ', '.join(['{:.2f}'.format(x) for x in ppt_obs])))\n\n # Default all zones to a ratio of 1\n ppt_ratio_dict = {z: [1] * 12 for z in ppt_zone_list}\n\n # Get list of HRU_IDs for each zone\n fields = [hru.ppt_zone_id_field, hru.id_field]\n zone_hru_id_dict = defaultdict(list)\n with arcpy.da.SearchCursor(hru.polygon_path, fields) as s_cursor:\n for row in s_cursor:\n zone_hru_id_dict[int(row[0])].append(int(row[1]))\n\n # Check that PPT_HRU_IDs are in the correct zone\n # Default all PPT Zone HRU IDs to 0\n ppt_hru_id_dict = {z: 0 for z in ppt_zone_list}\n if ppt_hru_id_field is not None:\n fields = [ppt_zone_field, ppt_hru_id_field]\n logging.debug(' PPT Zone ID field: {}'.format(ppt_zone_field))\n logging.debug(' PPT HRU ID field: {}'.format(ppt_hru_id_field))\n with arcpy.da.SearchCursor(ppt_zone_path, fields) as s_cursor:\n for row in s_cursor:\n ppt_zone = int(row[0])\n hru_id = int(row[1])\n if hru_id == 0 or hru_id in zone_hru_id_dict[ppt_zone]:\n ppt_hru_id_dict[ppt_zone] = hru_id\n logging.debug(' {}: {}'.format(ppt_zone, hru_id))\n else:\n logging.error(\n '\\nERROR: HRU_ID {} is not in PPT ZONE {}'.format(\n hru_id, ppt_hru_id_dict[ppt_zone]))\n sys.exit()\n\n # Get gridded PPT values for each PPT_HRU_ID\n fields = [hru.ppt_zone_id_field, hru.id_field] + ppt_field_list\n # ppt_ratio_dict = dict()\n with arcpy.da.SearchCursor(hru.polygon_path, fields) as s_cursor:\n for row in s_cursor:\n ppt_zone = int(row[0])\n hru_id = int(row[1])\n if hru_id == 0:\n pass\n elif hru_id in ppt_hru_id_dict.values():\n ppt_gridded_list = map(float, row[2:14])\n 
ppt_obs_list = ppt_obs_dict[ppt_zone]\n ppt_ratio_list = [\n float(o) / p if p > 0 else 0\n for o, p in zip(ppt_obs_list, ppt_gridded_list)]\n ppt_ratio_dict[ppt_zone] = ppt_ratio_list\n del ppt_hru_id_dict, zone_hru_id_dict, fields\n\n logging.debug(' PPT Ratio Adjustment Factors:')\n for k, v in ppt_ratio_dict.items():\n logging.debug(' {}: {}'.format(\n k, ', '.join(['{:.3f}'.format(x) for x in v])))\n\n # DEADBEEF - ZONE_VALUE is calculated in zone_by_centroid_func\n # There is probably a cleaner way of linking these two\n fields = [hru.ppt_zone_id_field] + ppt_field_list + ratio_field_list\n with arcpy.da.UpdateCursor(hru.polygon_path, fields) as u_cursor:\n for row in u_cursor:\n ppt_zone = int(row[0])\n for i, month in enumerate(month_list):\n ppt_i = fields.index(ppt_field_format.format(month))\n ratio_i = fields.index(ratio_field_format.format(month))\n\n if ppt_zone in ppt_zone_list:\n ppt_obs = ppt_obs_dict[ppt_zone][i]\n else:\n ppt_obs = 0\n\n if ppt_obs > 0:\n row[ratio_i] = (\n ppt_ratio_dict[ppt_zone][i] * row[ppt_i] / ppt_obs)\n else:\n row[ratio_i] = 0\n u_cursor.updateRow(row)\n del row\n else:\n # Get gridded precip at PPT_HRU_ID\n fields = [hru.id_field] + ppt_field_list\n logging.debug(' Fields: {}'.format(', '.join(ppt_field_list)))\n\n # Convert values to mm if necessary to match PRISM\n if units_factor != 1:\n ppt_obs_list = [p * units_factor for p in ppt_obs_list]\n logging.debug(\n '\\nConverted Mean Monthly PPT ({}):\\n {}'.format(\n ppt_obs_units, ', '.join(map(str, ppt_obs_list))))\n\n # Scale all ratios so gridded PPT will match observed PPT at target cell\n if ppt_hru_id != 0:\n ppt_gridded_list = map(float, arcpy.da.SearchCursor(\n hru.polygon_path, fields,\n '\"{}\" = {}'.format(hru.id_field, ppt_hru_id)).next()[1:])\n logging.info(' Gridded PPT: {}'.format(\n ', '.join(['{:.2f}'.format(p) for p in ppt_gridded_list])))\n\n # Ratio of MEASURED or OBSERVED PPT to GRIDDED PPT\n # This will be multiplied by GRIDDED/OBSERVED below\n ppt_ratio_list = [\n float(o) / p if p > 0 else 0\n for o, p in zip(ppt_obs_list, ppt_gridded_list)]\n logging.info(' Obs./Gridded: {}'.format(\n ', '.join(['{:.3f}'.format(p) for p in ppt_ratio_list])))\n else:\n ppt_ratio_list = [1 for p in ppt_obs_list]\n\n # Use single mean monthly PPT for all cells\n # Assume ppt_obs_list is in month order\n fields = ppt_field_list + ratio_field_list\n with arcpy.da.UpdateCursor(hru.polygon_path, fields) as u_cursor:\n for row in u_cursor:\n for i, month in enumerate(month_list):\n ppt_i = fields.index(ppt_field_format.format(month))\n ratio_i = fields.index(ratio_field_format.format(month))\n\n if ppt_obs_list[i] > 0:\n row[ratio_i] = (\n ppt_ratio_list[i] * row[ppt_i] / ppt_obs_list[i])\n else:\n row[ratio_i] = 0\n u_cursor.updateRow(row)\n del row",
"def get_pressure_options():\n pressure_settings = {\n \"use_log\": \"off\",\n \"time\": \"0\",\n \"records\": \"0\",\n \"ad2val\": get_now_measure(),\n \"status\": checker_pressure.status,\n }\n try:\n with open(\"./data/pressure_sensor.json\", \"r\") as f: # Read the settings from file\n file_data = json.load(f)\n for key, value in file_data.iteritems():\n if key in pressure_settings:\n pressure_settings[key] = value\n except IOError:\n defaultpressure = {\n \"use_log\": \"off\",\n \"time\": \"0\",\n \"records\": \"0\",\n \"ad2val\": 0,\n \"status\": \"\",\n }\n\n with open(\"./data/pressure_sensor.json\", \"w\") as f: # write defalult settings to file\n json.dump(defaultpressure, f)\n\n except Exception:\n pass\n\n return pressure_settings",
"def loadParams(self):\n\n if len(self.filParams) < 3:\n return\n\n if not os.access(self.filParams, os.R_OK):\n return\n\n print(\"Priors.loadParams INFO: loading priors from %s\" \\\n % (self.filParams))\n\n # This is a little bit painful without just using something\n # more mature like astropy.table or pandas:\n hypers = np.genfromtxt(self.filParams, usecols=(1,2))\n\n # Convert the angular arguments to radians\n hypers[4] = np.radians(hypers[4])\n hypers[5] = np.radians(hypers[5])\n hypers[7] = np.radians(hypers[7])\n\n # transpose into hyperparams\n self.hyper = np.transpose(hypers)\n\n # now we need to read in the function names. This only really\n # has meaning for the mixed prior...\n strNames = np.genfromtxt(self.filParams, usecols=(0), dtype='str')\n self.mixedNames = list(strNames)\n\n # Finally, read in the name of the function\n with open(self.filParams, 'r') as rObj:\n for sLine in rObj:\n if sLine.find('#') < 0:\n continue\n if sLine.find('NAME') < 0:\n continue\n\n vLine = sLine.strip().split()\n self.namePrior = vLine[-1]",
"def load_params():\n with open('params.p', mode='rb') as in_file:\n return pickle.load(in_file)",
"def read_gbvi_parameters(filename):\n\n parameters = dict()\n \n infile = open(filename, 'r')\n for line in infile:\n # Strip trailing comments\n index = line.find('%')\n if index != -1:\n line = line[0:index] \n\n # Parse parameters\n elements = line.split()\n if len(elements) == 3:\n [atomtype, radius, gamma] = elements\n parameters['%s_%s' % (atomtype,'radius')] = float(radius)\n parameters['%s_%s' % (atomtype,'gamma')] = float(gamma)\n\n return parameters",
"def fromfile(self,file):\n self.d.update(params_file(file))",
"def read_from_pln(self, path):\n\n # Read the .pln file contents to a dictionary.\n pln_dict = read_pln_file(path)\n\n # Look for each attribute listed in self.attributes in the results\n # dictionary.\n for attr in self.attributes:\n\n # Get the corresponding ExoParameter object.\n current = getattr(self, attr)\n\n # Look for this attribute in the results dictionary and set\n # ExoParameter.value.\n key_str = attr\n try:\n current.value = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.value = current.default\n\n # Look for reference and URL information in the results dictionary,\n # and use this to set ExoParameter.reference and ExoParameter.url.\n # Skip 'transit' since 'transitref' and 'transiturl', are separate\n # fields in the references section.\n if not attr == \"transit\":\n\n key_str = \"\".join([attr, \"ref\"])\n try:\n current.reference = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.reference = None\n\n key_str = \"\".join([attr, \"url\"])\n try:\n current.url = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.url = None\n\n # If this attribute can take uncertainty values, look for these in\n # the results dictionary, then set ExoParameter.uncertainty and\n # ExoParameter.uncertainty_upper.\n if current.uncertain_flag:\n\n key_str = \"\".join([\"u\", attr])\n try:\n current.uncertainty = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.uncertainty = None\n\n key_str = \"\".join([\"u\", attr, \"d\"])\n try:\n current.uncertainty_upper = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.uncertainty_upper = None\n\n # If there are still keyword / value pairs in pln_dict, these fields\n # are not in the self.attributes list, which is built from\n # self.template_file.\n \"\"\"\n if len(pln_dict.keys()) > 0:\n print(\"{0} contains unknown .pln fields: {1}\".format(\n path, pln_dict.keys()))\n print(\"Add fields to {0} to include.\".format(self.template_file))\n \"\"\"\n\n # Trigger uncertainty calculations.\n self._populate_uncertainties()",
"def read_params(fname):\n f = open(fname, 'r')\n par = {} #output\n for i in range(10): # esta dentro de las primeras 10 lineas\n l = f.readline().split()\n #print \" ---> \", l\n number = u'%s' % l[-1] # presumably a number\n if not number.replace('.','').replace('-','').isnumeric():\n if l[0]=='#####':\n break\n else:\n continue # we proceed ONLY IF this is numeric string\n #print ' FIRST: ', l[0]\n if l[0]=='#####':\n #print \"IM I HERE????\"\n break # end of header\n\n name = l[1][:-1] # l[0] es '#', y -1 para comernos el \":\"\n value = np.float(l[2]) # l[2] es el valor\n par[name] = value\n\n return par",
"def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)",
"def ParseParameters(self, parameters_file_name):\n\n print(datetime.now().strftime(\"%H:%M:%S\") + \": Reading file \" + parameters_file_name + \"...\")\n\n parameters_file = open(parameters_file_name, \"r\")\n\n for line in parameters_file:\n line_parts = re.split(r'\\t+', line) # split elements of the line separated by tabs\n if(len(line_parts) == 2):\n self.AddParameter(line_parts[0], float(line_parts[1].strip()))\n else: # we want 2 elements per line\n raise Exception(\"Invalid format for the line \" + line)\n \n parameters_file.close()\n\n # check that all the mandatory parameters are correctly set\n if(self.npop1_1 == \"N/A\" or\n self.npop1_2 == \"N/A\" or\n self.npop2_1 == \"N/A\" or\n self.npop2_2 == \"N/A\" or\n self.t_div == \"N/A\" or\n self.npop_a == \"N/A\" or\n self.mu == \"N/A\"):\n raise Exception(\"One of mandatory parameters is missing!\")\n\n # check that introgression-related parameters are correctly set (either both equal 1 or both equal 0)\n if(self.t_i == 1 and self.p_i == 1): # introgression\n self.number_of_parameters = 9\n elif(self.t_i == 0 and self.p_i == 0): # no introgression\n self.number_of_parameters = 7\n else:\n raise Exception(\"Inconsistent values provided for parameters T_I and P_I!\")\n\n print(datetime.now().strftime(\"%H:%M:%S\") + \": Reading file done.\")",
"def update_pars(self, par_n, par_v):\n\t\tif self.init_et is None:\n\t\t\tself.load_init()\n\t\t\n\t\tself.update_et_pars(par_n, par_v)\n\t\tself.write_init()",
"def initialize(filename='params.yaml'):\n home_path = str(Path.home())\n project_path = 'Documents/SideProjects/sailboatsfactory'\n work_path = 'src/nn-core'\n params_path = join(home_path, join(project_path, work_path))\n yaml_file = join(params_path, filename)\n print(\"Reading parameters from:\", filename)\n with open(yaml_file, 'r') as f:\n my_params = load(f)\n my_params['x_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n my_params['y_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n\n raw = data.read(my_params)\n adjusted = adjust(raw, my_params)\n\n return adjusted, my_params",
"def _grab_injection_parameters_from_file(\n self, path, cls=None, add_nans=True, **kwargs\n ):\n if cls is None:\n from pesummary.core.file.injection import Injection\n cls = Injection\n data = cls.read(path, **kwargs).samples_dict\n for i in self.parameters:\n if i not in data.keys():\n data[i] = float(\"nan\")\n return data",
"def load_ps(self):\n self.ps = self.read_var(self.psvar)\n self.test_shape(self.psvar, self.ps.shape, 2)",
"def set_dimensional_parameters(self,\n file_name='dimensional_parameters.txt'):\n with open(file_name, 'r') as f:\n parameter_data = yaml.load(f)\n # Set initial conditions\n self.H_0 = parameter_data.get('H_0')\n self.A_1 = parameter_data.get('A_1')\n self.A_2 = parameter_data.get('A_2')\n self.K_1 = parameter_data.get('K_1')\n self.K_2 = parameter_data.get('K_2')\n # Set Dimensional parameters\n self.mu_H = parameter_data.get('mu_H')\n self.mu_1 = parameter_data.get('mu_1')\n self.mu_2 = parameter_data.get('mu_2')\n self.z_1 = parameter_data.get('z_1')\n self.z_1_tilde = parameter_data.get('z_1_tilde')\n self.z_2_tilde = parameter_data.get('z_2_tilde')\n self.pi_h_tilde = parameter_data.get('pi_h_tilde')\n self.pi_h = parameter_data.get('pi_h')\n self.pi_V = parameter_data.get('pi_V')\n self.pi_V_tilde = parameter_data.get('pi_V_tilde')\n self.a_1 = parameter_data.get('a_1')\n self.a_2 = parameter_data.get('a_2')\n self.e_1 = parameter_data.get('e_1')\n self.e_2 = parameter_data.get('e_2')\n self.omega_1 = parameter_data.get('omega_1')\n self.omega_2 = parameter_data.get('omega_2')\n self.eta_1 = parameter_data.get('eta_1')\n # Initial conditions\n self.initial_conditions = parameter_data.get('initial_conditions')\n # Noise amplitude\n self.beta_1 = parameter_data.get('beta_1')\n self.beta_2 = parameter_data.get('beta_2')\n self.beta_3 = parameter_data.get('beta_3')",
"def load_p(self):\n self.p = self.read_var(self.pvar)\n new_arr = []\n for p in range(np.shape(self.p)[0]):\n new_arr.append(p)\n self.p = new_arr\n self.p = np.array(self.p)\n self.test_shape(self.pvar, self.p.shape, 1)",
"def read_from(self, filename):\n if os.path.exists(filename):\n logger.info(\"Reading parameters from file {0}\".format(filename))\n cl, icoord, ispec, ireg, xori, yori, dx, dy, nx,\\\n ny, valex, snr, varbak = np.loadtxt(filename, comments='#', unpack=True)\n\n self.cl = cl\n self.icoordchange = int(icoord)\n self.ispec = int(ispec)\n self.ireg = int(ireg)\n self.xori = xori\n self.yori = yori\n self.dx = dx\n self.dy = dy\n self.nx = int(nx)\n self.ny = int(ny)\n self.valex = valex\n self.snr = snr\n self.varbak = varbak\n\n # Compute domain limits for later use\n self.xend = self.xori + (self.nx - 1) * self.dx\n self.yend = self.yori + (self.ny - 1) * self.dy\n\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')",
"def load_standard_parameters(self):\n paradic = {'x':'0',\n 'y':'0',\n 'n_oct':'8',\n 'n_spo':'3',\n 'sigma_min':'0.8',\n 'delta_min':'0.5',\n 'sigma_in':'0.5',\n 'C_DoG':'0.015',\n 'C_edge':'10',\n 'n_bins':'36',\n 'lambda_ori':'1.5',\n 't':'0.8',\n 'n_hist':'4',\n 'n_ori':'8',\n 'lambda_descr':'6',\n 'flag_match':'1',\n 'C_match':'0.6'}\n self.cfg['param']['paradic'] = paradic\n self.cfg.save()",
"def read_pressure(self, time_step, directory=None, **kwargs):\n print('[time-step {}] reading pressure field ...'.format(time_step)),\n dim3 = (len(self.grid) == 3)\n # get grid stations and number of cells along each direction\n x, y = self.grid[:2]\n nx, ny = x.size - 1, y.size - 1\n if dim3:\n z = self.grid[2]\n nz = z.size - 1\n # directory with numerical solution\n if not directory:\n directory = os.path.join(self.directory, '{:0>7}'.format(time_step))\n # read pressure\n phi_file_path = os.path.join(directory, 'phi.dat')\n p = PetscBinaryIO.PetscBinaryIO().readBinaryFile(phi_file_path)[0]\n # set pressure Field object\n if dim3:\n p = Field(label='pressure',\n time_step=time_step,\n x=0.5 * (x[:-1] + x[1:]),\n y=0.5 * (y[:-1] + y[1:]),\n z=0.5 * (z[:-1] + z[1:]),\n values=p.reshape((nz, ny, nx)))\n else:\n p = Field(label='pressure',\n time_step=time_step,\n x=0.5 * (x[:-1] + x[1:]),\n y=0.5 * (y[:-1] + y[1:]),\n values=p.reshape((ny, nx)))\n print('done')\n return p",
"def load_params():\n file_name = filedialog.askopenfilename(\n filetypes=[(\"JSON\", \"*.json\")])\n if file_name:\n self.parent_class.classes[\"fractal\"].curve.load_from_file(\n file_name)\n self.parent_class.classes[\"fractal\"].curve.set_parent_parameters(\n )\n self.rules_frame_class.fill_entries_from_rules(\n self.parent_class.classes[\"fractal\"].rules)\n # fill the entries in rules input on load\n self.set_recursion_depth_entry(\n self.parent_class.classes[\"fractal\"].recursion_depth)\n self.set_base_length_entry(\n self.parent_class.classes[\"fractal\"].base_length)\n self.rules_frame_class.render_preview()",
"def postpro(file,show=True):\n #folder = get_folder(atom,xyz,dn)\n p = None\n volume = None\n if is_complete(file,show):\n with open(file) as f:\n lines = f.readlines()\n for line in lines:\n if line.rfind(\"| Cartesian Polarization \") != -1:\n p = float64(split_line(line)[-3:]) #\n if line.rfind(\"| Unit cell volume \") != -1:\n volume = float(split_line(line)[-2])\n return p, volume\n else :\n return None,None",
"def write_parameters(par, version='git-devel'):\n # read template\n file = findpath('sesiflows.seistools') + '/' + 'specfem2d/par-' + version\n with open(file, 'r') as f:\n lines = f.readlines()\n lines[-1] = ' '.join(['1', str(par.NX), '1', str(par.NZ), '1'])\n\n # write parameter file\n file = 'DATA/Par_file'\n _writelines(file, lines)\n setpar('xmin', str(par.XMIN))\n setpar('xmax', str(par.XMAX))\n setpar('nx', str(par.NX))\n setpar('nt', str(par.NT))\n setpar('deltat', str(par.DT))\n setpar('nsources', str(1))\n\n # write interfaces file\n file = 'DATA/interfaces.dat'\n lines = []\n lines.extend('2\\n')\n lines.extend('2\\n')\n lines.extend('%f %f\\n'%(par.XMIN, par.ZMIN))\n lines.extend('%f %f\\n'%(par.XMAX, par.ZMIN))\n lines.extend('2\\n')\n lines.extend('%f %f\\n'%(par.XMIN, par.ZMAX))\n lines.extend('%f %f\\n'%(par.XMAX, par.ZMAX))\n lines.extend(str(par.NZ))\n _writelines(file, lines)"
] |
[
"0.6387034",
"0.6387034",
"0.6387034",
"0.6251736",
"0.61700636",
"0.61116606",
"0.58959746",
"0.5895648",
"0.58670104",
"0.5846452",
"0.58459103",
"0.57842356",
"0.5780423",
"0.5775546",
"0.57528794",
"0.5751866",
"0.5723168",
"0.5717525",
"0.56907386",
"0.56444967",
"0.56058186",
"0.55936086",
"0.55920494",
"0.5585596",
"0.557443",
"0.5550797",
"0.5544736",
"0.55297446",
"0.5492007",
"0.5472435"
] |
0.6842268
|
0
|
Subclasses should implement this method in order to take action when a task is scanned. There is no diff passed, because this method is called when there is no information about the previous state of a task.
|
def task_scanned(now_task):
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def task_changed(old_task, diff, now_task):",
"def task_scanned_template(task_id):\n #now_task = self._task_api_service.get_current_task_state(task_id)\n task = _client.tasks.find_by_id(task_id)\n altered_task = self.task_scanned(task)\n\n #diff = self._task_diff_service.calculate_diff_for_tasks(now_task, altered_task)\n #updated_task = self._task_api_service.patch_task(now_task, diff)\n #self._storage_service.store_task(updated_task)",
"def task_stagnant(task):",
"def _task_filter(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")",
"def has_task_changed(self, task):\n raise NotImplementedError()",
"def mark_as_in_progress(self, task):\n raise NotImplementedError('')",
"def task(self):",
"def task(self):",
"def task_status():\n pass",
"def _add_task_action(self, task):\n if not task.is_alive():\n return",
"def before_run(self, key: str, task: Task, executor: \"TaskGraphExecutor\") -> None:",
"def task2(self):\n\n pass",
"def _task_started(self, task_key, version):\n\n pass",
"def task1(self):\n \n pass",
"def is_task_stagnant(task):",
"def __post_init__(self) -> None:\n for node in self.task_graph:\n self.task_graph.nodes[node][\"info\"] = TaskInfo(TaskStatus.NOT_STARTED)",
"def __post_init__(self) -> None:\n for node in self.task_graph:\n self.task_graph.nodes[node][\"info\"] = TaskInfo(TaskStatus.NOT_STARTED)",
"def task():",
"def choose_action(self, state, task=0):\n pass",
"def check_task(self): \n return self.buffer[0]",
"def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status",
"def run(self):\n modify_tasks = filter(self._task_filter, acm.FAelTask.Select(''))\n print([task.Name() for task in modify_tasks])\n for task in modify_tasks:\n #new_task = task.Clone()\n self._update(task)\n try:\n task.Commit()\n except:\n print('Skipping: Task already exists')",
"def on_task_output(cls, task: Task, config: dict) -> None:",
"def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()",
"def task(self, name):\n pass",
"def task():\n pass",
"def task():\n pass",
"def _update(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")",
"def before_task_start(self, task_db, task_spec):\n # No-op by default.\n pass",
"def run_task(self) -> Task:"
] |
[
"0.6763695",
"0.6342406",
"0.6217184",
"0.6168291",
"0.61488265",
"0.61090827",
"0.60786176",
"0.60786176",
"0.59393173",
"0.59091526",
"0.5881955",
"0.58488715",
"0.58312756",
"0.5780966",
"0.57751566",
"0.57066983",
"0.57066983",
"0.56682956",
"0.5654614",
"0.5641109",
"0.5634073",
"0.56328094",
"0.5632466",
"0.5625493",
"0.5617789",
"0.5594003",
"0.5594003",
"0.5564043",
"0.5543808",
"0.55385095"
] |
0.78586817
|
0
|
Template method strategy for creating the action for taskChanged: we have the old and now states, and can pass these to a handler.
|
def task_changed_template(task_id):
#Assume for now that we can definitely get the event from an event stream
old_task = self._storage_service.get_stored_task(task_id)
now_task = self._task_api_service.get_current_task_state(task_id)
diff = self._task_diff_service.calculate_diff_for_tasks(old_task, now_task)
altered_task = self.task_changed(old_task, diff, now_task)
diff = self._task_diff_service.calculate_diff_for_tasks(now_task, altered_task)
updated_task = self._task_api_service.patch_task(now_task, diff)
self._storage_service.store_task(updated_task)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def task_changed(old_task, diff, now_task):",
"def task_changed(self, fields):\n update = {}\n for field in fields:\n update[field] = self.__data[field]\n\n self.connection.update_task(self.name, update)",
"def update_job_state(self, *tasks, **extra_args):\n pass",
"def test_update_task_states(self):\r\n changed = self.combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n current_task = self.combinedoe.current_task\r\n current_task.change_state(CombinedOpenEndedV1Module.DONE)\r\n changed = self.combinedoe.update_task_states()\r\n\r\n self.assertTrue(changed)",
"def change_task_state(self, new_state):\n self.task_state = new_state",
"def choose_action(self, state, task=0):\n pass",
"def __update_task(self, tasks, **extra_args):\n for task in tasks:\n assert isinstance(\n task, Task), \"Core.update_job_state: passed an argument\" \\\n \" which is not a `Task` instance.\"\n task.update_state()",
"def updater(task):\n\n # The following code updates the lights depending upon the status of a task\n if task.labels:\n imgLbl = globs.DISP_SCREEN.progress.rightList[task.localid]\n if task.status == 0: \n pixmap = QPixmap('images/amber.png')\n elif task.status == 2 or task.status == 5:\n pixmap = QPixmap('images/green.png')\n globs.CONTROLLER.pbar.updateCompleted(task.localid + 1)\n elif task.status == 4:\n pixmap = QPixmap('images/red.png')\n imgLbl.setPixmap(pixmap)\n\n\n # Get ready to handle errors\n if task.status == 4:\n globs.CONTROLLER.resbtn.setDisabled(False)\n globs.CONTROLLER.fixbtn.setDisabled(False)\n globs.CONTROLLER.taskMonitor.errorBox.append(\"Errors Encountered While Performing Task #%d\"%(task.localid+1))\n globs.CONTROLLER.taskMonitor.errorBox.append(\"\")\n\n # Read Spooled RowCounts into accessible dictionaries\n if task.op == 100:\n if task.status == 2:\n readRows(task.phase)\n rowcmenu = getattr(globs,'RowCMenu'+task.phase)\n print(\"Creating and Adding New View Row Actions for Phase %s\"%task.phase)\n actions = [QAction(comp, globs.MAIN_WINDOW) for comp in globs.COMPONENTS] \n for act_i in range(len(actions)):\n rowcmenu.addAction(actions[act_i])\n actions[act_i].triggered.connect(lambda f, i = act_i: updateTable(task.phase, \"ROW\", globs.COMPONENTS[i]))\n elif task.op == 101:\n if task.status == 2:\n readInvalidObjects(task.phase)\n menu = getattr(globs,\"InvalidCMenu\"+task.phase)\n print(\"Creating and Adding New View Invalid Object Count Actions for Phase %s\"%task.phase)\n actions = [QAction(comp, globs.MAIN_WINDOW) for comp in globs.COMPONENTS] \n for act_i in range(len(actions)):\n menu.addAction(actions[act_i])\n actions[act_i].triggered.connect(lambda f, i = act_i: updateTable(task.phase, \"INVALIDOBJ\", globs.COMPONENTS[i]))\n globs.LAST_TASK = task\n\n ### Unrequired Code (Mainly for Automatically Showing the Tables)\n\n # globs.DISP_SCREEN.currentWidget.hide()\n # globs.DISP_SCREEN.tview.show()\n # globs.DISP_SCREEN.currentWidget = globs.DISP_SCREEN.tview\n # dct = globs.InvalidCountDict\n # lst = dct[globs.COMPONENTS[4]]\n # globs.DISP_SCREEN.tview.setData(lst, \"Owner; Constraint_Name; Table_Name; Status\")\n # rowcmenu = globs.RowCMenu\n # print(\"Creating and Adding New View Row Actions\")\n # actions = [QAction(comp, globs.MAIN_WINDOW) for comp in globs.COMPONENTS] \n # funcs = []\n # for act_i in range(len(actions)):\n # rowcmenu.addAction(actions[act_i])\n # actions[act_i].triggered.connect(lambda f, i = act_i: updateTable(globs.COMPONENTS[i]))",
"def _on_state_change(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n if new == self.properties[CONF_TARGET_STATE]:\n if self.properties.get(CONF_DELAY):\n self.handles[HANDLE_TOGGLE_STATE] = self.run_in(\n self._on_schedule_toggle,\n self.properties[CONF_DELAY],\n state=self.properties[CONF_SWITCH_STATE],\n )\n else:\n self.toggle(state=self.properties[CONF_SWITCH_STATE])\n else:\n if HANDLE_TOGGLE_STATE in self.handles:\n handle = self.handles.pop(HANDLE_TOGGLE_STATE)\n self.cancel_timer(handle)",
"def state_changed(self, old_state, new_state, target_state):\n pass",
"def edit_event_task(self):\n self.edit_event()",
"def update_task(self, name, fields):\n pass",
"def state_changed(self, oldstate, newstate, event, *args, **kwargs):",
"def state_changed(self, oldstate, newstate, event, *args, **kwargs):",
"def _update(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")",
"def create_task_event(sender, instance, name, source, target, **kwargs):\n ev_type = \"error\" if target in [\"failed\", \"rejected\"] else \"info\"\n ev = ReleaseEvent(\n event_type=ev_type,\n message=\"task {} changed from {} to {}\".format(\n instance.kf_id, source, target\n ),\n release=instance.release,\n task=instance,\n release_service=instance.release_service,\n )\n ev.save()",
"def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:\n self.logger.debug(\n \"Flow '{name}': Handling state change from {old} to {new}\".format(\n name=self.flow.name,\n old=type(old_state).__name__,\n new=type(new_state).__name__,\n )\n )\n for handler in self.flow.state_handlers:\n new_state = handler(self.flow, old_state, new_state) or new_state\n\n return new_state",
"def has_task_changed(self, task):\n raise NotImplementedError()",
"def timestamper(task, old_state, new_state):\n new_state.timestamp = pendulum.now(\"utc\")\n if hasattr(old_state, \"timestamp\"):\n duration = (new_state.timestamp - old_state.timestamp).in_seconds()\n task.logger.info(\n \"{} seconds passed in between state transitions\".format(duration)\n )\n return new_state",
"def _update_task(cls, workbook, task, state, task_output):\n task_spec = workbook.tasks.get(task[\"name\"])\n task_runtime_context = task[\"task_runtime_context\"]\n\n # Compute the outbound_context, state and exec_flow_context.\n outbound_context = data_flow.get_outbound_context(task, task_output)\n state, task_runtime_context = retry.get_task_runtime(\n task_spec, state, outbound_context, task_runtime_context)\n\n # Update the task.\n update_values = {\"state\": state,\n \"output\": task_output,\n \"task_runtime_context\": task_runtime_context}\n task = db_api.task_update(task[\"id\"], update_values)\n\n return task, outbound_context",
"def state_changed(target, new_value, old_value, initiator):\n\n if (new_value == _WorkState.RUNNING and\n (old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or\n target.time_started == None)):\n target.time_started = datetime.utcnow()\n target.time_finished = None\n\n elif new_value in (_WorkState.DONE, _WorkState.FAILED):\n target.time_finished = datetime.utcnow()",
"def transition(self, key, start, finish, *args, **kwargs):\n # Only update Tethys on tasks (keys) it cares about\n tracked_key = self.scheduler.get_metadata(keys=[key], default=False)\n\n if tracked_key:\n # Build update dask job status request against bound Tethys host\n combined_status = '{}-{}'.format(start, finish)\n url = self.tethys_endpoint + '/update-dask-job-status/' + key + '/?status=' + combined_status\n\n # Prevent deadlock\n if start != 'released':\n # Submit update request to Tethys Asynchronously\n http_client = AsyncHTTPClient()\n http_client.fetch(url, method='GET')",
"def _set_task(self, goal):\n if goal.actionID == 'dh_change':\n self.dh_change(goal)\n elif goal.actionID == 'set_rcvel':\n self.set_rcvel(goal)\n elif goal.actionID == 'gate_pass':\n self.gate_pass(goal)\n elif goal.actionID == 'object_center':\n self.object_center(goal)\n elif goal.actionID == 'arm':\n self.arm(goal.arm)\n elif goal.actionID == 'rc_off':\n self.rc_off()\n else:\n rospy.loginfo('%s actionID not recognized'%goal.actionID)",
"def update_task_states(self):\r\n changed = False\r\n if not self.ready_to_reset:\r\n self.task_states[self.current_task_number] = self.current_task.get_instance_state()\r\n current_task_state = json.loads(self.task_states[self.current_task_number])\r\n if current_task_state['child_state'] == self.DONE:\r\n self.current_task_number += 1\r\n if self.current_task_number >= (len(self.task_xml)):\r\n self.state = self.DONE\r\n self.current_task_number = len(self.task_xml) - 1\r\n else:\r\n self.state = self.INITIAL\r\n changed = True\r\n self.setup_next_task()\r\n return changed",
"def _update_state(context, node, instance, state):\n values = {'task_state': state}\n if not instance:\n values['instance_uuid'] = None\n values['instance_name'] = None\n db.bm_node_update(context, node['id'], values)",
"def take_action(self, state):",
"def _complete_task(self, need_transfer):\n instance = self.instance\n task = self.task\n transition = self.transition\n\n task.status = \"completed\"\n task.save()\n\n to_node = self.to_node if need_transfer else instance.cur_node\n self.to_node = to_node\n\n event = None\n pre_last_event = instance.last_event()\n if pre_last_event and pre_last_event.new_node.node_type == \"router\":\n event = pre_last_event\n event.new_node = to_node\n event.save()\n\n if not event:\n event = create_event(\n instance,\n transition,\n comment=self.comment,\n user=self.operator,\n old_node=task.node,\n new_node=to_node,\n task=task,\n )\n\n if self.attachments:\n event.attachments.add(*self.attachments)\n\n self.last_event = event\n\n return event",
"def update_task(request, tid):\n try:\n slogger.task[tid].info(\"update task request\")\n labels = request.POST['labels']\n task.update(tid, labels)\n except Exception as e:\n slogger.task[tid].error(\"cannot update task\", exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return HttpResponse()",
"def state_change(\n self,\n cb: CircuitBreaker,\n old_state: CircuitBreakerState | None,\n new_state: CircuitBreakerState,\n ) -> None:",
"def StatusChanged(self, state, info):\n pass"
] |
[
"0.76264495",
"0.62410367",
"0.62073314",
"0.6146876",
"0.60325545",
"0.5998154",
"0.59310573",
"0.59260416",
"0.5913432",
"0.59117657",
"0.5901368",
"0.5867086",
"0.5856196",
"0.5856196",
"0.58469135",
"0.58452576",
"0.58048415",
"0.5733319",
"0.5696745",
"0.56281507",
"0.5626541",
"0.55545855",
"0.5539155",
"0.55161446",
"0.54915965",
"0.54830885",
"0.5446205",
"0.5443759",
"0.5429905",
"0.5423899"
] |
0.628052
|
1
|
Subclasses should implement this method in order to take action when a task changes. It should be assumed that the difference between the old and the new task is represented accurately, but that there may have been intermediate changes in the meantime. For instance, if the task had State A and then becomes State B, and this method is called, the diff will be between states A and B. If the task had State A, then becomes State B, and then State C, and this method is called, the delta will be between states A and C. In other words, we can roughly represent the changes between the last time this script made a change to the task and the current state of the task (but perhaps not the intermediate states).
|
def task_changed(old_task, diff, now_task):
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def timestamper(task, old_state, new_state):\n new_state.timestamp = pendulum.now(\"utc\")\n if hasattr(old_state, \"timestamp\"):\n duration = (new_state.timestamp - old_state.timestamp).in_seconds()\n task.logger.info(\n \"{} seconds passed in between state transitions\".format(duration)\n )\n return new_state",
"def state_changed(target, new_value, old_value, initiator):\n\n if (new_value == _WorkState.RUNNING and\n (old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or\n target.time_started == None)):\n target.time_started = datetime.utcnow()\n target.time_finished = None\n\n elif new_value in (_WorkState.DONE, _WorkState.FAILED):\n target.time_finished = datetime.utcnow()",
"def task_changed_template(task_id):\n #Assume for now that we can definitely get the event from an event stream\n old_task = self._storage_service.get_stored_task(task_id)\n now_task = self._task_api_service.get_current_task_state(task_id)\n diff = self._task_diff_service.calculate_diff_for_tasks(old_task, now_task)\n\n altered_task = self.task_changed(old_task, diff, now_task)\n\n diff = self._task_diff_service.calculate_diff_for_tasks(now_task, altered_task)\n updated_task = self._task_api_service.patch_task(now_task, diff)\n self._storage_service.store_task(updated_task)",
"def state_changed(self, old_state, new_state, target_state):\n pass",
"def diff(self):\n if self.event == 'Create':\n old = ''\n else:\n # Get the Change just ahead of _this_ change because that has the\n # state of the Resource before this Change occurred.\n # TODO(nickpegg): Get rid of this if we change the behavior of\n # Change to store the previous version of the object\n old_change = Change.objects.filter(\n change_at__lt=self.change_at,\n resource_id=self.resource_id,\n resource_name=self.resource_name\n ).order_by(\n '-change_at'\n ).first()\n old = json.dumps(old_change._resource, indent=2, sort_keys=True)\n\n if self.event == 'Delete':\n current = ''\n else:\n resource = apps.get_model(self._meta.app_label, self.resource_name)\n obj = resource.objects.get(pk=self.resource_id)\n\n serializer_class = self.get_serializer_for_resource(\n self.resource_name)\n serializer = serializer_class(obj)\n current = json.dumps(serializer.data, indent=2, sort_keys=True)\n\n diff = \"\\n\".join(difflib.ndiff(\n old.splitlines(),\n current.splitlines()\n ))\n\n return diff",
"def has_task_changed(self, task):\n raise NotImplementedError()",
"def test_update_task_states(self):\r\n changed = self.combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n current_task = self.combinedoe.current_task\r\n current_task.change_state(CombinedOpenEndedV1Module.DONE)\r\n changed = self.combinedoe.update_task_states()\r\n\r\n self.assertTrue(changed)",
"def has_task_changed(self, task):\n\n reloaded = self.get_task(task.id)\n\n if reloaded is None:\n return True\n\n if reloaded.when != task.when:\n return True\n\n return False",
"def changed(self):\n\t\tpass",
"def _on_state_change(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n new_value = float(new)\n\n above = self.properties.get(CONF_ABOVE)\n below = self.properties.get(CONF_BELOW)\n\n if above and new_value >= above:\n self.toggle(state=self.properties[CONF_STATE])\n elif below and new_value < below:\n self.toggle(state=self.properties[CONF_STATE])\n else:\n self.toggle(opposite_of=self.properties[CONF_STATE])",
"def changeState(curr, new):\n trans = Transitions()\n if new not in trans.states():\n raise TaskStateException(\"New '%s' status is not valid\" %new)\n if curr not in trans:\n raise TaskStateException(\"Current '%s' status is not valid\" %curr)\n if new not in trans[curr]:\n raise TaskStateException(\"Transition from '%s' to '%s' is forbidden.\" %(curr, new))\n ## transition is valid\n return new",
"def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')",
"def delta(self) -> None:",
"def task_changed(self, fields):\n update = {}\n for field in fields:\n update[field] = self.__data[field]\n\n self.connection.update_task(self.name, update)",
"def update_task_states(self):\r\n changed = False\r\n if not self.ready_to_reset:\r\n self.task_states[self.current_task_number] = self.current_task.get_instance_state()\r\n current_task_state = json.loads(self.task_states[self.current_task_number])\r\n if current_task_state['child_state'] == self.DONE:\r\n self.current_task_number += 1\r\n if self.current_task_number >= (len(self.task_xml)):\r\n self.state = self.DONE\r\n self.current_task_number = len(self.task_xml) - 1\r\n else:\r\n self.state = self.INITIAL\r\n changed = True\r\n self.setup_next_task()\r\n return changed",
"def state_changed(self, oldstate, newstate, event, *args, **kwargs):",
"def state_changed(self, oldstate, newstate, event, *args, **kwargs):",
"def state_change(\n self,\n cb: CircuitBreaker,\n old_state: CircuitBreakerState | None,\n new_state: CircuitBreakerState,\n ) -> None:",
"def debug_state_change(self, old_state, new_state):\n raise NotImplementedError",
"def change_task_state(self, new_state):\n self.task_state = new_state",
"def get_change(self, ):\n return self.get_parameter('change')",
"def diff(self):\n return self.client.api.diff(self.id)",
"def changed(self, *args, **kwargs): # real signature unknown\n pass",
"def step_changes(self) -> pd.Series:\n return self._get_deltas().copy()",
"def diff(self, x1, x2):\n return self.State.diff(x1, x2)",
"def _diff(self, param, diff):\n pass",
"def delta(self, incoming, outgoing, above):\n raise NotImplementedError",
"def has_changed(self):\n return self.get_old_value() != self.get_current_value()",
"def _determine_changes(self, job_name_a, job_name_b):\n\n node_a = self._graph_a.get_node(job_name_a)\n node_b = self._graph_b.get_node(job_name_b)\n changes = []\n self._changes[job_name_b] = changes\n\n # Check for same job type name and version\n if node_a.job_type_name != node_b.job_type_name:\n msg = 'Job type changed from %s to %s'\n changes.append(Change('JOB_TYPE_CHANGE', msg % (node_a.job_type_name, node_b.job_type_name)))\n return\n if node_a.job_type_version != node_b.job_type_version:\n msg = 'Job type version changed from %s to %s'\n changes.append(Change('JOB_TYPE_VERSION_CHANGE', msg % (node_a.job_type_version, node_b.job_type_version)))\n return\n\n # Check that A and B have matching parents that are identical to one another\n a_parent_names = set(a_parent.node_name for a_parent in node_a.parents)\n for b_parent in node_b.parents:\n b_parent_name = b_parent.node_name\n if b_parent_name not in self._identical_nodes:\n changes.append(Change('PARENT_CHANGE', 'Parent job %s changed' % b_parent_name))\n return # B has a parent that is not identical to any other node\n matched_a_parent_name = self._identical_nodes[b_parent_name]\n if matched_a_parent_name not in a_parent_names:\n changes.append(Change('NEW_PARENT', 'New parent job %s added' % b_parent_name))\n return # B has a parent that does not match a parent of A\n a_parent_names.remove(matched_a_parent_name)\n if a_parent_names:\n changes.append(Change('REMOVED_PARENT', 'Previous parent job %s removed' % a_parent_names.pop()))\n return # A has a parent that does not match a parent of B\n\n # Check that A and B use the same inputs\n a_inputs = dict(node_a.inputs)\n for b_input_name in node_b.inputs:\n if b_input_name not in a_inputs:\n changes.append(Change('NEW_INPUT', 'New input %s added' % b_input_name))\n return # B input not defined for A\n b_input = node_b.inputs[b_input_name]\n a_input = a_inputs[b_input_name]\n if not a_input.is_equal_to(b_input, self._matched_recipe_inputs, self._identical_nodes):\n changes.append(Change('INPUT_CHANGE', 'Input %s changed' % b_input_name))\n return # A and B have a non-matching input\n del a_inputs[b_input_name]\n if a_inputs:\n changes.append(Change('REMOVED_INPUT', 'Previous input %s removed' % a_inputs.keys().pop()))\n return # A input not defined for B",
"def updated_if_changed(attr, t_list, first=False):\n existing = getattr(self, attr)\n new = get_transition_date_from_history(\n t_list, state_history, first=first\n )\n if new != existing:\n setattr(self, attr, new)"
] |
[
"0.6563116",
"0.6327758",
"0.6302768",
"0.62952566",
"0.6154934",
"0.58865243",
"0.5851994",
"0.5839404",
"0.5794097",
"0.57631093",
"0.57603806",
"0.5752358",
"0.57121783",
"0.5634282",
"0.5623804",
"0.5582487",
"0.5582487",
"0.55473316",
"0.55443513",
"0.55149525",
"0.549995",
"0.544815",
"0.5444166",
"0.54414934",
"0.5437321",
"0.54324704",
"0.5389293",
"0.53639114",
"0.5347489",
"0.53464985"
] |
0.80793357
|
0
|
The task may be stagnant. What stagnant means depends on the business logic. One example may be that you expect the task to have, say, a comment placed on it every week; if that is the requirement, you should check the task we think might be stagnant in order to verify.
|
def is_task_stagnant(task):
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def task_stagnant(task):",
"def task_stagnant_template(task_id):\n # The intent of this is not to incur *any* restrictions on external sources (like cron) because\n # that goes against the simplicity clause. This is simply checked by this framework every time\n # major events get executed (like a new run of the CLI app, a new webhook is received).\n # Since we use a local DB, we can make this pretty reactive - we can check the last previous state\n # of the task to find if it's likely to be stale.\n\n # This is possible because:\n # If we have to scan, we can scan for tasks that are older than config.stagnant or whatever. We're scanning anyway. All tasks get checked.\n # If we don't have to scan, we would have gotten task added, and can forward them to the scan (for instance, stagnant can be 10 minutes, and we didn't run this for a day - a task got added, but can also be stagnant).\n # If we didn't get task added, it should be in the DB insofar as deleted or checked off tasks can still\n # be queried in the API.\n \n # Therefore, we take a double-checked-lock sort of approach: try with the cached task if it exists,\n # If it passes as stagnant, we update and try again (since it might need updating). If it's still\n # stagnant, pass it on.\n \n old_task = self._storage_service.get_stored_task(task_id)\n if self.is_task_stagnant(old_task):\n now_task = self._task_api_service.get_current_task_state(task_id)\n if self.is_task_stagnant(now_task):\n\n altered_task = self.task_stagnant(now_task)\n\n diff = self._task_diff_service.calculate_diff_for_tasks(now_task, altered_task)\n updated_task = self._task_api_service.patch_task(now_task, diff)\n self._storage_service.store_task(updated_task)",
"def test_full_task(self):\n task = Task({\n 'name': 'test',\n 'id': 1,\n 'stage_id' : [1, 'name'],\n 'date_deadline': '2018-10-31',\n 'date_start': '2018-10-20 00:00:00',\n 'date_end': '2018-10-31 23:59:00',\n 'partial_messages': [{'date':'2018-10-21 12:00:00'}],\n 'kanban_state': 'blocked',\n 'full_project_name': 'test_project',\n 'planned_hours': 100,\n 'priority': '1'\n })\n self.assertIsNotNone(task)\n self.assertEqual(task.project, 'test_project')",
"def test_create_two_independent_tasks():\n task1 = Task(\"print book\",\"Fabio\",True,1)\n task2 = Task(\"buy book\", \"Elisa\", False, 2)\n\n assert task1.summary != task2.summary",
"def dumb_task():\n return True",
"def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"harvest_oai\",\n \"create_collection\",\n \"combine_index\",\n \"solr_alias_swap\",\n \"success_slack_trigger\"\n ])",
"def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"harvest_oai\",\n \"create_collection\",\n \"combine_index\",\n \"solr_alias_swap\",\n \"success_slack_trigger\"\n ])",
"def create_flight_needs_task(self):\n duration = self.trip.arrival_date_time - self.trip.departure_date_time\n if duration > timedelta(hours=2):\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"It's a long flight ! Don't forget your earplugs and your sleep mask.\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))\n else:\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"Take some food and some drinks for your flight\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))",
"def _verify_task(self, task_type: str = None) -> bool:\n\n return task_type in [\n self.BINARY_CLASSIFICATION, self.CATEGORICAL_CLASSIFICATION,\n self.REGRESSION\n ]",
"def task_status():\n pass",
"def _validate_task(task: str) -> str:\n supported = (\"regression\", \"binary\")\n if not any([task in item for item in supported]):\n raise ValueError(f\"{task} is not a supported task.\")\n return task",
"def task_type(self):\n pass",
"def can_dry_run(self, task: \"TaskView\") -> bool:\n return False",
"def task_scanned(now_task):",
"def test_allowed_if_in_task(self):\n\n @task_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n request = self.factory.get(\"/\")\n request.META[_TASK_NAME_HEADER] = \"test\"\n with sleuth.fake(\"djangae.environment.is_in_task\", True):\n response = view(request)\n\n self.assertEqual(response.status_code, 200)",
"def create_malaria_task(self):\n if not self.a_country is self.d_country and self.a_country.malaria_presence:\n self.tasks.append(self.trip.tasks.create(\n title=\"Protection against mosquitoes\",\n comments=\"Insect repellent, insecticide-treated bednet and pre-treating clothing\",\n category=TaskCategory.objects.get(name=\"Health\"),\n deadline=self.trip.departure_date_time - timedelta(days=3)\n ))",
"def checkRely(self, task):\n if not isinstance(task, dict):\n return False\n keys = task.get(\"rely\")\n #is empty or crontab, explain upstream is true\n if not keys or task.get(\"task_type\") == \"crontab\":\n return True\n\n keyl = []\n for k, v in keys.items():\n keyl.append(k)\n\n date = task.get(\"task_day\")\n if not date:\n date = self.date\n\n mkeys = [{\"task_key\": k} for k in keyl]\n tlist = {}\n for doc in self.mgdb.task_history.find({\"$or\": mkeys, \"task_day\": date}):\n tlist[doc.get(\"task_key\")] = doc\n\n if not tlist or len(tlist) != len(mkeys):\n #when debug, always return true.\n if self.config.get(\"is_debug\"):\n return True\n else:\n return False\n for c, d in tlist.iteritems():\n if d.get(\"status\") != \"finished\":\n return False\n\n return True",
"def executeTask(self, user, doc, task):\n if task == \"2a\":\n if(doc !=\"\"):\n return self.task2a(doc)\n else:\n print( \"DOC UUID needs to be specified\")\n return \"DOC UUID needs to be specified\"\n elif task == \"2b\":\n if(doc !=\"\"):\n return self.task2b(doc)\n else:\n print( \"DOC UUID needs to be specified\")\n return \"DOC UUID needs to be specified\"\n elif task == \"3a\":\n return self.task3a()\n elif task ==\"3b\":\n return self.task3b()\n elif task == \"4\":\n return self.listToStringFormat(self.task4())\n elif task == \"5a\":\n if((doc !=\"\") and (user!=\"\")):\n return self.listToStringFormat(self.task5(doc, user, sorting =\"readership\"))\n else:\n print( \"Both Doc UUID and visitor UUID need to be specified\")\n return \"Both Doc UUID and visitor UUID need to be specified\"\n elif task == \"5b\":\n if((doc !=\"\") and (user!=\"\")):\n return self.listToStringFormat(self.task5(doc, user, sorting = \"count\"))\n else:\n print( \"Both Doc UUID and visitor UUID need to be specified\")\n return \"Both Doc UUID and visitor UUID need to be specified\"\n elif task == \"6\":\n if(doc !=\"\"):\n return self.task6(doc)\n else:\n print( \"DOC UUID needs to be specified\")\n return \"DOC UUID needs to be specified\"\n elif task == \"7\":\n if(doc !=\"\"):\n return self.task7(doc)\n else:\n print( \"DOC UUID needs to be specified\")\n return \"DOC UUID needs to be specified\"\n else:\n print(\"NO SUCH TASK\")\n return \"NO SUCH TASK\"",
"def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False",
"def test_missing_dates(self):\n task = Task({\n 'name': 'test',\n 'id': 1,\n 'stage_id' : [1, 'name'],\n 'date_deadline': False,\n 'date_start': False,\n 'date_end': False,\n 'full_project_name': 'test_project',\n 'partial_messages': [{'date':'2018-10-21 12:00:00'}],\n 'kanban_state': 'blocked',\n 'planned_hours': 100,\n 'priority': '1'\n })\n self.assertIsNotNone(task)",
"def test_allowed_if_in_task(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n request = self.factory.get(\"/\")\n request.META[_TASK_NAME_HEADER] = \"test\"\n\n response = view(request)\n self.assertEqual(response.status_code, 200)",
"def schedule_task(self, Tau):\n return random.choice(self.tasks)",
"def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')",
"def updateTask(task):\n # First check to see if task exists\n detailed_ticket = jutdaapi.get_detailed_ticket(task._ticket_id)\n if not detailed_ticket:\n print 'task does not exist yet'\n return False\n # If so, check that things have actually changed (diff edited and orig)\n database_task = ticketToTask(detailed_ticket)\n if task._orig == task:\n return 'no changes to make'\n return True\n # If so, check that no one else has made changes (diff orig and database)\n if not database_task == task._orig:\n print 'task has changed in database; refresh task!'\n return False\n #priority = (task.priority + 2) / 2\n priority = task.priority\n if task.assigner not in ['no one', 'Unassigned']:\n title = '('+task.assigner.title()+') '+task.name\n #if task.name[-1] == ' ':\n # title = task.name + 'for: '+task.assigner.title()\n #else:\n # title = task.name + ' for: '+task.assigner.title()\n else:\n title = task.name\n description = task.description\n #if task.assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+task.assigner+'\"/>'\n if 't' not in task.id:\n description += '<tasktrackermeta id=\"'+task.id+'\"/>'\n return jutdaapi.edit_ticket(task._ticket_id, title=title, queue=None, submitter_email=None,\n description=description, priority=priority)",
"def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False",
"def task(self, name):\n pass",
"def task():",
"def task():\n pass",
"def task():\n pass",
"def test_task_instance(self) -> None:\n self.assertTrue(isinstance(self.test_task, Tasks))"
] |
[
"0.7626539",
"0.66782004",
"0.64053106",
"0.63221794",
"0.6262628",
"0.62052846",
"0.62052846",
"0.61971396",
"0.6168194",
"0.6131822",
"0.61091554",
"0.60977364",
"0.60569406",
"0.5902786",
"0.5871476",
"0.58550227",
"0.58522946",
"0.5828267",
"0.5806666",
"0.57601726",
"0.5758834",
"0.5758817",
"0.5756198",
"0.5736045",
"0.57210857",
"0.5716392",
"0.57127666",
"0.57082134",
"0.57082134",
"0.5696148"
] |
0.83593345
|
0
|
get all the children of gmac_id from the redis DB, much FASTER
|
def get_all_children_from_redis(gmac_id, as_objects=True):
conn = get_redis_connection()
klass = GoogleMapsAddressComponent
results = klass.get_all_children_id_list_from_redis_by_pk(gmac_id)
if as_objects:
results = klass.objects.filter(pk__in=results)
return results
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_all_children_id_list_from_redis_by_pk(gmac_id):\n try:\n gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)\n length = conn.llen(key)\n return conn.lrange(key, 0, length)\n except GoogleMapsAddressComponent.DoesNotExist:\n return None",
"def get_children_from_redis(gmac_id, as_objects=True):\n conn = get_redis_connection()\n klass = GoogleMapsAddressComponent\n results = []\n queue = []\n children = klass.get_children_id_list_from_redis_by_pk(gmac_id)\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = klass.get_children_id_list_from_redis_by_pk(node)\n results.extend(children)\n queue.extend(children)\n if as_objects:\n results = klass.objects.filter(pk__in=results)\n return results",
"def sync_all_children_to_redis(self):\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(self.pk)\n # First, we make sure the key gets destroyed if it exists\n conn.delete(key)\n # Now we add the keys of the children to the list\n children = self.get_all_children_seq()\n for child in children:\n conn.lpush(key, child.pk)",
"def _query_children_for_cache_children(self, course_key, items):\r\n # first get non-draft in a round-trip\r\n query = {\r\n '_id': {'$in': [\r\n course_key.make_usage_key_from_deprecated_string(item).to_deprecated_son() for item in items\r\n ]}\r\n }\r\n return list(self.collection.find(query))",
"def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)",
"async def get_child_ids(db, post_id):\n sql = \"SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'\"\n return await db.query_col(sql, id=post_id)",
"def children(parent, data):\n\n kids = []\n for pid in data:\n if data[pid][\"parentId1\"] == parent or data[pid][\"parentId2\"] == parent:\n kids.append(pid)\n\n return kids",
"def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children",
"def children(self):\n return self.hashring_watch.get_children()",
"def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents",
"def get_nodes(self, parent, keys, limit):\n queue = deque(parent.children)\n\n while len(queue) != 0:\n node = queue.popleft()\n if node.real:\n keys.append(node.value)\n\n if len(keys) == limit:\n break\n\n queue.extend(node.children)",
"def retrieveTrees(c):\n\n all_nodes = dict()\n root_nodes = list()\n c.execute('''SELECT id, parent_id, title FROM node''')\n data_db = c.fetchall()\n \n # Initialize nodes list\n for data_line in data_db:\n db_child_id = data_line[0]\n db_parent_id = data_line[1]\n child_title = data_line[2]\n \n node = Node(db_child_id, child_title)\n all_nodes[db_child_id] = node\n if not db_parent_id:\n root_nodes.append(node)\n \n # Create relations\n for data_line in data_db:\n db_child_id = data_line[0]\n db_parent_id = data_line[1]\n if db_parent_id:\n all_nodes[db_parent_id].append(all_nodes[db_child_id])\n \n return (all_nodes, root_nodes,)",
"def Children( cls, pid ):\n\t\tres = []\n\t\tpid = int(pid)\n\t\tfor cpid, cmd in cls.List().items():\n\t\t\tppid = int(cls.Status(cpid)[\"ppid\"])\n\t\t\tif ppid == pid:\n\t\t\t\tres.append( (cpid, None, cmd))\n\t\treturn res",
"def get_children(self):\n return self.children",
"def get_children(cur, node):\n sql = \"\"\"\n SELECT\n *\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield NodeData(**result)",
"def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? yes - via race-condition in reading procfs\n\n return accepted",
"def get_child_ids(cur, node):\n sql = \"\"\"\n SELECT\n id\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['id'])",
"def get_object_childs(self, obj_name):\n index = 0\n children_list = []\n child = 0\n parent_handle = self.get_object_handle(obj_name)\n while child != -1:\n res, child = vrep.simxGetObjectChild(self.client_id, parent_handle, index, vrep.simx_opmode_blocking)\n if res == vrep.simx_return_ok:\n children_list.append(child)\n index = index + 1\n else:\n print('Remote fucntion get_object_childs call failed.')\n return []\n del children_list[len(children_list) - 1]\n return children_list",
"def _cache_children(self, course_key, items, depth=0):\r\n\r\n data = {}\r\n to_process = list(items)\r\n while to_process and depth is None or depth >= 0:\r\n children = []\r\n for item in to_process:\r\n self._clean_item_data(item)\r\n children.extend(item.get('definition', {}).get('children', []))\r\n data[Location._from_deprecated_son(item['location'], course_key.run)] = item\r\n\r\n if depth == 0:\r\n break\r\n\r\n # Load all children by id. See\r\n # http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24or\r\n # for or-query syntax\r\n to_process = []\r\n if children:\r\n to_process = self._query_children_for_cache_children(course_key, children)\r\n\r\n # If depth is None, then we just recurse until we hit all the descendents\r\n if depth is not None:\r\n depth -= 1\r\n\r\n return data",
"def get_children(self, table_name):\n return self._child_map[table_name]",
"def getChildren(self, path):\n \n self._sharedState.lock.acquire()\n try:\n try:\n self.update(path)\n children = list()\n entries = self._client.list(self._workingCopyPath + path, recurse=False)\n for entry in entries:\n entryPath = entry[0].path[self._workingPathLength:]\n formerEntry = self._sharedState.getFromCache(path)\n if formerEntry is None:\n newEntry = _Info(entry[0])\n else:\n newEntry = _Info(entry[0])\n newEntry.logMessage = formerEntry.logMessage # creation date and owner do not change\n self._sharedState.addToCache(entryPath, newEntry)\n children.append(entryPath)\n del children[0] # First item is always the queried path\n return children\n except ClientError, error:\n raise SubversionError(error)\n finally:\n self._sharedState.lock.release()",
"def list():\n\n return {\"cncs\": [{\"id\": id.split(\"/\")[-1]} for id in sorted(flask.current_app.redis.keys(\"/cnc/*\"))]}",
"def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)",
"def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret",
"def children_ids(self):\n return self._children_ids",
"def get_children(self):\n return []",
"def test_get_all_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)",
"def getChildren(self):\n return self.directories.values()",
"def descendants(db: Redis[bytes], *addresses: hash_t) -> set[hash_t]:\n queue = list(addresses)\n out = set()\n\n while len(queue) > 0:\n curr = queue.pop()\n for el in db.smembers(join(OPERATIONS, curr, \"children\")):\n h = hash_t(el.decode())\n out.add(h)\n if h not in queue:\n queue.append(h)\n return out",
"def get_children(self):\n return self.items"
] |
[
"0.8152746",
"0.79024535",
"0.6686107",
"0.65141815",
"0.641508",
"0.639633",
"0.6068185",
"0.60418916",
"0.6006778",
"0.5979434",
"0.597477",
"0.5762077",
"0.57298154",
"0.5690725",
"0.56153274",
"0.5591066",
"0.55792207",
"0.55651486",
"0.5542288",
"0.5512591",
"0.55021805",
"0.548904",
"0.548435",
"0.54779565",
"0.54631394",
"0.5410705",
"0.54075605",
"0.54054725",
"0.5401285",
"0.5393837"
] |
0.7953534
|
1
|
synchronizes all the children to a Redis list
|
def sync_all_children_to_redis(self):
conn = get_redis_connection()
key = GoogleMapsAddressComponent.get_redis_all_children_key(self.pk)
# First, we make sure the key gets destroyed if it exists
conn.delete(key)
# Now we add the keys of the children to the list
children = self.get_all_children_seq()
for child in children:
conn.lpush(key, child.pk)
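
For context, a minimal sketch of reading the synced list back, assuming the same get_redis_connection and key helper used above; conn.lrange(key, 0, -1) returns every pk that was pushed, most-recently-pushed first since lpush inserts at the head. The function name is illustrative, not part of the original model:

def read_all_children_from_redis(gmac_id):
    # Hypothetical companion reader: returns the raw pk list that
    # sync_all_children_to_redis built for this component.
    conn = get_redis_connection()
    key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)
    return conn.lrange(key, 0, -1)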
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _sync_children(self, source_parent, destination_parent, new_child):\n destination_reordered = []\n destination_children = set(destination_parent.fields['children'])\n source_children = source_parent.fields['children']\n orphans = destination_children - set(source_children)\n for child in source_children:\n if child == new_child or child in destination_children:\n destination_reordered.append(child)\n destination_parent.fields['children'] = destination_reordered\n return orphans",
"def _sync_children(self, source_parent, destination_parent, new_child):\r\n destination_reordered = []\r\n destination_children = destination_parent['fields']['children']\r\n source_children = source_parent['fields']['children']\r\n orphans = set()\r\n for child in destination_children:\r\n try:\r\n source_children.index(child)\r\n except ValueError:\r\n orphans.add(child)\r\n for child in source_children:\r\n if child == new_child or child in destination_children:\r\n destination_reordered.append(child)\r\n destination_parent['fields']['children'] = destination_reordered\r\n return orphans",
"def update(self):\n map(lambda x: x.update(), self._children.values())",
"def _set_child_data(self, child_data: List[ExperimentData]):\n self._child_data = ThreadSafeOrderedDict()\n for data in child_data:\n self.add_child_data(data)\n self._db_data.metadata[\"child_data_ids\"] = self._child_data.keys()",
"def sync_all_lists(self):\r\n print(\"Started syncing influencer master lists with DB\")\r\n screen_names_on_lists = []\r\n self._add_or_update(screen_names_on_lists)\r\n print(\"Removing entries which are no longer on any list\")\r\n self._delete_entries_not_in_list(screen_names_on_lists) # remove entries from DB if they are on no list\r\n print(\"Sync complete\")",
"def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)",
"def WriteDataToRedisList(ListName,data):\n redispool = redis.ConnectionPool(host=RedisIP,port=RedisPort,db=RedisDB)\n redata = redis.Redis(connection_pool=redispool)\n redata.lpush(ListName,data)",
"def get_all_children_id_list_from_redis_by_pk(gmac_id):\n try:\n gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)\n length = conn.llen(key)\n return conn.lrange(key, 0, length)\n except GoogleMapsAddressComponent.DoesNotExist:\n return None",
"def get_children_from_redis(gmac_id, as_objects=True):\n conn = get_redis_connection()\n klass = GoogleMapsAddressComponent\n results = []\n queue = []\n children = klass.get_children_id_list_from_redis_by_pk(gmac_id)\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = klass.get_children_id_list_from_redis_by_pk(node)\n results.extend(children)\n queue.extend(children)\n if as_objects:\n results = klass.objects.filter(pk__in=results)\n return results",
"def merge(self, list):\n for n in list:\n self.add_child(n)",
"def last_buy(self):\n multi_data = []\n while not self.infoQueue.empty():\n multi_data.append(self.infoQueue.get_nowait())\n self.redisHandle.set_multiple_data(multi_data)\n print(\"flush all data\")",
"def get_all_children_from_redis(gmac_id, as_objects=True):\n conn = get_redis_connection()\n klass = GoogleMapsAddressComponent\n results = klass.get_all_children_id_list_from_redis_by_pk(gmac_id)\n if as_objects:\n results = klass.objects.filter(pk__in=results)\n return results",
"def children(self):\n return self.hashring_watch.get_children()",
"def rebalance_children(self, node, current=None):\n print('Balancing children...')\n if current is not None:\n node = self.__getitem__(current)\n node['edges'] = sorted(node['edges'])\n self.rebalance_children(node, current=node)\n else:\n node = self.__getitem__(node)\n node['edges'] = sorted(node['edges'])",
"def get_nodes(self, parent, keys, limit):\n queue = deque(parent.children)\n\n while len(queue) != 0:\n node = queue.popleft()\n if node.real:\n keys.append(node.value)\n\n if len(keys) == limit:\n break\n\n queue.extend(node.children)",
"def sync(self):\n for subscription in self.getSubscriptionList():\n #user_id = subscription.getZopeUser()\n #uf = self.getPortalObject().acl_users\n #user = uf.getUserById(user_id).__of__(uf)\n #newSecurityManager(None, user)\n subscription.activate(activity='SQLQueue',\n tag=subscription.getId(),\n priority=ACTIVITY_PRIORITY\n ).SubSync(subscription.getPath())",
"def flushdb(self):\n allKeys = self.redis.keys(self.appendKeys(\"*\"))\n # for some reason deleteing with a list of keys isn't working\n p = self.redis.pipeline()\n for key in allKeys:\n p.delete(key)\n p.execute()",
"def neco__generator_multiset_update_pid_tree(ms, pid_tree):\n for pid, n in ms:\n pid_tree.expanded_insert(pid)\n pid_tree.expanded_insert(pid.next(n))",
"async def move_inner_items_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Moving inner items\")\n progress.start()\n moving = True\n stmt = select(Group).options(selectinload(Group.children), selectinload(Group.items))\n while moving:\n moving = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.items) > 0 and len(group.children) > 0:\n sub_group = Group(value=group.value, label=group.label, split=\"inner\")\n dbsession.add(sub_group)\n sub_group.parent = group\n for item in list(group.items):\n item.group = sub_group\n dbsession.add(item)\n await dbsession.commit()\n moving = True\n break\n await dbsession.commit()\n progress.stop()",
"def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))",
"def populate_redis(self, d):\n for k, v in d.items():\n self.redis_conn.set(k, v)",
"def _clear_ancestor_caches(self):\r\n for page in Page.objects.get(id=self.id).get_ancestors():\r\n key = 'stoat:pages:%d:children' % (page.id)\r\n cache.delete(key)",
"def collect_children(self):\n\t\twhile self.active_children:\n\t\t\tif len(self.active_children) < self.max_children:\n\t\t\t\toptions = os.WNOHANG\n\t\t\telse:\n\t\t\t\t# If the maximum number of children are already\n\t\t\t\t# running, block while waiting for a child to exit\n\t\t\t\toptions = 0\n\t\t\ttry:\n\t\t\t\tpid, status = os.waitpid(0, options)\n\t\t\texcept os.error:\n\t\t\t\tpid = None\n\t\t\tif not pid: break\n\t\t\tself.active_children.remove(pid)",
"def flush(self):\n super().flush()\n self.dists = {}",
"def _cache_children(self, course_key, items, depth=0):\r\n\r\n data = {}\r\n to_process = list(items)\r\n while to_process and depth is None or depth >= 0:\r\n children = []\r\n for item in to_process:\r\n self._clean_item_data(item)\r\n children.extend(item.get('definition', {}).get('children', []))\r\n data[Location._from_deprecated_son(item['location'], course_key.run)] = item\r\n\r\n if depth == 0:\r\n break\r\n\r\n # Load all children by id. See\r\n # http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24or\r\n # for or-query syntax\r\n to_process = []\r\n if children:\r\n to_process = self._query_children_for_cache_children(course_key, children)\r\n\r\n # If depth is None, then we just recurse until we hit all the descendents\r\n if depth is not None:\r\n depth -= 1\r\n\r\n return data",
"def _query_children_for_cache_children(self, course_key, items):\r\n # first get non-draft in a round-trip\r\n query = {\r\n '_id': {'$in': [\r\n course_key.make_usage_key_from_deprecated_string(item).to_deprecated_son() for item in items\r\n ]}\r\n }\r\n return list(self.collection.find(query))",
"def update_children(event: TraceEvent) -> None:\n parents = [event]\n iteration = 0\n while parents and iteration < MAX_TRACE_SIZE:\n iteration += 1\n parent = parents.pop()\n parent.children.sort(key=child_sort_key)\n for child in parent.children:\n child.generation = parent.generation + 1 if parent.generation is not None else None\n parents.append(child)",
"def _subnode_ids(self):\n for ticket in self:\n ticket.subnode_ids = self.search([\n ('parent_id', '=', ticket.id),\n ('type.has_children', '=', True)])",
"def run(self):\n init()\n list_name = comet_config.REDIS_NAMESPACE + \"incoming/\" + self.service_name\n list_name_processing = list_name + \"/processing\"\n self.redis = r\n while True:\n try:\n item = self.redis.brpoplpush(list_name, list_name_processing)\n self.process_incoming(item)\n self.redis.lrem(list_name_processing, item)\n\n except redis.ConnectionError:\n pass",
"def _rec(jet, parent, node_id, outers_list):\n if jet[\"tree\"][node_id, 0] == -1:\n outers_list.append(jet[\"content\"][node_id])\n else:\n _rec(jet, node_id, jet[\"tree\"][node_id, 0], outers_list)\n _rec(jet, node_id, jet[\"tree\"][node_id, 1], outers_list)\n\n return outers_list"
] |
[
"0.5846084",
"0.5746459",
"0.5725607",
"0.56974477",
"0.5600698",
"0.55633664",
"0.55478585",
"0.5531039",
"0.5527182",
"0.5519314",
"0.5518972",
"0.55158573",
"0.5498162",
"0.5470758",
"0.5470408",
"0.54163337",
"0.54001063",
"0.53624684",
"0.53552",
"0.53313947",
"0.52982",
"0.52753115",
"0.5250027",
"0.52372515",
"0.52348167",
"0.52182096",
"0.5215122",
"0.5191509",
"0.51855075",
"0.5179187"
] |
0.85803103
|
0
|
Generator function that outputs the paper title, index, and citations for each entry
|
def __citationsFromFile(file):
# Tokens for parsing
titleToken = '#*'
indexToken = '#index'
citationToken = '#%'
# Predicates for error checking
noneNone = lambda *items: all([item is not None for item in items])
allNone = lambda *items: all([item is None for item in items])
# Next entry data
title = None
index = None
citations = []
# Basic stats
withCitations = 0
withoutCitations = 0
totalPapers = 0
for line in file:
line = line.strip()
# Parse entry, enforcing that data appears in title -> index -> citations order
if line.startswith(titleToken):
assert allNone(title, index) and len(citations) == 0
title = line[len(titleToken):].strip('.')
elif line.startswith(indexToken):
assert noneNone(title) and allNone(index) and len(citations) == 0
index = int(line[len(indexToken):])
elif line.startswith(citationToken):
assert noneNone(title, index)
newCitationId = int(line[len(citationToken):])
assert newCitationId >= 0
citations.append(newCitationId)
elif len(line) == 0:
totalPapers += 1
            # Yield this entry if it has any citations; otherwise just count it
if noneNone(title, index):
if len(citations) > 0:
withCitations += 1
yield title, index, citations
else:
withoutCitations += 1
title = None
index = None
citations = []
# Output some basic statistics about papers with/without citations
withCitationsPercent = 100.0 * (float(withCitations) / totalPapers)
withoutCitationsPercent = 100.0 * (float(withoutCitations) / totalPapers)
print "\n\nTotal Papers: %d" % totalPapers
print " With References: %d (%2.2f%%)\n Without References: %d (%2.2f%%)" % (
withCitations, withCitationsPercent, withoutCitations, withoutCitationsPercent
)
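
A minimal usage sketch for the generator above; the file name citations.txt and the printed summary line are illustrative assumptions, not part of the original:

if __name__ == '__main__':
    # Hypothetical driver: stream entries from a dump in the
    # '#*' / '#index' / '#%' format and print one summary line each.
    with open('citations.txt') as dataFile:
        for title, index, citations in __citationsFromFile(dataFile):
            print("%d: %s -> %d citations" % (index, title, len(citations)))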
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cite_report(self):\n\n num_cites = np.sum(np.array([q.citation_count for q in self.mypapers]))\n print(\"number of citations = \", num_cites)\n\n for n, p in enumerate(self.mypapers):\n clean_tags = re.compile(\"<.*?>\")\n title = re.sub(clean_tags, \"\", p.title[0])\n tt = textwrap.wrap(title, 60)\n print(f\"{n+1:3} | {p.citation_count:4} {tt[0]}\")\n if len(tt) > 1:\n for line in tt[1:]:\n print(\"{:3} | {:4} {}\".format(\"\", \"\", line))\n print(\"{:3} | {:4} {}\".format(\"\", \"\", \"\"))",
"def list_publications(bib_format=\"dict\"):\n\n def get_bibtex(key, value):\n total_keys = [\n \"title\",\n \"journal\",\n \"volume\",\n \"issue\",\n \"number\",\n \"pages\",\n \"numpages\",\n \"year\",\n \"month\",\n \"publisher\",\n \"url\",\n \"doi\",\n \"issn\",\n ]\n bibtex_str = (\n \"@article{\"\n + key\n + \",\\n\"\n + \" author={\"\n + \" and \".join(value[\"author\"])\n + \"},\\n\"\n )\n for key in total_keys:\n if key in value.keys():\n bibtex_str += \" \" + key + \"={\" + value[key] + \"},\\n\"\n bibtex_str += \"}\\n\"\n return bibtex_str\n\n def get_apa(value):\n apa_str = \" & \".join(value[\"author\"])\n if \"year\" in value.keys():\n apa_str += \" (\" + value[\"year\"] + \"). \"\n if \"title\" in value.keys():\n apa_str += value[\"title\"] + \". \"\n if \"journal\" in value.keys():\n apa_str += value[\"journal\"] + \", \"\n if \"volume\" in value.keys():\n apa_str += value[\"volume\"] + \", \"\n if \"pages\" in value.keys():\n apa_str += value[\"pages\"] + \". \"\n if \"doi\" in value.keys():\n apa_str += \"doi: \" + value[\"doi\"] + \"\\n\"\n return apa_str\n\n publication_dict = s.publication_lst\n if bib_format.lower() == \"dict\":\n return publication_dict\n elif bib_format.lower() == \"bibtex\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_bibtex(key, value)\n return total_str\n elif bib_format.lower() == \"apa\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_apa(value)\n return total_str\n else:\n raise ValueError(\"Supported Bibformats are ['dict', 'bibtex', 'apa']\")",
"def _populate(self):\n\n # Assume the first word is what we want, and we can find well formed years\n # This sucks, but will work for these ones.\n # Roll on bibtex for citations in the CIM.\n\n citation_detail = self.doc.citation_detail\n author = citation_detail.split(',')[0]\n match = '([^\\w])19|20\\d\\d([^\\w])*?'\n m = re.search(match, citation_detail)\n if m:\n year = m.group(0)\n else:\n year = None\n\n # one error in existing es-doc content to be fixed:\n if 'van Vuuren DP' in author:\n author = 'van Vuuren'\n print 'applying vv fix'\n\n self.year = int(year)\n\n # We assume that this table will have entries which ne\n\n # I use the first three letters of a an authors name, and for\n # three or more authors, EA, and then the year for my bibtex citation string\n self.citeguess = author[0:3] + 'EA' + year[2:]\n # This is what will appear in the table:\n self.citestring = '%s et al. (%s)' % (author, year)\n # Keep this for a reference list for checking against the eventual bibtex reference list.\n self.text = citation_detail",
"def get_papers_by_institution(inst_names):\n papers = list(ads.SearchQuery(aff=\"IATE\"))\n for p in papers:\n print(p.author)\n print(f\"Paper from year {p.year} with \"\n f\"{len(p.author)} authors and \"\n f\"{p.citation_count} citations.\")\n input()",
"def cv_list(self):\n\n mystr = \"\"\n for p in self.mypapers:\n mystr += f\"{p.title[0]}\\n\"\n if len(p.author) > 12:\n a = f\"{p.author[0]} et al. \"\n elif len(p.author) > 2:\n a = \", \".join(p.author[:-1]) + f\" & {p.author[-1]} \"\n elif len(p.author) == 2:\n a = f\"{p.author[0]} & {p.author[1]} \"\n else:\n a = f\"{p.author[0]} \"\n\n mystr += f\"{a}\"\n mystr += f\"{p.year}, {p.pub}\"\n if p.volume is not None:\n mystr += f\", {p.volume}\"\n if p.issue is not None:\n mystr += f\", {p.issue}\"\n if p.page is not None:\n mystr += f\", {p.page[0]}\"\n mystr += \"\\n\\n\"\n return mystr",
"def get_papers_info(author_url, existing_papers):\n\n\tauthor_dict = {}\n\n\tauthor_page_tree = get_tree(author_url)\n\t# Get the <a> elements for the papers on the author's page\n\ta_elems = get_a_elems_for_papers(author_page_tree)\n\t# get the dates corresponding to each paper\n\tpaper_dates = get_dates_for_papers(author_page_tree)\n\t# zip into a list of (a_elem, date) pairs\n\ta_elem_dates = zip(a_elems, paper_dates)\n\t# Each a is a paper\n\tfor a, date in a_elem_dates:\n\t\t# Title of paper is the text content of the a element\n\t\tpaper_title = a.text_content()\n\t\t# Check if paper has already been checked, if so, move on to next paper\n\t\tif paper_title in existing_papers:\n\t\t\tcontinue\n\n\t\tpaper_url = a.get(\"href\")\n\n\t\tpaper_tree = get_tree(paper_url)\n\t\t# Get list of the paper's authors\n\t\tauthors = get_paper_authors(paper_tree)\n\t\t# Get paper abstract\n\t\tabstract = get_paper_abstract(paper_tree)\n\t\t# Get paper keywords\n\t\tkeywords = get_paper_keywords(paper_tree)\n\t\t# Get paper id number from its url\n\t\tpaper_id = re.search(\"[0-9]+\", paper_url).group()\n\t\t# Add paper to dictionary with id as key and metadata as values\n\t\tauthor_dict[paper_id] = {\n\t\t\t\t\t\t\"title\": paper_title,\n\t\t\t\t\t\t\"authors\": authors,\n\t\t\t\t\t\t\"abstract\": abstract,\n\t\t\t\t\t\t\"url\": paper_url,\n\t\t\t\t\t\t\"keywords\": keywords,\n\t\t\t\t\t\t'year': date\n\t\t} \n\n\treturn author_dict",
"def results(self):\n out = []\n fields = 'eid doi pii pubmed_id title subtype creator afid affilname '\\\n 'affiliation_city affiliation_country author_count '\\\n 'author_names author_ids author_afids coverDate '\\\n 'coverDisplayDate publicationName issn source_id eIssn '\\\n 'aggregationType volume issueIdentifier article_number '\\\n 'pageRange description authkeywords citedby_count '\\\n 'openaccess fund_acr fund_no fund_sponsor'\n doc = namedtuple('Document', fields)\n for item in self._json:\n info = {}\n # Parse affiliations\n try:\n info[\"affilname\"] = _join(item['affiliation'], 'affilname')\n info[\"afid\"] = _join(item['affiliation'], 'afid')\n info[\"aff_city\"] = _join(item['affiliation'], 'affiliation-city')\n info[\"aff_country\"] = _join(item['affiliation'],\n 'affiliation-country')\n except KeyError:\n pass\n # Parse authors\n try:\n # Deduplicate list of authors\n authors = _deduplicate(item['author'])\n # Extract information\n surnames = _replace_none([d['surname'] for d in authors])\n firstnames = _replace_none([d['given-name'] for d in authors])\n info[\"auth_names\"] = \";\".join([\", \".join([t[0], t[1]]) for t in\n zip(surnames, firstnames)])\n info[\"auth_ids\"] = \";\".join([d['authid'] for d in authors])\n affs = []\n for auth in authors:\n aff = listify(_deduplicate(auth.get('afid', [])))\n affs.append('-'.join([d['$'] for d in aff]))\n info[\"auth_afid\"] = (';'.join(affs) or None)\n except KeyError:\n pass\n date = item.get('prism:coverDate')\n if isinstance(date, list):\n date = date[0].get('$')\n new = doc(article_number=item.get('article-number'),\n title=item.get('dc:title'), fund_sponsor=item.get('fund-sponsor'),\n subtype=item.get('subtype'), issn=item.get('prism:issn'),\n creator=item.get('dc:creator'), affilname=info.get(\"affilname\"),\n author_names=info.get(\"auth_names\"), doi=item.get('prism:doi'),\n coverDate=date, volume=item.get('prism:volume'),\n coverDisplayDate=item.get('prism:coverDisplayDate'),\n publicationName=item.get('prism:publicationName'),\n source_id=item.get('source-id'), author_ids=info.get(\"auth_ids\"),\n aggregationType=item.get('prism:aggregationType'),\n issueIdentifier=item.get('prism:issueIdentifier'),\n pageRange=item.get('prism:pageRange'),\n author_afids=info.get(\"auth_afid\"), fund_no=item.get('fund-no'),\n affiliation_country=info.get(\"aff_country\"),\n citedby_count=item.get('citedby-count'),\n openaccess=item.get('openaccess'), eIssn=item.get('prism:eIssn'),\n author_count=item.get('author-count', {}).get('$'),\n affiliation_city=info.get(\"aff_city\"), afid=info.get(\"afid\"),\n description=item.get('dc:description'), pii=item.get('pii'),\n authkeywords=item.get('authkeywords'), eid=item.get('eid'),\n fund_acr=item.get('fund-acr'), pubmed_id=item.get('pubmed-id'))\n out.append(new)\n return out or None",
"def fill(self):\n if self.source == 'citations':\n url = self._scholarly.URLS(\"CITATIONPUB\").format(self.id_citations)\n soup = self._scholarly._get_soup(\n self._scholarly.URLS('HOST').format(url))\n self.bib['title'] = soup.find('div', id='gsc_vcd_title').text\n\n if soup.find('a', class_='gsc_vcd_title_link'):\n self.bib['url'] = soup.find(\n 'a', class_='gsc_vcd_title_link')['href']\n\n for item in soup.find_all('div', class_='gs_scl'):\n key = item.find(class_='gsc_vcd_field').text\n val = item.find(class_='gsc_vcd_value')\n if key == 'Authors':\n self.bib['author'] = ' and '.join(self.get_authorlist(val))\n elif key == 'Journal':\n self.bib['journal'] = val.text\n elif key == 'Volume':\n self.bib['volume'] = val.text\n elif key == 'Issue':\n self.bib['number'] = val.text\n elif key == 'Pages':\n self.bib['pages'] = val.text\n elif key == 'Publisher':\n self.bib['publisher'] = val.text\n elif key == 'Publication date':\n self.bib['year'] = arrow.get(val.text).year\n elif key == 'Description':\n if val.text[0:8].lower() == 'abstract':\n val = val.text[9:].strip()\n self.bib['abstract'] = val\n elif key == 'Total citations':\n self.id_scholarcitedby = re.findall(\n self._scholarly.URLS('SCHOLARPUBRE'), val.a['href'])[0]\n\n # number of citation per year\n years = [int(y.text) for y in soup.find_all(class_='gsc_vcd_g_t')]\n cites = [int(c.text) for c in soup.find_all(class_='gsc_vcd_g_al')]\n self.cites_per_year = dict(zip(years, cites))\n\n if soup.find('div', class_='gsc_vcd_title_ggi'):\n self.bib['eprint'] = soup.find(\n 'div', class_='gsc_vcd_title_ggi').a['href']\n self._filled = True\n\n elif self.source == 'scholar':\n self.bib['add_to_lib'] = self.url_add_sclib\n\n try:\n bibtex = self._scholarly._get_soup(self.url_scholarbib)\n bibtex = bibtex.find('pre').string\n self.bib.update(bibtexparser.loads(bibtex).entries[0])\n self.bib['author_count'] = str(\n len(self.bib['author'].split('and')))\n self.bib['age'] = str(\n int(date.today().year) - int(self.bib['year']))\n except:\n # did not find year\n pass\n\n self._filled = True\n return self",
"def getCitationsData():\n # Follows https://github.com/simonw/irma-scrapers/issues/1\n citationsResponse = requests.get(\"https://api.github.com/repos/greenelab/covid19-review/git/trees/output\", headers=headers).json()\n treeEntry = [t for t in citationsResponse[\"tree\"] if t[\"path\"] == \"references.json\"][0] \n citations = json.loads(base64.b64decode(requests.get(treeEntry[\"url\"]).json()[\"content\"]))\n\n citationsDF = pd.DataFrame(citations)\n citationsDF[\"Covid19-review_paperLink\"] = citationsDF.id.apply(lambda x: \"https://greenelab.github.io/covid19-review/#ref-\" + x)\n citationsDF = citationsDF[[\"DOI\", \"title\", \"issued\", \"container-title\", \"URL\", \"Covid19-review_paperLink\"]]\n citationsDF.rename(columns={\"DOI\": \"doi\", \"issued\": \"date\", \"container-title\": \"publication\"}, inplace=True)\n\n # Convert date to string\n def dateStringFromDateParts(row):\n try:\n dateParts = row['date']['date-parts'][0]\n if len(dateParts) == 3:\n return \"-\".join([str(dateParts[1]), str(dateParts[2]), str(dateParts[0])])\n elif len(dateParts) == 2:\n return \"-\".join([str(dateParts[1]), str(dateParts[0])])\n elif len(dateParts) == 1:\n return str(dateParts[0])\n else:\n return\n except:\n return\n\n citationsDF.date = citationsDF.apply(dateStringFromDateParts, axis=1)\n\n citationsDF.set_index(\"doi\", inplace=True)\n return citationsDF",
"def setUp(self):\n self.url = reverse(\"td_biblio:entry_list\")\n self.paginate_by = 20\n self.n_publications_per_year = 3\n self.start_year = 2000\n self.end_year = 2014\n self.n_publications = self.end_year - self.start_year\n self.n_publications *= self.n_publications_per_year\n self.n_authors = self.n_publications * 3\n self.publications_years = []\n self.max_page_num = self.n_publications / self.paginate_by\n if self.n_publications % self.paginate_by:\n self.max_page_num += 1\n\n # Entry (14 * 3 = 42)\n for y in range(self.start_year, self.end_year, 1):\n for i in range(1, 1 + self.n_publications_per_year):\n date = datetime.date(y, i, 1)\n EntryWithAuthorsFactory(publication_date=date)\n self.publications_years.append(y)",
"def make_doi_table(dataset: ObservatoryDataset) -> List[Dict]:\n\n records = []\n for paper in dataset.papers:\n # Doi, events and grids\n doi = paper.doi.upper()\n events = make_doi_events(doi, paper.events)\n\n # Affiliations: institutions, countries, regions, subregion, funders, journals, publishers\n institutions = make_doi_institutions(paper.authors)\n countries = make_doi_countries(paper.authors)\n regions = make_doi_regions(paper.authors)\n subregions = make_doi_subregions(paper.authors)\n funders = make_doi_funders(paper.funders)\n journals = make_doi_journals(paper.in_unpaywall, paper.journal)\n publishers = make_doi_publishers(paper.publisher)\n\n # Make final record\n records.append(\n {\n \"doi\": doi,\n \"crossref\": {\n \"type\": paper.type,\n \"title\": paper.title,\n \"published_year\": paper.published_date.year,\n \"published_month\": paper.published_date.month,\n \"published_year_month\": f\"{paper.published_date.year}-{paper.published_date.month}\",\n \"funder\": [{\"name\": funder.name, \"DOI\": funder.doi} for funder in paper.funders],\n },\n \"unpaywall\": {},\n \"unpaywall_history\": {},\n \"open_citations\": {},\n \"events\": events,\n \"affiliations\": {\n \"doi\": doi,\n \"institutions\": institutions,\n \"countries\": countries,\n \"subregions\": subregions,\n \"regions\": regions,\n \"groupings\": [],\n \"funders\": funders,\n \"authors\": [],\n \"journals\": journals,\n \"publishers\": publishers,\n },\n }\n )\n\n # Sort to match with sorted results\n records.sort(key=lambda r: r[\"doi\"])\n\n return records",
"def main():\n papers_with_references = add_references_to_papers(PAPERS_FILE, TEXT_DIR)\n citations = match_citations_with_papers(papers_with_references)\n insert_into_citation_table(citations)",
"def info(entry: BibItem) -> str:\n return \"{title}{author}{date}\".format(\n title=(\n \"Title: {}\\n\".format(re.sub(r\"[}{]\", \"\", entry[\"title\"]))\n if \"title\" in entry\n else \"\"\n ),\n author=(\n \"Author{plural}: {author}\\n\".format(\n plural=\"s\" if len(entry[\"author\"]) > 1 else \"\",\n author=\"; \".join(entry[\"author\"]),\n )\n if \"author\" in entry\n else \"\"\n ),\n date=(\n \"Year: {}\\n\".format(entry[\"date\"].split(\n \"-\")[0]) if \"date\" in entry else \"\"\n ),\n )",
"def get_items(self):\n export_file = self.cmdline_args.file # see setup_parser\n for a in get_articles(export_file):\n yield node(\n heading=dt_heading(\n a.added,\n # 'pocket' permalink is pretty convenient to jump straight into Pocket app\n link(title='pocket', url=a.pocket_link) + ' · ' + link(title=a.title, url=a.url),\n ),\n children=[node( # comments are displayed as org-mode child entries\n heading=dt_heading(hl.created, hl.text)\n ) for hl in a.highlights]\n )",
"def make_citation(meta):\n pass",
"def process_bib_entry(\n cid, bibdatabase, bibnums, fallback_fmt=\"[{author_abbrev}, {year}]\"\n):\n entry = bibdatabase[cid]\n if cid not in bibnums:\n bibnums[cid] = len(bibnums) + 1\n\n if \"doi\" in entry:\n return r'<a href=\"https://doi.org/{doi}\">{text}</a>'.format(\n doi=entry[\"doi\"], text=bibnums[cid]\n )\n elif \"url\" in entry:\n return r'<a href=\"{url}\">{text}</a>'.format(url=entry[\"url\"], text=bibnums[cid])\n elif \"link\" in entry:\n return r'<a href=\"{url}\">{text}</a>'.format(\n url=entry[\"link\"], text=bibnums[cid]\n )\n else:\n return bibnums[cid]\n # add_abbreviated_author(entry)\n # split_date(entry)\n # return DefaultFormatter().format(fallback_fmt, **entry)",
"def to_citation(self, type):\n acs_authors = \"; \".join(self.format_authors(\"acs\"))\n # Some articles don't come with pages. :-(\n pages_with_endash = (self.pages.replace(\"-\", \"\\u2013\") if self.pages\n else \"\")\n # Actually, not using quote() generally gives results that work fine.\n # The only issue is that when using Markdown URLs with parentheses in\n # Jupyter notebooks, the conversion to HTML gets it wrong, thinking\n # that the URL ends at the first close parentheses in the URL. (In\n # the notebook itself, it is fine, only the conversion to HTML messes\n # up.) So we might as well escape them generally.\n doi_url = f\"https://doi.org/{urllib.parse.quote(self.doi)}\"\n\n # BibLaTeX\n if type in [\"bib\", \"b\"]:\n # Create (hopefully) unique identifier\n author_decoded = unidecode(self.authors[0][\"family\"])\n journal_initials = \"\".join(c for c in self.journal_short\n if c.isupper())\n ref_identifier = f\"{author_decoded}{self.year}{journal_initials}\"\n ref_identifier = \"\".join(ref_identifier.split()) # remove spaces\n # Author names in bib style\n author_names = \" and \".join(self.format_authors(\"bib\"))\n journal = self.journal_short.replace(\". \", \".\\\\ \")\n # Open and close braces\n # Truthfully we don't need this. However, including the doubled\n # curly braces in the f-string makes vim's indentation go crazy.\n open, close = \"{\", \"}\"\n # Make the citation\n s = (f\"@article{open}{ref_identifier},\\n\"\n f\" doi = {{{self.doi}}},\\n\"\n f\" author = {{{author_names}}},\\n\"\n f\" journal = {{{journal}}},\\n\"\n f\" title = {{{self.title}}},\\n\"\n f\" year = {{{self.year}}},\\n\")\n if self.volume is not None:\n s += f\" volume = {{{self.volume}}},\\n\"\n if self.issue is not None:\n s += f\" issue = {{{self.issue}}},\\n\"\n if self.pages is not None:\n s += f\" pages = {{{self.pages.replace('-', '--')}}},\\n\"\n s += close\n # Replace Unicode characters with their LaTeX equivalents\n for char in _g.unicodeLatexDict:\n s = s.replace(char, _g.unicodeLatexDict[char])\n return s\n\n # Just DOI\n if type in [\"doi\", \"d\"]:\n return self.doi\n\n # The rest all have a long vs short type.\n # Discern long vs short type\n long = False\n if type[0].upper() == type[0]:\n long = True\n type = type.lower()\n\n # reStructuredText\n if type in [\"rst\", \"r\"]:\n author_title = f\"{acs_authors} {self.title}. \" if long else \"\"\n vol_issue = (f\"*{self.volume}* ({self.issue}), \" if self.issue\n else f\"*{self.volume},* \")\n return (author_title\n + f\"*{self.journal_short}* **{self.year},** \"\n + vol_issue\n + f\"{pages_with_endash}. \"\n + f\"`DOI: {self.doi} <{doi_url}>`_\")\n\n # Markdown\n if type in [\"markdown\", \"m\"]:\n author_title = f\"{acs_authors} {self.title}. \" if long else \"\"\n vol_issue = (f\"*{self.volume}* ({self.issue}), \" if self.issue\n else f\"*{self.volume},* \")\n return (author_title\n + f\"*{self.journal_short}* **{self.year},** \"\n + vol_issue\n + f\"{pages_with_endash}. \"\n + f\"[DOI: {self.doi}]({doi_url})\")\n\n # Word\n elif type in [\"word\", \"w\"]:\n author_title = f\"{acs_authors} {self.title}. \" if long else \"\"\n vol_issue = (f\"{self.volume} ({self.issue}), \" if self.issue\n else f\"{self.volume}, \")\n return (author_title\n + f\"{self.journal_short} {self.year}, \"\n + vol_issue\n + f\"{pages_with_endash}.\")\n\n else:\n raise ValueError(\"Invalid citation type '{type}' given\")",
"def calculate_name_index_data(refdict: dict, citelist: list, specific_names: list) -> Tuple[list, dict, dict, dict,\n dict, dict, dict, dict,\n dict, dict]:\n name_table = create_name_table(citelist)\n unique_names = list()\n nameset = set()\n # total_binomial_year_cnts = {}\n total_binomial_year_cnts = collections.Counter()\n for c in citelist:\n if c.name != \".\":\n clean = clean_name(c.name)\n if not clean.lower() in nameset:\n nameset |= {clean.lower()}\n unique_names.append(clean)\n y = refdict[c.cite_key].year()\n if y is not None:\n total_binomial_year_cnts.update([y])\n # if y in total_binomial_year_cnts:\n # total_binomial_year_cnts[y] += 1\n # else:\n # total_binomial_year_cnts[y] = 1\n unique_names.sort(key=lambda s: s.lower())\n\n # identify genera used per paper\n genera_per_paper = {}\n for c in citelist:\n if c.name != \".\":\n if c.cite_key not in genera_per_paper:\n genera_per_paper[c.cite_key] = set()\n genera_set = genera_per_paper[c.cite_key]\n genera_set |= {extract_genus(clean_name(c.name))}\n genus_cnts = {}\n for c in genera_per_paper:\n y = refdict[c].year()\n if y is not None:\n if init_data().start_year <= y <= init_data().current_year:\n genera_set = genera_per_paper[c]\n for genus in genera_set:\n genus = clean_genus(genus)\n if genus != \"\":\n genus_cnts.setdefault(genus, {y: 0 for y in range(init_data().start_year,\n init_data().current_year + 1)})[y] += 1\n # if genus not in genus_cnts:\n # genus_cnts[genus] = {y: 0 for y in range(init_data().start_year,\n # init_data().current_year + 1)}\n # gcnts = genus_cnts[genus]\n # gcnts[y] += 1\n\n binomial_usage_cnts_by_year = {}\n binomial_location_applications = {}\n binomial_usage_cnts = {}\n for name in unique_names:\n binomial_usage_cnts_by_year[name], tmptotal = calculate_binomial_yearly_cnts(name, refdict, citelist)\n if tmptotal > 0:\n binomial_usage_cnts[name] = tmptotal\n binomial_location_applications[name] = calculate_binomial_locations(name, citelist)\n\n # specific_year_cnts = {}\n specific_year_cnts = collections.Counter()\n specific_usage_cnts_by_year = {}\n specific_location_applications = {}\n specific_usage_cnts = {}\n for name in specific_names:\n (specific_usage_cnts_by_year[name.name],\n tmptotal) = calculate_specific_name_yearly_cnts(name, unique_names, binomial_usage_cnts_by_year)\n if tmptotal > 0:\n specific_usage_cnts[name.name] = tmptotal\n specific_location_applications[name] = calculate_specific_locations(name, unique_names,\n binomial_location_applications)\n tmpkey = name.priority_source\n if tmpkey != \".\":\n y = refdict[tmpkey].year()\n if y is not None:\n specific_year_cnts.update([y])\n # if y in specific_year_cnts:\n # specific_year_cnts[y] += 1\n # else:\n # specific_year_cnts[y] = 1\n return (unique_names, binomial_usage_cnts_by_year, specific_usage_cnts_by_year, genus_cnts,\n total_binomial_year_cnts, name_table, specific_location_applications, binomial_location_applications,\n binomial_usage_cnts, specific_usage_cnts)",
"def get_self_citations(new_record_list, citationdic, initial_selfcitdict, config):\n i = 0 #just for debugging ..\n #get the tags for main author, coauthors, ext authors from config\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n for t in tags:\n try:\n dummy = config.get(config.get(\"rank_method\", \"function\"), t)\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_selfcitdict\n\n r_mainauthortag = config.get(config.get(\"rank_method\", \"function\"), \"first_author\")\n r_coauthortag = config.get(config.get(\"rank_method\", \"function\"), \"additional_author\")\n r_extauthortag = config.get(config.get(\"rank_method\", \"function\"), \"alternative_author_name\")\n #parse the tags\n mainauthortag = tagify(parse_tag(r_mainauthortag))\n coauthortag = tagify(parse_tag(r_coauthortag))\n extauthortag = tagify(parse_tag(r_extauthortag))\n\n selfcites = initial_selfcitdict\n for k in new_record_list:\n if (i % 1000 == 0):\n mesg = \"Selfcites done \"+str(i)+\" of \"+str(len(new_record_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i+1\n #get the author of k\n authorlist = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authorlist.append(coauthl)\n authorlist.append(extauthl)\n #author tag\n #print \"record \"+str(k)+\" by \"+str(authorlist)\n #print \"is cited by\"\n #get the \"x-cites-this\" list\n if citationdic.has_key(k):\n xct = citationdic[k]\n for c in xct:\n #get authors of c\n cauthorlist = get_fieldvalues(c, mainauthortag)\n coauthl = get_fieldvalues(c, coauthortag)\n extauthl = get_fieldvalues(c, extauthortag)\n cauthorlist.extend(coauthl)\n cauthorlist.extend(extauthl)\n #print str(c)+\" by \"+str(cauthorlist)\n for ca in cauthorlist:\n if (ca in authorlist):\n #found!\n if selfcites.has_key(k):\n val = selfcites[k]\n #add only if not there already\n if val:\n if not c in val:\n val.append(c)\n selfcites[k] = val\n else:\n #new key for selfcites\n selfcites[k] = [c]\n\n mesg = \"Selfcites done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return selfcites",
"def publication_info(self, key, value):\n _publication_info = self.get(\"publication_info\", [])\n for v in force_list(value):\n temp_info = {}\n pages = clean_pages_range(\"c\", v)\n if pages:\n temp_info.update(pages)\n temp_info.update(\n {\n \"journal_issue\": clean_val(\"n\", v, str),\n \"journal_title\": clean_val(\"p\", v, str),\n \"journal_volume\": clean_val(\"v\", v, str),\n \"year\": clean_val(\"y\", v, int),\n }\n )\n\n text = \"{0} {1}\".format(\n clean_val(\"o\", v, str) or \"\", clean_val(\"x\", v, str) or \"\"\n ).strip()\n if text:\n temp_info.update({\"note\": text})\n if temp_info:\n _publication_info.append(temp_info)\n return _publication_info",
"def make_citation_authors(res):\n if \"author\" in res.keys():\n first_author = res['author'][0]['family'] + \", \" + res['author'][0]['given']\n last_author = res['author'][-1]['given'] + \" \" + res['author'][-1]['family']\n middle_authors = \", \".join(\" \".join([x['given'], x['family']]) for x in res['author'][1:-1])\n #assemble authors\n author_string = first_author\n author_string = author_string + \", \" + middle_authors if middle_authors != '' else author_string\n author_string = author_string + \", and \" + last_author if len(res['author']) > 1 else author_string\n \n author_string = author_string + \".\" if author_string[-1] != \".\" else author_string\n else:\n author_string = \"Unknown Authors\"\n\n return clean_txt(author_string.capitalize())",
"def __init__(self):\n # simple header only necessary for this table of MIPs\n self.header = '<tr class=\"ename\"><td colspan=\"1\">{{d.name}} ({{mips}})</td></tr>'\n self.mips = 'core MIPS recorded by ES-DOC'\n self._settemplates(self.onecol, self.twocol)\n\n r = Repo()\n c = r.getbyname('CMIP6', 'mips')\n self.doc = c\n self.related = []\n\n # two properties to help the use of the resulting table in a paper\n self.reference_list = []\n # nocite list intended for cutting and pasting directly into a latex document!\n self.nocite = ''\n\n for m in c.sub_projects:\n mdoc = r.getbyname(m.name, 'mips')\n description = mdoc.long_name+' - '\n citations = []\n\n # for these purposees we only want the things with a doi\n # and we want only one, so we have to develop a heuristic to get the right one\n for ci in mdoc.citations:\n cidoc = esd.retrieve(ci.id)\n ref = Reference(cidoc)\n if ref.populated:\n citations.append(ref)\n citations.sort(key=lambda cc: cc.year)\n index = -1\n if len(citations) > 1:\n # start by getting the most recent\n year = citations[-1].year\n index = -1\n for i in range(len(citations)-1):\n if citations[i].year == year:\n if m.name in citations[i].text:\n # it's one of the most recent and it has the MIP name\n # in the document title.\n index = i\n description += citations[index].citestring\n self.reference_list.append(citations[index].text)\n self.nocite += '\\\\nocite{%s}\\n' % citations[index].citeguess\n\n # overwrite the real description for the purpose of this table\n mdoc.description = description\n self.related.append(mdoc)\n\n # want alphabetical order\n self.related.sort(key=lambda mm: mm.name)",
"def getGenerator(self):\n coldatapage = pywikibot.Page(self.site, 'Data:Completely_indexed_painting_collections.tab')\n collectionjson = json.loads(coldatapage.text)\n colquery = 'SELECT ?item ?institution WHERE { VALUES ?item {'\n\n for collectioninfo in collectionjson.get('data'):\n colquery += ' wd:%s' % (collectioninfo[0],)\n colquery +=' } ?item wdt:P1612 ?institution }'\n colsq = pywikibot.data.sparql.SparqlQuery()\n colqueryresult = colsq.select(colquery)\n\n for resultitem in colqueryresult:\n institution = resultitem.get('institution').replace(' ', '_')\n query = 'file: hastemplate:Institution:%s incategory:Paintings_without_Wikidata_item -incategory:Paintings_from_completely_indexed_collections' % (institution,)\n gen = pagegenerators.SearchPageGenerator(query, total=1000, namespaces=6, site=self.site)\n for filepage in gen:\n yield filepage\n\n creatordatapage = pywikibot.Page(self.site, 'Data:Completely_indexed_painters.tab')\n creatorjson = json.loads(creatordatapage.text)\n creatorquery = 'SELECT ?item ?creator WHERE { VALUES ?item {'\n\n for creatorinfo in creatorjson.get('data'):\n creatorquery += ' wd:%s' % (creatorinfo[0],)\n creatorquery +=' } ?item wdt:P1472 ?creator }'\n creatorsq = pywikibot.data.sparql.SparqlQuery()\n creatorqueryresult = creatorsq.select(creatorquery)\n\n for resultitem in creatorqueryresult:\n creator = resultitem.get('creator').replace(' ', '_')\n query = 'file: hastemplate:Creator:%s incategory:Paintings_without_Wikidata_item -incategory:Paintings_by_completely_indexed_painters' % (creator,)\n gen = pagegenerators.SearchPageGenerator(query, total=1000, namespaces=6, site=self.site)\n for filepage in gen:\n yield filepage",
"def _process_results(items: List[dict]) -> Iterator[GBook]:\n # todo write a test for this func\n for book in items:\n volume = book['volumeInfo']\n\n authors = volume.get('authors')\n if not authors: # If authors is blank, just move on.\n continue\n\n authors = [util.split_author(a) for a in authors]\n\n isbns = []\n for ident in volume.get('industryIdentifiers', []):\n if ident['type'] == 'ISBN_10':\n try:\n isbns.append(int('978' + ident['identifier']))\n except ValueError: # eg an X in the identifier.\n pass\n elif ident['type'] == 'ISBN_13':\n isbns.append(int(ident['identifier']))\n\n if not isbns:\n continue\n\n price = book['saleInfo'].get('retailPrice')\n if price:\n price = price['amount']\n\n try:\n pub_date = saturn.from_str(volume['publishedDate'], 'YYYY-MM-DD')\n except ParserError: # Might be just a year\n pub_date = saturn.from_str(f\"{volume['publishedDate']}-01-01\", 'YYYY')\n except KeyError:\n pub_date = None\n\n yield GBook(\n title=volume['title'],\n authors=authors,\n isbns=isbns,\n\n internal_id=book['id'],\n\n language=volume.get('language').lower(),\n description=volume.get('description'),\n publication_date=pub_date,\n publisher=volume.get('publisher'),\n categories=volume.get('categories', []),\n\n book_url=volume.get('infoLink'),\n epub_url=book['accessInfo']['epub'].get('downloadLink'),\n pdf_url=book['accessInfo']['pdf'].get('downloadLink'),\n purchase_url=book['saleInfo'].get('buyLink'),\n price=price,\n )",
"def createIndex(pages): \n index = defaultdict(list)\n for url, content, links in pages:\n counts = getNumberTerms(content)\n for term, count in counts.items():\n index[term].append((url, count))\n return index",
"def build_bibtex_entry(metadata: dict):\n # We truncate the author list in the BibTeX entry after 'Developers',\n # because not all journals are happy with printing an excessively long\n # author list, e.g. Phys. Rev. wants at most 15 authors. By truncating the\n # author list here, the user who copies the BibTeX entry doesn't have to\n # make the decision where to truncate.\n authors = [\n pybtex.database.Person(author[\"Name\"])\n for author in (\n metadata[\"Authors\"][\"Core\"][\"List\"]\n + metadata[\"Authors\"][\"Developers\"][\"List\"]\n )\n ] + [pybtex.database.Person(\"others\")]\n entry = pybtex.database.Entry(\n \"software\",\n persons=dict(author=authors),\n fields=dict(\n title=(\n r\"\\texttt{\"\n + metadata[\"Name\"]\n + \" v\"\n + metadata[\"Version\"]\n + \"}\"\n ),\n # The 'version' field is not used by revtex4-2, so we also put the\n # version in the title\n version=metadata[\"Version\"],\n publisher=\"Zenodo\",\n doi=metadata[\"Doi\"],\n url=metadata[\"Homepage\"],\n howpublished=(\n r\"\\href{https://doi.org/\"\n + metadata[\"Doi\"]\n + \"}{\"\n + metadata[\"Doi\"]\n + \"}\"\n ),\n license=metadata[\"License\"],\n year=str(metadata[\"PublicationDate\"].year),\n month=str(metadata[\"PublicationDate\"].month),\n ),\n )\n entry.key = \"spectrecode\"\n return entry",
"def issueListing(self, v, i):\n #list of URLS within the issue\n# links = []\n issURL = self.link(vol = v, iss = i )\n html=urlopen(issURL)\n soup=BeautifulSoup(html,'html.parser')\n URLs = [] #Empty list\n \n# titles = soup.find_all('h5', class_=\"title\")\n# authors = soup.find_all('h6', class_=\"authors\")\n# pubs = soup.find_all('h6', class_=\"pub-info\")\n# for t, a, p in zip(titles, authors, pubs):\n blocks = soup.find_all('div', class_=\"article panel article-result\")\n for b in blocks:\n# print(b)\n titletag = b.find('h5', class_=\"title\")\n title = titletag.get_text()\n #Extract abstract url from title head\n aURL = titletag.find('a', href = True)['href']\n alink = 'https://journals.aps.org' + aURL\n #Print out the scraped information\n print(title)\n print(alink)\n #Extract research area and topic keywords\n kwlist = b.find('ul', class_=\"inline-list subjects\")\n #If the list tag exists\n if kwlist:\n lis = kwlist.find_all('li')\n kws = [li.get_text() for li in lis] \n print(kws)\n #Add utf-8 encode\n# print(kws.encode('utf-8')) \n print('----------------------------------------------------------------') \n #Collect URLs in the issue\n URLs.append('https://journals.aps.org' + aURL)\n return URLs",
"def __call__(self, filename, manuscript=\"\", journal=\"\", author=[], \n publisher=\"\"):\n\n # check the parameters\n while len(author) < 4:\n author.append(\"\")\n\n doc = getDocument(filename)\n\n Story = []\n\n # Section 1\n Story.append(\n Paragraph(\n \"\"\"<seqreset id=\"main\" /><seq id=\"main\">. THIS Amendment hereby \n modifies and supplements the attached Publication Agreement \n concerning the following Article:\"\"\", styles['outer_style'])\n )\n\n journal_info_table = Table([\n [fillInRow(manuscript, \"(manuscript title)\", width=inch*5)],\n [fillInRow(journal, \"(journal name)\", width=inch*5)],\n ],\n )\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # Section 2\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\">. The parties to the Publication Agreement and\n to this Amendment are:\"\"\", styles['outer_style'])\n )\n\n journal_info_table = Table([\n [fillInRow(author[0], \"(corresponding author)\", width=inch*5)],\n [Paragraph(\"and\", styles['outer_style'])],\n [fillInRow(journal, \"(the Publisher)\", width=inch*5)],\n ],\n )\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # Section 3\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\">. The parties agree that wherever there is any\n conflict between the Amendment and the Publication Agreement, \n the provisions of this Amendment are paramount and the \n Publication Agreement shall be construed accordingly.\"\"\",\n styles['outer_style'])\n )\n\n # Section 4\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\">. Notwithstanding any terms in the Publication\nAgreement to the contrary and in addition to the rights retained by Author \nor licensed by Published to Author in the Publication Agreement and any fair \nuse rights of Author, Author and Publisher agree that the Author shall also \nretain the following rights:\"\"\",\n styles['outer_style'])\n )\n\n # 4a\n Story.append(\n Paragraph(\n \"\"\"a. The Author shall, without limitation, have the non-exclusive right to use, reproduce, distribute, create derivative works including update, perform, and display publicly, the Article in electronic, digital or print form in connection with the Author's teaching, conference presentations, lectures, other scholarly works, and for all of Author's academic and professional activities. \"\"\",\n styles['inner_style'])\n )\n\n # 4b\n Story.append(\n Paragraph(\n \"\"\"b. Once the Article has been published by Publisher, the Author shall also have all the non-exclusive rights necessary to make, or to authorize others to make, the final published version of the Article available in digital form over the Internet, including but not limited to a website under the control of the Author or the Author's employer or through any digital repository, such as MIT's DSpace or the National Library of Medicine's PubMed Central database.\"\"\",\n styles['inner_style'])\n )\n\n #4c\n Story.append(\n Paragraph(\n \"\"\"c. The Author further retains all non-exclusive rights necessary to grant to the Author's employing institution the non-exclusive right to use, reproduce, distribute, display, publicly perform, and make copies of the work in electronic, digital or in print form in connection with teaching, digital repositories, conference presentations, lectures, other scholarly works, and all academic and professional activities conducted at the Author's employing institution. \"\"\",\n styles['inner_style'])\n )\n\n # Section 5\n Story.append(\n Paragraph(\n \"\"\"<seq id=\"main\" />. 
<b>Final Agreement.</b> This Amendment and the Publication Agreement, taken together, constitute the final agreement between the Author and the Publisher with respect to the publication of the Article and allocation of rights under copyright in the Article. Any modification of or additions to the terms of this Amendment or to the Publication Agreement must be in writing and executed by both Publisher and Author in order to be effective.\"\"\",\n styles['outer_style'])\n )\n\n # Signature\n journal_info_table = Table([\n [\"AUTHOR\", \" \", \"PUBLISHER\"],\n [fillInRow(\"\", \"(corresponding author on behalf of all authors)\"),\n \"\", fillInRow(\"\", \"\")],\n [fillInRow(\"\", \"Date\"),\n \"\",\n fillInRow(\"\", \"Date\")]\n ],\n colWidths=[inch*3, inch*.25, inch*3],\n )\n\n journal_info_table.hAlign = 'LEFT'\n Story.append(journal_info_table)\n\n # MIT Directive\n Story.append(\n Paragraph(\"<b>MIT Authors:</b>\", styles['outer_style'])\n )\n Story.append(\n Paragraph(\"Please fax a copy of the agreement to 617-253-8894. Direct any questions to [email protected]\",\n styles['inner_style'])\n )\n\n\n agreement = \"%s %s\" % (self.NAME, self.VERSION)\n doc.build(Story, \n onFirstPage=mit_pageInfo, onLaterPages=mit_pageInfo)",
"def make_papers(\n *,\n n_papers: int,\n authors: AuthorList,\n funders: FunderList,\n publishers: PublisherList,\n fields_of_study: List,\n repositories: List[Repository],\n faker: Faker,\n min_title_length: int = 2,\n max_title_length: int = 10,\n min_authors: int = 1,\n max_authors: int = 10,\n min_funders: int = 0,\n max_funders: int = 3,\n min_events: int = 0,\n max_events: int = 100,\n min_fields_of_study: int = 1,\n max_fields_of_study: int = 20,\n min_repos: int = 1,\n max_repos: int = 10,\n min_year: int = 2017,\n max_year: int = 2021,\n) -> PaperList:\n\n papers = []\n\n for i, _ in enumerate(range(n_papers)):\n # Random title\n n_words_ = random.randint(min_title_length, max_title_length)\n title_ = faker.sentence(nb_words=n_words_)\n\n # Random date\n published_date_ = pendulum.from_format(\n str(\n faker.date_between_dates(\n date_start=pendulum.datetime(min_year, 1, 1), date_end=pendulum.datetime(max_year, 12, 31)\n )\n ),\n \"YYYY-MM-DD\",\n ).date()\n published_date_ = pendulum.date(year=published_date_.year, month=published_date_.month, day=published_date_.day)\n\n # Output type\n output_type_ = random.choice(OUTPUT_TYPES)\n\n # Pick a random list of authors\n n_authors_ = random.randint(min_authors, max_authors)\n authors_ = random.sample(authors, n_authors_)\n\n # Random funder\n n_funders_ = random.randint(min_funders, max_funders)\n if n_funders_ > 0:\n funders_ = random.sample(funders, n_funders_)\n else:\n funders_ = []\n\n # Random publisher\n publisher_ = random.choice(publishers)\n\n # Journal\n journal_ = random.choice(publisher_.journals)\n\n # Random DOI\n doi_ = make_doi(publisher_.doi_prefix)\n\n # Random events\n n_events_ = random.randint(min_events, max_events)\n events_ = []\n today = datetime.now()\n today_ts = int(today.timestamp())\n start_date = datetime(today.year - 2, today.month, today.day)\n start_ts = int(start_date.timestamp())\n\n for _ in range(n_events_):\n event_date_ = date_between_dates(start_ts=start_ts, end_ts=today_ts)\n events_.append(Event(source=random.choice(EVENT_TYPES), event_date=event_date_))\n\n # Fields of study\n n_fos_ = random.randint(min_fields_of_study, max_fields_of_study)\n level_0_index = 199\n fields_of_study_ = [random.choice(fields_of_study[:level_0_index])]\n fields_of_study_.extend(random.sample(fields_of_study, n_fos_))\n\n # Open access status\n publisher_is_free_to_read_ = True\n if journal_.license is not None:\n # Gold\n license_ = journal_.license\n else:\n license_ = random.choice(LICENSES)\n if license_ is None:\n # Bronze: free to read on publisher website but no license\n publisher_is_free_to_read_ = bool(random.getrandbits(1))\n # Hybrid: license=True\n\n # Green: in a 'repository'\n paper_repos = []\n if bool(random.getrandbits(1)):\n # There can be multiple authors from the same institution so the repos have to be sampled from a set\n n_repos_ = random.randint(min_repos, max_repos)\n repos = set()\n for repo in [author.institution.repository for author in authors_] + repositories:\n repos.add(repo)\n paper_repos += random.sample(repos, n_repos_)\n\n # Make paper\n paper = Paper(\n i,\n type=\"journal-article\",\n doi=doi_,\n title=title_,\n published_date=published_date_,\n output_type=output_type_,\n authors=authors_,\n funders=funders_,\n journal=journal_,\n publisher=publisher_,\n events=events_,\n fields_of_study=fields_of_study_,\n publisher_license=license_,\n publisher_is_free_to_read=publisher_is_free_to_read_,\n repositories=paper_repos,\n in_scihub=bool(random.getrandbits(1)),\n 
in_unpaywall=True,\n )\n papers.append(paper)\n\n # Make a subset of papers not in Unpaywall\n not_in_unpaywall = random.sample([paper for paper in papers], 3)\n for paper in not_in_unpaywall:\n paper.in_unpaywall = False\n\n # Create paper citations\n # Sort from oldest to newest\n papers.sort(key=lambda p: p.published_date)\n\n for i, paper in enumerate(papers):\n # Create cited_by\n n_papers_forwards = len(papers) - i\n n_cited_by = random.randint(0, int(n_papers_forwards / 2))\n paper.cited_by = random.sample(papers[i + 1 :], n_cited_by)\n\n return papers",
"def index():\n rows=db().select(db.titles.ALL,orderby=db.titles.title_name)\n \n #print type(value)\n \n coloms=db().select(db.titles.ALL,orderby=db.titles.title_name)\n #print coloms\n return dict(rows=rows,coloms=coloms)"
] |
[
"0.6398447",
"0.6009631",
"0.59154475",
"0.5905178",
"0.58975077",
"0.5821678",
"0.5820468",
"0.5633226",
"0.56244",
"0.56077796",
"0.5602855",
"0.55395496",
"0.55325943",
"0.5529402",
"0.5523234",
"0.5480759",
"0.54773694",
"0.5460018",
"0.5447724",
"0.5437615",
"0.5431244",
"0.5421888",
"0.5385542",
"0.5351843",
"0.53322875",
"0.53266805",
"0.5306078",
"0.5298462",
"0.52945596",
"0.5283142"
] |
0.6384073
|
1
|
Read GFF3-formatted data in the specified file (or file-like object). Return a pandas dataframe with ID, Parent, seqid, source, type, start, end, score, strand, phase, and attributes columns. The ID and Parent are extracted from the attributes column, and the dataframe is indexed by ID
|
def gff3_to_dataframe( file ):
result = _read_gff3_using_pandas( file )
extract_attributes_to_columns( result, ['ID', 'Parent', 'Name' ] )
return result
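A note on the helper this calls: extract_attributes_to_columns is not shown in this record. Below is a minimal sketch of what it might do, assuming standard GFF3 semicolon-delimited key=value attribute strings; the function body is an assumption for illustration, not the record's own code.

def extract_attributes_to_columns( result, names ):
    # Pull the requested key=value pairs out of the 'attributes' column into their own columns.
    def get_value( attributes, name ):
        for field in str( attributes ).split( ';' ):
            if field.startswith( name + '=' ):
                return field[len( name ) + 1:]
        return None
    for name in names:
        result[name] = result['attributes'].apply( lambda a: get_value( a, name ) )
    if 'ID' in names:
        # Index the dataframe by ID, as the query above describes.
        result.set_index( 'ID', drop = False, inplace = True )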
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _read_gff3_using_pandas( file ):\n import pandas\n result = pandas.read_table(\n file,\n comment = '#',\n names = [ 'seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes' ],\n na_values = \".\",\n dtype = {\n 'seqid': str,\n 'source': str,\n 'type': str,\n 'start': int,\n 'end': int,\n 'score': float,\n 'strand': str,\n 'phase': str,\n 'attributes': str\n }\n )\n return result",
"def parseGFF3(filename):\n\n # Parse with transparent decompression\n openFunc = gzip.open if filename.endswith(\".gz\") else open\n with openFunc(filename) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n parts = line.strip().split(\"\\t\")\n # If this fails, the file format is not standard-compatible\n assert len(parts) == len(gffInfoFields)\n # Normalize data\n normalizedInfo = {\n \"seqid\": None if parts[0] == \".\" else urllib.unquote(parts[0]),\n \"source\": None if parts[1] == \".\" else urllib.unquote(parts[1]),\n \"type\": None if parts[2] == \".\" else urllib.unquote(parts[2]),\n \"start\": None if parts[3] == \".\" else int(parts[3]),\n \"end\": None if parts[4] == \".\" else int(parts[4]),\n \"score\": None if parts[5] == \".\" else float(parts[5]),\n \"strand\": None if parts[6] == \".\" else urllib.unquote(parts[6]),\n \"phase\": None if parts[7] == \".\" else urllib.unquote(parts[7]),\n \"attributes\": parseGFFAttributes(parts[8])\n }\n # Alternatively, you can emit the dictionary here, if you need\n # mutability:\n # yield normalizedInfo\n yield GFFRecord(**normalizedInfo)",
"def parse_gff3(filename):\n genes = OrderedDict()\n transcript_to_locus = {}\n\n count_per_transcript = defaultdict(lambda: 1)\n\n with open(filename) as gff_in:\n for line in gff_in:\n # Skip comments\n if not line.strip()[0] == '#':\n line_data = parse_line(line)\n\n # Parts (e.g. CDS or Exon) might not have an ID. One will be added here\n if ID_ATTRIBUTE not in line_data['attributes'].keys() and line_data['feature'] in PARTS_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n counter_id = line_data['attributes'][PARENT_ATTRIBUTE] + '.' + line_data['feature'] + '.'\n new_id = counter_id + str(count_per_transcript[counter_id])\n count_per_transcript[counter_id] += 1\n line_data['attributes'][ID_ATTRIBUTE] = new_id\n\n # Every line needs a valid ID\n if ID_ATTRIBUTE in line_data['attributes'].keys():\n\n if line_data['feature'] in LOCUS_FEATURES:\n genes[line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'transcripts': OrderedDict()\n }\n\n elif line_data['feature'] in TRANSCRIPT_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n\n if parent_id in genes.keys():\n genes[parent_id]['transcripts'][line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'parts': []\n }\n\n transcript_to_locus[line_data['attributes'][ID_ATTRIBUTE]] = \\\n line_data['attributes'][PARENT_ATTRIBUTE]\n\n elif line_data['feature'] in PARTS_FEATURES:\n\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n grandparent_id = transcript_to_locus[parent_id]\n\n genes[grandparent_id]['transcripts'][parent_id]['parts'].append(line_data)\n\n return genes",
"def parse_sequences_from_gff_metadata( file ):\n import pandas\n result = []\n for line in file:\n if line.startswith( '##sequence-region' ):\n parts = line.strip().split( \" \" )\n nameStartEnd = parts[-3:] # last 3 elements\n result.append({\n \"seqid\": nameStartEnd[0],\n \"start\": int( nameStartEnd[1] ),\n \"end\": int( nameStartEnd[2] )\n })\n elif not line[0] == '#':\n # quit when we meet the first non-metadata line\n break\n return pandas.DataFrame( result )",
"def parse(self,gff3_line):\r\n split_line = gff3_line.strip().split('\\t')\r\n self.seqid = split_line[0]\r\n self.source = split_line[1]\r\n self.type = split_line[2]\r\n self.start = int(split_line[3])\r\n self.end = int(split_line[4])\r\n self.score = split_line[5]\r\n self.strand = split_line[6]\r\n self.phase = split_line[7]\r\n self.attributes.parse(split_line[8])\r\n return self",
"def read_gff3_line(line):\n cols = line.strip().split('\\t')\n if ASSUME_OFFBYONE:\n cols[3] = str(int(cols[3]) - 1)\n known_fields = set()\n fields = {'seqid': cols[0], 'source': cols[1], 'type': cols[2],\n 'start': cols[3], 'end': cols[4], 'score': cols[5],\n 'strand': cols[6], 'phase': cols[7]}\n known_fields.update(fields.keys())\n attrlist = cols[8]\n attributes = dict()\n for attr in attrlist.split(';'):\n if not attr.strip():\n continue\n k, v = attr.strip().split('=')\n if k.lower() == 'dbxref':\n try:\n subkey, subvalue = v.split(':')\n except ValueError:\n if SHOW_PARSER_WARNING:\n sys.stderr.write('\\nWarning: skipping Dbxref value {} - no key! Line: {}'.format(v, line.strip()))\n continue\n assert subkey not in attributes, 'Sub-key already in attributes list: {} in line {}'.format(subkey, line)\n attributes[subkey] = subvalue.strip()\n known_fields.add(subkey)\n continue\n elif ',' in v:\n raise ValueError('List of values for key {}: {} in line {}'.format(k, v, line))\n else:\n # who knows what crazy stuff could be here...\n pass\n attributes[k] = v.strip()\n known_fields.add(k)\n fields.update(attributes)\n return fields, known_fields",
"def read_gff3(self,gff3_file):\r\n with open(gff3_file) as infile:\r\n set = None\r\n for line in infile:\r\n if line[0] == '#':\r\n if line[:3] == '###' and set:\r\n self.sets.append(set)\r\n set = None\r\n if line.startswith(\"##sequence-region\"):\r\n splitline = line.split()\r\n self.sequence_regions[splitline[1]] = line\r\n #TODO: properly deal with comment lines.\r\n self.sets.append(line)\r\n else:\r\n line = GFF3_line(set,line)\r\n #adding the feature individually\r\n self.features_id[line.attributes.id] = line\r\n if line.attributes.name:\r\n if line.attributes.name in self.features_name:\r\n #TODO: find a way to handle features that have the same name.\r\n pass#print(line.attributes.id, line.attributes.name, self.features_name[line.attributes.name].attributes.id)\r\n else:\r\n self.features_name[line.attributes.name] = line\r\n #adding the set of features\r\n if line.type == \"region\" and not line.attributes.parent:\r\n #this feature has been deemed redundant and is not used in recent versions of the gff3,\r\n if set:\r\n #this is the first element of a set,\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set._flanking_region = line\r\n elif line.type == \"flanking_region\":\r\n if set and set.flanking_region:\r\n # this can also be the first element of a set,\r\n # if the set already has a flanking region\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set.flanking_region = line\r\n elif line.type == \"region\" and line.attributes.parent:\r\n set.gt_seq_region.append(line)\r\n elif line.type == \"PCR_product\":\r\n set.pcr_product.append(line)\r\n elif line.type == \"forward_primer\":\r\n set.forward_primer.append(line)\r\n elif line.type == \"reverse_primer\":\r\n set.reverse_primer.append(line)\r\n elif line.type == \"SNP\":\r\n set.snp.append(line)\r\n else:\r\n pass#print(\"line of type {} not added.\".format(line.type))\r\n if set:\r\n # there was no '###' at the end of the file so the last set needs to be added.\r\n self.sets.append(set)",
"def parse_data(self, path_to_file):\n\n line_dict, rel_dict = self.create_dicts(path_to_file)\n \n line_df = self.create_dataframe(line_dict, ['line'])\n rel_df = self.create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def read_in_gtf(infile_path, gtf_rows_to_skip): \n\tdf = pd.read_csv(infile_path, compression='gzip', sep='\\t', dtype=str, header=None, skiprows=range(gtf_rows_to_skip))\n\n\tcols = ['#chrom', 'source', 'feature', 'chromStart', 'chromEnd', 'score', 'strand', 'frame', 'transcript_id']\n\tdf.columns = cols\n\n\treturn df",
"def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n 
cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds",
"def parse_gff(g):\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)",
"def parse_data(path_to_file):\n\n line_dict, rel_dict = create_dicts(path_to_file)\n \n line_df = create_dataframe(line_dict, ['line'])\n rel_df = create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def convert_GFF2_to_GFF3(line):\n gff3 = GFF3_line()\n if len(line.strip().split('\\t')) == 9:\n gff3.seqid, \n gff3.source, \n gff3.type, \n gff3.start, \n gff3.end, \n gff3.score, \n gff3.strand, \n gff3.phase, \n attr = line.strip().split('\\t')\n if gff3.type == 'similarity':\n return None\n attr = attr.split(';')\n for pair in attr:\n k,v = pair.split()\n gff3.attributes[k] = v\n gff3.attributes_order.append(k)\n gff3.refreshAttrStr()\n elif len(line.strip().split('\\t')) == 8:\n gff3.seqid, \n gff3.source, \n gff3.type, \n gff3.start, \n gff3.end, \n gff3.score, \n gff3.strand, \n gff3.phase = line.strip().split('\\t')\n gff3.attributes['ID'] = '.'\n gff3.attributes_order.append('ID')\n if gff3.type == 'similarity':\n return None\n\n return gff3",
"def load_gff(filepath):\n # GFF fields\n colnames = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand',\n 'phase', 'attributes']\n\n # get lines from file\n with open(filepath, 'r') as fp:\n lines = fp.readlines()\n\n # filter out non-gene entries\n gene_rows = [ x for x in lines if 'gene\\t' in x]\n\n # Next, let's create a StringIO buffer -- this is similar to the file\n # and url handles we have seen so far. We can then pass this to a csv\n # reader instance in the same way we have seen for actual files\n\n # First though, let's collapse the rows back into a single string\n csv_str = \"\".join(gene_rows)\n str_buffer = StringIO.StringIO(csv_str)\n\n return csv.DictReader(str_buffer, fieldnames=colnames, delimiter='\\t')",
"def run(input, output, additional=None, fasta_path=None, seed_path=None):\r\n version = \"##gff-version 3\\n\"\r\n gff3_columns = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes']\r\n gff3 = pd.DataFrame(columns=gff3_columns)\r\n table = pd.read_csv(input, sep='\\t')\r\n\r\n if seed_path:\r\n seed_file = pd.read_csv(seed_path, sep='\\t')\r\n\r\n if fasta_path is not None:\r\n fasta_file = ''\r\n open(fasta_path, 'w').close()\r\n\r\n\r\n if additional:\r\n table_to_add = pd.read_csv(additional, sep='\\t')\r\n table = table.append(table_to_add)\r\n\r\n for index, row in table.iterrows():\r\n name = handleGivenName(row['name'], table, 'name')\r\n seqId = row['seqName']\r\n name5p = handleGivenName(row['5pname'], table, '5pname')\r\n seq5p = row['5pseq']\r\n name3p = handleGivenName(row['3pname'], table, '3pname')\r\n seq3p = row['3pseq']\r\n strand = row['strand']\r\n hairpin = row['hairpinSeq']\r\n start = row['start']\r\n end = row['end']\r\n\r\n if row['5pRC'] >= row['3pRC']:\r\n name5p += '|m'\r\n name3p += '|s'\r\n else:\r\n name5p += '|s'\r\n name3p += '|m'\r\n\r\n seq5p_freq = len(table[(table['5pseq'] == seq5p) | (table['3pseq'] == seq5p)])\r\n seq3p_freq = len(table[(table['5pseq'] == seq3p) | (table['3pseq'] == seq3p)])\r\n\r\n name5p += f'|{seq5p_freq}'\r\n name3p += f'|{seq3p_freq}'\r\n\r\n\r\n if seed_path is not None:\r\n if not pd.isnull(seq5p):\r\n seq5p_seed = seq5p[1:8].upper().replace(\"T\", \"U\")\r\n try:\r\n name5p += '|' + seed_file[seed_file['seed'] == seq5p_seed][\"miRBase_name\"].iloc[0]\r\n except:\r\n name5p += '|' + seq5p_seed\r\n\r\n if not pd.isnull(seq3p):\r\n seq3p_seed = seq3p[1:8].upper().replace(\"T\", \"U\")\r\n try:\r\n name3p += '|' + seed_file[seed_file['seed'] == seq3p_seed][\"miRBase_name\"].iloc[0]\r\n except:\r\n name3p += '|' + seq3p_seed\r\n \r\n if fasta_path is not None:\r\n if not pd.isnull(seq5p):\r\n fasta_file += f'>{name5p}\\n{seq5p}\\n'\r\n if not pd.isnull(seq3p):\r\n fasta_file += f'>{name3p}\\n{seq3p}\\n'\r\n\r\n if len(fasta_file) > 100000:\r\n with open(fasta_path, 'a+') as f:\r\n f.write(fasta_file)\r\n fasta_file = ''\r\n\r\n gff_row = [[seqId, '.', 'pre_miRNA', start, end, '.', strand, '.', f'ID={name}']]\r\n\r\n if strand == '+':\r\n try:\r\n offset5p = len(hairpin.split(seq5p)[0])\r\n start5p = start + offset5p\r\n end5p = start + offset5p + len(seq5p) - 1\r\n gff_row.append([seqId, '.', 'miRNA', start5p, end5p, '.', strand, '.', f'ID={name5p}'])\r\n except:\r\n pass\r\n\r\n try:\r\n offset3p = len(hairpin.split(seq3p)[0])\r\n start3p = start + offset3p\r\n end3p = start + offset3p + len(seq3p) - 1\r\n gff_row.append([seqId, '.', 'miRNA', start3p, end3p, '.', strand, '.', f'ID={name3p}'])\r\n except:\r\n pass\r\n\r\n else:\r\n try:\r\n offset5p = len(hairpin.split(seq5p)[0])\r\n end5p = end - offset5p\r\n start5p = end - offset5p - len(seq5p) + 1\r\n gff_row.append([seqId, '.', 'miRNA', start5p, end5p, '.', strand, '.', f'ID={name5p}'])\r\n except:\r\n pass\r\n\r\n try:\r\n offset3p = len(hairpin.split(seq3p)[0])\r\n end3p = end - offset3p\r\n start3p = end - offset3p - len(seq3p) + 1\r\n gff_row.append([seqId, '.', 'miRNA', start3p, end3p, '.', strand, '.', f'ID={name3p}'])\r\n except:\r\n pass\r\n\r\n miRNAs = pd.DataFrame(gff_row, columns=gff3_columns)\r\n\r\n gff3 = gff3.append(miRNAs)\r\n\r\n with open(output, 'w') as file:\r\n file.write(version)\r\n\r\n if fasta_path is not None:\r\n with open(fasta_path, 'a+') as f:\r\n f.write(fasta_file)\r\n\r\n gff3.to_csv(output, index=False, 
header=False, mode=\"a\", sep='\\t')",
"def _read_gtf(gtf):\n if not gtf:\n return gtf\n db = defaultdict(list)\n with open(gtf) as in_handle:\n for line in in_handle:\n if line.startswith(\"#\"):\n continue\n cols = line.strip().split(\"\\t\")\n name = [n.split(\"=\")[1] for n in cols[-1].split(\";\") if n.startswith(\"Name\")]\n chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]\n if cols[2] == \"miRNA_primary_transcript\":\n db[name[0]].append([chrom, int(start), int(end), strand])\n return db",
"def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)",
"def get_ngs_resequencing_file(self) -> pd.DataFrame:\n return pd.read_csv(self.sequence_data_paths.ngs_path / Path(\"ngs_dataset.csv.gz\"), index_col=0) # type: ignore",
"def read_data(filename):\n \n ######################################################\n # Disadvantage here: only includes J_up = 11 here, #\n # please manually add more if you have #\n # J_up >= 12 CO lines #\n ######################################################\n \n ascii_data = ascii.read(\n filename, names=[\n \"SOURCE\", \"z\", \"D_L\", \"line_width\",\n \"CO_J_1\", \"eCO_J_1\", \"CO_J_2\", \"eCO_J_2\", \"CO_J_3\", \"eCO_J_3\",\n \"CO_J_4\", \"eCO_J_4\", \"CO_J_5\", \"eCO_J_5\", \"CO_J_6\", \"eCO_J_6\",\n \"CO_J_7\", \"eCO_J_7\", \"CO_J_8\", \"eCO_J_8\", \"CO_J_9\", \"eCO_J_9\",\n \"CO_J_10\", \"eCO_J_10\", \"CO_J_11\", \"eCO_J_11\", \"CI_1\", \"eCI_1\",\n \"CI_2\", \"eCI_2\"])\n\n pd = ascii_data.to_pandas()\n pd = pd.set_index('SOURCE')\n return pd.T",
"def read_nodes_dmp(fname):\n df = pd.read_csv(fname, sep=\"|\", header=None, index_col=False,\n names=['tax_id', \n 'parent_tax_id',\n 'rank', \n 'embl_code',\n 'division_id', \n 'inherited_div_flag', # 1 or 0\n 'genetic_code_id', \n 'inherited_GC_flag', # 1 or 0\n 'mitochondrial_genetic_code_id', \n 'inherited_MGC_flag', # 1 or 0\n 'GenBank_hidden_flag',\n 'hidden_subtree_root_flag', # 1 or 0 \n 'comments'])\n return df.assign(rank = lambda x: x['rank'].str.strip(),\n embl_code = lambda x: x['embl_code'].str.strip(),\n comments = lambda x: x['comments'].str.strip())",
"def get_data(filename):\n df = gpd.read_file(filename, index=\"OBJECTID\")\n # Sort along the length column, which orders the points from top\n # to bottom\n fplencol = [x for x in df.columns if x.startswith(\"fpLen\")][0]\n df = df.sort_values(fplencol, ascending=True)\n df = run_checks(df)\n snapdf = gpd.read_file(\n filename.replace(\"smpldef3m\", \"snaps3m\"), index=\"OBJECTID\"\n )\n # Compute full rotation string\n # OK, be careful here. Presently, the 8 char field covers\n # 2010 thru 2017, so we rotate to cover the first and last years\n # 2007 2011[1]\n # 2008 2010[0]\n # 2009 2011[1]\n # 2018 2016[6]\n # 2019 2017[7]\n # 2020 2018[6]\n s = df[\"CropRotatn_CY_2017\"]\n df[\"landuse\"] = (\n s.str[1]\n + s.str[0]\n + s.str[1]\n + s\n + s.str[6]\n + s.str[7]\n + s.str[6]\n + s.str[7]\n )\n s = df[\"Management_CY_2017\"]\n df[\"management\"] = (\n s.str[1]\n + s.str[0]\n + s.str[1]\n + s\n + s.str[6]\n + s.str[7]\n + s.str[6]\n + s.str[7]\n )\n return df, snapdf",
"def parse_gff(path):\n fasta = find_fasta(path)\n if not fasta:\n raise FileNotFoundError(f\"Could not find partner FASTA file for {path}\")\n\n # Parse FASTA and create GFFUtils database\n fasta = parse_infile(fasta, \"fasta\")\n gff = gffutils.create_db(\n str(path),\n \":memory:\",\n force=True,\n merge_strategy=\"create_unique\",\n sort_attribute_values=True\n )\n regions = find_regions(gff.directives)\n\n for record in fasta:\n # Normalise Feature location based on ##sequence-region directive.\n # Necessary for extracted GFF3 files that still store coordinates\n # relative to the entire region, not to the extracted FASTA.\n # If no sequence-region directive is found, assumes 1 (i.e. sequence start).\n cds, gene = parse_cds_features(\n gff.region(seqid=record.id, featuretype=[\"gene\", \"CDS\"]),\n regions[record.id][0] - 1 if record.id in regions else 0\n )\n if not cds:\n LOG.warning(\"Found no CDS features in %s [%s]\", record.id, path)\n record.features = sorted(\n [*gene, *merge_cds_features(cds)],\n key=lambda f: f.location.start\n )\n\n return fasta",
"def loadFromFile(fileName):\n rel = Relation()\n\n with open(fileName, \"r\") as f:\n lines = f.readlines()\n\n try:\n relName = \"\"\n fieldNames = []\n fieldTypes = []\n dataPart = False\n datasets = []\n classColName = None\n skipCols = []\n skipCounter = 0\n for l in lines:\n l = l.strip()\n if \"\" == l or \"%\" == l[0]:\n continue\n\n if \"@\" == l[0]:\n if not dataPart:\n fields = re.split(\"\\s+\", l.strip())\n if \"@RELATION\" == fields[0].upper():\n relName = fields[1]\n elif \"@ATTRIBUTE\" == fields[0].upper():\n if \"NUMERIC\" == fields[2].upper() or \"REAL\" == fields[2].upper():\n fieldTypes.append(float)\n fieldNames.append(fields[1])\n else:\n classColName = fields[1]\n skipCols.append(skipCounter)\n skipCounter += 1\n elif \"@DATA\" == fields[0].upper():\n if len(fieldNames) != 0:\n if classColName is None:\n # class column is numeric, but we need a string\n classColName = fieldNames[-1]\n fieldTypes[-1] = str\n else:\n skipCols.pop() # last column is class column, don't skip it\n fieldNames.append(classColName)\n fieldTypes.append(str)\n dataPart = True\n rel.relName = relName\n rel.fieldNames = fieldNames\n elif dataPart:\n fieldsTmp = re.split(\",\", l.strip())\n fields = []\n for i, f_ in enumerate(fieldsTmp):\n if i not in skipCols:\n fields.append(f_)\n\n for i, t in enumerate(fieldTypes):\n fields[i] = t(fields[i])\n\n if len(fields) > 1:\n rel.allClasses.add(fields[-1])\n datasets.append(fields)\n rel.datasets = datasets\n rel.numDatasets = len(datasets)\n rel.activeClasses = set(rel.allClasses)\n except:\n raise Exception(\"ARFF parsing error!\")\n\n return rel",
"def read(self):\n\t\tentities = dict()\n\t\trelations = set()\n\t\tedges = set()\n\t\twith open(self.file_path, encoding=\"utf-8\") as f:\n\t\t\tfor line in tqdm(f):\n\t\t\t\tif(self.prob == 1.0 or random() < self.prob):\n\t\t\t\t\tsource, relation, target, _ = line.split(\" \", 3)\n\t\t\t\t\tis_dataprop = target.startswith('\"')\n\t\t\t\t\tif source not in entities:\n\t\t\t\t\t\tentities[source] = dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\tentities[source][\"out_degree\"] += 1\n\t\t\t\t\tentities[source][\"degree\"] += 1\n\t\t\t\t\tif not is_dataprop:\n\t\t\t\t\t\tif target not in entities:\n\t\t\t\t\t\t\tentities[target] = dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\t\tentities[target][\"in_degree\"] += 1\n\t\t\t\t\t\tentities[target][\"degree\"] += 1\n\t\t\t\t\t\trelations.add(relation)\n\t\t\t\t\t\tedges.add((relation, source, target))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(self.include_dataprop):\n\t\t\t\t\t\t\tentities[source][\"data_properties\"][relation] = target\n\n\t\treturn (entities, relations, edges)",
"def load_swc(file_name):\n\n df = pd.read_csv(file_name, delimiter=' ', header=None, comment='#',\n names=['sample', 'identifier', 'x', 'y', 'z', 'r', 'parent'],\n skipinitialspace=True).astype({'sample':int,'identifier':int,'x':float,'y':float,'z':float,'r':float,'parent':int})\n return df",
"def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z",
"def main(inFilepath, outFilepath):\n\n gff_df=read_gff(inFilepath, additional_lst=[\"ID\"])\n attribute_lst=[]\n for _, row in gff_df.iterrows():\n orfId = \"{}_{}\".format(row[\"seqname\"], row[\"ID\"].split(\"_\")[-1])\n att = \"{};orf_id={}\".format(row[\"attribute\"], orfId)\n attribute_lst.append(att)\n gff_df[\"attribute\"]=attribute_lst\n write_gff(gff_df, outFilepath)\n print(\"DONE: output {}\".format(outFilepath))",
"def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))",
"def _read_in_file(path, idc):\n info('read in file %s' % path)\n\n if not os.path.exists(path):\n info('file path not exist: %s' % path)\n sys.exit(1)\n try:\n if path.endswith('csv.gz'):\n mat = pd.read_csv(path, compression='gzip', index_col=0)\n elif path.endswith('.parquet'):\n mat = pd.read_parquet(path)\n else:\n mat = pd.read_csv(path, sep='\\t', index_col=0)\n except:\n traceback.print_exc(file=sys.stderr) # maybe the file type problem\n sys.exit(1)\n # TARGET-RT, too few sample is avaliable\n mat = mat[~mat.project_id.isin(['TARGET-RT'])]\n # check file title\n if 'project_id' not in mat.columns.tolist():\n info('project_id not in column names')\n sys.exit(1)\n if 'sample_type' not in mat.columns.tolist():\n info('sample_type is not in columns')\n sys.exit(1)\n # specify to needed genes:\n # the gene not in matrix columns\n diffgene = list(set(idc) - set(mat.columns.tolist()))\n if diffgene:\n info('these genes %s are not in the expression matrix of this cancer, skip %s' % (\n str(diffgene), str(path)))\n # return(pd.DataFrame()) # return a empty dataframe\n return (mat)",
"def _get_position_data(file):\n return pd.read_csv(file)"
] |
[
"0.7699503",
"0.65951246",
"0.6426204",
"0.62687147",
"0.6233554",
"0.6230566",
"0.61066884",
"0.6091305",
"0.60736585",
"0.59793144",
"0.5864509",
"0.58356196",
"0.58144176",
"0.5696015",
"0.566202",
"0.56447494",
"0.5621449",
"0.5619404",
"0.557519",
"0.5569083",
"0.5558418",
"0.5507507",
"0.5484679",
"0.5478144",
"0.5473915",
"0.5469115",
"0.5467803",
"0.546333",
"0.5449864",
"0.54415846"
] |
0.81290424
|
0
|
Helper function to read the given GFF3 file into a dataframe, without any postprocessing.
|
def _read_gff3_using_pandas( file ):
import pandas
result = pandas.read_table(
file,
comment = '#',
names = [ 'seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes' ],
na_values = ".",
dtype = {
'seqid': str,
'source': str,
'type': str,
'start': int,
'end': int,
'score': float,
'strand': str,
'phase': str,
'attributes': str
}
)
return result
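A minimal usage sketch (the GFF3 line below is made up for illustration); since pandas.read_table accepts both paths and file-like objects, an in-memory buffer works:

import io

gff3_text = "chr1\tsource\tgene\t100\t200\t.\t+\t.\tID=gene1;Name=GeneA\n"
df = _read_gff3_using_pandas( io.StringIO( gff3_text ) )
print( df[['seqid', 'type', 'start', 'end']] )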
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gff3_to_dataframe( file ):\n result = _read_gff3_using_pandas( file )\n extract_attributes_to_columns( result, ['ID', 'Parent', 'Name' ] )\n return result",
"def read_in_gtf(infile_path, gtf_rows_to_skip): \n\tdf = pd.read_csv(infile_path, compression='gzip', sep='\\t', dtype=str, header=None, skiprows=range(gtf_rows_to_skip))\n\n\tcols = ['#chrom', 'source', 'feature', 'chromStart', 'chromEnd', 'score', 'strand', 'frame', 'transcript_id']\n\tdf.columns = cols\n\n\treturn df",
"def parseGFF3(filename):\n\n # Parse with transparent decompression\n openFunc = gzip.open if filename.endswith(\".gz\") else open\n with openFunc(filename) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n parts = line.strip().split(\"\\t\")\n # If this fails, the file format is not standard-compatible\n assert len(parts) == len(gffInfoFields)\n # Normalize data\n normalizedInfo = {\n \"seqid\": None if parts[0] == \".\" else urllib.unquote(parts[0]),\n \"source\": None if parts[1] == \".\" else urllib.unquote(parts[1]),\n \"type\": None if parts[2] == \".\" else urllib.unquote(parts[2]),\n \"start\": None if parts[3] == \".\" else int(parts[3]),\n \"end\": None if parts[4] == \".\" else int(parts[4]),\n \"score\": None if parts[5] == \".\" else float(parts[5]),\n \"strand\": None if parts[6] == \".\" else urllib.unquote(parts[6]),\n \"phase\": None if parts[7] == \".\" else urllib.unquote(parts[7]),\n \"attributes\": parseGFFAttributes(parts[8])\n }\n # Alternatively, you can emit the dictionary here, if you need\n # mutability:\n # yield normalizedInfo\n yield GFFRecord(**normalizedInfo)",
"def read_gff3(self,gff3_file):\r\n with open(gff3_file) as infile:\r\n set = None\r\n for line in infile:\r\n if line[0] == '#':\r\n if line[:3] == '###' and set:\r\n self.sets.append(set)\r\n set = None\r\n if line.startswith(\"##sequence-region\"):\r\n splitline = line.split()\r\n self.sequence_regions[splitline[1]] = line\r\n #TODO: properly deal with comment lines.\r\n self.sets.append(line)\r\n else:\r\n line = GFF3_line(set,line)\r\n #adding the feature individually\r\n self.features_id[line.attributes.id] = line\r\n if line.attributes.name:\r\n if line.attributes.name in self.features_name:\r\n #TODO: find a way to handle features that have the same name.\r\n pass#print(line.attributes.id, line.attributes.name, self.features_name[line.attributes.name].attributes.id)\r\n else:\r\n self.features_name[line.attributes.name] = line\r\n #adding the set of features\r\n if line.type == \"region\" and not line.attributes.parent:\r\n #this feature has been deemed redundant and is not used in recent versions of the gff3,\r\n if set:\r\n #this is the first element of a set,\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set._flanking_region = line\r\n elif line.type == \"flanking_region\":\r\n if set and set.flanking_region:\r\n # this can also be the first element of a set,\r\n # if the set already has a flanking region\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set.flanking_region = line\r\n elif line.type == \"region\" and line.attributes.parent:\r\n set.gt_seq_region.append(line)\r\n elif line.type == \"PCR_product\":\r\n set.pcr_product.append(line)\r\n elif line.type == \"forward_primer\":\r\n set.forward_primer.append(line)\r\n elif line.type == \"reverse_primer\":\r\n set.reverse_primer.append(line)\r\n elif line.type == \"SNP\":\r\n set.snp.append(line)\r\n else:\r\n pass#print(\"line of type {} not added.\".format(line.type))\r\n if set:\r\n # there was no '###' at the end of the file so the last set needs to be added.\r\n self.sets.append(set)",
"def parse_standard_gff3(\n gff: Path,\n gffutil_parse_args: Optional[GffutilsParseArgs] = GffutilsParseArgs(),\n parse_func: Optional[Callable[[FeatureDB, List[str]], Iterable[AnnotationCollectionModel]]] = default_parse_func,\n gffutil_transform_func: Optional[Callable[[Feature], Feature]] = None,\n db_fn: Optional[str] = \":memory:\",\n) -> Iterable[ParsedAnnotationRecord]:\n db = gffutils.create_db(str(gff), db_fn, transform=gffutil_transform_func, **gffutil_parse_args.__dict__)\n if sum(db.count_features_of_type(i) for i in db.featuretypes()) == 0:\n raise EmptyGFF3Exception(\"Parsing this GFF3 led to zero features. Is it empty or corrupted?\")\n logger.info(f\"Parsed {gff}\")\n for i in db.featuretypes():\n logger.info(f\"Found feature type {i} with {db.count_features_of_type(i)} features\")\n # get the sequences\n chrom_query = db.execute(\"SELECT DISTINCT seqid FROM features\")\n chroms = [x[\"seqid\"] for x in chrom_query]\n logger.info(f\"Found {len(chroms)} sequences\")\n for annot in parse_func(db, chroms):\n yield ParsedAnnotationRecord(annot)",
"def load_gff(filepath):\n # GFF fields\n colnames = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand',\n 'phase', 'attributes']\n\n # get lines from file\n with open(filepath, 'r') as fp:\n lines = fp.readlines()\n\n # filter out non-gene entries\n gene_rows = [ x for x in lines if 'gene\\t' in x]\n\n # Next, let's create a StringIO buffer -- this is similar to the file\n # and url handles we have seen so far. We can then pass this to a csv\n # reader instance in the same way we have seen for actual files\n\n # First though, let's collapse the rows back into a single string\n csv_str = \"\".join(gene_rows)\n str_buffer = StringIO.StringIO(csv_str)\n\n return csv.DictReader(str_buffer, fieldnames=colnames, delimiter='\\t')",
"def readData(f):\n line = f.readline()\n fieldnames = [x.strip() for x in line.split(\",\")]\n line = f.readline().strip()\n data = []\n while line != \"\":\n if line[0] != \"#\":\n fields = line.split(\",\")\n data.append((fields[0], [extractSI(v)[0] for v in fields[1:]]))\n line = f.readline().strip()\n # Man, working out this next incantation out was non-trivial!\n # They really want you to be snarfing data in csv or some other format they understand!\n res = pd.DataFrame.from_items(data, columns=fieldnames[1:], orient=\"index\")\n return res",
"def gp_dataframe_import(filename):\n path = os.path.join('..', 'data', filename)\n frame = pd.read_csv(path)\n return frame",
"def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n 
cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds",
"def read_fermi_3fhl():\n with open('fermi_3fhl.csv') as fh:\n lines = fh.readlines()\n colnames = lines[0].strip().split(',')\n data = []\n for line in lines[1:]:\n parts = line.strip().split(',')\n row = dict(zip(colnames, parts))\n for name in ['Flux', 'GLON', 'GLAT', 'Signif_Avg']:\n row[name] = float(row[name])\n data.append(row)\n return data",
"def readFlow(fn):\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print 'Reading %d x %d flo file\\n' % (w, h)\n data = np.fromfile(f, np.float32, count=2*int(w)*int(h))\n # Reshape data into 3D array (columns, rows, bands)\n # The reshape here is for visualization, the original code is (w,h,2)\n return np.resize(data, (int(h), int(w), 2))",
"def read_flow(filename):\n with open(filename, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n data = np.fromfile(f, np.float32, count=int(2*w*h))\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h[0], w[0], 2))",
"def read_using_fguide(fname, fguide):\n if isinstance(fguide, basestring):\n fguide = FeatureGuide(fguide)\n\n kwargs = {\n 'index_col': PandasDataset.index_from_feature_guide(fguide),\n 'usecols': fguide.all_names\n }\n return pd.read_csv(fname, **kwargs)",
"def load_data(dataset_path: str):\n data = arff.loadarff(dataset_path)\n data_frame = pd.DataFrame(data[0])\n return data_frame",
"def read_flow(filename):\n f = open(filename, 'rb')\n magic = np.fromfile(f, np.float32, count=1)\n data2d = None\n\n if 202021.25 != magic:\n print 'Magic number incorrect. Invalid .flo file'\n raise ValueError\n else:\n w = np.fromfile(f, np.int32, count=1)[0]\n h = np.fromfile(f, np.int32, count=1)[0]\n #print \"Reading %d x %d flo file\" % (h, w)\n data2d = np.fromfile(f, np.float32, count=2 * w * h)\n # reshape data into 3D array (columns, rows, channels)\n data2d = np.resize(data2d, (h, w, 2))\n f.close()\n return data2d",
"def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df",
"def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data",
"def parse_gff(g):\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)",
"def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())",
"def read_gff3_line(line):\n cols = line.strip().split('\\t')\n if ASSUME_OFFBYONE:\n cols[3] = str(int(cols[3]) - 1)\n known_fields = set()\n fields = {'seqid': cols[0], 'source': cols[1], 'type': cols[2],\n 'start': cols[3], 'end': cols[4], 'score': cols[5],\n 'strand': cols[6], 'phase': cols[7]}\n known_fields.update(fields.keys())\n attrlist = cols[8]\n attributes = dict()\n for attr in attrlist.split(';'):\n if not attr.strip():\n continue\n k, v = attr.strip().split('=')\n if k.lower() == 'dbxref':\n try:\n subkey, subvalue = v.split(':')\n except ValueError:\n if SHOW_PARSER_WARNING:\n sys.stderr.write('\\nWarning: skipping Dbxref value {} - no key! Line: {}'.format(v, line.strip()))\n continue\n assert subkey not in attributes, 'Sub-key already in attributes list: {} in line {}'.format(subkey, line)\n attributes[subkey] = subvalue.strip()\n known_fields.add(subkey)\n continue\n elif ',' in v:\n raise ValueError('List of values for key {}: {} in line {}'.format(k, v, line))\n else:\n # who knows what crazy stuff could be here...\n pass\n attributes[k] = v.strip()\n known_fields.add(k)\n fields.update(attributes)\n return fields, known_fields",
"def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame",
"def pt3_reader(filename):\n with open(filename, 'rb') as f:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Binary file header\n header_dtype = np.dtype([\n ('Ident', 'S16' ),\n ('FormatVersion', 'S6' ),\n ('CreatorName', 'S18' ),\n ('CreatorVersion', 'S12' ),\n ('FileTime', 'S18' ),\n ('CRLF', 'S2' ),\n ('Comment', 'S256' ),\n ('NumberOfCurves', 'int32' ),\n ('BitsPerRecord', 'int32' ), # bits in each T3 record\n ('RoutingChannels', 'int32' ),\n ('NumberOfBoards', 'int32' ),\n ('ActiveCurve', 'int32' ),\n ('MeasurementMode', 'int32' ),\n ('SubMode', 'int32' ),\n ('RangeNo', 'int32' ),\n ('Offset', 'int32' ),\n ('AcquisitionTime', 'int32' ), # in ms\n ('StopAt', 'uint32'),\n ('StopOnOvfl', 'int32' ),\n ('Restart', 'int32' ),\n ('DispLinLog', 'int32' ),\n ('DispTimeAxisFrom', 'int32' ),\n ('DispTimeAxisTo', 'int32' ),\n ('DispCountAxisFrom', 'int32' ),\n ('DispCountAxisTo', 'int32' ),\n ])\n header = np.fromfile(f, dtype=header_dtype, count=1)\n\n if header['FormatVersion'][0] != b'2.0':\n raise IOError((\"Format '%s' not supported. \"\n \"Only valid format is '2.0'.\") % \\\n header['FormatVersion'][0])\n\n dispcurve_dtype = np.dtype([\n ('DispCurveMapTo', 'int32'),\n ('DispCurveShow', 'int32')])\n dispcurve = np.fromfile(f, dispcurve_dtype, count=8)\n\n params_dtype = np.dtype([\n ('ParamStart', 'f4'),\n ('ParamStep', 'f4'),\n ('ParamEnd', 'f4')])\n params = np.fromfile(f, params_dtype, count=3)\n\n repeat_dtype = np.dtype([\n ('RepeatMode', 'int32'),\n ('RepeatsPerCurve', 'int32'),\n ('RepeatTime', 'int32'),\n ('RepeatWaitTime', 'int32'),\n ('ScriptName', 'S20' )])\n repeatgroup = np.fromfile(f, repeat_dtype, count=1)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Hardware information header\n hw_dtype = np.dtype([\n ('HardwareIdent', 'S16' ),\n ('HardwarePartNo', 'S8' ),\n ('HardwareSerial', 'int32'),\n ('SyncDivider', 'int32'),\n ('CFDZeroCross0', 'int32'),\n ('CFDLevel0', 'int32'),\n ('CFDZeroCross1', 'int32'),\n ('CFDLevel1', 'int32'),\n ('Resolution', 'f4'),\n ('RouterModelCode', 'int32'),\n ('RouterEnabled', 'int32')])\n hardware = np.fromfile(f, hw_dtype, count=1)\n\n rtr_dtype = np.dtype([\n ('InputType', 'int32'),\n ('InputLevel', 'int32'),\n ('InputEdge', 'int32'),\n ('CFDPresent', 'int32'),\n ('CFDLevel', 'int32'),\n ('CFDZCross', 'int32')])\n router = np.fromfile(f, rtr_dtype, count=4)\n\n # Time tagging mode specific header\n ttmode_dtype = np.dtype([\n ('ExtDevices', 'int32' ),\n ('Reserved1', 'int32' ),\n ('Reserved2', 'int32' ),\n ('InpRate0', 'int32' ),\n ('InpRate1', 'int32' ),\n ('StopAfter', 'int32' ),\n ('StopReason', 'int32' ),\n ('nRecords', 'int32' ),\n ('ImgHdrSize', 'int32')])\n ttmode = np.fromfile(f, ttmode_dtype, count=1)\n\n # Special header for imaging. How many of the following ImgHdr\n # array elements are actually present in the file is indicated by\n # ImgHdrSize above.\n ImgHdr = np.fromfile(f, dtype='int32', count=ttmode['ImgHdrSize'][0])\n\n # The remainings are all T3 records\n t3records = np.fromfile(f, dtype='uint32', count=ttmode['nRecords'][0])\n\n timestamps_unit = 1./ttmode['InpRate0']\n nanotimes_unit = 1e-9*hardware['Resolution']\n\n metadata = dict(header=header, dispcurve=dispcurve, params=params,\n repeatgroup=repeatgroup, hardware=hardware,\n router=router, ttmode=ttmode, imghdr=ImgHdr)\n return t3records, timestamps_unit, nanotimes_unit, metadata",
"def __init__(self, file_path: str):\n self._data: pd.DataFrame = self.read_input_and_split_tuples(file_path)",
"def read_data(fname, cols):\n df = (pd.read_csv(fname, header=None, sep=r\"\\s+\", comment=\"#\",\n names=cols, dtype=np.float64)\n .iloc[1:]) # First line is the total number of trees\n # Could reset_index, but we don't shuffle the DataFrame\n return df",
"def _dataframe_from_feather(fn, **kwargs):\n\treturn pd.read_feather(fn, **kwargs)",
"def parse_sequences_from_gff_metadata( file ):\n import pandas\n result = []\n for line in file:\n if line.startswith( '##sequence-region' ):\n parts = line.strip().split( \" \" )\n nameStartEnd = parts[-3:] # last 3 elements\n result.append({\n \"seqid\": nameStartEnd[0],\n \"start\": int( nameStartEnd[1] ),\n \"end\": int( nameStartEnd[2] )\n })\n elif not line[0] == '#':\n # quit when we meet the first non-metadata line\n break\n return pandas.DataFrame( result )",
"def input_dataframe(self, filename, **kwargs):\n\n # Set defaults for index_col and header\n kwargs['index_col'] = kwargs.pop('index_col', 0)\n kwargs['header'] = kwargs.pop('header', 0)\n\n # Use any kwargs for this function and any file settings from default\n file_settings = self.file_format_settings.copy()\n file_settings.update(kwargs)\n\n # Update the file settings with anything that's in file-specific overrides\n if filename in self.file_format_overrides:\n file_settings.update(self.file_format_overrides[filename])\n\n # Load a dataframe\n return pd.read_csv(self.input_path(filename), **file_settings)",
"def parse_data(self, path_to_file):\n\n line_dict, rel_dict = self.create_dicts(path_to_file)\n \n line_df = self.create_dataframe(line_dict, ['line'])\n rel_df = self.create_dataframe(rel_dict, ['relation'])\n\n line_df['relation'] = rel_df['relation']\n\n return (line_df, rel_df)",
"def read_mumax3_table(filename):\n \n table = pd.read_csv(filename, sep='\\t')\n table.columns = ' '.join(table.columns).split()[1::2]\n \n return table",
"def read_data(filepath):\n df = pd.read_csv(filepath)\n return df"
] |
[
"0.7671827",
"0.68784577",
"0.68401384",
"0.6406066",
"0.63211954",
"0.60683244",
"0.5996769",
"0.5982758",
"0.5978171",
"0.59537876",
"0.5921268",
"0.5912143",
"0.5900824",
"0.59005004",
"0.58473027",
"0.5828351",
"0.5819334",
"0.5815665",
"0.57983077",
"0.5795755",
"0.5755246",
"0.5752821",
"0.57248247",
"0.57196414",
"0.57063633",
"0.5687702",
"0.5686372",
"0.56862295",
"0.56842166",
"0.5674254"
] |
0.7767748
|
0
|
GFF3 files from the Ensembl FTP site list sequences and their lengths in the file metadata. This function parses this information and returns it as a pandas dataframe. Its use may be specific to the Ensembl files.
|
def parse_sequences_from_gff_metadata( file ):
import pandas
result = []
for line in file:
if line.startswith( '##sequence-region' ):
parts = line.strip().split( " " )
nameStartEnd = parts[-3:] # last 3 elements
result.append({
"seqid": nameStartEnd[0],
"start": int( nameStartEnd[1] ),
"end": int( nameStartEnd[2] )
})
elif not line[0] == '#':
# quit when we meet the first non-metadata line
break
return pandas.DataFrame( result )
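
For context, the function accepts any iterable of text lines, so a typical call on a gzipped Ensembl GFF3 download might look like the sketch below; the file name is a hypothetical placeholder and not part of the original snippet.

import gzip

# Hypothetical Ensembl download; any GFF3 opened in text mode works,
# since the parser only iterates over lines until the metadata ends.
with gzip.open( "Homo_sapiens.GRCh38.110.gff3.gz", "rt" ) as f:
    sequences = parse_sequences_from_gff_metadata( f )

# One row per '##sequence-region' directive: seqid, start, end.
print( sequences.head() )
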
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _read_gff3_using_pandas( file ):\n import pandas\n result = pandas.read_table(\n file,\n comment = '#',\n names = [ 'seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes' ],\n na_values = \".\",\n dtype = {\n 'seqid': str,\n 'source': str,\n 'type': str,\n 'start': int,\n 'end': int,\n 'score': float,\n 'strand': str,\n 'phase': str,\n 'attributes': str\n }\n )\n return result",
"def gff3_to_dataframe( file ):\n result = _read_gff3_using_pandas( file )\n extract_attributes_to_columns( result, ['ID', 'Parent', 'Name' ] )\n return result",
"def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n 
cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds",
"def parseGFF3(filename):\n\n # Parse with transparent decompression\n openFunc = gzip.open if filename.endswith(\".gz\") else open\n with openFunc(filename) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n parts = line.strip().split(\"\\t\")\n # If this fails, the file format is not standard-compatible\n assert len(parts) == len(gffInfoFields)\n # Normalize data\n normalizedInfo = {\n \"seqid\": None if parts[0] == \".\" else urllib.unquote(parts[0]),\n \"source\": None if parts[1] == \".\" else urllib.unquote(parts[1]),\n \"type\": None if parts[2] == \".\" else urllib.unquote(parts[2]),\n \"start\": None if parts[3] == \".\" else int(parts[3]),\n \"end\": None if parts[4] == \".\" else int(parts[4]),\n \"score\": None if parts[5] == \".\" else float(parts[5]),\n \"strand\": None if parts[6] == \".\" else urllib.unquote(parts[6]),\n \"phase\": None if parts[7] == \".\" else urllib.unquote(parts[7]),\n \"attributes\": parseGFFAttributes(parts[8])\n }\n # Alternatively, you can emit the dictionary here, if you need\n # mutability:\n # yield normalizedInfo\n yield GFFRecord(**normalizedInfo)",
"def read_fusion_domains(fus_path, wt_path, fusion_annot, min_domain_len=25, min_domain_frac=0.5):\n # read in domains\n domains = pd.read_csv(fus_path, sep='\\t')\n wt_domains = pd.read_csv(wt_path, sep='\\t')\n\n # filter out transmembrane helix\n is_tm = domains[\"Domain_ID\"].str.startswith('TMhelix')\n domains = domains[~is_tm].copy()\n\n # filter out short domains\n domain_len = domains['Protein_end'] - domains['Protein_start']\n domains = domains[domain_len>=min_domain_len].copy()\n\n # merge in info about where the fusion happened\n domains = pd.merge(domains, fusion_annot[['ID', 'CodonPos1', 'CodonPos2']].drop_duplicates('ID'), on='ID', how='left')\n domains['origin of domain'] = \"3'_gene domain\"\n domains.loc[domains['Protein_end']<=domains['CodonPos1'], 'origin of domain'] = \"5'_gene domain\"\n\n # separate out protein ID\n domains['ID_five'] = domains['ID'].str.split('-', expand=True)[0]\n domains['ID_three'] = domains['ID'].str.split('-', expand=True)[1].str.split(':', expand=True)[0]\n\n # check 5' domains\n domains_five = domains[domains[\"origin of domain\"]==\"5'_gene domain\"].copy()\n is_good_domain = []\n for ix, row in domains_five.iterrows():\n domain_end = row['Protein_end']\n fus_codon_pos = row['CodonPos1']\n if fus_codon_pos==domain_end:\n pid = row['ID_five']\n is_pid = wt_domains['PROT_ID']==pid\n is_loc = (fus_codon_pos>=wt_domains['Protein_start']) & (fus_codon_pos<=wt_domains['Protein_end'])\n tmp_df = wt_domains.loc[is_pid & is_loc,:]\n if len(tmp_df)==0:\n is_good_domain.append(True)\n continue\n else:\n tmp = tmp_df.iloc[0]\n frac_domain = (fus_codon_pos - tmp['Protein_start']) / (tmp['Protein_end'] - tmp['Protein_start'])\n if frac_domain>=min_domain_frac:\n is_good_domain.append(True)\n else:\n is_good_domain.append(False)\n else:\n is_good_domain.append(True)\n domains_five = domains_five.loc[is_good_domain,:]\n # check 3' domains\n domains_three = domains[domains[\"origin of domain\"]==\"3'_gene domain\"].copy()\n is_good_domain = []\n for ix, row in domains_three.iterrows():\n domain_start = row['Protein_start']\n fus_codon_pos = row['CodonPos2']\n if fus_codon_pos==domain_start:\n pid = row['ID_three']\n is_pid = wt_domains['PROT_ID']==pid\n is_loc = (fus_codon_pos>=wt_domains['Protein_start']) & (fus_codon_pos<=wt_domains['Protein_end'])\n tmp_df = wt_domains.loc[is_pid & is_loc,:]\n if len(tmp_df)==0:\n is_good_domain.append(True)\n continue\n else:\n tmp = tmp_df.iloc[0]\n frac_domain = (tmp['Protein_end']-fus_codon_pos) / (tmp['Protein_end'] - tmp['Protein_start'])\n if frac_domain>=min_domain_frac:\n is_good_domain.append(True)\n else:\n is_good_domain.append(False)\n else:\n is_good_domain.append(True)\n domains_three = domains_three.loc[is_good_domain,:]\n # append results\n domains = pd.concat([domains_five, domains_three])\n\n # aggregate all domains for one fusion into a single line\n domains = domains[~domains['Domain_name'].isnull()]\n domains_agg = domains.groupby(['ID', 'origin of domain'])['Domain_name'].agg(lambda x: ','.join(x))\n domains_agg = domains_agg.reset_index()\n domains_agg = domains_agg.pivot(index='ID', columns='origin of domain', values='Domain_name')\n domains_agg = domains_agg.reset_index()\n\n return domains_agg",
"def get_fhcrc_flow_manifest(self) -> pd.DataFrame:\n return pd.read_csv(self.sequence_data_paths.manifest_path / Path(\"FHCRC_flow_manifest.csv.gz\"), index_col=0) # type: ignore",
"def readMaf( options, data ):\n regex = 's\\s+([\\w\\d\\-]+?)\\.([\\w\\d\\.\\+\\-]+?)\\s+(\\d+)\\s+(\\d+)\\s+([-+])\\s+(\\d+)\\s+([\\-actgurykmswbdhvnACTGURYKMSWBDHVN]+)'\n pat = re.compile( regex )\n mf = open( options.maf )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n for line in mf:\n if line.startswith('#HPL'):\n d = line.split(' ')\n # example line: \"#HPL=12049 5=1 3=1 SPL=123412 S5=0 S3=12\"\n # there will be one hpl line per options.other line\n # in blocks that contain the options.ref\n hpl = int( d[0][5:] ) # comment at start of this field\n hFive = int( d[1][2] )\n hThree = int( d[2][2] )\n spl = int( d[3][4:] ) # no comment at start of this field\n hplList.append( { 'hpl': hpl, 'hFive': hFive, \n 'hThree': hThree, 'spl': spl } )\n continue\n if line.startswith('s'):\n line = line.strip()\n ml, order = extractMafLine( line, order, pat, options, data )\n if ml is None:\n sys.stderr.write( 'regexp fail on file %s line: \\'%s\\'\\n'\n 'Regex: \\'%s\\'\\n' % ( options.maf, line, regex ) )\n sys.exit( 1 )\n if ml == 'notOurGenome':\n continue\n if ml.length != len( ml.sequence ):\n sys.stderr.write( 'Error while working on file %s :\\n '\n 'printed sequence length (%d) not equal to actual sequence '\n 'length (%d) ref genome:%s other genome:%s line below:\\n%s\\n' % \n ( options.maf, ml.length, len( ml.sequence ), options.ref, options.other, line ) )\n sys.exit( 1 )\n mafLineList.append( ml )\n else:\n # end of the block\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )",
"def parse_gff(g):\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)",
"def mel_gff_list():\n\tmod_gff3 = sys.argv[1]\n\twith open(mod_gff3, 'r') as f:\n\t\tgff = [line.strip().split('\\t') for line in f]\n\t\tf.close()\n\treturn gff\n\t#gff_list ex/:\n\t#['2L', 'FlyBase', 'gene', '7529', '9484', '.', '+', '.', 'ID=FBgn0031208;Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'], ['2L', 'FlyBase', 'gene', '9839', '21376', '.', '-', '.', 'ID=FBgn0002121;Name=l(2)gl;fullname=lethal (2) giant larvae;Alias=Lgl,lgl,lethal giant larvae,lethal giant larve,lethal giant larva,lethal(2)giant larvae,Complementation group 2.1,Lethal Giant Larvae,dlgl,p127l(2)gl,LGL,l(2) giant larva,CG2671,L(2)GL,p127,l(2)giant larvae,D-LGL,l(2),gl,l[[2]]gl,l-gl,lethal-giant-larvae,Lethal giant larvae,Lethal (2) giant larvae,L(2)gl,Lethal (2) giant larva,Lethal-giant-larvae,MENE (2L)-B,lethal(2) giant larvae,p127[l(2)gl],lethal(2)-giant larvae,lethal-2-giant larvae,l(2) giant larvae,lethal- giant-larvae,Lethal(2)giant larvae,Lethal-2-giant larvae;Ontology_term=SO:0000010,SO:0000087,GO:0005578,GO:0005886,GO:0007269,GO:0016082,GO:0008021,GO:0008283,GO:0016334,GO:0016336,GO:0016333,GO:0016335,GO:0016327,GO:0005829,GO:0045175,GO:0016332,GO:0045184,GO:0007399,GO:0005938,GO:0005737,GO:0007179,GO:0045197,GO:0045196,GO:0002009,GO:0005918,GO:0008105,GO:0045167,GO:0008104,GO:0045746,GO:0007423,GO:0008285,GO:0001738,GO:0016323,GO:0007391,GO:0005856,GO:0030154,GO:0042127,GO:0005614,GO:0045159,GO:0035072,GO:0007559,GO:0045200,GO:0008360,GO:0019991,GO:0007406,GO:0051726,GO:0051668,GO:0007314,GO:0016325,GO:0030036,GO:0030863,GO:0035070,GO:0055059,GO:0035212,GO:0035293,GO:0090163,GO:0048730,GO:0000132,GO:0098725,GO:0060429,GO:0007293,GO:0045176,GO:0072697,GO:0000149,SO:0000548,GO:0005920,GO:0017022,GO:0004860,GO:0006469;Dbxref=FlyBase:FBan0002671,FlyBase_Annotation_IDs:CG2671,INTERPRO:IPR015943,GB_protein:AAN10503,GB_protein:AAG22256,GB_protein:AAN10502,GB_protein:AAN10501,GB_protein:AAF51570,GB_protein:AAG22255,INTERPRO:IPR017986,GB:AA246243,GB:AW942062,GB:AY051654,GB_protein:AAK93078,GB:BH809482,GB:CZ471313,GB:CZ482024,GB:CZ484691,GB:M17022,GB_protein:AAA28671,GB_protein:AAA28672,GB:X05426,GB_protein:CAA29007,UniProt/Swiss-Prot:P08111,INTERPRO:IPR000664,INTERPRO:IPR001680,INTERPRO:IPR013577,GB_protein:AGB92324,UniProt/TrEMBL:M9NCX1,UniProt/TrEMBL:M9PBJ2,OrthoDB7_Drosophila:EOG7CW2GT,OrthoDB7_Diptera:EOG7DRVK2,GB_protein:AFH03479,GB_protein:AFH03478,GB_protein:AFH03481,GB_protein:AFH03480,EntrezGene:33156,INTERPRO:IPR013905,BDGP_clone:PC00404,OrthoDB7_Insecta:EOG7SRGKH,OrthoDB7_Arthropoda:EOG7ZDD82,OrthoDB7_Metazoa:EOG79W94C,InterologFinder:33156,FlyAtlas:CG2671-RB,BIOGRID:59421,Fly-FISH:CG2671,GenomeRNAi:33156,INTERACTIVEFLY:/cytoskel/lethl2g1.htm;gbunit=AE014134;derived_computed_cyto=21A5-21A5'],\n\t# ['2L', 'FlyBase', 'ncRNA', '286383', '288292', '.', '+', '.', 'ID=FBtr0347595;Name=CR46263-RA;Parent=FBgn0267996;Dbxref=FlyBase_Annotation_IDs:CR46263-RA;score_text=Weakly Supported;score=0'], ['2L', 'FlyBase', 'gene', '287252', 
'289144', '.', '-', '.', 'ID=FBgn0025686;Name=Amnionless;fullname=Amnionless ortholog;Alias=FBgn0031246,CG11592,CK02467,BEST:CK02467,dAMN,Amnionless;Ontology_term=SO:0000010,SO:0000087,GO:0046331,GO:0097206,GO:0016021,GO:0097017;Dbxref=FlyBase:FBan0011592,FlyBase_Annotation_IDs:CG11592,GB_protein:AAF51514,GB:AA141784,GB:CZ468687,UniProt/TrEMBL:Q9VPN2,GB_protein:AGB92350,OrthoDB7_Drosophila:EOG7CGKJK,EntrezGene:33199,BDGP_clone:IP03221,OrthoDB7_Diptera:EOG774804,INTERPRO:IPR026112,OrthoDB7_Insecta:EOG7G266G,OrthoDB7_Arthropoda:EOG7P65FW,OrthoDB7_Metazoa:EOG7ZGX2W,InterologFinder:33199,FlyAtlas:CG11592-RA,GenomeRNAi:33199;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292419', '293222', '.', '+', '.', 'ID=FBgn0031247;Name=CG11562;Alias=FBgn0063011,BcDNA:RE44650;Ontology_term=SO:0000010,SO:0000087,GO:0005739,GO:0003674,GO:0008150;Dbxref=FlyBase:FBan0011562,FlyBase_Annotation_IDs:CG11562,GB_protein:AAF51513,GB:AI520524,GB:AI945841,GB:AY119645,GB_protein:AAM50299,GB:BE662187,GB:BI358003,UniProt/TrEMBL:Q9VPN3,OrthoDB7_Drosophila:EOG7HTW3H,OrthoDB7_Diptera:EOG7200K9,EntrezGene:33200,BDGP_clone:RE44650,OrthoDB7_Insecta:EOG7B9454,OrthoDB7_Arthropoda:EOG7RK278,OrthoDB7_Metazoa:EOG78H3X3,FlyAtlas:CG11562-RA,INTERPRO:IPR031568,Fly-FISH:CG11562,GenomeRNAi:33200;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292959', '294681', '.', '-', '.', 'ID=FBgn0017457;Name=U2af38;fullname=U2 small nuclear riboprotein auxiliary factor 38;Alias=FBgn0010626,U2AF38,U2AF,dU2AF38,DU2AF38,CG3582,dU2AF[38],l(2)06751,u2af38,U2AF 38;Ontology_term=GO:0089701,SO:0000010,SO:0000087,GO:0000398,GO:0008187,GO:0005681,GO:0005686,GO:0000381,GO:0005634,GO:0003729,GO:0007052,GO:0071011,GO:0008380,GO:0000166,GO:0046872;Dbxref=FlyBase:FBan0003582,FlyBase_Annotation_IDs:CG3582,GB_protein:AAF51512,GB:AA264081,GB:AA820431,GB:AC004115,GB:AC008371,GB:AI061776,GB:AI455418,GB:AI944553,GB:AQ026079,GB:AY058537,GB_protein:AAL13766,GB:U67066,GB_protein:AAB17271,UniProt/Swiss-Prot:Q94535,INTERPRO:IPR000504,INTERPRO:IPR000571,INTERPRO:IPR009145,INTERPRO:IPR012677,GB_protein:AGB92351,UniProt/TrEMBL:M9PBM1,OrthoDB7_Drosophila:EOG7FRM2M,OrthoDB7_Diptera:EOG700KS6,EntrezGene:33201,BDGP_clone:LD24048,OrthoDB7_Insecta:EOG76QSHP,OrthoDB7_Arthropoda:EOG7KMJ7T,OrthoDB7_Metazoa:EOG70089G,apodroso:10448-U2af38[k14504],InterologFinder:33201,FlyAtlas:CG3582-RA,BIOGRID:59457,Fly-FISH:CG3582,GenomeRNAi:33201;gbunit=AE014134;derived_computed_cyto=21B7-21B8']]",
"def read_FIREXAQ_files(path, folder='merge', var=''):\n df_list=[]\n flag_list=[]\n files2use = sorted(glob.glob(f'{path}/{folder}/*{var}*.ict'))\n for infileN, infile in enumerate( files2use ):\n with open(infile) as thefile:\n try:\n header= np.array([next(thefile) for x in range(90) ])\n except:\n continue\n start = header[6].replace(',',' ').split()\n start_date = datetime_( int( start[0] ),\n int( start[1] ),\n int( start[2] ))\n # Find where the header ends and values begin - manually narrowed down\n for nskip in range(675,680):\n try:\n fh = np.loadtxt(infile, skiprows=nskip, delimiter=',')\n break\n except:\n continue\n thefile = open(infile,'r')\n c = thefile.readlines()\n column_names = c[nskip-1].replace(' ','').split(',')\n df = pd.DataFrame(fh, index=fh[:,0], columns=column_names)\n\n # Use a different approach for\n if (var=='thru'):\n df = find_FIREXAQ_times(df, start_date, UseTimeStart=True)\n else:\n df = find_FIREXAQ_times(df, start_date, UseTimeStart=False)\n # Include the RF from the file name #\n # NOTE: research flight (RF) ID not included in filename or files,\n # so using filenumber instead. This will not work if reading\n # merge file (var = 'thru').\n df['FileNumber'] = infileN\n\n df_list.append(df)\n df = pd.concat(df_list)\n return df",
"def get_ngs_resequencing_file(self) -> pd.DataFrame:\n return pd.read_csv(self.sequence_data_paths.ngs_path / Path(\"ngs_dataset.csv.gz\"), index_col=0) # type: ignore",
"def metadata(filename):\n import numpy as np\n import pandas as pd\n\n infos = \"\"\"IGRAID 1- 11 Character\nWMOID 13- 17 Integer\nNAME 19- 48 Character\nNAMFLAG 50- 50 Character\nLATITUDE 52- 60 Real\nLATFLAG 62- 62 Character\nLONGITUDE 64- 72 Real\nLONFLAG 74- 74 Character\nELEVATION 76- 81 Real\nELVFLAG 83- 83 Character\nYEAR 85- 88 Integer\nMONTH 90- 91 Integer\nDAY 93- 94 Integer\nHOUR 96- 97 Integer\nDATEIND 99- 99 Integer\nEVENT 101-119 Character\nALTIND 121-122 Character\nBEFINFO 124-163 Character\nBEFFLAG 164-164 Character\nLINK 166-167 Character\nAFTINFO 169-208 Character\nAFTFLAG 209-209 Character\nREFERENCE 211-235 Character\nCOMMENT 236-315 Character\nUPDCOM 316-346 Character\nUPDDATE 348-354 Character\n\"\"\"\n\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n\n elif it == 'Real':\n it = 'float'\n\n else:\n it = 'int'\n\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data",
"def read_fermi_3fhl():\n with open('fermi_3fhl.csv') as fh:\n lines = fh.readlines()\n colnames = lines[0].strip().split(',')\n data = []\n for line in lines[1:]:\n parts = line.strip().split(',')\n row = dict(zip(colnames, parts))\n for name in ['Flux', 'GLON', 'GLAT', 'Signif_Avg']:\n row[name] = float(row[name])\n data.append(row)\n return data",
"def get_SequencedFischerData():\n filteredOn = { #For reference, to know how dataset has been filtered\n 'minimalSurvivalLastFollowup': 365*5\n }\n print('Filtering settings:', filteredOn)\n\n #Metadata\n metadata = pd.read_table(\n gzip.open(\n os.path.join(\n privatedir,\n \"R2_grabbed_data/Fischer498/metadata_src/GSE49710_series_matrix.txt.gz\"\n ), 'rt', encoding=\"UTF-8\"\n ), skiprows=47,skipfooter=44799-66,engine='python',header=None\n )\n metadata.index = metadata[0].apply(lambda x: x.replace('!',''))\n del metadata[0]\n metadata = metadata.T\n i = count()\n metadata.columns = [c.replace('ch1',str(next(i))) if c.startswith('Sample_char') else c for c in metadata.columns]\n del i, metadata['Sample_source_name_ch1'], metadata['Sample_status'], metadata['Sample_organism_ch1']\n metadata.columns = [metadata[c][metadata.first_valid_index()].split(':')[0].replace(' ','_')\n if c.startswith('Sample_char') else c for c in metadata.columns]\n metadata = metadata.applymap(lambda x: x.split(': ')[1] if ': ' in x else x)\n \n metadatasurv = pd.read_table(\n gzip.open(\n os.path.join(\n privatedir,\n \"R2_grabbed_data/Fischer498/metadata_src/GSE62564_series_matrix.txt.gz\"\n ),'rt',encoding=\"UTF-8\"\n ), skiprows=51,skipfooter=102-74,engine='python'\n )\n metadatasurv.index = metadatasurv['!Sample_geo_accession'].apply(lambda x: x.replace('!',''))\n del metadatasurv['!Sample_geo_accession']\n metadatasurv = metadatasurv.T\n metadatasurv.columns = range(len(metadatasurv.columns))\n metadatasurv = metadatasurv[list(range(7,21))]\n metadatasurv.columns = [v.split(':')[0].replace(' ','_') for v in metadatasurv.ix[metadatasurv.first_valid_index()]]\n metadatasurv = metadatasurv.applymap(lambda x: x.split(': ')[1] if not x is np.nan else x)\n metadatasurv.index = metadata.index #Both sample sets are sorted the same way, but different gse names\n assert sum(metadatasurv.age == metadata.age_at_diagnosis) == len(metadata)\n metadata['overall_survival'] = metadatasurv.os_day.apply(int)\n metadata['eventfree_survival'] = metadatasurv.efs_day.apply(int)\n del metadatasurv\n metadata.Sample_geo_accession = metadata.Sample_geo_accession.apply(lambda x: x.lower())\n metadata.set_index(\"Sample_geo_accession\",inplace=True)\n metadata.Sample_title = metadata.Sample_title.apply(lambda x: x[5:].replace(' patient ',''))\n metadata.death_from_disease = metadata.death_from_disease == '1'\n metadata.progression = metadata.progression == '1'\n\n #Expression data\n ## Array expression\n exprdata_A = pd.read_table(os.path.join(privatedir,'R2_grabbed_data/Fischer498/GSE49710_R2.txt'))\n exprdata_A.index = exprdata_A.pop('#H:hugo')\n del exprdata_A['probeset']\n \n ## RNAseq gene expression\n exprdata_G = pd.read_table(\n gzip.open(\n os.path.join(\n privatedir,\n 'R2_grabbed_data/Fischer498/sequencedData/GSE49711_SEQC_NB_TUC_G_log2.txt.gz'\n ), 'rt',encoding=\"UTF-8\"\n ),\n index_col = '00gene_id'\n )\n exprdata_G.columns = [\n metadata.reset_index().set_index('Sample_title').ix[c.split('_')[1]].Sample_geo_accession\n for c in exprdata_G.columns\n ]\n\n ## RNAseq transcript level expression\n exprdata_T = pd.read_table(\n gzip.open(\n os.path.join(\n privatedir,\n 'R2_grabbed_data/Fischer498/sequencedData/GSE49711_SEQC_NB_TUC_T_log2.txt.gz'\n ), 'rt',encoding=\"UTF-8\"\n ),\n index_col = '00transcript_id'\n )\n exprdata_T.columns = exprdata_G.columns\n\n ## RNAseq junction level expression\n exprdata_J = pd.read_table(\n gzip.open(\n os.path.join(\n privatedir,\n 
'R2_grabbed_data/Fischer498/sequencedData/GSE49711_SEQC_NB_TUC_J_log2.txt.gz'\n ), 'rt',encoding=\"UTF-8\"\n ),\n index_col = 'sample_ID'\n )\n exprdata_J.columns = exprdata_G.columns\n\n # Default expression dataset -> exprdata_G\n exprdata = exprdata_G\n \n #aCGH\n aCGH = pd.read_table(\n os.path.join(privatedir,'R2_grabbed_data/Fischer498/SEQC_aCGH/SEQC_aCGH_all_146.txt')\n )\n geosearch = metadata[['Sample_title']].copy()\n geosearch.reset_index(inplace=True)\n geosearch.set_index('Sample_title',inplace=True)\n aCGH.Sample = aCGH.Sample.apply(lambda x: geosearch.ix[x].Sample_geo_accession)\n del geosearch\n aCGH['log2ratio'] = (aCGH.CN/2).apply(np.log2)\n #Convert coordinates to hg38\n lo = LSD.get_lift19to38()\n aCGH['Start38'] = aCGH.T.apply(lambda x: lo.convert_coordinate(x.Chromosome,x.Start)).apply(lambda x: x[0][1] if x else np.nan)\n aCGH['End38'] = aCGH.T.apply(lambda x: lo.convert_coordinate(x.Chromosome,x.End)).apply(lambda x: x[0][1] if x else np.nan)\n del lo, aCGH['Start'], aCGH['End']\n aCGH = aCGH.dropna().copy()\n #Assign genes to regions\n genannot = LSD.get_ensemblGeneannot()\n aCGH['genes'] = aCGH.T.apply(lambda x: {f.attributes['gene_name'][0] for f in genannot.region('{}:{}-{}'\n .format(x.Chromosome[3:],int(x.Start38),int(x.End38)),featuretype='gene')})\n aCGH['nrGenes'] = aCGH.genes.apply(len)\n del genannot\n #To set cut offs look at hist => aCGH.log2ratio.hist(ax=ax,bins='auto')\n aCGH['annotation'] = aCGH.log2ratio.apply(lambda x: 'gain' if x > 0.3 else ('loss' if x < -0.3 else 'normal'))\n\n # Filter patients whom according to metadata survived, but had last follow up before 5 years\n metadata = metadata[metadata.death_from_disease |\n (~metadata.death_from_disease &\n (metadata.overall_survival > filteredOn['minimalSurvivalLastFollowup']))]\n exprdata_A = exprdata_A[metadata.index]\n exprdata_G = exprdata_G[metadata.index]\n exprdata_T = exprdata_T[metadata.index]\n exprdata_J = exprdata_J[metadata.index]\n aCGH = aCGH[aCGH.Sample.isin(metadata.index)]\n \n return Dataset(**locals())",
"def index_gff(gff, logger):\n f_in = open(gff, \"r\")\n gene_start_stop_dict = dict()\n gene_scaff_dict = dict()\n gene_first_exon_dict = dict()\n gene_direction = dict()\n gene_gff_line = dict()\n gene_set = set([])\n for line in f_in:\n if line.startswith(\"#\"):\n continue\n if not line.strip():\n continue\n assert len(line.split(\"\\t\")) == 9 , \"GFF fields wrong length should be 9\"\n scaff, source, feature, start, stop, score, \\\n direction, frame, gene_info = line.split(\"\\t\")\n gene = split_gene_name(gene_info)\n scaff = scaff.rstrip()\n if feature == \"gene\":\n gene_gff_line[gene] = line\n gene_set.add(gene)\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_start_stop_dict[gene] = start_stop\n gene_scaff_dict[gene] = scaff\n gene_direction[gene] = direction\n if not gene in gene_first_exon_dict.keys():\n if feature == \"exon\" or feature == \"CDS\":\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_first_exon_dict[gene] = start_stop\n f_in.close()\n logger.info(\"Number of genes = %d\", len(gene_set))\n return gene_start_stop_dict, gene_first_exon_dict, \\\n gene_scaff_dict, gene_direction, gene_set, gene_gff_line",
"def read_filelist_into_dataframe(file_list, pref_name, junk=5, return_idex_version=False,postname_len=4,backjunk=0):\n pref_len = len(pref_name)\n\n q,sq = read_twocol_data(file_list[0],junk=junk,backjunk=backjunk,shh=True)\n df_all_sq = pd.DataFrame(index=q)\n for i in range(len(file_list)):\n if postname_len != 0:\n this_column_name = file_list[i][pref_len:-postname_len]\n else:\n this_column_name = file_list[i][pref_len:]\n\n q,sq = read_twocol_data(file_list[i],junk=junk,backjunk=backjunk,shh=True)\n df_all_sq[this_column_name] = sq\n \n \n return df_all_sq",
"def parse_gff3(filename):\n genes = OrderedDict()\n transcript_to_locus = {}\n\n count_per_transcript = defaultdict(lambda: 1)\n\n with open(filename) as gff_in:\n for line in gff_in:\n # Skip comments\n if not line.strip()[0] == '#':\n line_data = parse_line(line)\n\n # Parts (e.g. CDS or Exon) might not have an ID. One will be added here\n if ID_ATTRIBUTE not in line_data['attributes'].keys() and line_data['feature'] in PARTS_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n counter_id = line_data['attributes'][PARENT_ATTRIBUTE] + '.' + line_data['feature'] + '.'\n new_id = counter_id + str(count_per_transcript[counter_id])\n count_per_transcript[counter_id] += 1\n line_data['attributes'][ID_ATTRIBUTE] = new_id\n\n # Every line needs a valid ID\n if ID_ATTRIBUTE in line_data['attributes'].keys():\n\n if line_data['feature'] in LOCUS_FEATURES:\n genes[line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'transcripts': OrderedDict()\n }\n\n elif line_data['feature'] in TRANSCRIPT_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n\n if parent_id in genes.keys():\n genes[parent_id]['transcripts'][line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'parts': []\n }\n\n transcript_to_locus[line_data['attributes'][ID_ATTRIBUTE]] = \\\n line_data['attributes'][PARENT_ATTRIBUTE]\n\n elif line_data['feature'] in PARTS_FEATURES:\n\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n grandparent_id = transcript_to_locus[parent_id]\n\n genes[grandparent_id]['transcripts'][parent_id]['parts'].append(line_data)\n\n return genes",
"def get_flare_list(start, end, source='NASA', file_format=\"hessi_flare_list_%Y%m.fits\", inc=relativedelta(months=+1)):\r\n\r\n formats = {\r\n 5: \"%y-%m\", # YY-mm\r\n 6: \"%Y%m\", # YYYYmm\r\n 7: \"%Y-%m\", # YYYY-mm\r\n 8: \"%Y%m%d\", # YYYYmmdd\r\n 10: \"%Y-%m-%d\", # YYYY-mm-dd\r\n 19: \"%Y-%m-%dT%H:%M:%S\", # YYYY-mm-ddThh:MM:ss\r\n }\r\n try:\r\n start_dt = datetime.strptime(start, formats[len(start)])\r\n end_dt = datetime.strptime(end, formats[len(end)])\r\n except (KeyError, ValueError):\r\n raise ValueError(\"invalid datetime\")\r\n\r\n format_str = file_format[file_format.index(\"%\"):file_format.rindex(\"%\") + 2]\r\n cur_format = start_dt.strftime(format_str)\r\n end_format = end_dt.strftime(format_str)\r\n\r\n if source in KNOWN_FLARE_LIST_SOURCES:\r\n source = KNOWN_FLARE_LIST_SOURCES[source]\r\n\r\n cur_dt = start_dt\r\n result = pd.DataFrame()\r\n while cur_format <= end_format:\r\n file = file_format.replace(format_str, cur_format)\r\n cur_dt = cur_dt + inc\r\n cur_format = cur_dt.strftime(format_str)\r\n\r\n # allow missing files with a warning, e.g. there is no file for 2014-07\r\n try:\r\n result = result.append(read_flare_list_file(source + file), ignore_index=True)\r\n except HTTPError as e:\r\n if e.code == 404:\r\n warnings.warn(\"Skipped: \" + file + \" (\" + str(e.code) + \" \" + e.msg + \")\")\r\n else:\r\n raise\r\n except FileNotFoundError as e:\r\n warnings.warn(\"Skipped: \" + file + \" (file not found)\")\r\n\r\n # filter results for more detailed time constraints (if applicable)\r\n if len(end) < 8:\r\n end_dt += relativedelta(months=+1, microseconds=-1) # add month -1ms to address inclusive right bound\r\n elif len(end) <= 10:\r\n end_dt += relativedelta(days=+1, microseconds=-1) # add day if end date was specified on a day-basis\r\n\r\n left_bound = result['END_TIME'].searchsorted(start_dt, 'left') # END_TIME >= start_dt\r\n right_bound = result['START_TIME'].searchsorted(end_dt, 'right') # START_TIME <= end_dt (inclusive)\r\n return result[left_bound:right_bound]",
"def readData(f):\n line = f.readline()\n fieldnames = [x.strip() for x in line.split(\",\")]\n line = f.readline().strip()\n data = []\n while line != \"\":\n if line[0] != \"#\":\n fields = line.split(\",\")\n data.append((fields[0], [extractSI(v)[0] for v in fields[1:]]))\n line = f.readline().strip()\n # Man, working out this next incantation out was non-trivial!\n # They really want you to be snarfing data in csv or some other format they understand!\n res = pd.DataFrame.from_items(data, columns=fieldnames[1:], orient=\"index\")\n return res",
"def gff3_parsed (gff3_file, sam_dic):\n\n #A special type of dictionary in which the values were saved in a list\n gff_dic = defaultdict(list)\n\n gff3_file = open(arg.gff3_infile)\n gff3_dic = {}\n\n gene_dic = {}\n exon_list = []\n gene_idx = 1\n\n counter_1 = 0\n counter_2 = 0\n counter_3 = 0\n counter_4 = 0\n counter_5 = 0\n counter_6 = 0\n counter_7 = 0\n idx_pseudogene = 0\n\n #A dictionary\n gene_idexes = {\"gene\": gene_idx, \"exon\": gene_idx,\n \"pseudogene\": \"pseudogene\"}\n\n\n for line in gff3_file:\n if line.startswith(\"##\"):\n pass\n elif line.startswith(\"#!\"):\n pass\n else:\n line_information = line.strip().split()\n\n # Make a dic with the genes present on Gg genome and its anotattion\n if line_information[2] == (\"gene\"):\n # deal with the PREVIOUS gene\n #This peace of code add to the gff3_dic(the main dic of gff3 file)\n #the information of which are the exons of one particular gene\n #Note: this happends at the same time that the gene information\n #were parsed\n if exon_list:\n gff3_dic[gene_idx][\"exon_list\"] = exon_list\n gene_idx += 1\n\n exon_list = []\n #parse the gene information and add this information to a new dic (gff3_dic)\n #with all the information related to the genes present in gff3 file (Cg_Nara5)\n # deal with CURRENT gene\n scaffold = line_information [0]\n gene_beg = line_information[3]\n gene_end = line_information [4]\n gene_loc = [gene_beg, gene_end]\n gene_strand = line_information[6]\n gene_information = line_information [8]\n gene_information = line.strip().split(\";\")\n gene_description = [gene_information[2]]\n gff3_dic[gene_idx] = {\"scaffold\": scaffold,\n \"gene_range\": gene_loc,\n \"description\": gene_description,\n \"exon_list\": None,\n \"strand\": gene_strand}\n\n # Make a list with the exons-genes present on Gg genome and its anotattion\n # If in this line the \"gene\" keyword is not present but the \"exon\"\n #keyword are append the range information to the exon list which\n # will be added to main gff3 dic\n elif line_information[2] == (\"exon\"):\n exon_beg = line_information[3]\n exon_end = line_information [4]\n exon_loc = (exon_beg, exon_end)\n exon_list.append(exon_loc)\n\n exon_information = line_information [8]\n exon_information = line.strip().split()[8].split(\";\")[0]\n gff3_dic[gene_idx][\"exon_reference\"] = exon_information\n #At the same time - regardless the previous code if the line has\n #any of this keywords the information of the gene_range were added\n # to the gff_dic.\n if line_information[2] in [\"gene\", \"exon\", \"pseudogene\"]:\n\n gene_range = (line_information[3], line_information[4])\n\n #Note: this peace of code happends because the gene description\n #of the gene is not the same as the exon description. Therefore,\n #the gene description has to be recovered\n\n if line_information[2] == \"gene\":\n gene_information = line_information [8]\n gene_information = line.strip().split(\";\")\n gene_description = [gene_information[2]]\n\n # Example:\n # gff_dic[scaffold1] = [[1, \"gene\", (82, 1159), description],\n # 1, \"exon\", (82, 603), description],\n # 2, \"gene\", (1440, 4998), description\n # pseudogene_idx, pseudogene, (1999, 3000)]]\n\n #To keep only the information regardless gene_idx (gene index)\n #to the gene or the exons present in this gene. 
When I have\n #pseudogenes, the gene index is replaced for pseudogene\n if line_information[2] in [\"exon\", \"gene\"]:\n idx = gene_idx\n else:\n idx_pseudogene += 1\n idx = \"pseudogene_\"+ str(idx_pseudogene)\n\n #add the previous information in a different format in which\n #the key is the sacffold and the values are the index (to easly\n #acess the information present in gff3 dictionary), the keyword\n #(gene, exon, pseudogene), the range, and the description.\n #All these informations will be used to perfome the SNP range\n # discover only within the true scaffold and not in all the scaffolds\n #present in the gff3 file. Making the code mor efficient and realibel\n gff_dic[line_information[0]].append([idx,\n line_information[2],\n gene_range,\n gene_description])\n\n # Add last exon list to last gene index\\\n else:\n if exon_list:\n gff3_dic[gene_idx][\"exon_list\"] = exon_list\n\n print (\"Step 3a - Parse the .gff3 file -- Done\")\n\n\n for locus, info_dict in sam_dic.items():\n\n # Get all info from current scaffold\n # scaffold_info is a list containing all genes, exons and pseudogenes\n # of the scaffold in sam_dic\n\n scaffold_info = gff_dic[info_dict[\"scaffold\"]]\n #we create two different \"values\" in the sam_dic dictionary with the len\n #of the real snp location in which all the \"values\" begin with \"intergenic\" or None\n #and as we make the check codes this values will be replaced for new\n # values or will be remain like this\n\n info_dict[\"element_type\"] = [\"intergenic\"] * len(info_dict[\"real_snp_localization\"])\n info_dict[\"element_range\"] = [None] * len(info_dict[\"real_snp_localization\"])\n info_dict[\"gene_index\"] = \"intergenic\"\n\n # Check if locus is in any range\n # The enumerate function give the value of the \"value\" as well as the\n #position of the value. Example: l = [\"a\", \"b\", \"c\"]\n #enumerate (l) --- (0, \"a\"); (1, \"b\"); (2, \"c\")\n #pos - the position of the snp in the list\n #snp - is the real snp localization under analyse\n\n # Get the position of the snp in the list. 
This position will\n # be used to create a key for the gene_inf_dic.\n for pos, snp in enumerate(info_dict[\"real_snp_localization\"]):\n # The \"element\" is the several lists present in the gff_dic.\n #Note: all the lists regardless the type has exactly the same length.\n # Example : [10459, \"gene\", (\"18930\", \"23805\"), [\"description=LysM domain-containing protein\"]\n #So for each list we will check if the SNP is in the range\n for element in scaffold_info:\n element_beg = int(element[2][0])\n element_end = int(element[2][1])\n element_range= range(element_beg, element_end)\n\n\n # YAY, one of the SNP matches one element of the scaffold\n if snp in element_range:\n\n info_dict[\"gene_index\"] = element[0]\n\n # ELEMENT KEY:\n # \"exon\": The SNP is in a coding region\n # \"gene\": The SNP is in an intron\n # \"pseudogene\": The SNP is in a pseudogene\n info_dict[\"element_type\"][pos] = element[1]\n\n info_dict[\"element_range\"][pos] = element[2]\n\n info_dict[\"description\"] = element[3]\n\n\n\n #Get the main statistics from our dataset\n\n for locus, locus_info in sam_dic.items():\n\n element_type = locus_info[\"element_type\"]\n\n # Adding information for loci in a intergenic region\n #The set return an object with only 1 \"element\" in that case \"intergenic\"\n #So if the locus has 2 snps 1 in a intergenic region and other in a gene\n # this locus will not count as a intergenic locus, because the set will\n #have two elenets {\"intergenic\", \"gene\"} and not only 1 {\"intergenic\"}.\n #Note: The set works for each element_type present in sam_dic (loop)\n if set(element_type) == {\"intergenic\"}:\n counter_1 += 1\n\n # Adding information for SNPs in intergenic region\n #This counter gives the number of times the intergenic word appears\n counter_2 += element_type.count(\"intergenic\")\n\n # Adding information for loci in pseudogenes\n if \"pseudogene\" in element_type:\n counter_3 += 1\n\n #Adding information for SNPs in pseudogene\n counter_4 += element_type.count(\"pseudogene\")\n\n #Adding information for loci in genes\n #As previously refered the gene information were recorded in two different formats\n #gene- when the SNP were in a gene but not in a exon (aka intron)\n #exon - when the SNP were in a gene and in a specific exon\n #So in order to have the statistics for the gene we need to search\n #booth keywords on the element_type . Not in this particular case the set\n #doesn\"t work because the set don\"t has an order (gene, exon) or (exon, gene)\n\n if \"gene\" in element_type or \"exon\" in element_type:\n counter_5 += 1\n\n #Adding information for SNPs in gene\n\n counter_6 += element_type.count(\"exon\") + element_type.count(\"gene\")\n\n #Adding information for SNPs in exons\n\n counter_7 += element_type.count(\"exon\")\n\n\n\n print(\"Data resume:\")\n print(\"Number of loci in a non coding region: {}\".format(counter_1))\n print(\"Number of SNPs in a non coding region: {}\".format(counter_2))\n\n print(\"Number of loci located in pseudogenes:{}\".format(counter_3))\n print(\"Number of SNPs located in pseudogenes:{}\".format(counter_4))\n\n print(\"Number of loci located in genes: {}\".format(counter_5))\n print(\"Number of SNPs located in genes: {}\".format(counter_6))\n print(\"Number of SNPs located in exons: {}\".format(counter_7))\n\n\n\n# print(gff3_dic[6207])\n return (sam_dic, gff3_dic)",
"def lfp_extract(files):\r\n \r\n if 'lfpdata' in locals():\r\n del lfpdata\r\n \r\n for i, file in enumerate(files):\r\n \r\n ### load data\r\n matdat = sio.loadmat(file, variable_names = ['lfpsegs', 'lfpdata', 'fs', 'chnAreas'], \r\n struct_as_record = False, squeeze_me = True) \r\n \r\n \r\n \r\n ### extract the noused channels, only calculate once\r\n if i == 0:\r\n \r\n # chnAreas\r\n chnAreas = matdat['chnAreas'].tolist()\r\n \r\n # fs: sample rate\r\n fs = matdat['fs'] \r\n \r\n \r\n\r\n ### dealing lfp data\r\n \r\n # lfp (np.ndarray): nareas * ntemp * ntrials or ntemp * nareas * ntrials\r\n if 'lfpdata' in matdat.keys():\r\n lfpdata_1file = matdat['lfpdata']\r\n elif 'lfpsegs' in matdat.keys():\r\n lfpdata_1file = matdat['lfpsegs']\r\n\r\n n1, n2, n3 = lfpdata_1file.shape\r\n if n1 > n2: # ntemp * nareas * ntrials\r\n lfpdata_1file = np.transpose(lfpdata_1file, (1, 0, 2))\r\n \r\n # concatenate to lfpdata for all files\r\n if 'lfpdata' not in locals():\r\n lfpdata = lfpdata_1file\r\n else:\r\n lfpdata = np.concatenate((lfpdata, lfpdata_1file), axis = 2)\r\n \r\n \r\n return lfpdata, chnAreas, fs",
"def read_xyz(filename, freq):\n\n\n#xyz file\n\n Atoms = []\n Coordinates = []\n\n xyz = open(filename)\n frame = 0\n while True:\n\n n_atoms = xyz.readline()\n\n if n_atoms == '':\n break\n else:\n n_atoms = int(n_atoms)\n title = xyz.readline()\n\n if frame%freq==0:\n atoms, coordinates = read_frame(xyz, n_atoms)\n Coordinates.append(coordinates)\n Atoms.append(atoms)\n\n else:\n read_frame(xyz, n_atoms)\n frame+=1\n\n return Atoms, Coordinates",
"def parse_standard_gff3(\n gff: Path,\n gffutil_parse_args: Optional[GffutilsParseArgs] = GffutilsParseArgs(),\n parse_func: Optional[Callable[[FeatureDB, List[str]], Iterable[AnnotationCollectionModel]]] = default_parse_func,\n gffutil_transform_func: Optional[Callable[[Feature], Feature]] = None,\n db_fn: Optional[str] = \":memory:\",\n) -> Iterable[ParsedAnnotationRecord]:\n db = gffutils.create_db(str(gff), db_fn, transform=gffutil_transform_func, **gffutil_parse_args.__dict__)\n if sum(db.count_features_of_type(i) for i in db.featuretypes()) == 0:\n raise EmptyGFF3Exception(\"Parsing this GFF3 led to zero features. Is it empty or corrupted?\")\n logger.info(f\"Parsed {gff}\")\n for i in db.featuretypes():\n logger.info(f\"Found feature type {i} with {db.count_features_of_type(i)} features\")\n # get the sequences\n chrom_query = db.execute(\"SELECT DISTINCT seqid FROM features\")\n chroms = [x[\"seqid\"] for x in chrom_query]\n logger.info(f\"Found {len(chroms)} sequences\")\n for annot in parse_func(db, chroms):\n yield ParsedAnnotationRecord(annot)",
"def load_gtf_data(fil):\n ofunc = get_open_function(fil)\n\n gene_data = {}\n exon_data = {}\n with ofunc(fil, 'rt') as fh:\n for line in fh:\n if line.startswith('#'):\n continue\n cols = line.rstrip('\\r\\n').split('\\t')\n fclass = cols[2]\n if fclass == 'gene':\n gene_id, gene_type = extract_metadata(cols[8])\n gene_data[gene_id] = gene_type\n elif fclass == 'exon':\n gene_id, gene_type = extract_metadata(cols[8])\n if gene_id not in exon_data:\n exon_data[gene_id] = []\n val = (int(cols[3]), int(cols[4]))\n exon_data[gene_id].append(val)\n return gene_data, exon_data",
"def read_in_gtf(infile_path, gtf_rows_to_skip): \n\tdf = pd.read_csv(infile_path, compression='gzip', sep='\\t', dtype=str, header=None, skiprows=range(gtf_rows_to_skip))\n\n\tcols = ['#chrom', 'source', 'feature', 'chromStart', 'chromEnd', 'score', 'strand', 'frame', 'transcript_id']\n\tdf.columns = cols\n\n\treturn df",
"def get_metadata(filepath):\n\tsong_id = os.path.basename(filepath).split('.')[0]\n\t\n\ttry:\n\t\tid3_ = EasyID3(filepath)\n\texcept Exception as e:\n\t\tprint(\"Error encountered with file {}: {}\".format(filepath, e))\n\t\treturn None\n\ttry:\n\t\tmp3_ = MP3(filepath) \n\texcept Exception as e:\n\t\tprint(\"Error encountered with file {}: {}\".format(filepath, e))\n\t\treturn None\n\tdf = pd.DataFrame([song_id], columns=['id'])\n\tdf['album'] = id3_.get('album', ['unknown'])[0]\n\tdf['genre'] = id3_.get('genre', ['unknown'])[0]\n\tdf['duration'] = mp3_.info.length\n\treturn df.set_index('id').reset_index()",
"def _read_ascfiles(ascfiles:list):\n\n output = pd.DataFrame()\n\n for ascfile in ascfiles:\n # Find number of lines to skip - interesting output starts after\n # line labelled 'MODE'\n n_lines = 0\n with open(ascfile, 'r') as fid:\n for line in fid:\n n_lines += 1\n if 'MODE' in line:\n break\n\n try:\n output = output.append(pd.read_csv(ascfile, sep='\\s+',\n skiprows=n_lines, header=None))\n except:\n print(ascfile, ' is empty.')\n\n if output.empty:\n return output\n\n output.columns = ['n', 'mode', 'l', 'w_rad_per_s', 'w_mHz', 'T_sec',\n 'grV_km_per_s', 'Q', 'RaylQuo']\n output.drop(['mode', 'RaylQuo'], axis=1, inplace=True)\n output.reset_index(drop=True, inplace=True)\n\n\n return output",
"def cafa4_mapping() -> pd.DataFrame:\n # List of the paths considered in the function\n paths = [\n \"cafa4.tar.gz\",\n \"CAFA4-export/TargetFiles/sp_species.9606.tfa\"\n ]\n if not any(os.path.exists(path) for path in paths):\n # Downloading the url to the given path\n download(\n url=\"https://www.biofunctionprediction.org/cafa-targets/CAFA4-export.tgz\",\n path=paths[0]\n )\n # Extracting the acquire\n shutil.unpack_archive(paths[0], \".\")\n # Delete the archived file\n os.remove(paths[0])\n # Parse the file and retrieve the IDs from the fasta file\n f = open(paths[1], \"r\")\n df = pd.DataFrame(\n (\n line[1:-1].split(\" \")\n for line in f.readlines()\n if line.startswith(\">\")\n ),\n columns=[\n \"cafa4_id\",\n \"uniprot_id\"\n ]\n )\n f.close()\n # Return the obtained IDs\n return df",
"def ingest():\n\n base_path = '/home/mnichol3/Coding/wx-scripts/wtlma'\n\n flash_files = ['flash-out-05232019-2050.txt',\n 'flash-out-05232019-2100.txt',\n 'flash-out-05232019-2110.txt',\n 'flash-out-05232019-2120.txt',\n 'flash-out-05232019-2130.txt',\n 'flash-out-05232019-2140.txt',\n 'flash-out-05232019-2150.txt']\n\n df_cols = ['start', 'end', 'duration', 'area', 'ctr_alt', 'ctr_lat', 'ctr_lon',\n 'tot_energy']\n\n flash_df = pd.read_csv(join(base_path, flash_files[0]), sep=',', names=df_cols)\n\n for f in flash_files[1:]:\n curr_path = join(base_path, f)\n curr_df = pd.read_csv(curr_path, sep=',', names=df_cols)\n flash_df = pd.concat([flash_df, curr_df], ignore_index=True)\n\n return flash_df",
"def read_gff3(self,gff3_file):\r\n with open(gff3_file) as infile:\r\n set = None\r\n for line in infile:\r\n if line[0] == '#':\r\n if line[:3] == '###' and set:\r\n self.sets.append(set)\r\n set = None\r\n if line.startswith(\"##sequence-region\"):\r\n splitline = line.split()\r\n self.sequence_regions[splitline[1]] = line\r\n #TODO: properly deal with comment lines.\r\n self.sets.append(line)\r\n else:\r\n line = GFF3_line(set,line)\r\n #adding the feature individually\r\n self.features_id[line.attributes.id] = line\r\n if line.attributes.name:\r\n if line.attributes.name in self.features_name:\r\n #TODO: find a way to handle features that have the same name.\r\n pass#print(line.attributes.id, line.attributes.name, self.features_name[line.attributes.name].attributes.id)\r\n else:\r\n self.features_name[line.attributes.name] = line\r\n #adding the set of features\r\n if line.type == \"region\" and not line.attributes.parent:\r\n #this feature has been deemed redundant and is not used in recent versions of the gff3,\r\n if set:\r\n #this is the first element of a set,\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set._flanking_region = line\r\n elif line.type == \"flanking_region\":\r\n if set and set.flanking_region:\r\n # this can also be the first element of a set,\r\n # if the set already has a flanking region\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set.flanking_region = line\r\n elif line.type == \"region\" and line.attributes.parent:\r\n set.gt_seq_region.append(line)\r\n elif line.type == \"PCR_product\":\r\n set.pcr_product.append(line)\r\n elif line.type == \"forward_primer\":\r\n set.forward_primer.append(line)\r\n elif line.type == \"reverse_primer\":\r\n set.reverse_primer.append(line)\r\n elif line.type == \"SNP\":\r\n set.snp.append(line)\r\n else:\r\n pass#print(\"line of type {} not added.\".format(line.type))\r\n if set:\r\n # there was no '###' at the end of the file so the last set needs to be added.\r\n self.sets.append(set)"
] |
[
"0.5787096",
"0.5700914",
"0.5652596",
"0.55933994",
"0.55396926",
"0.5514567",
"0.546654",
"0.5403991",
"0.53897697",
"0.5375177",
"0.534354",
"0.5343404",
"0.5285535",
"0.52755135",
"0.52441376",
"0.52279055",
"0.5219072",
"0.5205338",
"0.52000976",
"0.51452714",
"0.5143794",
"0.51173586",
"0.5112521",
"0.5102277",
"0.5089787",
"0.5087556",
"0.50709695",
"0.5067778",
"0.5034766",
"0.503413"
] |
0.6558859
|
0
|
Use multiprocessing to speed up tree construction.
|
def construct_trees_with_mp(self, nodes):
        # Use half of the available CPU cores as worker processes.
        cores = multiprocessing.cpu_count() // 2
        pool = multiprocessing.Pool(cores)
        # Split the node list into one chunk per worker; the last chunk
        # absorbs the remainder.
        new_nodes = []
        n_node_per_core = self.n_node // cores
        for i in range(cores):
            if i != cores - 1:
                new_nodes.append(nodes[i * n_node_per_core: (i + 1) * n_node_per_core])
            else:
                new_nodes.append(nodes[i * n_node_per_core:])
        # Build the per-node trees in parallel and merge the partial results.
        self.trees = {}
        trees_result = pool.map(self.construct_trees, new_nodes)
        for tree in trees_result:
            self.trees.update(tree)
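
The method above depends on self.construct_trees and self.n_node, so a self-contained sketch of the same chunk-and-pool.map pattern with a placeholder worker is shown below; the worker body, node list, and core count are illustrative assumptions rather than part of the original class.

import multiprocessing

def build_partial_trees(node_chunk):
    # Stand-in for self.construct_trees: give every node a trivial one-node tree.
    return {node: {node: [node]} for node in node_chunk}

if __name__ == "__main__":
    nodes = list(range(100))
    cores = max(1, multiprocessing.cpu_count() // 2)
    per_core = max(1, len(nodes) // cores)
    # Last chunk takes the remainder, mirroring the method above.
    chunks = [nodes[i * per_core:] if i == cores - 1 else nodes[i * per_core:(i + 1) * per_core]
              for i in range(cores)]
    trees = {}
    with multiprocessing.Pool(cores) as pool:
        for partial in pool.map(build_partial_trees, chunks):
            trees.update(partial)
    print(len(trees))  # 100 trivial trees, one per node
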
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]",
"def _initialize_trees(self):",
"def main(simulated, backbone, taxonomy, output, cores, chunksize):\n pool = multiprocessing.Pool(processes=cores)\n click.echo(\"Using %d parallel cores\" % cores, err=True)\n taxonomy = dendropy.Tree.get_from_path(taxonomy, schema=\"newick\")\n tn = taxonomy.taxon_namespace\n click.echo(\"Taxonomy OK\", err=True)\n\n r1 = pool.apply_async(get_tree, [backbone, tn])\n r2 = pool.apply_async(get_tree, [simulated, tn])\n\n backbone = r1.get()\n click.echo(\"Backbone OK\", err=True)\n simulated = r2.get()\n click.echo(\"Simulated OK\", err=True)\n\n bb_tips = get_tip_labels(backbone)\n st_tips = get_tip_labels(simulated)\n\n # Start calculating ASAP\n wrap = functools.partial(analyze_taxon, bb_tips, st_tips, backbone, simulated)\n nnodes = len(taxonomy.internal_nodes(exclude_seed_node=True))\n if chunksize is None:\n chunksize = max(5, math.ceil(nnodes / cores / 10))\n # We use preorder because the root is going to take the longest to\n # run calculations. Allocating things to cores takes a non-negigible\n # amount of time so we want the root to be running for the longest.\n it = pool.imap_unordered(wrap, taxonomy.preorder_internal_node_iter(exclude_seed_node=True), chunksize=chunksize)\n\n writer = csv.writer(output)\n writer.writerow(\n \"node taxonomy_tips backbone_tips simulated_tips backbone_monophyletic simulated_monophyletic backbone_birth simulated_birth backbone_death simulated_death warnings\".split()\n )\n\n with click.progressbar(it, width=12, length=nnodes) as prog:\n for result in prog:\n if result:\n writer.writerow(result)",
"def runPidGen ( tree , ## initial tree/chain to be updated \n pidgen , ## PidGen object \n newpid , ## name of new PID variable \n seed = None , ## random seed\n silent = False , ## silent ?\n variants = False , ## add alterbvative models?\n parallel = False , ## use parallel processing?\n **kwargs ) : ## arguments for parallel processing (WorkManager)\n\n assert isinstance ( tree , ROOT.TTree ) and tree , \"Invalid 'tree' argument!\"\n assert isinstance ( pidgen , PidGen ) , \"Invalid 'pidgen' argument!\"\n\n assert not newpid in tree.branches() ,\"`Branch' %s already exists!\" % newpid \n\n old_branches = set ( tree.branches() ) | set ( tree.leaves() )\n\n\n ## parallel processing?\n if parallel and isinstance ( tree , ROOT.TChain ) and 1 < len ( tree.files() ) :\n \n from ostap.trees.trees import Chain\n ch = Chain ( tree ) \n cname = tree.name\n \n ## create the task \n task = AddPidGen ( newpid = newpid ,\n pidgen = pidgen ,\n seed = seed ,\n variants = variants )\n \n wmgr = WorkManager ( silent = silent , **kwargs )\n trees = ch.split ( max_files = 1 )\n \n wmgr.process ( task , trees )\n \n new_chain = ROOT.TChain ( cname )\n for f in ch.files : new_chain.Add ( f )\n \n ## sequential processing \n else :\n\n if kwargs :\n logger.warning ( \"runPidGen: ignore arguments : %s\" % [ k for k in kwargs.keys( ) ] )\n \n from ostap.utils.utils import root_random_seed\n with root_random_seed ( seed ) :\n \n from ostap.math.make_fun import make_fun3\n the_function = ( make_fun3 ( pidgen ) , \n pidgen.pt_log_var ,\n pidgen.eta_var ,\n pidgen.ntrk_log_var )\n \n ## add new branch \n new_chain = tree.add_new_branch ( newpid ,\n the_function , \n verbose = not silent ,\n report = False )\n \n if variants : \n vars = pidgen.variants ()\n for var in progress_bar ( vars , silent = silent , description = ' %+d variants:' % len ( vars ) ) : \n varpg = PidGen ( pt_log_var = pidgen.pt_log_var ,\n eta_var = pidgen.eta_var ,\n ntrk_log_var = pidgen.ntrk_log_var ,\n config = pidgen.config ,\n dataset = pidgen.dataset ,\n variant = var ,\n silent = True )\n ## add new variable \n new_chain = runPidGen ( new_chain ,\n varpg ,\n '%s_%s' % ( newpid , var ) ,\n seed = seed ,\n silent = True ,\n variants = False ) \n ## final summary table \n if not silent :\n \n title = 'PidGen(%s): configuration&statistics' % pidgen.variant \n logger.info ( '%s:\\n%s' % ( title , pidgen.table ( title , prefix = '# ' ) ) )\n\n new_branches = set ( new_chain.branches() ) | set ( new_chain.leaves() )\n new_branches = new_branches - old_branches\n if new_branches :\n n = len ( new_branches )\n if 1 == n : title = 'Added %s branch to TTree/TChain' % n \n else : title = 'Added %s branches to TTree/TChain' % n \n table = new_chain.table ( new_branches , title = title , prefix = '# ' )\n logger.info ( '%s:\\n%s' % ( title , table ) ) \n \n return new_chain",
"def construct_trees(self, nodes):\n trees = {}\n for root in tqdm.tqdm(nodes):\n # note that nodes is an uniquely ordered set\n # tree = {0: {0 : [nb_1, nb_2, ..., nb_k], nb_1: [0, ...]}, 1 : {1: [nb_1,...], nb_1 : [..]},...}\n trees[root] = {}\n trees[root][root] = [root]\n # print('test...', trees[root][root])\n used_nodes = set()\n # queue has the form as following queue([root] for root in tqdm.tqdm(nodes)\n # with each node, we construct the tree rooted at that node, denoted as queue(['root'])\n queue = collections.deque([root]) # deque([0]) -> deque([0,1])\n while len(queue) > 0:\n cur_node = queue.popleft()\n used_nodes.add(cur_node)\n for sub_node in self.graph[cur_node]:\n # sub_node is not ordered\n if sub_node not in used_nodes:\n trees[root][cur_node].append(sub_node)\n trees[root][sub_node] = [cur_node]\n queue.append(sub_node)\n used_nodes.add(sub_node)\n return trees",
"def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}",
"def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. 
TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)",
"def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)",
"def improve_tree(tree, freq_dict):\n # todo",
"def build(self, dist_matrix, class_map, cluster_naming_function):\n # Update attributes\n self.orig_dist_matrix = dist_matrix \n self.class_map = class_map \n self.work_dist_matrix = dist_matrix\n\n # Get number of elements\n n = dist_matrix.shape[0]\n\n if PROGRESS:\n print 'Starting tree build now!'\n\n # Loop through n-3 elements & add nodes in tree\n for i in range(n - 3):\n\n if DEBUG:\n print 'Distance Matrix'\n pprint(self.work_dist_matrix)\n print\n\n # Calculate q_matrix matrix from distances\n q_matrix = _calculate_q_matrix(self.work_dist_matrix)\n \n if DEBUG:\n print 'Q matrix:'\n pprint(q_matrix)\n print\n\n # Find pair of elements (i,j) where q_matrix(i,j) has the lowest value\n (min_col, min_row) = _find_min_pair(q_matrix)\n\n # Add nodes i,j, and cluster node of i and j to this tree\n # And update working distance matrix accordingly\n new_cluster_name = cluster_naming_function(min_row, min_col, self.cluster_map)\n self.cluster_leaves(min_row, min_col, new_cluster_name) \n\n if DEBUG:\n print 'Tree:'\n pprint(nx.clustering(self.tree))\n pprint(self.cluster_dictionary)\n print '\\n\\n'\n \n # View graph after each step for debugging\n if VIEW_ALL:\n labels = {i[0]: i[0]+'/'+i[1]['c'] for i in njt.tree.nodes(data=True)}\n layout = nx.spring_layout(njt.tree)\n nx.draw_networkx(njt.tree, pos=layout, with_labels=True, labels=labels) #class labels\n plt.show()\n\n if PROGRESS:\n print str(i + 1) + \" down, \" + str(n-i-4) + \" to go...\"\n \n # Add remaining branch lengths and nodes from working distance matrix to this tree \n previous_cluster = new_cluster_name\n mid_edge_length = 0.5 * (self.work_dist_matrix.iat[0, 1]\n + self.work_dist_matrix.iat[0, 2]\n - self.work_dist_matrix.iat[1, 2])\n (node1, node2) = (self.work_dist_matrix.columns[0], self.work_dist_matrix.columns[1])\n new_cluster = cluster_naming_function(node1, node2, self.cluster_map)\n self.cluster_leaves(node1, node2, new_cluster)\n # Viz only scales based on a weight attribute, so we set that as the length\n self.tree.add_edge(previous_cluster, new_cluster, length=mid_edge_length, weight=mid_edge_length)\n\n if DEBUG:\n print 'Final tree:'\n pprint(nx.clustering(self.tree))\n pprint(self.cluster_dictionary)",
"def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()",
"def makeTTree():\n \n tree = TTree(\"tree\",\"tree\")\n px = array('d',[0])\n py = array('d',[0])\n pz = array('d',[0])\n pi = array('i',[0])\n tree.Branch(\"x\",px,\"x/D\")\n tree.Branch(\"y\",py,\"y/D\")\n tree.Branch(\"z\",pz,\"y/D\")\n tree.Branch(\"i\",pi,\"y/I\")\n for i in range(500):\n px[0] = gRandom.Gaus(0,3)\n py[0] = gRandom.Uniform()*30 - 15\n pz[0] = gRandom.Gaus(0,5)\n pi[0] = i%3\n tree.Fill()\n return tree",
"def compute_tree(self, tree):\n g_list_val, g_list_h = self._build_graph(tree) # return theano variable of each node\n list_val = self._traversal_tree(tree) #\n f = theano.function(g_list_val, g_list_h, allow_input_downcast=True)\n result = f(*list_val)\n return result",
"def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up",
"def tree_construct(self, *args, **kwargs):\n l_files = []\n d_constructCallback = {}\n fn_constructCallback = None\n d_probe = {}\n l_range = []\n\n for k, v in kwargs.items():\n if k == 'l_files': l_files = v\n if k == 'constructCallback': fn_constructCallback = v\n if k == 'd_probe': d_probe = v\n\n if d_probe: l_files = d_probe['l_files']\n index = 0\n total = len(l_files)\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(l_files, desc = ' Constructing tree')\n else:\n l_range = l_files\n for l_series in l_range:\n if len(l_series):\n str_path = os.path.dirname(l_series[0])\n l_series = [ os.path.basename(i) for i in l_series]\n # self.simpleProgress_show(index, total)\n self.d_inputTree[str_path] = l_series\n if fn_constructCallback:\n kwargs['path'] = str_path\n d_constructCallback = fn_constructCallback(l_series, **kwargs)\n self.d_inputTreeCallback[str_path] = d_constructCallback\n self.d_outputTree[str_path] = \"\"\n index += 1\n return {\n 'status': True,\n 'd_constructCallback': d_constructCallback,\n 'totalNumberOfAllSeries': index,\n 'd_probe': d_probe\n }",
"def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))",
"def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward",
"def _tree_query_parallel_helper(tree, *args, **kwargs):\n return tree.query(*args, **kwargs)",
"def do_threadedgen(self, args):\n\t\tfor lang in self.languages:\n\t\t\tt = threading.Thread(name = lang.name + \" hierarchies\", target = self.wrap_semaphore(getattr), args= (lang, \"hierarchyLengths\"))\n\t\t\tprint(\"spawning thread to generate hierarchies for \" + lang.name)\n\t\t\tt.start()\n\t\t\tself.generate_threads.append(t)",
"def build(self):\n # weights to apply to training samples, updated on each\n # iteration of the boosting algo, normalised to 1\n sigWeights = np.ones(self.nSig, dtype=float)\n bkgWeights = np.ones(self.nBkg, dtype=float)\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight \n\n # Weight of each tree, strong classifers have higher weight\n self.treeWeights = np.zeros(self.ntrees, dtype=float)\n\n for i in xrange(self.ntrees):\n\n # build new tree\n newTree = Tree()\n newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))\n newTree.build()\n self.dTrees.append(newTree) \n\n # evaluate trees\n # keep track of each event\n err = 0.0\n sigWrong = np.zeros(self.nSig)\n bkgWrong = np.zeros(self.nBkg)\n\n for j in range(self.nSig):\n if newTree.classify(np.array((self.sigData[j,])))<0:\n sigWrong[i]=1\n err+=sigWeights[j]\n\n for j in range(self.nBkg):\n if newTree.classify(np.array((self.bkgData[j,])))>0:\n bkgWrong[i]=1\n err+=bkgWeights[j]\n\n alpha = self.beta*math.log((1.0-err)/err)\n print err,alpha\n corFactor = math.exp(-alpha)\n wrongFactor = math.exp(alpha)\n\n if (err<1e-20 or err >= 0.5):\n print \"SOEMTHING WRONG!!\"\n\n self.treeWeights[i] = alpha\n\n # reweight training samples\n for j in range(self.nSig):\n if sigWrong[j]:\n sigWeights[j]*=wrongFactor\n else :\n sigWeights[j]*=corFactor\n\n for j in range(self.nBkg):\n if bkgWrong[j]:\n bkgWeights[j]*=wrongFactor\n else :\n bkgWeights[j]*=corFactor\n\n # normalise weights\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight",
"def analyze_trees(fname, only_mb=False, slow_mb=False):\n cols = get_col_names(fname)\n tnums = get_tree_nums(fname)\n df = read_data(fname, cols)\n df['tree'] = make_tree_col(df, tnums)\n if slow_mb:\n df['TotalMass_mmp'] = False\n if HAVE_PBAR:\n tnums = tqdm(tnums, desc='Main Branches')\n for tn in tnums:\n mmps = main_trees(df.loc[df.tree == tn])\n mmps = np.isin(df.loc[df.tree == tn].index, mmps)\n df.loc[df.tree == tn, 'TotalMass_mmp'] = mmps\n else:\n df['TotalMass_mmp'] = main_trees_quick(df)\n\n df = verify_main_branches(df)\n if only_mb:\n return df.loc[df.TotalMass_mmp == 1]\n return df",
"def process_tree(tree):\n c = circuit()\n l = line()\n names = {}\n procedures = []\n for lst in tree.children:\n print(lst)\n if type(lst[0]) is str:\n names[lst[0]] = lst[1]\n else:\n procedures.append(lst)\n print(names)\n #print(procedures)\n\n for proc in procedures:\n\n proc_elements_names = proc[0]\n proc_name = proc[1]\n\n #print(proc_elements_names)\n #print(proc_name)\n\n if proc_name == \"set_mode\":\n mode_name = proc_elements_names[0]\n if mode_name != \"draw-mode\": \n c.set_mode(mode_name)\n elif mode_name == \"draw-mode\":\n l1 = line()\n # draw mode is different from other modes\n for element in names:\n e = CompleteElement(element)\n e.set_other_attrs(names[element])\n e.process_other_attrs()\n l1.addElement(e)\n c.connectInSeries(l1)\n c.set_mode(\"draw-mode\")\n \n \n if proc_name == \"series\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n l = l1\n c.connectInSeries(l)\n #raise SyntaxError(\"Alias {0} referrenced before assignment\".format(item[0]))\n\n elif proc_name == \"parallel\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n c.connectInParallel(l1)\n l1 = line()\n\n\n elif proc_name == \"add_parallel\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n l1 = line()\n l1.addElement(names[new_element])\n c.connection.append(l1)\n\n\n elif proc_name == \"add_series\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n for ln in c.connection:\n for e in ln.elements:\n if names[old_element] == e:\n ln.addElement(names[new_element])\n\n\n c.evaluate(\"output.png\")\n #print(c)",
"def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)",
"def get_move(self, state):\n # this method should only be called when self is real root.,so that's here where we can should use mutiprocess\n if self._root.is_leaf(): # no expanded children yet\n action_probs, _ = self._policy(state)\n self._root.expand(action_probs)\n\n the_children = self._root._children\n i = 0\n sorted_children = sorted(the_children.items(), key=lambda act_node: act_node[1].get_value(self._c_puct))\n for child_node in sorted_children:\n i += 1\n child_tree = MCTS(policy_value_fn,root=child_node[1])\n state_copy = copy.deepcopy(state)\n state_copy.do_move(child_node[0])\n visits_count = 0\n for j in range(0,relu(1200-i*20),10): # at least run one time\n child_tree._playout(copy.deepcopy(state_copy))\n visits_count += 1\n self._root.update(-child_tree.get_root_node().last_leafvalue,visits_count=visits_count) # update real root\n child_tree.get_root_node().set_parent(self._root) # to link the sub tree\n\n '''\n for n in range(self._n_playout):\n # get top n (assumed to be 6) nodes from children\n # step1 let all children of root have chance to run in parallel\n # adjust the round count of children by value\n if n%6 == 0:\n the_children = self._root._children\n top_n = sorted(the_children.items(),key=lambda act_node: act_node[1].get_value(self._c_puct))[:6]\n for child_node in top_n:\n # child_tree = MCTS(policy_value_fn,copy.deepcopy(child_node)) # use copy because we will use it in multiprocess\n child_tree = MCTS(policy_value_fn,\n child_node) \n state_copy = copy.deepcopy(state)\n state_copy.do_move(child_node[0])\n child_tree._playout(state_copy)\n self._root.update(-child_tree.get_root_node().last_leafvalue) # update real root\n child_tree.get_root_node().set_parent(self._root) # to link the sub tree\n # self._root.get_children()[child_node[0]] = child_tree.get_root_node() # copy sub tree\n '''\n\n '''\n return max(self._root._children.items(),\n # key=lambda act_node: act_node[1].get_visits())[0]\n key=lambda act_node: act_node[1].get_value(self._c_puct))[0]\n '''\n\n for n in range(300):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n return max(self._root._children.items(),\n key=lambda act_node: act_node[1].get_value(self._c_puct))[0]",
"def build_tree(self, df=None, tree=None, depth=0):\n if df is None:\n df = self.df\n target = self.target\n\n node = self.get_lowest_entropy_feature(df)\n if not node:\n print(\"Pure solution not possible in current branch...\")\n return tree\n variables = df[node].unique()\n\n if tree is None: \n tree = {}\n tree[node] = {}\n\n for value in variables:\n subtable = df[df[node] == value].reset_index(drop=True)\n inner_variables, counts = np.unique(subtable[target], return_counts=True) \n \n if len(counts) == 1:\n tree[node][value] = inner_variables[0] \n elif depth >= self.max_depth:\n return tree \n else:\n depth += 1 \n tree[node][value] = self.build_tree(df=subtable, depth=depth)\n \n return tree",
"def make_trees(self):\n self.trees = build_recursively_from_cells(self.cells, container=self)\n# self.trees = []\n# for cell in self.cells:\n# if cell.bpointer is None: # test whether cell is root\n# tree = Colony(container=self)\n# tree.add_cell_recursive(cell)\n# self.trees.append(tree)\n return",
"def _preprocess(self):\n # Size of each micro tree: B = 1/4 logn.\n self._block_size = int(1/4 * math.log2(self._size))\n\n # Build a list of ladders and a sparse table for the jump nodes.\n super()._preprocess()\n\n # Decompose the tree into macro tree and micro trees.\n self._micro_macro_decomposition()\n\n # Build simple tables for the micro trees.\n self._build_micro_tree_tables()",
"def _build_octree(self):\n\n # cleanup old tree\n self._nodes_positions = []\n self._nodes_mass = []\n self._nodes_sizes = []\n self._nodes_children_types = []\n self._nodes_children_ids = []\n\n min_pos = np.min(self._positions)\n max_pos = np.max(self._positions)\n\n self._build_octree_branch(\n bodies=list(range(self.bodies)),\n coords_min=np.array([min_pos] * 3),\n coords_max=np.array([max_pos] * 3)\n )",
"def buildTreePandas(rows, res, min_ppl = None, maxDepth=None, scoref=entropy, depth=0):\n minimum_ppl = deepcopy(min_ppl)\n num_ppl = len(rows)\n \n if min_ppl is not None and num_ppl <= min_ppl:\n #Extra protection to stop the recursion\n return decisionNode(results=__uniqueCountsPandas(rows, res)) \n if num_ppl==0: \n return decisionNode( )\n newDepth = depth + 1\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth):\n #print \"Hooray I got here.\"\n return decisionNode(results=__uniqueCountsPandas(rows, res))\n current_score=scoref(rows, resCol = res)\n # Set up some variables to track the best criteria\n best_gain=0.0\n best_criteria=None\n best_sets=None\n \n featColumns=rows.columns.tolist()\n featColumns.remove(res)\n for col in featColumns:\n # Generate the list of different values in\n # this column\n column_values=rows.loc[:,col].unique()\n # Now try dividing the rows up for each value\n # in this column\n copy = rows.sort(columns = col)\n for value in column_values:\n (set1,set2)=__dividePandas(copy,col,value)\n # Information gain\n p=float(len(set1))/len(rows)\n gain=current_score-p*scoref(set1, resCol = res)-(1-p)*scoref(set2, resCol = res)\n size_min = 0 if minimum_ppl is None else minimum_ppl - 1\n if gain>best_gain and len(set1)>size_min and len(set2)>size_min:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Create the subbranches\n if best_gain>0:\n trueBranch=buildTreePandas(best_sets[0], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n falseBranch=buildTreePandas(best_sets[1], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCountsPandas(rows, res))",
"def merge_trees_via_nj(pdm, trees):\n leaves = []\n for tree in trees:\n leaves.append(get_leaf_set(tree))\n\n # Check trees are on disjoint leaf sets\n for i, li in enumerate(leaves[:-1]):\n for lj in leaves[i+1:]:\n shared = li.intersection(lj)\n if len(shared) != 0:\n raise Exception(\"Input trees are not on disjoint leaf sets!\\n\")\n\n # Check distance matrix and trees have matching leaf sets\n full_leaf_set = set()\n for l in leaves:\n full_leaf_set = full_leaf_set.union(l)\n if full_leaf_set != set([x.label for x in pdm.taxon_namespace]):\n raise Exception(\"Names in matrix do not match those in trees!\\n\")\n\n # Remove some extra nonsense\n for tree in trees:\n # Root trees\n tree.resolve_polytomies(limit=2)\n tree.is_rooted = True\n\n # Remove branch lengths\n for e in tree.preorder_edge_iter():\n e.length = None\n\n # Remove bootstrap support\n for n in tree.internal_nodes():\n n.label = None\n\n # Map splits to nodes\n maps = []\n for tree in trees:\n maps.append(map_splits_to_nodes(tree))\n\n # Taken from dendropy\n original_dmatrix = pdm._taxon_phylogenetic_distances\n tree_factory = dendropy.Tree\n tree = tree_factory(taxon_namespace=pdm.taxon_namespace)\n tree.is_rooted = False\n\n # Initialize node pool - taken from dendropy\n node_pool = []\n for t1 in pdm._mapped_taxa:\n nd = tree.node_factory()\n nd.taxon = t1\n nd._nj_distances = {}\n node_pool.append(nd)\n\n # Initialize factor - taken from dendropy\n n = len(pdm._mapped_taxa)\n\n # Cache calculations - taken from dendropy\n for nd1 in node_pool:\n nd1._nj_xsub = 0.0\n for nd2 in node_pool:\n if nd1 is nd2:\n continue\n d = original_dmatrix[nd1.taxon][nd2.taxon]\n nd1._nj_distances[nd2] = d\n nd1._nj_xsub += d\n\n while n > 1:\n print(n)\n # Using multiprocessing!\n\n # Sort the Q-matrix\n # TODO: Use multi-threading!\n pairs = []\n qvalues = []\n for idx1, nd1 in enumerate(node_pool[:-1]):\n idx2 = idx1 + 1\n for nd2 in node_pool[idx2:]:\n v1 = (n - 2) * nd1._nj_distances[nd2]\n qvalue = v1 - nd1._nj_xsub - nd2._nj_xsub\n pairs.append([idx1, idx2])\n qvalues.append(qvalue)\n idx2 = idx2 + 1\n\n # Test for constraint violations\n # TODO: Use multi-threading in test_join function!\n nodes_to_join = None\n for idxq in numpy.argsort(qvalues):\n [idx1, idx2] = pairs[idxq]\n nd1 = node_pool[idx1]\n nd2 = node_pool[idx2]\n # Check join does not violate a constraint tree!\n violates = test_join(trees, leaves, maps, nd1, nd2)\n if not violates:\n nodes_to_join = (nd1, nd2)\n break\n\n if nodes_to_join is None:\n raise Exception(\"Unable to find valid siblinghood!\\n\")\n\n # Nodes to join\n (nd1, nd2) = nodes_to_join\n\n # Update the constraint trees!\n [trees, edits] = join_nodes(trees, leaves, maps, nd1, nd2)\n if sum(edits) > 0:\n i = 0\n for t, e in zip(trees, edits):\n if e:\n # Check to see if you can quit early\n leaves[i] = get_leaf_set(t)\n if leaves[i] == full_leaf_set:\n return t\n\n # Update split-to-node maps\n maps[i] = map_splits_to_nodes(t)\n i = i + 1\n\n # Create the new node - taken from dendropy\n new_node = tree.node_factory()\n\n # Attach it to the tree - taken from dendropy\n for node_to_join in nodes_to_join:\n new_node.add_child(node_to_join)\n node_pool.remove(node_to_join)\n\n # Calculate the distances for the new node - taken from dendropy\n new_node._nj_distances = {}\n new_node._nj_xsub = 0.0\n for node in node_pool:\n # actual node-to-node distances\n v1 = 0.0\n for node_to_join in nodes_to_join:\n v1 += node._nj_distances[node_to_join]\n v3 = nodes_to_join[0]._nj_distances[nodes_to_join[1]]\n 
dist = 0.5 * (v1 - v3)\n new_node._nj_distances[node] = dist\n node._nj_distances[new_node] = dist\n\n # Adjust/recalculate the values needed for the Q-matrix\n # calculations - taken from dendropy\n new_node._nj_xsub += dist\n node._nj_xsub += dist\n for node_to_join in nodes_to_join:\n node._nj_xsub -= node_to_join._nj_distances[node]\n\n # Clean up - taken from dendropy\n for node_to_join in nodes_to_join:\n del node_to_join._nj_distances\n del node_to_join._nj_xsub\n\n # Add the new node to the pool of nodes - taken from dendropy\n node_pool.append(new_node)\n\n # Adjust count - taken from dendropy\n n -= 1\n\n # More clean up - taken from dendropy\n tree.seed_node = node_pool[0]\n del tree.seed_node._nj_distances\n del tree.seed_node._nj_xsub\n return tree"
] |
[
"0.6634557",
"0.65097874",
"0.6468073",
"0.6322716",
"0.61754715",
"0.61633855",
"0.6134023",
"0.6103396",
"0.6072749",
"0.6025818",
"0.5942558",
"0.5940306",
"0.59400433",
"0.59197605",
"0.5803043",
"0.5800409",
"0.5730466",
"0.57235837",
"0.570595",
"0.56996745",
"0.5682015",
"0.56593484",
"0.5652294",
"0.56499326",
"0.564837",
"0.5629537",
"0.5618723",
"0.5617391",
"0.5598036",
"0.55974406"
] |
0.67621195
|
0
|
given a path from root to a sampled node, generate all the node pairs within the given window size, e.g., path = [1, 0, 2, 4, 2], window_size = 2 -> node pairs = [[1, 0], [1, 2], [0, 1], [0, 2], [0, 4], [2, 1], [2, 0], [2, 4], [4, 0], [4, 2]]
|
def get_node_pairs_from_path(path):
path = path[:-1]
pairs = []
for i in range(len(path)):
center_node = path[i]
for j in range(max(i - config.window_size, 0), min(i + config.window_size + 1, len(path))):
if i == j:
continue
node = path[j]
pairs.append([center_node, node])
return pairs
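A minimal usage sketch (illustrative only, not part of the original dataset row): the function above reads `window_size` from a module-level `config` object, so this sketch substitutes a hypothetical stand-in built with `types.SimpleNamespace` and assumes the function is pasted into the same script. With the example path from the query it reproduces the listed pairs.

from types import SimpleNamespace

# Hypothetical stand-in for the external config module the function expects;
# only the window_size attribute is used here (assumed value: 2).
config = SimpleNamespace(window_size=2)

path = [1, 0, 2, 4, 2]  # example path from the query above
print(get_node_pairs_from_path(path))
# -> [[1, 0], [1, 2], [0, 1], [0, 2], [0, 4], [2, 1], [2, 0], [2, 4], [4, 0], [4, 2]]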
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward",
"def ring_topology(random, population, args):\r\n neighborhood_size = args.setdefault('neighborhood_size', 3)\r\n half_hood = neighborhood_size // 2\r\n neighbor_index_start = []\r\n for index in range(len(population)):\r\n if index < half_hood:\r\n neighbor_index_start.append(len(population) - half_hood + index)\r\n else:\r\n neighbor_index_start.append(index - half_hood)\r\n neighbors = []\r\n for start in neighbor_index_start:\r\n n = []\r\n for i in range(0, neighborhood_size):\r\n n.append(population[(start + i) % len(population)])\r\n yield n",
"def test_node_sampling(weighted_graph_config_fixture):\n w_config = weighted_graph_config_fixture\n\n # Node 5 to node 4 has zero weight (zero transition probability)\n # Node 4 to node 5 has ten weight (high transition probability)\n edges = pd.DataFrame({'source_content_id': [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n 'destination_content_id': [5, 1, 0, 3, 4, 1, 2, 1, 3, 5, 3, 4],\n 'weight': [1, 2, 3, 4, 1, 2, 3, 4, 1, 10, 5, 0]}\n )\n wm = N2VModel()\n\n wm.create_graph(edges, w_config['weighted_graph'])\n\n wm.generate_walks(**w_config)\n\n wm.fit_model(**w_config, callbacks=EpochLogger())\n\n n_nodes = len(set(edges.source_content_id))\n n_transitions = n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks']\n\n res = np.array([np.array(list(zip(x, x[1:]))).ravel() for x in wm.node2vec.walks])\n walks = np.reshape(res, (n_transitions, 2))\n\n pairs = pd.DataFrame({'state1': walks[:, 0], 'state2': walks[:, 1]})\n counts = pairs.groupby('state1')['state2'].value_counts().unstack()\n counts = counts.replace(np.nan, 0)\n assert pairs.shape == (n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks'], 2)\n assert counts.iloc[5][4] == 0\n assert counts.iloc[4][5] != 0\n assert len(set(edges['source_content_id']).union(\n set(edges['destination_content_id']))) == len(wm.model.wv.vocab.keys())",
"def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list",
"def make_pnts(start_point, end_point, path, cellsize):\n pnts = misc.generateSamples(tran = [start_point, end_point], path = path)\n \n #Only do sampling at DEM with >3m resolution \n if cellsize[0] >= 5:\n pnts = misc.selectSamplePts([start_point, end_point], pnts, cellsize[0])\n return pnts",
"def get_pairs(feature_size=256, window_size=31):\n # Approach proposed by the professor\n # std = 1 / 5 * window_size\n # point_pairs2 = np.int32(np.random.randn(4, feature_size) * std)\n\n # Generate random point pairs\n # Using the approach G II: Gaussian(0, 1/25 * window_size^2)\n std = 0\n dev = 1 / 25 * (window_size * window_size)\n point_pairs = np.int32(np.random.normal(std, dev, (4, feature_size)))\n # Make sure the points are inside the window (patch)\n half_window_size = window_size // 2 -1\n brief_points = np.maximum(-half_window_size, np.minimum(point_pairs, half_window_size))\n return brief_points",
"def sliding_window(top, step=10, window_size=(20,20)):\n\tfor x in range(0, top.shape[0], step):\n\t\tif x + window_size[0] > top.shape[0]:\n\t\t\tx = top.shape[0] - window_size[0]\n\t\tfor y in range(0, top.shape[1], step):\n\t\t\tif y + window_size[1] > top.shape[1]:\n\t\t\t\ty = top.shape[1] - window_size[1]\n\t\t\tyield x, y, window_size[0], window_size[1]",
"def generate_graph(size, number_of_clusters, minimal_size):\n base_list = list(range(size))\n result_list = []\n random.shuffle(base_list)\n for i in range(number_of_clusters - 1):\n size = random.randint(minimal_size, len(base_list) - (number_of_clusters - i - 1) * minimal_size)\n cluster = []\n for n in range(size):\n actual = random.choice(base_list)\n base_list.remove(actual)\n cluster.append(actual)\n result_list.append(strongly_connect(cluster))\n result_list.append(strongly_connect(base_list))\n\n while len(result_list) < 5:\n result_list.append([])\n\n print(sorted([len(i) for i in result_list], reverse=True)[:5])\n\n return weak_connect_graph(result_list)",
"def compute_m_paths_of_len_forw(g, sampl, start_times, path_len=5, cost=1, its=100, max_duration=sys.maxint):\n\n num_of_paths_dict = {}\n\n # for each node in source, generate random paths and count the number of times they end up in each\n # dest. node\n for source, start_time in zip(sampl, start_times):\n print \"Processing source node: {} (starting at time {})\".format(source, start_time)\n num_of_paths_dict[(source, start_time)] = {dest: 0 for dest in g.nodes()}\n for _ in range(its):\n d = random_path_forward(g, source, path_len, start_time, cost, max_duration)\n if d is not None:\n num_of_paths_dict[(source, start_time)][d] += 1\n\n return num_of_paths_dict",
"def __generate_all_shortest_paths(self,cutoff = 10):\n if cutoff < 1:\n cutoff = 10\n self.__logger.info(\"cutoff value must be a positive integer. Set back to default value: 10\")\n\n all_pair_shortest_paths = nx.all_pairs_shortest_path(self.G, cutoff=cutoff)\n for item in all_pair_shortest_paths:\n from_node = item[0]\n paths = item[1]\n for destination,path in paths.items():\n yield (len(path),path)",
"def sample_paths(S0, N, u, d, q, M):\n value = S0*np.ones((M,1)) # M sample paths at once\n paths = np.zeros((M, N+1))\n paths[:,0] = value[:,0]\n for i in np.arange(1, N+1,1): # time steps\n random_values = bernoulli.rvs(size=(M,1),p=q)\n up_moves = random_values*u\n down_moves = -1*(random_values-1)*d \n value = np.multiply(value,up_moves+down_moves)\n paths[:,i] = value[:,0]\n return paths",
"def generate(bat_size, s=\"train\"):\n while True:\n pairs, targets = get_batch(bat_size, s)\n yield (pairs, targets)",
"def generate_pairs(number: int) -> List[List[int]]:\n return [\n [top, inner]\n for top in range(number + 1)\n for inner in range(top, number + 1)\n ]",
"def sample_paths(S0, N, u, d, q, M):\n value = S0 * np.ones((M, 1)) # M sample paths at once\n paths = np.zeros((M, N + 1))\n paths[:, 0] = value[:, 0]\n for i in np.arange(1, N + 1, 1): # time steps\n random_values = bernoulli.rvs(size=(M, 1), p=q)\n up_moves = random_values * u\n down_moves = -1 * (random_values - 1) * d\n value = np.multiply(value, up_moves + down_moves)\n paths[:, i] = value[:, 0]\n return paths",
"def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]",
"def generate_paths(self, paths: list) -> list or int:\n path_count = 0\n new_paths = {}\n # follow each path in the paths list\n for path in paths:\n # find neighbours from last position in each path\n neighbours = self.neighbours(path[-1])\n for neighbour in neighbours:\n if neighbour in self.queue.values():\n continue # if neighbour in queue, go to next neighbour\n # find grid value of neighbour\n grid_value = self.grid[neighbour[1]][neighbour[0]]\n if grid_value == 1:\n new_paths[path_count] = path.copy()\n new_paths[path_count].append(neighbour) # add path to dict\n self.queue[path_count + 1] = neighbour # add neighbour to queue\n path_count += 1 # increase count of number of paths\n if grid_value == 9:\n return len(path)\n\n # roll dict out into list of paths\n paths_store = [new_paths[key] for key in new_paths]\n\n return paths_store",
"def generateNumsets(G):\n # paths = []\n #\n # path = [0]\n # for edge in nx.dfs_edges(G, 0):\n # if edge[0] == path[-1]:\n # path.append(edge[1])\n # else:\n # paths.append(path)\n # search_index = 2\n # while search_index <= len(path):\n # if edge[0] == path[-search_index]:\n # path = path[:-search_index + 1] + [edge[1]]\n # break\n # search_index += 1\n # else:\n # raise Exception(\"Wrong path structure?\", path, edge)\n # paths.append(path)\n # return paths\n\n \"\"\"\n Trying to use itertools LMAO\n \"\"\"\n # paths = []\n #\n # for path in itertools.combinations(G.nodes, 5):\n # paths.append(path)\n # return paths\n\n \"\"\"\n Generating paths using graph\n \"\"\"\n paths = []\n n = len(G.nodes)\n for source in range(n):\n for target in range(source+1, n):\n paths.extend([path for path in nx.all_simple_paths(G, source=source, target=target)])\n return paths\n\n # return paths",
"def make_nodes_and_paths(friends_lst):\n\n # nodes = {}\n\n # for item in friends_lst:\n # friend1, friend2, group = item\n # for person in pair:\n # if not nodes.get(person):\n # nodes[person] = pair[1]\n\n # nodes = [{'name': person, 'friend': nodes[person]} for person in nodes.keys()]\n\n nodes = {}\n for item in friends_lst:\n friend1, friend2, group = item\n if not nodes.get(friend1):\n nodes[friend1] = group\n elif nodes.get(friend1) > group:\n nodes[friend1] = group\n\n nodes = [{'name': person, 'group': nodes[person]} for person in nodes.keys()]\n\n index_nodes = {}\n for idx, n in enumerate(nodes):\n index_nodes[n['name']] = (idx, n['group'])\n\n paths = []\n\n # paths.append({'source': item[1], 'target': item[0]})\n\n for item in friends_lst:\n # one = User.query.get(item.user_id)\n # two = User.query.get(item.friend_id)\n source, target, group = item\n paths.append({'source': index_nodes[source][0], 'target': index_nodes[target][0]})\n\n # print nodes\n # print index_nodes\n # print paths\n\n return nodes, paths",
"def Generate_edges(size, connectedness):\r\n\r\n assert connectedness <= 1\r\n random.seed(10)\r\n for i in range(size):\r\n for j in range(i + 1, size):\r\n if random.randrange(0, 100) <= connectedness * 100:\r\n yield f'{i} {j}'",
"def path(self, root, n=10, stepsize=3):\n seq = []\n seq.append(root)\n while len(seq) < n:\n next = self.synonyms([seq[-1]], stepsize)\n random.shuffle(next)\n maxToAdd = stepsize\n added_something = False\n for j in next:\n if j not in seq:\n seq.append(j)\n added_something = True\n maxToAdd -= 1\n if maxToAdd <= 0:\n break\n if added_something is False:\n seq.append(root)\n return(seq[0:n])",
"def get_2pairs():\n\n done = 0\n while not done:\n r0 = int(random(GRID_CELLS))\n c0 = int(random(GRID_CELLS))\n\n r1 = int(random(GRID_CELLS))\n c1 = int(random(GRID_CELLS))\n done = 1\n\n if random(1) < 0.5:\n # move one cell right\n ra1 = r0 + 1\n rb1 = r1 + 1\n ra0, rb0 = r0, r1\n ca0, cb0 = c0, c1\n ca1, cb1 = c0, c1\n\n if ra1 >= GRID_CELLS or rb1 >= GRID_CELLS:\n done = 0\n else: # move down:\n ca1 = c0 + 1\n cb1 = c1 + 1\n ca0, cb0 = c0, c1\n ra0, rb0 = r0, r1\n ra1, rb1 = r0, r1\n if ca1 >= GRID_CELLS or cb1 >= GRID_CELLS:\n done = 0\n\n return [((ra0, ca0), (rb0, cb0)), ((ra1, ca1), (rb1, cb1))]",
"def generate_window(self):\n self.window_num += 1\n cur_window = []\n for i in range(self.window_size):\n if len(self.cur_topics) < self.min_topics: break\n topic_num = np.random.choice(self.cur_topics)\n doc_num = random.randint(0, len(self.filepaths[topic_num])-1)\n cur_window.append(self.filepaths[topic_num][doc_num])\n del self.filepaths[topic_num][doc_num]\n if len(self.filepaths[topic_num]) == 0: self.cur_topics.remove(topic_num) # If a topic runs out of documents\n self.calculate_topic_distribution(cur_window)\n self.time_windows.append(cur_window)",
"def init_tsp_pop(tsp, n): \n pop = []\n nodes = list(tsp.get_nodes())\n random_path = []\n p_len = len(nodes)\n for i in range(n):\n temp_nodes = list(tsp.get_nodes())\n random_path = []\n for j in range(p_len):\n x = random.choice(temp_nodes)\n random_path.append(x)\n temp_nodes.remove(x)\n pop.append(random_path)\n return pop",
"def compute_m_paths_of_len_back(g, sampl, end_times, path_len=5, cost=1, its=100, max_duration=sys.maxint):\n\n num_of_paths_dict = {}\n\n # for each node in source, generate random paths and count the number of times they end up in each\n # dest. node\n for dest, end_time in zip(sampl, end_times):\n print \"Processing dest node: {} (ending at time {})\".format(dest, end_time)\n num_of_paths_dict[(dest, end_time)] = {source: 0 for source in g.nodes()}\n for _ in range(its):\n d = random_path_backwards(g, dest, path_len, end_time, cost, max_duration)\n if d is not None:\n num_of_paths_dict[(dest, end_time)][d] += 1\n\n return num_of_paths_dict",
"def generate(self, batch_size, s=\"train\"):\n while True:\n pairs, targets = self.get_batch(batch_size,s)\n yield (pairs, targets)",
"def generate_pairs_lists(\n top, molecule=None, sort_key=None, refer_from_scaling_factor=False\n):\n from gmso.external import to_networkx\n from gmso.parameterization.molecule_utils import (\n molecule_angles,\n molecule_bonds,\n molecule_dihedrals,\n )\n\n nb_scalings, coulombic_scalings = top.scaling_factors\n\n if sort_key is None:\n sort_key = top.get_index\n\n graph = to_networkx(top, parse_angles=False, parse_dihedrals=False)\n\n pairs_dict = dict()\n if refer_from_scaling_factor:\n for i in range(3):\n if nb_scalings[i] or coulombic_scalings[i]:\n pairs_dict[f\"pairs1{i+2}\"] = list()\n else:\n for i in range(3):\n pairs_dict = {f\"pairs1{i+2}\": list() for i in range(3)}\n\n if molecule is None:\n bonds, angles, dihedrals = top.bonds, top.angles, top.dihedrals\n else:\n bonds = molecule_bonds(top, molecule)\n angles = molecule_angles(top, molecule)\n dihedrals = molecule_dihedrals(top, molecule)\n\n if \"pairs12\" in pairs_dict:\n for bond in bonds:\n pairs = sorted(bond.connection_members, key=sort_key)\n pairs_dict[\"pairs12\"].append(pairs)\n\n if \"pairs13\" in pairs_dict:\n for angle in angles:\n pairs = sorted(\n (angle.connection_members[0], angle.connection_members[-1]),\n key=sort_key,\n )\n if (\n pairs not in pairs_dict[\"pairs13\"]\n and shortest_path_length(graph, pairs[0], pairs[1]) == 2\n ):\n pairs_dict[\"pairs13\"].append(pairs)\n\n if \"pairs14\" in pairs_dict:\n for dihedral in dihedrals:\n pairs = sorted(\n (\n dihedral.connection_members[0],\n dihedral.connection_members[-1],\n ),\n key=sort_key,\n )\n if (\n pairs not in pairs_dict[\"pairs14\"]\n and shortest_path_length(graph, pairs[0], pairs[1]) == 3\n ):\n pairs_dict[\"pairs14\"].append(pairs)\n\n for key in pairs_dict:\n pairs_dict[key] = sorted(\n pairs_dict[key],\n key=lambda pairs: (sort_key(pairs[0]), sort_key(pairs[1])),\n )\n\n return pairs_dict",
"def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec",
"def graph_connectome(\n num_sampled,\n max_depth,\n num_iters=10,\n graph=None,\n reverse_graph=None,\n to_write=None,\n num_cpus=1,\n a_indices=None,\n b_indices=None,\n):\n num_a, num_b = to_write\n\n if a_indices is None:\n a_indices = np.array([i for i in range(num_a)])\n if b_indices is None:\n b_indices = np.array([i for i in range(num_b)])\n\n def random_var_gen(iter_val):\n start = np.random.choice(a_indices, size=num_sampled[0], replace=False)\n end = np.random.choice(b_indices, size=num_sampled[1], replace=False)\n end = end + num_a\n\n return start, end\n\n def fn_to_eval(start, end):\n return (\n len(find_connected_limited(graph, start, end, max_depth, reverse_graph)),\n )\n\n result = monte_carlo(fn_to_eval, random_var_gen, num_iters, num_cpus=num_cpus)\n df = list_to_df(\n result,\n [\"Connections\"],\n )\n result = summarise_monte_carlo(\n df,\n plot=False,\n )\n ordered_dist = get_distribution(df, \"Connections\", num_iters)\n\n return {\n \"full_results\": df,\n \"summary_stats\": result,\n \"dist\": ordered_dist,\n }",
"def create_node2edges_on2freq_grid(self):\n trip_id2model = pickle.load(open('pickles/trip_id2model.pickle','rb'))\n old_trip_id = -1\n model = trip_id2model[1]\n sub_x = 5\n sub_y = 5\n node2edges_on2sub_grid2points = {}\n for line in self.lines:\n trip_id,lat,lon = normalize_simple(line)\n if trip_id != old_trip_id:\n #print trip_id\n model = trip_id2model[trip_id]\n old_trip_id = trip_id\n node = self.gps_to_node(lat,lon)\n if node == -1:\n continue\n #print \"pushed through\"\n incident_edges = self.incident_edges(node)\n edges_on = []\n for edge in incident_edges:\n if model[edge] == 1:\n edges_on.append(edge)\n edges_on.sort()\n edges_on = tuple(edges_on)\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon(self.node_to_coords(node))\n\n sub_row,sub_col = gen_gps_to_coords(lat,lon,sub_x,sub_y,min_lat,max_lat,min_lon,max_lon)\n sub_tuple = (sub_row,sub_col)\n if node not in node2edges_on2sub_grid2points:\n node2edges_on2sub_grid2points[node] = {}\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n if edges_on not in edges_on2sub_grid2points:\n edges_on2sub_grid2points[edges_on] = defaultdict(list)\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n points = sub_grid2points[sub_tuple]\n node2edges_on2sub_grid2points[node][edges_on][sub_tuple].append([lat,lon])\n #points.append([lat,lon])\n\n print node2edges_on2sub_grid2points.keys()\n print node2edges_on2sub_grid2points[2].keys()\n print node2edges_on2sub_grid2points[2][(2,3)].keys()\n \n node2edges_on2median = {}\n for node in node2edges_on2sub_grid2points:\n print node\n edges_on2sub_grid2points = node2edges_on2sub_grid2points[node]\n node2edges_on2median[node] = {}\n for edges_on in edges_on2sub_grid2points:\n sub_grid2points = edges_on2sub_grid2points[edges_on]\n best_spot = (-1,-1)\n best_score = 0\n for spot in sub_grid2points:\n score = len(sub_grid2points[spot])\n if score > best_score:\n best_score = score\n best_spot = spot\n node2edges_on2median[node][edges_on] = list_median(sub_grid2points[spot])\n \n with open('pickles/node2edges_on2median-%d-%d.pickle' % (self.rows,self.cols),'wb') as output:\n pickle.dump(node2edges_on2median,output)",
"def set_random_session(self, G, degree_s):\n sorted_nodes = nx.topological_sort(G)\n num_nodes = G.number_of_nodes()\n\n # create sources and destinations of each of the sections\n # name the nodes to be the last 4 numbers\n srcs = [num_nodes, num_nodes + 1]\n dsts = [num_nodes + 2, num_nodes + 3]\n\n end_idx = int(0.3 * len(sorted_nodes))\n end_idx = max(end_idx, 2)\n for i in range(2):\n s = srcs[i]\n t = dsts[i]\n reachables = []\n iter_num = 0\n\n while len(reachables) == 0:\n iter_num += 1\n if iter_num > 100:\n end_idx = end_idx * 2\n\n # pick an entry point from the first 30%\n entry_point = random.choice(sorted_nodes[:end_idx])\n # print \"Source \", i\n # print \"candidates: \", sorted_nodes[:end_idx]\n # print \"entry point: \", entry_point\n # print \"all nodes: \", G.nodes()\n\n # pick a random point from the reachables\n reachables = nx.shortest_path(G, entry_point)\n del reachables[entry_point]\n #print \"reachables: \", reachables\n reachables = reachables.keys()\n\n exit_point = random.choice(reachables)\n #print \"exit_point: \", exit_point\n\n if degree_s[i]:\n G.add_edge(s, entry_point, weight=degree_s[i])\n G.add_edge(exit_point, t, weight=degree_s[i])\n else:\n # figure out the out_degree of entry point\n out_degree = np.sum(G[u][v]['weight'] for u,v in G.out_edges(entry_point))\n G.add_edge(s, entry_point, weight=out_degree)\n\n # figure out the int_degree of exit point\n in_degree = np.sum(G[u][v]['weight'] for u,v in G.in_edges(exit_point))\n G.add_edge(exit_point, t, weight=in_degree)\n\n edges = G.edges()\n for u, v in edges:\n par_num = int(G[u][v]['weight'])\n for i in range(par_num):\n self.add_edge(u, v)\n\n # set indices etc\n self.set_sources(srcs)\n self.set_destinations(dsts)\n self.set_indices()\n #print \"number of nodes: \" + str(self.number_of_nodes())\n #print \"number of edges: \" + str(self.number_of_edges())"
] |
[
"0.6034385",
"0.58849245",
"0.5821947",
"0.57078713",
"0.567708",
"0.5635777",
"0.55671024",
"0.5543528",
"0.5502547",
"0.5464833",
"0.54600173",
"0.54036397",
"0.53936696",
"0.5389518",
"0.538797",
"0.5360911",
"0.5360335",
"0.5356528",
"0.5354449",
"0.5350147",
"0.5309977",
"0.528144",
"0.5261747",
"0.5186649",
"0.5183839",
"0.51762784",
"0.51583624",
"0.51519525",
"0.5146432",
"0.5135836"
] |
0.66977215
|
0
|
write embeddings of the generator and the discriminator to files
|
def write_embeddings_to_file(self):
modes = [self.generator, self.discriminator]
for i in range(2):
embedding_matrix = modes[i].embedding_matrix
embedding_matrix = embedding_matrix.detach().to('cpu').numpy()
index = np.array(range(self.n_node)).reshape(-1, 1)
embedding_matrix = np.hstack([index, embedding_matrix])
embedding_list = embedding_matrix.tolist()
embedding_str = [str(int(emb[0])) + "\t" + "\t".join([str(x) for x in emb[1:]]) + "\n"
for emb in embedding_list]
with open(config.emb_filenames[i], "w+") as f:
lines = [str(self.n_node) + "\t" + str(config.n_emb) + "\n"] + embedding_str
f.writelines(lines)
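
A minimal standalone sketch of the file format this method produces, assuming hypothetical values in place of config.emb_filenames[i], self.n_node, and config.n_emb: a header line with the node count and embedding dimension, then one tab-separated row per node starting with its integer id.

# Editor's sketch (not part of the dataset row): illustrates the output format of
# write_embeddings_to_file with made-up values; emb_filename, n_node and n_emb are
# hypothetical stand-ins for config.emb_filenames[i], self.n_node and config.n_emb.
import numpy as np

def write_embedding_file_sketch(emb_filename="generator_emb.txt", n_node=3, n_emb=4):
    rng = np.random.default_rng(0)
    embedding_matrix = rng.standard_normal((n_node, n_emb))   # rows of node embeddings
    index = np.arange(n_node).reshape(-1, 1)                  # prepend node ids
    rows = np.hstack([index, embedding_matrix]).tolist()
    lines = [f"{n_node}\t{n_emb}\n"]                          # header: count and dimension
    lines += ["\t".join([str(int(r[0]))] + [str(x) for x in r[1:]]) + "\n" for r in rows]
    with open(emb_filename, "w") as f:
        f.writelines(lines)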
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def export_embeddings(self):\n save_path = self.config.path_embeddings / self.model.model_name\n save_path.mkdir(parents=True, exist_ok=True)\n \n idx2ent = self.model.config.knowledge_graph.read_cache_data('idx2entity')\n idx2rel = self.model.config.knowledge_graph.read_cache_data('idx2relation')\n\n\n series_ent = pd.Series(idx2ent)\n series_rel = pd.Series(idx2rel)\n series_ent.to_pickle(save_path / \"ent_labels.pickle\")\n series_rel.to_pickle(save_path / \"rel_labels.pickle\")\n\n with open(str(save_path / \"ent_labels.tsv\"), 'w') as l_export_file:\n for label in idx2ent.values():\n l_export_file.write(label + \"\\n\")\n\n with open(str(save_path / \"rel_labels.tsv\"), 'w') as l_export_file:\n for label in idx2rel.values():\n l_export_file.write(label + \"\\n\")\n\n for parameter in self.model.parameter_list:\n all_ids = list(range(0, int(parameter.shape[0])))\n stored_name = parameter.name.split(':')[0]\n # import pdb; pdb.set_trace()\n\n if len(parameter.shape) == 2:\n all_embs = parameter.numpy()\n with open(str(save_path / (\"%s.tsv\" % stored_name)), 'w') as v_export_file:\n for idx in all_ids:\n v_export_file.write(\"\\t\".join([str(x) for x in all_embs[idx]]) + \"\\n\")\n\n df = pd.DataFrame(all_embs)\n df.to_pickle(save_path / (\"%s.pickle\" % stored_name))",
"def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...",
"def save(self, folder):\n self.generator.save_weights('%s/generator.h5'%folder)\n self.critic.save_weights('%s/critic.h5'%folder)",
"def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! ******\\n \")",
"def output_wave_files(predicted_mfccs_batch, true_target_mfccs_batch):\n # only outputting 1 wavefile in the batch, because otherwise it takes too long\n for i in range(min(1, predicted_mfccs_batch.shape[0])):\n print \"Converting wavefile \", i\n predicted_mfccs = predicted_mfccs_batch[i,:,:]\n target_mfccs = true_target_mfccs_batch[i]\n\n output_wave_file(predicted_mfccs, filename='autoencoder_pred_' + str(i)) \n output_wave_file(target_mfccs, filename='autoencoder_input_' + str(i))",
"def write(self, path: str, embeddings: Embeddings):\n if self == Format.finalfusion:\n embeddings.write(path)\n elif self == Format.word2vec:\n write_word2vec(path, embeddings)\n elif self == Format.text:\n write_text(path, embeddings)\n elif self == Format.textdims:\n write_text_dims(path, embeddings)\n elif self == Format.fasttext:\n write_fasttext(path, embeddings)\n else:\n raise ValueError(f\"Unknown format {str(self)}\")",
"def saveWeights(self, basename, generation):\n for i,wt in enumerate(self.weights):\n np.save(\"./data/\"+basename+\"/\"+basename + \"_W\"+str(i)+\"_G\" + str(generation),wt)\n for i,bs in enumerate(self.bias):\n np.save(\"./data/\"+basename+\"/\"+basename + \"_B\"+str(i)+\"_G\" + str(generation),bs)",
"def export(self, output_dir, config, train_ratio=0.7, delimiter=\",\"):\n model_dir = os.path.join(output_dir, self.model_id)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n X_tr, X_te = self.get_train_test_embeddings(config, train_ratio)\n #save text feature matrix\n idx = config[\"dimension\"]\n tr_text = csr_matrix(X_tr[:,:idx])\n te_text = csr_matrix(X_te[:,:idx])\n print(\"text\", tr_text.shape, te_text.shape)\n save_npz(os.path.join(model_dir, \"train_text\"), tr_text)\n save_npz(os.path.join(model_dir, \"test_text\"), te_text)\n #save history feature matrix\n if config.get(\"user_history\", False):\n tr_history = X_tr[:,idx:idx+4]\n te_history = X_te[:,idx:idx+4]\n np.savetxt(os.path.join(model_dir, \"train_history.csv\"), tr_history, delimiter=delimiter)\n np.savetxt(os.path.join(model_dir, \"test_history.csv\"), te_history, delimiter=delimiter)\n idx += 4\n print(\"history\", tr_history.shape, te_history.shape)\n # save node embeddings\n if \"user_ne\" in config and X_tr.shape[1] > idx:\n tr_network = X_tr[:,idx:]\n te_network = X_te[:,idx:]\n np.savetxt(os.path.join(model_dir, \"train_network.csv\"), tr_network, delimiter=delimiter)\n np.savetxt(os.path.join(model_dir, \"test_network.csv\"), te_network, delimiter=delimiter)\n print(\"network\", tr_network.shape, te_network.shape)\n #save labels\n np.savetxt(os.path.join(model_dir, \"train_label.csv\"), self.tr_label, delimiter=delimiter, fmt='%i')\n np.savetxt(os.path.join(model_dir, \"test_label.csv\"), self.te_label, delimiter=delimiter, fmt='%i')\n #save meta\n self.tr_meta[self._exported_meta_columns].to_csv(os.path.join(model_dir, \"train_meta.csv\"), index=False, sep=delimiter)\n self.te_meta[self._exported_meta_columns].to_csv(os.path.join(model_dir, \"test_meta.csv\"), index=False, sep=delimiter)\n print(\"Model was exported\")\n return model_dir",
"def write(self):\n with open(self.filename, 'w') as outfile:\n [outfile.write(element) for element in self.preamble]\n [outfile.write(element) for element in self.body]",
"def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1",
"def save(self, dest_dir):\n try:\n makedirs(dest_dir)\n except FileExistsError:\n pass\n suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)\n gen_path = path.join(dest_dir, 'gen' + suffix)\n disc_path = path.join(dest_dir, 'disc' + suffix)\n self.discriminator.save(disc_path)\n self.generator.save(gen_path)",
"def save(self, dest_dir):\n try:\n makedirs(dest_dir)\n except FileExistsError:\n pass\n suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)\n gen_path = path.join(dest_dir, 'gen' + suffix)\n disc_path = path.join(dest_dir, 'disc' + suffix)\n self.discriminator.save(disc_path)\n self.generator.save(gen_path)",
"def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)",
"def write_out_examples(examples, path):\n\n writer = tf.io.TFRecordWriter(path)\n for example in examples:\n writer.write(example.SerializeToString())",
"def write(self, output_stream=sys.stdout):\n for model in self.models:\n if len(model.chains) == 0:\n continue\n if len(self.models) > 1:\n print(\"MODEL %4d\" % (model.number), file=output_stream)\n model.write(output_stream)\n if len(self.models) > 1:\n print(\"ENDMDL\", file=output_stream)\n print(\"END\", file=output_stream)",
"def save_model(self):\n filename=self.name + '_words'\n file_write(filename, self.words)\n\n filename2=self.name+'_word_lengths'\n file_write(filename2, self.word_lengths)\n\n filename3=self.name+'_stems'\n file_write(filename3, self.stems)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n file_write(filename4, self.sentence_lengths)\n\n filename5= self.endings+'_endings'\n file_write(filename5, self.endings)",
"def save(self, file_name):\n logger.warning(\"save embeddings and name mappings to `%s`\" % file_name)\n\n objects = EasyDict()\n for name in dir(self.graph):\n object = getattr(self.graph, name)\n if isinstance(object, list) and len(object) > 0 and isinstance(object[0], str): # id2name\n objects[name] = object\n for name in dir(self.solver):\n object = getattr(self.solver, name)\n if isinstance(object, np.ndarray): # embedding\n objects[name] = object\n\n with open(file_name, \"wb\") as fout:\n pickle.dump(objects, fout, protocol=pickle.HIGHEST_PROTOCOL)",
"def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path",
"def save_model(self):\n filename = self.name + '_words'\n f = open(filename, 'w') \n f.write(str(self.words)) \n f.close()\n \n filename2 = self.name + '_word_lengths'\n f = open(filename2, 'w') \n f.write(str(self.word_lengths)) \n f.close()\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'w') \n f.write(str(self.stems)) \n f.close()\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'w') \n f.write(str(self.sentence_lengths)) \n f.close()\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'w') \n f.write(str(self.punctuation)) \n f.close()",
"def generate(train_data_path, trained_model_path, num_output_files):\n # load the notes used to train the model\n\n train_data = data_preprocess.load_from_pickle(train_data_path)\n training_notes = train_data[\"data\"]\n note_translator = train_data[\"note_translator\"]\n\n net = networks.TransformerNet.load_checkpoint(trained_model_path)\n\n for i in range(num_output_files):\n prediction_output = generate_notes(net, training_notes, note_translator)\n create_midi(prediction_output, file_suffix=i)",
"def write_all_patients():\n\n data_dir = sys.argv[1]\n output_dir = sys.argv[2]\n\n imgs, i_msks, o_msks = load_all_patients(data_dir=data_dir)\n\n for idx, array in enumerate(imgs):\n np.save(output_dir+'/img_'+str(idx), array)\n for idx, array in enumerate(i_msks):\n np.save(output_dir+'/i_msk_'+str(idx), array)\n for idx, array in enumerate(o_msks):\n np.save(output_dir + '/o_msk_' + str(idx), array)\n\n return None",
"def write_schema_files():\n print(\"\\nStarting to generate Provider JSON Schemas...\\n\")\n\n for name, generator in schema_generators().items():\n schema = generator()\n with open(f\"../provider/{name}.json\", \"w\") as schemafile:\n schemafile.write(json.dumps(schema, indent=2))\n print(f\"Wrote {name}.json\")\n\n print(\"\\nFinished generating Provider JSON Schemas\")",
"def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()",
"def save_embeddings(\n _output_path, _trained_model, _categorical_columns, _categorical_encoder\n):\n embs = {}\n for c in _categorical_columns:\n embs[c] = _trained_model.extract_weights(c.replace(\" \", \"_\") + \"_embedding\")\n column_names = _categorical_encoder.retrieve_names(\n c, range(0, embs[c].shape[0])\n )\n df_embs = pd.DataFrame(data=embs[c])\n df_embs[c] = column_names\n df_embs.to_csv(_output_path + c + \"_embedding.csv\", mode=\"w\", index=False)\n return embs",
"def write_vocabulary(vocab_processor, outfile):\n vocab_size = len(vocab_processor.vocabulary_)\n with open(outfile, \"w\") as vocabfile:\n for id in range(vocab_size):\n word = vocab_processor.vocabulary_._reverse_mapping[id]\n vocabfile.write(word + \"\\n\")\n print(\"Saved vocabulary to {}\".format(outfile))",
"def write_pc_embedding(filename, xyz, embeddings):\n color = converter.embedding_to_color(embeddings)\n write_pc(filename, xyz, color)",
"def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)",
"def save_models(encoder_dict, decoder_dict, epoch):\n try:\n os.mkdir('saved_models/' + str(epoch))\n os.mkdir('saved_models/' + str(epoch) + '/encoders')\n os.mkdir('saved_models/' + str(epoch) + '/decoders')\n except OSError:\n pass\n for name, encoder in encoder_dict.items():\n torch.save(encoder.state_dict(), 'saved_models/' + str(epoch) + '/encoders/' + str(name) + '.pt')\n for name, decoder in decoder_dict.items():\n torch.save(decoder.state_dict(), 'saved_models/' + str(epoch) + '/decoders/' + str(name) + '.pt')",
"def write_chords(paths, annotations, write=False, strategy = 'powers'):\n\n print ('Generating chords of class \"{}\"'.format(strategy))\n\n strategy_config = yaml.load(open(paths.get('strategies').get(strategy), 'r'))\n annotations_df = pd.read_csv(annotations, index_col = 0)\n annotations_df['pitch'] = pd.to_numeric(annotations_df['pitch'])\n records = annotations_df.set_index(['guitarModel', 'pitch'])['audioFileName'].to_dict()\n directories = []\n\n file_ticker = 0\n\n history = {}\n\n for ii in annotations_df[['guitarModel', 'pitch', 'audioFileName']].itertuples():\n\n pitch = int(ii.pitch)\n model = ii.guitarModel\n if model not in history:\n history[model] = 0\n\n audioname = ii.audioFileName.split('/')[-1].strip('.wav')\n subdirectory = os.path.join(paths.get('interim').trace, strategy, model, '')\n\n if model not in directories:\n os.makedirs(subdirectory, exist_ok = True)\n directories.append(model)\n\n for chord, segment in strategy_config.items():\n for s, pitch_components in segment.items():\n [*bindings], [*components] = zip(*[(records.get((model, pitch + x)), str(pitch + x)) for x in pitch_components])\n if any([fn is None for fn in bindings]) is False:\n history[model] += 1\n rename = '_'.join([audioname, chord, s] + components) + '.wav'\n rename = os.path.join(subdirectory, rename)\n if write is True:\n combiner = sox.Combiner()\n # BINDINGS = OTHER TRACKS MIXING\n combiner.build(bindings, rename, 'mix')\n file_ticker += 1\n else:\n pass\n else:\n break\n\n print('----- {} GENERATED BY MODEl -----'.format(strategy.upper()))\n pprint (history)",
"def save(self, folder):\n try:\n os.makedirs(folder)\n print(\"Created directory for generated code.\")\n except FileExistsError:\n pass\n\n with open(os.path.join(folder, \"network.cpp\"), \"w\") as f:\n f.write(self.network_code)\n\n with open(os.path.join(folder, \"network.h\"), \"w\") as f:\n f.write(self.network_header)\n\n # with open(os.path.join(folder, \"network_initialization.cpp\"), \"w\") as f:\n # f.write(self.constructor_code)\n #\n # with open(os.path.join(folder, \"network_initialization.h\"), \"w\") as f:\n # f.write(self.buffer_declaration)\n #\n # with open(os.path.join(folder, \"network_cleanup.cpp\"), \"w\") as f:\n # f.write(self.destructor_code)\n #\n # with open(os.path.join(folder, \"network_cleanup.h\"), \"w\") as f:\n # f.write(self.cleanup_header)\n\n with open(os.path.join(folder, \"network.weights.bin\"), \"wb\") as f:\n for packed_struct in self.packed_file:\n f.write(packed_struct)\n\n with open(os.path.join(folder, \"Makefile\"), \"w\") as f:\n f.write(self.makefile)\n\n with open(os.path.join(folder, \"dummy_input.cpp\"), \"w\") as f:\n f.write(self.dummy_input)\n\n with open(os.path.join(folder, \"reference_input.cpp\"), \"w\") as f:\n f.write(self.reference_input)"
] |
[
"0.6645376",
"0.616794",
"0.6127513",
"0.6118077",
"0.6082668",
"0.6051448",
"0.60008365",
"0.5980834",
"0.59451413",
"0.5944806",
"0.5940215",
"0.5940215",
"0.5868026",
"0.58156615",
"0.5809229",
"0.58066696",
"0.58032924",
"0.58025366",
"0.5781701",
"0.57721895",
"0.5767009",
"0.5735629",
"0.57245123",
"0.57216924",
"0.5696966",
"0.56908333",
"0.56837565",
"0.5671408",
"0.56450427",
"0.56179583"
] |
0.8185345
|
0
|
Emit an Operation at the current position. Sets result register if not set already.
|
def emit(self, op):
assert self._curblock, "Builder is not positioned!"
if op.result is None:
op.result = self.func.temp()
if self._lastop == 'head' and self._curblock.ops.head:
op.insert_before(self._curblock.ops.head)
elif self._lastop in ('head', 'tail'):
self._curblock.append(op)
else:
op.insert_after(self._lastop)
self._lastop = op
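
A self-contained sketch of the insertion discipline emit relies on. The Block/Op/Builder classes below are hypothetical simplifications, not the project's real IR types; they only illustrate the three cases the method distinguishes (empty block, cursor at 'head' with existing ops, cursor after a previously emitted op).

# Editor's sketch (hypothetical minimal classes): how an emit-style builder decides
# between insert-at-head, append, and insert-after-the-last-emitted-op.
class Op:
    def __init__(self, name):
        self.name = name

class Block:
    def __init__(self):
        self.ops = []                                 # ordered op list; ops[0] plays the role of ops.head
    def insert(self, op, after=None, at_head=False):
        if at_head and self.ops:
            self.ops.insert(0, op)                    # 'head' cursor with existing ops
        elif after is None:
            self.ops.append(op)                       # empty block or 'tail' cursor
        else:
            self.ops.insert(self.ops.index(after) + 1, op)   # after the last emitted op

class Builder:
    def __init__(self, block):
        self.block, self.lastop = block, 'head'
    def emit(self, op):
        if self.lastop in ('head', 'tail'):
            self.block.insert(op, at_head=(self.lastop == 'head'))
        else:
            self.block.insert(op, after=self.lastop)
        self.lastop = op                              # subsequent emits chain after this op

b = Builder(Block())
b.emit(Op('a')); b.emit(Op('b'))                      # ops end up in order: a, b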
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def emit(self, instruction):\n return self.builder.emit(instruction)",
"def output_op(self, op):\n self.output['text'] += ' ' + op + ' ' \n self.seen_op = True",
"def next_operation(self):\n raise NotImplementedError",
"def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1 + n2}')\n self._cursor += 4\n return",
"def add_operation(self):\n n1 = self.memory[self.memory[self._cursor + 1]]\n n2 = self.memory[self.memory[self._cursor + 2]]\n position = self.memory[self._cursor + 3]\n self.memory[position] = n1 + n2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1} + {n2} = {n1 + n2}')\n return",
"def incr_operand(self):\n pass",
"def register_operation(self, name, result, args, kwargs):\r\n if not isinstance(result, autodiff.tensor.Tensor):\r\n result = autodiff.tensor.Tensor(result, graph=self)\r\n args = [x if isinstance(x, autodiff.tensor.Tensor) \r\n else autodiff.tensor.Tensor(x, graph=self) for x in args]\r\n self.operation_map[result.id] = Operation(name, result, args, kwargs)",
"def apply(self) -> Operation:\n op = self.popleft()\n op()\n return op",
"def __call__(self, node, operations, last_operation):\n if last_operation == NO_OPERATION:\n return 0\n return 1",
"def addOp(self, op):\n self.operations << op",
"def mark_cur_op_complete(self, cur_op: Callable) -> None:\n # torch.nn.Module __setattr__ has overhead,\n # this code is the explicit fast path for `self.idx += 1`\n object.__setattr__(self, 'idx', self.idx + 1)",
"def trans_op_op(self, data):\n\n return self.trans_op(self.op(data))",
"def __call__(self):\r\n new_node = Op.__call__(self)\r\n return new_node",
"def __call__(self):\n new_node = Op.__call__(self)\n return new_node",
"def advance(self):\n if self.instr is not None:\n self.simulator.registers[int(self.instr.binary[20:25], 2)][1] = self.instr.result",
"def _UnaryOp(self, t):\n self.write(\"(\")\n self.write(self.unop[t.op.__class__.__name__])\n self.dispatch(t.operand)\n self.write(\")\")",
"def result(self, state, action):\n\n\t\t#Adds new number to the state\n\t\treturn state + action",
"def read_operation(self, opcode: int) -> int:\n\n if self.insight:\n self.insight.operation(opcode)\n\n return opcode & 0xF000",
"def instruction_out(self, value):\n if Vm.is_register(value):\n value = self.get_register(value)\n sys.stdout.write(chr(value))",
"def _call_shift_action(self, context):\n debug = self.debug\n token = context.token\n sem_action = token.symbol.action\n\n if self.build_tree:\n # call action for building tree node if tree building is enabled\n if debug:\n h_print(\"Building terminal node\",\n \"'{}'.\".format(token.symbol.name), level=2)\n\n # If both build_tree and call_actions_during_build are set to\n # True, semantic actions will be call but their result will be\n # discarded. For more info check following issue:\n # https://github.com/igordejanovic/parglare/issues/44\n if self.call_actions_during_tree_build and sem_action:\n sem_action(context, token.value, *token.additional_data)\n\n return NodeTerm(context, token)\n\n if sem_action:\n result = sem_action(context, token.value, *token.additional_data)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \"for '{}'. \"\n \"Result is matched string.\".format(token.symbol.name),\n level=1)\n result = token.value\n\n if debug:\n h_print(\"Action result = \",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n return result",
"def emit(self, *args):\n return _ida_hexrays.codegen_t_emit(self, *args)",
"def advance(self):\n\n if self.instr is not None:\n #calculate the offset of the lw and sw instructions\n if opcode_decode[self.instr.opcode] == 'Load':\n self.instr.source1RegValue = self.instr.source1RegValue + int(self.instr.imm)\n else:\n self.instr.result = eval(\n \"%d %s %d\" % (\n self.instr.source1RegValue, self.simulator.operations[self.instr.operation],\n self.instr.source2RegValue))",
"def do_operation(self):\n operation = self.inputs['operation']\n res = self.entity.do_operation(self.context, **self.inputs)\n if res:\n return self.RES_OK, \"Node operation '%s' succeeded.\" % operation\n else:\n return self.RES_ERROR, \"Node operation '%s' failed.\" % operation",
"def op(self) -> Node:\n return self._step_execution_context.op",
"def addop(self, mask, target, args):\n\n self.set_user(args)\n yield \"Added operator.\"",
"def mult_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 * arg2\n print(f'Cursor: {self._cursor}\\tAssigning position {arg3} with value {arg1 * arg2}')\n self._cursor += 4\n return",
"def to_op(self):\n raise NotImplementedError",
"def op(self):\n return self.__op",
"def op(self):\n return self.__op",
"def operate(self):\n n_dispatch = 0\n\n for i, instruction in enumerate(self.current_buffer):\n if n_dispatch == self.width:\n break\n if not instruction.can_dispatch():\n continue\n exist = False\n for cap in inspect.getmro(type(instruction)):\n units = self.execution_units[cap]\n exist = exist or units\n\n idle = {u for u in units if not u.full()}\n if idle:\n unit = next(iter(idle))\n unit.feed(instruction)\n del self.future_buffer[i - n_dispatch]\n n_dispatch += 1\n break\n assert exist, 'Instruction %r has no ExecutionUnit' % instruction"
] |
[
"0.642982",
"0.62555254",
"0.6210357",
"0.6160179",
"0.60740376",
"0.5878905",
"0.5864841",
"0.58310556",
"0.5782979",
"0.5778094",
"0.57313156",
"0.57203454",
"0.5717315",
"0.5639052",
"0.56168175",
"0.5579971",
"0.54953563",
"0.54846054",
"0.54685915",
"0.54667056",
"0.5422695",
"0.5408087",
"0.5401052",
"0.539692",
"0.5387578",
"0.53608185",
"0.53552514",
"0.5304553",
"0.5304553",
"0.53035676"
] |
0.7211463
|
0
|
Position the builder at the beginning of the given block.
|
def position_at_beginning(self, block):
self._curblock = block
self._lastop = 'head'
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def position_before(self, op):\n if isinstance(op, FuncArg):\n raise error.PositioningError(\n \"Cannot place builder before function argument\")\n self._curblock = op.block\n self._lastop = op._prev",
"def begin():\n return BeginBlock()",
"def createFirstBlock(self):\n firstBlock = Block(0, self.__currentTransactionsList, 0, '00')\n self.__chain.append(firstBlock)",
"def _initBlock(o,block):\n o.block = block.clone().shift(*o.board.startPosition)",
"def start_at(self) -> global___Statement.Declaration:",
"def genesis_block(self):\n block = Block(target=self.target, transactions=[])\n self.current_transactions.append(block)",
"def _set_block(self, pos, block_):\n raise NotImplementedError",
"def create_genesis(self):\n return Block(0, 0, b'0', b'0', b'')",
"def create_genesis_block(self):\n genesis_block = Block(0, [], 0, \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)",
"def create_genesis_block(self):\n genesis_block = Block(0, [], 0, \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)",
"def create_genesis_block(self):\r\n genesis_block = Block(0, [], time.time(), \"0\")\r\n genesis_block.hash = genesis_block.compute_hash()\r\n self.chain.append(genesis_block)",
"def create_genesis_block(self):\n genesis_block = Block(0, [], time.time(), \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)",
"def set_start_position(self) -> None:\n self.cozmo.set_head_angle(degrees(0)).wait_for_completed()\n self.cozmo.set_lift_height(0.0).wait_for_completed()",
"def FrameStart(builder):\n return Start(builder)",
"def create_genesis_block(self):\n index = 0\n transactions = []\n timestamp = 0.0\n previous_hash = \"0\"*64\n block = Block(index=index, transactions=transactions, timestamp=timestamp,previous_hash=previous_hash)\n block.hash = block.compute_hash()\n self.chain.append(block)",
"def reset_position(self):\n self.goto(STARTING_POSITION)",
"def start(self) -> global___Pos:",
"def move_to_line_start(self) -> None:\n self.index = self.buffer.get_line_start(self.index)",
"def _set_block(self, pos, block_):\n self._changes[deepcopy(pos)] = block",
"def go_to_start(self):\n self.go_to(0)",
"def make_genesis_block():\n block = Block(index=0,\n timestamp=datetime.now(),\n data=\"Genesis Block\",\n previous_hash=\"0\")\n return block",
"def begining_of_line():\r\n set_point(point().begining_of_line())",
"async def add_block(\n self,\n position: typing.Tuple[int, int, int],\n block_name: typing.Union[str, typing.Any],\n immediate=True,\n block_update=True,\n block_update_self=True,\n lazy_setup: typing.Callable[[typing.Any], None] = None,\n check_build_range=True,\n block_state=None,\n network_sync=True,\n ) -> typing.Optional[typing.Any]:\n raise NotImplementedError",
"def mark_position(self, node):\n if self.block:\n self.block.positions.add(node.pos[:2])",
"def begin(self):\n pass",
"def penblock(self, block):\n self.block = block",
"def nextblock(self, parent=None, **kwargs):\n block = self.newblock(parent, **kwargs)\n if not parent and self.block:\n self.block.add_child(block)\n\n self.block = block\n return block",
"def GachaCraftNodeExcelStart(builder):\n return Start(builder)",
"def seed(cls, block: Expr) -> Expr:\n return cls(BlockField.block_seed, block)",
"def begin(self):\r\n self.queue.append((self.start, 0.0))\r\n self.cost_to_pos[self.start] = 0\r\n self.loop()"
] |
[
"0.6780588",
"0.657286",
"0.65366715",
"0.6378802",
"0.6168305",
"0.59907305",
"0.5931497",
"0.5873343",
"0.5858289",
"0.5858289",
"0.5829275",
"0.5818509",
"0.5761768",
"0.5753733",
"0.57440203",
"0.5732442",
"0.57301",
"0.5676104",
"0.5670693",
"0.56525517",
"0.5638855",
"0.5631642",
"0.5618851",
"0.56011164",
"0.559925",
"0.5552659",
"0.55459374",
"0.55230516",
"0.5517911",
"0.55116844"
] |
0.80572224
|
0
|
Position the builder at the end of the given block.
|
def position_at_end(self, block):
self._curblock = block
self._lastop = block.tail or 'tail'
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def position_after(self, op):\n if isinstance(op, FuncArg):\n self.position_at_beginning(op.parent.startblock)\n else:\n self._curblock = op.block\n self._lastop = op",
"def end():\n return EndBlock()",
"def RespEnd(builder):\n return End(builder)",
"def GachaCraftNodeExcelEnd(builder):\n return End(builder)",
"def move_block():\n animate(block, 'bounce_end', duration=1, pos=next(block_positions))",
"def exit_block(self, parent=None, **kwargs):\n block = self.newblock(parent, have_code=False, is_exit=True, **kwargs)\n self.blocks.pop()\n return block",
"def local_chain_blocks_after(self, block_height):\n return self.client.call('POST', 'local/chain/blocks-after', payload={\n 'height': block_height\n })",
"def InvocationEnd(builder):\n return End(builder)",
"def commit_block(self, block):\n raise NotImplementedError('commit_block: Implementation of this method is required.')",
"def end(self):\n self.set_initial_offset(1e6)",
"def FrameEnd(builder):\n return End(builder)",
"def finalize_block_construction(self, pyomo_block):\n pass",
"def _finish_element(self):\n assert self.currentelem.indexend is True\n self.currentelem.indexend = self._parser.CurrentByteIndex + self.baseposition\n self.currentelem = self.currentelem.parent",
"def move_cursors_to_end(self, target_block):\n self.a_cursor = target_block.a_end\n self.b_cursor = target_block.b_end",
"def _set_block(self, pos, block_):\n raise NotImplementedError",
"def CommitBlock(self, block):\n QUEUE.put(Block(block=block))",
"def block(self, block):\n\n self._block = block",
"def block(self, block):\n\n self._block = block",
"def nextblock(self, parent=None, **kwargs):\n block = self.newblock(parent, **kwargs)\n if not parent and self.block:\n self.block.add_child(block)\n\n self.block = block\n return block",
"def position_at_beginning(self, block):\n self._curblock = block\n self._lastop = 'head'",
"def moveBlock(self, block: ghidra.program.model.mem.MemoryBlock, newStartAddr: ghidra.program.model.address.Address, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...",
"def GroundExcelEnd(builder):\n return End(builder)",
"def put_block(self):\n self.blocks[self.editor_cursor_position[1]][\n self.editor_cursor_position[0]] = self.available_block_types[self.current_block_type]",
"def test_block_end_injection(self):\n intkey_batch1 = create_batch(self.signer, [('inc', 'abcd', 60)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 70)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 80)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batch is at the end of the block\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'block_info')",
"def last_block(self):\n return self.chain[-1]",
"def last_block(self):\n return self.chain[-1]",
"def PricingEnd(builder):\n return End(builder)",
"def bottom(self, bottom):\n self.ptr.bottom(bottom)",
"def _set_block(self, pos, block_):\n self._changes[deepcopy(pos)] = block",
"def do_bottom(self, arg):\n if self.curindex + 1 == len(self.stack):\n self.error('Newest frame')\n return\n self._select_frame(len(self.stack) - 1)"
] |
[
"0.68136996",
"0.67185444",
"0.6190995",
"0.61247826",
"0.60937697",
"0.6060488",
"0.60131776",
"0.59254843",
"0.5914432",
"0.59005034",
"0.58558226",
"0.5840668",
"0.5792603",
"0.5767451",
"0.5759115",
"0.57441527",
"0.5667821",
"0.5667821",
"0.5660413",
"0.5648996",
"0.563632",
"0.55990154",
"0.55894494",
"0.5500972",
"0.54235363",
"0.54235363",
"0.5411085",
"0.5394596",
"0.53907496",
"0.537119"
] |
0.75534904
|
0
|
Position the builder before the given op.
|
def position_before(self, op):
if isinstance(op, FuncArg):
raise error.PositioningError(
"Cannot place builder before function argument")
self._curblock = op.block
self._lastop = op._prev
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def position_after(self, op):\n if isinstance(op, FuncArg):\n self.position_at_beginning(op.parent.startblock)\n else:\n self._curblock = op.block\n self._lastop = op",
"def emit(self, op):\n assert self._curblock, \"Builder is not positioned!\"\n\n if op.result is None:\n op.result = self.func.temp()\n\n if self._lastop == 'head' and self._curblock.ops.head:\n op.insert_before(self._curblock.ops.head)\n elif self._lastop in ('head', 'tail'):\n self._curblock.append(op)\n else:\n op.insert_after(self._lastop)\n self._lastop = op",
"def _insert_op(self, op):",
"def push(self, op):\n self.top += 1\n self.stack.append(op)",
"def insert_before(self, insert_pos_inst):\n basic_block = insert_pos_inst.basic_block\n if basic_block is None:\n raise IRError('Instruction is not in basic block')\n idx = basic_block.insts.index(insert_pos_inst)\n self.basic_block = basic_block\n basic_block.insts.insert(idx, self)",
"def position_at_beginning(self, block):\n self._curblock = block\n self._lastop = 'head'",
"def before(self, location=None):\n side_effect = SideEffect(self.f, location)\n dataflow = inject_before(self.container, side_effect, location)\n return self._construct(dataflow)",
"def before(self, p):\n node = self._validate(p)\n return self._make_position(node._prev)",
"def add_before ( self ):\n self.add_item( 0 )",
"def precmd(self, statement):\n return statement",
"def before(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.Before)\n newq.setValue(value)\n return newq",
"def insertBefore(self, otherinstr):\n # pylint: disable=protected-access\n assert isinstance(otherinstr, ICode)\n if self.__prev is None:\n self.__prev = otherinstr\n otherinstr.__next = self\n otherinstr.owner = self.owner\n self.owner._firstInstr = otherinstr\n else:\n otherinstr.__next = self\n otherinstr.owner = self.owner\n otherinstr.__prev = self.__prev\n self.__prev = otherinstr\n otherinstr.__prev.__next = otherinstr",
"def _move_head(self, cmd):\n self.move_head(cmd.data)",
"def _log_prepend(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart = p.get_start_iter()\n\t\tp.insert(start, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_start_iter(), 0.0)",
"def add_before(self, p, e):\n original = self._validate(p)\n return self._insert_between(e, original._prev, original)",
"def getInstructionBefore(self, instruction: ghidra.program.model.listing.Instruction) -> ghidra.program.model.listing.Instruction:\n ...",
"def pre_build(cls, ops, signals, rng):\n\n logger.debug(\"===================\")\n logger.debug(\"PRE BUILD %s\", ops)\n logger.debug(\"sets %s\", [op.sets for op in ops])\n logger.debug(\"incs %s\", [op.incs for op in ops])\n logger.debug(\"reads %s\", [op.reads for op in ops])\n logger.debug(\"updates %s\", [op.updates for op in ops])\n\n if type(ops[0]) not in cls.builders:\n raise BuildError(\"No registered builder for operators of type %r\" %\n type(ops[0]))\n\n BuildClass = cls.builders[type(ops[0])]\n\n kwargs = {}\n if BuildClass.pass_rng:\n kwargs[\"rng\"] = rng\n\n cls.op_builds[ops] = BuildClass(ops, signals, **kwargs)",
"def setOp(self, op):\n self.__op = op",
"def setOp(self, op):\n self.__op = op",
"def DocumentElementInsertBefore(self):\n raise NotImplementedError()",
"def at_pre_cmd(self):\n pass",
"def _set_pre_op_offset(self, spec: DTensorSpec) -> None:\n dtensor_shape = spec.shape\n mesh = spec.mesh\n dim_map = spec.dim_map\n\n # Compute shard coordinate:\n # The coordinate on each tensor dim is a tuple (idx, range)\n # If a DTensor is partitioned on its dim i into n shards, and the current rank\n # holds the j-th, then its shard coordinate will be (idx=j, range=n) on dim i\n coordinate = mesh.get_coordinate()\n assert coordinate is not None\n shard_coord = [\n coordinate[mesh_dim] if mesh_dim >= 0 else 0 for mesh_dim in dim_map\n ]\n shard_size = [\n mesh.size(mesh_dim) if mesh_dim >= 0 else 1 for mesh_dim in dim_map\n ]\n\n # compute shard linear index\n shard_linear_idx = self._calc_shard_linear_idx(shard_coord, shard_size)\n\n # compute starting offset using the first shard's size\n local_size_on_rank_0 = list(dtensor_shape)\n for idx, placement in enumerate(spec.placements):\n if isinstance(placement, Shard):\n mesh_dim_size = mesh.size(idx)\n shard_dim = placement.dim\n local_size_on_rank_0[shard_dim] = placement._local_shard_size_on_dim(\n dtensor_shape[shard_dim],\n mesh_dim_size,\n 0,\n return_offset=False,\n )[0]\n\n from torch.distributed._tensor.ops.utils import prod\n\n local_size = prod(local_size_on_rank_0)\n\n # get current RNG offset\n current_offset = self.get_offset(\"parallel-rng\")\n\n # pytorch: offset must be multiple of 4\n # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp\n offset_incr = (shard_linear_idx * local_size + 3) // 4 * 4\n self.set_offset(\"parallel-rng\", current_offset + offset_incr)",
"def addOp(self, op):\n self.operations << op",
"def _insert_action_before(self, idx, name, value):\n i = list(self.pipeline.index).index(idx)\n part1 = self.pipeline[0:i]\n new_item = pd.Series([value], index=[name])\n part2 = self.pipeline[i:]\n self.pipeline = pd.concat([part1, new_item, part2])",
"def precmd(self, line):\n return cmd.Cmd.precmd(self, line)",
"def add_op(self, op):\n self._operations.append(op)",
"def on_increased_position(self, order) -> None:\n pass",
"def set_program_start(op):\n replace_platform_variable(\"start\", op)",
"def _insert_paragraph_before(self):\n p = self._p.add_p_before()\n return Paragraph(p, self._parent)",
"def crunch_entry_push(self, op):\n lst = self.want_label(op)\n if not lst:\n return\n ii = lst[0] + 1\n jj = ii\n stack_decrement = 0\n stack_save_decrement = 0\n reinstated_lines = []\n while True:\n current_line = self.__content[jj]\n match = re.match(r'\\s*(push\\S).*%(\\S+)', current_line, re.IGNORECASE)\n if match:\n if is_stack_save_register(match.group(2)):\n stack_save_decrement += get_push_size(match.group(1))\n else:\n stack_decrement += get_push_size(match.group(1))\n jj += 1\n continue;\n # Preserve comment lines as they are.\n match = re.match(r'^\\s*[#;].*', current_line, re.IGNORECASE)\n if match:\n reinstated_lines += [current_line]\n jj += 1\n continue\n # Saving stack pointer or sometimes initializing edx seem to be within pushing.\n match = re.match(r'\\s*mov.*,\\s*%(rbp|ebp|edx).*', current_line, re.IGNORECASE)\n if match:\n if is_stack_save_register(match.group(1)):\n stack_save_decrement = 0\n reinstated_lines += [current_line]\n jj += 1\n continue;\n # xor (zeroing) seems to be inserted in the 'middle' of pushing.\n match = re.match(r'\\s*xor.*\\s+%(\\S+)\\s?,.*', current_line, re.IGNORECASE)\n if match:\n reinstated_lines += [current_line]\n jj += 1\n continue\n match = re.match(r'\\s*sub.*\\s+[^\\d]*(\\d+),\\s*%(rsp|esp)', current_line, re.IGNORECASE)\n if match:\n total_decrement = int(match.group(1)) + stack_decrement + stack_save_decrement\n self.__content[jj] = re.sub(r'\\d+', str(total_decrement), current_line)\n break\n if is_verbose():\n print(\"Erasing function header from '%s': %i lines\" % (op, jj - ii - len(reinstated_lines)))\n self.erase(ii, jj)\n self.__content[ii:ii] = reinstated_lines"
] |
[
"0.6777482",
"0.6292546",
"0.6218918",
"0.6052035",
"0.59945905",
"0.5801662",
"0.56987685",
"0.5685576",
"0.5647879",
"0.54777586",
"0.53614604",
"0.53515476",
"0.5339638",
"0.53279823",
"0.5269136",
"0.5266044",
"0.5263959",
"0.5225988",
"0.5225988",
"0.52121824",
"0.52094823",
"0.5206961",
"0.5201782",
"0.5193412",
"0.5157604",
"0.51455456",
"0.5143074",
"0.50846183",
"0.50545394",
"0.5053273"
] |
0.87499154
|
0
|
Position the builder after the given op.
|
def position_after(self, op):
if isinstance(op, FuncArg):
self.position_at_beginning(op.parent.startblock)
else:
self._curblock = op.block
self._lastop = op
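
Taken together, the four positioning methods only move the (_curblock, _lastop) cursor; emit then interprets that cursor. A hedged, self-contained sketch of that cursor semantics follows, using a plain list as the block and omitting the FuncArg special cases; it mirrors the behaviour rather than the real API.

# Editor's sketch (hypothetical cursor model, not the real Builder API).
class CursorBuilder:
    def __init__(self):
        self.curblock, self.lastop = None, None
    def position_at_beginning(self, block):
        self.curblock, self.lastop = block, 'head'                    # next emit lands before the first op
    def position_at_end(self, block):
        self.curblock, self.lastop = block, block[-1] if block else 'tail'
    def position_before(self, op, block):
        idx = block.index(op)
        self.curblock, self.lastop = block, block[idx - 1] if idx else 'head'
    def position_after(self, op, block):
        self.curblock, self.lastop = block, op                        # next emit is inserted right after op

block = ['op1', 'op2', 'op3']
b = CursorBuilder()
b.position_before('op1', block)
assert b.lastop == 'head'                                             # before the first op == head sentinel
b.position_after('op2', block)
assert b.lastop == 'op2'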
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def position_before(self, op):\n if isinstance(op, FuncArg):\n raise error.PositioningError(\n \"Cannot place builder before function argument\")\n self._curblock = op.block\n self._lastop = op._prev",
"def emit(self, op):\n assert self._curblock, \"Builder is not positioned!\"\n\n if op.result is None:\n op.result = self.func.temp()\n\n if self._lastop == 'head' and self._curblock.ops.head:\n op.insert_before(self._curblock.ops.head)\n elif self._lastop in ('head', 'tail'):\n self._curblock.append(op)\n else:\n op.insert_after(self._lastop)\n self._lastop = op",
"def push(self, op):\n self.top += 1\n self.stack.append(op)",
"def _insert_op(self, op):",
"def replaceTerminator(self, op):\n self._children[0].replaceTerminator(op)",
"def addOp(self, op):\n self.operations << op",
"def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1 + n2}')\n self._cursor += 4\n return",
"def add_op(self, op):\n self._operations.append(op)",
"def insert_after(self, insert_pos_inst):\n basic_block = insert_pos_inst.basic_block\n if basic_block is None:\n raise IRError('Instruction is not in basic block')\n idx = basic_block.insts.index(insert_pos_inst)\n self.basic_block = basic_block\n basic_block.insts.insert(idx + 1, self)",
"def output_op(self, op):\n self.output['text'] += ' ' + op + ' ' \n self.seen_op = True",
"def operationAt(self, op, n, at):\n self._changed = True\n\n bpTokenStart = 0 # in bp (base pairs)\n # iToken and tokenLength are used ouside of loop\n for iStartToken,tokenLength in enumerate(t[0] for t in self._tokens):\n if bpTokenStart + tokenLength > at: break\n bpTokenStart += tokenLength\n rem = (n, op)\n opAt = at - bpTokenStart\n\n out = self._tokens[0:iStartToken]\n for i in range(iStartToken, len(self._tokens)):\n t, rem = CIGAR._mutateToken(self._tokens[i], opAt, rem)\n # Replace the current token with the output t\n out.extend(t)\n if rem == (): \n # We're done applying the operation to the CIGAR string\n out.extend(self._tokens[i+1:])\n break\n else: \n # Apply remaining operation at start of next token\n opAt = 0 \n\n # If an operation remains after all tokens have been dealt with\n if rem != (): \n if(rem[1] == 'I'):\n out.append(rem)\n else:\n raise ValueError((\"The operation {} at {}bp \"\n +\"exceeds the end of the string (and is no insert)\")\n .format((n, op), at))\n self._tokens = out",
"def setOp(self, op):\n self.__op = op",
"def setOp(self, op):\n self.__op = op",
"def replaceTerminator(self, op):\n if not (op in (',', ';')):\n raise RuntimeError(\"invalid replacement terminator for GlslBlockStatement: '%s'\" % (op))\n self.__terminator = op",
"def apply(self) -> Operation:\n op = self.popleft()\n op()\n return op",
"def RespEnd(builder):\n return End(builder)",
"def add_operation(self, op):\n\n self.operations[op.name] = op",
"def mark_cur_op_complete(self, cur_op: Callable) -> None:\n # torch.nn.Module __setattr__ has overhead,\n # this code is the explicit fast path for `self.idx += 1`\n object.__setattr__(self, 'idx', self.idx + 1)",
"def run(self, in_op):\n self.move_inner_state(in_op)\n if isinstance(in_op, memops.ReorderBase):\n self.substitute_reorder(in_op)\n elif isinstance(in_op, memops.FlushBase):\n self.flush_stores(in_op)\n elif isinstance(in_op, memops.Store):\n self._ops_list.append(in_op)\n elif isinstance(in_op, memops.Register_file):\n self.reg_file(in_op)\n\n return True",
"def _set_post_op_offset(self, spec: DTensorSpec, old_offset: int) -> None:\n dtensor_shape = spec.shape\n\n from torch.distributed._tensor.ops.utils import prod\n\n numel = prod(dtensor_shape)\n # pytorch: offset must be multiple of 4\n # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp\n numel = (numel + 3) // 4 * 4\n self.set_offset(\"parallel-rng\", old_offset + numel)",
"def add_operation(self):\n n1 = self.memory[self.memory[self._cursor + 1]]\n n2 = self.memory[self.memory[self._cursor + 2]]\n position = self.memory[self._cursor + 3]\n self.memory[position] = n1 + n2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1} + {n2} = {n1 + n2}')\n return",
"def successorBuilder(self, currentState):",
"def after(self, location=None):\n side_effect = SideEffect(self.f, location)\n dataflow = inject_after(self.container, side_effect, location)\n return self._construct(dataflow)",
"def do_bottom(self, arg):\n if self.curindex + 1 == len(self.stack):\n self.error('Newest frame')\n return\n self._select_frame(len(self.stack) - 1)",
"async def bottomify(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"bottom\")",
"def position_at_end(self, block):\n self._curblock = block\n self._lastop = block.tail or 'tail'",
"def move_to_end(self, *args, **kwargs): # real signature unknown\n pass",
"def learn(self, last_op_move):\r\n self.last_op_move = last_op_move",
"def _insert_action_after(self, idx, name, value):\n i = list(self.pipeline.index).index(idx)\n part1 = self.pipeline[0 : i + 1]\n new_item = pd.Series([value], index=[name])\n part2 = self.pipeline[i + 1 :]\n self.pipeline = pd.concat([part1, new_item, part2])",
"def add_OP(self, OP):\n \n if len(self.OPs) == self.size: # matrix is full, check for swaps\n mut_info = []\n existing = []\n for i in range(len(self.OPs)):\n mi, label = self.mut.distance(self.OPs[i], OP)\n mut_info.append(mi)\n product = 1\n for j in range(len(self.OPs)):\n if not i == j:\n product = product * self.matrix[i][j]\n existing.append(product)\n update = False\n difference = None\n for i in range(len(self.OPs)):\n candidate_info = 1\n for j in range(len(self.OPs)):\n if not i == j:\n candidate_info = candidate_info * mut_info[j]\n if candidate_info > existing[i]:\n update = True\n if difference == None:\n difference = candidate_info - existing[i]\n old_OP = i\n else:\n if (candidate_info - existing[i]) > difference:\n difference = candidate_info - existing[i]\n old_OP = i\n if update == True: # swapping out an OP\n mi, label = self.mut.distance(OP, OP)\n mut_info[old_OP] = mi\n self.matrix[old_OP] = mut_info\n self.OPs[old_OP] = OP\n for i in range(len(self.OPs)):\n self.matrix[i][old_OP] = mut_info[i]\n else: # adding an OP when there are fewer than self.size\n distances = []\n for i in range(len(self.OPs)):\n mi,label = self.mut.distance(OP, self.OPs[i])\n distances.append(mi)\n for i in range(len(self.OPs)):\n mut_info = distances[i]\n self.matrix[i].append(mut_info)\n self.matrix[len(self.OPs)].append(mut_info)\n mi, label = self.mut.distance(OP, OP)\n #mi = dask.compute(mi)\n self.matrix[len(self.OPs)].append(mi)\n self.OPs.append(OP)"
] |
[
"0.718077",
"0.6929871",
"0.5845374",
"0.57306945",
"0.56287616",
"0.54109967",
"0.53804183",
"0.5341464",
"0.5304844",
"0.52726626",
"0.5257731",
"0.5215819",
"0.5215819",
"0.517075",
"0.5169561",
"0.51237255",
"0.51043963",
"0.5063665",
"0.50462085",
"0.5042502",
"0.50140345",
"0.5012719",
"0.49987173",
"0.49986064",
"0.49798164",
"0.49739727",
"0.49560505",
"0.4951606",
"0.49420643",
"0.4922072"
] |
0.8053975
|
0
|
Propagate an exception. If `exc` is not given it will be loaded to match in 'except' clauses.
|
def gen_error_propagation(self, exc=None):
assert self._curblock
block = self._curblock
exc_setup = findop(block.leaders, 'exc_setup')
if exc_setup:
exc = exc or self.load_tl_exc(types.Exception)
self._find_handler(exc, exc_setup)
else:
self.gen_ret_undef()
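
A hedged sketch of the control-flow decision this method makes: if the enclosing block has an 'exc_setup' leader, the exception is (lazily loaded and) dispatched to that handler, otherwise the builder falls back to returning an undefined value. The findop helper and the tuple-shaped ops below are simplified stand-ins, not the real IR helpers.

# Editor's sketch (simplified stand-ins for findop/leaders, not the real IR).
def findop(ops, opcode):
    # return the first op whose opcode matches, else None
    return next((op for op in ops if op[0] == opcode), None)

def propagate_or_bail(block_leaders, exc=None):
    exc_setup = findop(block_leaders, 'exc_setup')
    if exc_setup:
        exc = exc or ('load_tl_exc', 'Exception')     # lazily load the thread-local exception
        return ('dispatch_to_handler', exc, exc_setup)
    return ('ret_undef',)                             # no handler: return an undefined value

print(propagate_or_bail([('exc_setup', 'handler_block')]))   # dispatches to the handler
print(propagate_or_bail([]))                                  # ('ret_undef',)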
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _propagate_exc(self):\n catch_op = self._find_handler()\n if catch_op:\n # Exception caught! Transfer control to block\n catch_block = catch_op.parent\n self.pc = self.blockstarts[catch_block.name]\n else:\n # No exception handler!\n raise UncaughtException(self.exception)",
"def func_on_exception(*args, **keys):\n try:\n yield\n except Exception as exc:\n reraise = func(*args + (\":\", str(exc)), **keys)\n if not CRDS_EXCEPTION_TRAP:\n # In python-2, distinction between raise and \"raise something\". raise doesn't\n # wreck the traceback, raising a new improved exception does.\n raise\n # Augmented, the traceback is trashed from here down but the message is better when caught higher up.\n elif reraise:\n exc_class = keys.pop(\"exception_class\", exc.__class__)\n keys[\"end\"] = \"\"\n raise exc_class(format(*args + (\":\", str(exc)), **keys)) from exc\n else:\n pass # snuff the exception, func() probably issued a log message.",
"def raise_exc(self, exctype):\n\t\t_async_raise(self._get_my_tid(), exctype)",
"def raise_exc(self, exctype):\n _async_raise(self._get_my_tid(), exctype)",
"def raise_exc(self, exctype):\n _async_raise(self._get_my_tid(), exctype)",
"def _raise(self, exc):\n cb = _format_callback_source(self._callback, self._args)\n msg = 'Exception in callback {}'.format(cb)\n context = {\n 'message': msg,\n 'exception': exc,\n 'handle': self,\n }\n if self._source_traceback:\n context['source_traceback'] = self._source_traceback\n self._loop.call_exception_handler(context)",
"def except__else(self, exception: BaseException) -> typing.Any:\n raise exception",
"def user_exception(self, frame, exc_tuple):\r\n frame.f_locals['__exc_tuple__'] = exc_tuple\r\n\r\n if not self._wait_for_mainpyfile:\r\n self.interaction(frame, exc_tuple)",
"def _check_exc(self):\n if self._exc is not None:\n raise self._exc",
"def process_exception(self, request, exc):\n return None",
"def visit_ExceptHandler(self, node):\r\n current_context = self.context_stack[-1]\r\n if self.use_exceptions:\r\n current_context.increment_decision_points()\r\n \r\n ast.NodeVisitor.generic_visit(self, node)",
"def exception(self, *args, **kwargs):",
"def set_error_from_exc(self, exc: Exception, code: Optional[int] = ERR_UNKNOWN) -> None:\n self.set_error(code=code, text=str(exc))",
"def _exception_dispatcher(self, e):\n # TODO Currently not doing anything\n raise e",
"def exc_handler(self, exc_type, exc, *args) -> None:\n self.exception = exc\n self.exit_code = 1",
"def convert_error(exc_src, exc_dest):\n\n def wrap(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except exc_dest:\n raise\n except exc_src as err:\n reraise(exc_dest, err, sys.exc_info()[2])\n\n return wrapper\n\n return wrap",
"def raising(*exc_types):\n\n ASSERT.all(exc_types, lambda type_: issubclass(type_, Exception))\n\n def decorate(cls_or_func):\n md = get_interface_metadata(cls_or_func)\n if md:\n md = Metadata(raising=md.raising + exc_types)\n else:\n md = Metadata(raising=exc_types)\n set_interface_metadata(cls_or_func, md)\n return cls_or_func\n\n return decorate",
"def exception_hook(type, value, traceback):\n sys.__excepthook__(type, value, traceback)",
"def handle_exception(self, channel: Channel, session: Session, msg: Message, # pylint: disable=W0613\n exc: Exception) -> None:\n self.outcome = Outcome.ERROR\n self.details = exc",
"def exception_hook(exc_type, exc_value, exc_traceback) -> None:\n log.error(\n \"exception\",\n exception_type=exc_type.__name__,\n exc_info=(exc_type, exc_value, exc_traceback),\n )",
"def exception_handler(self, exception):\n pass",
"def __raise_clean_exception(exc_type, exc_value, exc_traceback):\n if exc_type.__name__ not in dir(napalm.exceptions) and \\\n exc_type.__name__ not in __builtins__.keys():\n epilog = (\"NAPALM didn't catch this exception. Please, fill a bugfix on \"\n \"https://github.com/napalm-automation/napalm/issues\\n\"\n \"Don't forget to include this traceback.\")\n print(epilog)\n raise exc_type, exc_value, exc_traceback",
"def _on_exception(self, exception):\n pass",
"def on_exception(self):\n\n def decorator(coro):\n self._hooks.append((\"exception\", coro))\n return coro\n\n return decorator",
"def exception_handler(exc, context):\n if isinstance(exc, NotFoundException):\n exc = exceptions.NotFound()\n elif isinstance(exc, UnauthorizedException):\n exc = exceptions.PermissionDenied()\n elif isinstance(exc, exceptions.NotAuthenticated):\n exc = NotAuthenticated()\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n data = exc.detail\n else:\n data = {'detail': exc.detail}\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n return None",
"def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw):\n if exc_info is None:\n exc_info = sys.exc_info()\n\n try:\n return _report_exc_info(exc_info, request, extra_data, payload_data, level=level)\n except Exception as e:\n log.exception(\"Exception while reporting exc_info to Rollbar. %r\", e)",
"def user_exception(self, frame, exc_info):\n pass",
"def yield_and_raise(data, exc):\n yield from data\n raise exc",
"def _broken_ep(ep, exc, *args, **kwargs):\n import logging\n logger = logging.getLogger('yatsm')\n logger.critical('Trying to import \"{0.name}\" algorithm entry point '\n 'raised a {1}'.format(ep, exc))\n raise exc",
"def exception_handler(exc, context):\n headers = None\n if isinstance(exc, APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n data = exc.detail\n if type(data) is ErrorDetail:\n data = str(data)\n status_code = exc.status_code\n set_rollback()\n\n elif isinstance(exc, Http404):\n data = \"Not Found\"\n status_code = status.HTTP_404_NOT_FOUND\n set_rollback()\n\n else:\n data = str(exc)\n status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n\n return smart_response(data, status_code=status_code, headers=headers)"
] |
[
"0.6327253",
"0.602761",
"0.5974743",
"0.5897044",
"0.5897044",
"0.58730125",
"0.5812579",
"0.5781652",
"0.5733888",
"0.5635754",
"0.56160176",
"0.5546794",
"0.5500533",
"0.54902285",
"0.54452264",
"0.54216564",
"0.5380202",
"0.53723145",
"0.5351295",
"0.5333911",
"0.5310856",
"0.5308378",
"0.52942544",
"0.5276313",
"0.5274245",
"0.5266063",
"0.5258799",
"0.5248417",
"0.52365726",
"0.5170686"
] |
0.65648335
|
0
|
Generate a return with undefined value
|
def gen_ret_undef(self):
    type = self.func.type.restype
    if type.is_void:
        self.ret(None)
    else:
        self.ret(Undef(type))
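# --- illustrative standalone sketch, not part of the dataset entry ---
# It assumes nothing about the IR library above; it only mirrors the dispatch in
# gen_ret_undef: a void return type yields a bare return, any other type an
# explicit undefined value of that type. The names _RetType/ret_undef_text are
# hypothetical stand-ins.
class _RetType:
    def __init__(self, name, is_void=False):
        self.name, self.is_void = name, is_void

def ret_undef_text(restype):
    # Textual form of the instruction the helper above would emit.
    return "ret" if restype.is_void else "ret Undef(%s)" % restype.name

assert ret_undef_text(_RetType("void", is_void=True)) == "ret"
assert ret_undef_text(_RetType("int32")) == "ret Undef(int32)"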
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def return_none() -> None:\n pass",
"def get_nothing():\n return \"\" # intentional non-existent variable",
"def null() -> SetupVal:\n return NullVal()",
"def get_none1(self):\n pass",
"def function_with_wrong_return() -> None:\n return 42",
"def get_return_value(self):\n return None #Default value to indicate that no meaningful result was returned",
"def undefined(self, ident, args):\n return \"\"",
"def result(value):\n return None, value, None",
"def test_undefined_as_null_indicator(self):\n self.custom_null_indicator_template('undefined')",
"def none(self):",
"def question_9():\n return None",
"def question_4():\n return None",
"def question_6():\n return None",
"def my_function():\n\n\treturn None",
"def question_5():\n return None",
"def noop():",
"def question_7():\n return None",
"def question_10():\n return None",
"def question_12():\n return None",
"def __reduce__(self):\n return b\"no_value\"",
"def optional():",
"def question_11():\n return None",
"def question_3():\n return None",
"def default():",
"def always_return_foo() -> str:\n return \"foo\"",
"def _get_iterative_null_value():\n return _ITERATIVE_NULL_VALUE",
"def nothing(nothing):\n pass",
"def _nullop(value):\n return value",
"def test_return_nothing(self):\n data = '''\n def func():\n return\n\n a = func()\n '''\n astroid = test_utils.build_module(data)\n call = astroid.body[1].value\n func_vals = call.infered()\n self.assertEqual(len(func_vals), 1)\n self.assertIsInstance(func_vals[0], nodes.Const)\n self.assertIsNone(func_vals[0].value)",
"def __emptygen():\n if False:\n yield"
] |
[
"0.71695924",
"0.702917",
"0.63986486",
"0.62983674",
"0.6256622",
"0.6242176",
"0.62280655",
"0.6171779",
"0.6141852",
"0.61372393",
"0.61182797",
"0.6094714",
"0.6093091",
"0.60747826",
"0.6060189",
"0.6041134",
"0.603262",
"0.59996945",
"0.5962412",
"0.5952938",
"0.5946937",
"0.59137475",
"0.59121597",
"0.5876476",
"0.5866544",
"0.5856665",
"0.58507353",
"0.5835452",
"0.5829171",
"0.5819233"
] |
0.75520056
|
0
|
Split the current block, returning (old_block, new_block)
|
def splitblock(self, name=None, terminate=False):
    # -------------------------------------------------
    # Sanity check
    # Allow splitting only after leaders and before terminator
    # TODO: error check
    # -------------------------------------------------
    # Split
    oldblock = self._curblock
    newblock = self.func.new_block(name or 'block', after=self._curblock)
    op = self._lastop
    # Terminate if requested and not done already
    if terminate and not ops.is_terminator(op):
        op = self.jump(newblock)
    # -------------------------------------------------
    # Move ops after the split to new block
    if op:
        if op == 'head':
            trailing = list(self._curblock.ops)
        elif op == 'tail':
            trailing = []
        else:
            trailing = list(op.block.ops.iter_from(op))[1:]
        for op in trailing:
            op.unlink()
        newblock.extend(trailing)
    # -------------------------------------------------
    # Patch phis
    if terminate:
        self._patch_phis(oldblock.ops, oldblock, newblock)
    else:
        for op in oldblock:
            for use in self.func.uses[op]:
                if use.opcode == 'phi':
                    raise error.CompileError(
                        "Splitting this block would corrupt some phis")
        self._patch_phis(newblock.ops, oldblock, newblock)
    return oldblock, newblock
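# --- illustrative standalone sketch, not part of the dataset entry ---
# The core move in splitblock above, without the IR machinery: every op after
# the split point is removed from the old block and appended to the new one.
# split_op_list and the op names are hypothetical.
def split_op_list(op_names, last_op):
    # op_names: ops in the current block, in order; last_op: last op to keep.
    idx = op_names.index(last_op) + 1
    return op_names[:idx], op_names[idx:]

kept, moved = split_op_list(["load", "add", "store", "jump"], "add")
assert kept == ["load", "add"]
assert moved == ["store", "jump"]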
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def split(self, block: ghidra.program.model.mem.MemoryBlock, addr: ghidra.program.model.address.Address) -> None:\n ...",
"def changed_block(self, old_block, new_block):",
"def test_block_split(self):\n block1 = self.geographies.find({ 'geoid': '150010210051016' }) \n self.assertEqual(block1.count(), 1)\n block1 = block1[0]\n\n split_block_pop = 448 \n block1_land_pct = float(184458) / 587158 # AREALAND_INT / AREALAND_2000\n block1_pop_2000 = int(block1_land_pct * split_block_pop)\n block1_pop_2010 = 22 \n block1_pop_delta = block1_pop_2010 - block1_pop_2000\n block1_pop_pct_change = float(block1_pop_delta) / block1_pop_2000\n\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['POPPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['HUPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['data']['2000']['P1']['P001001'], block1_pop_2000)\n self.assertAlmostEqual(float(block1['data']['2010']['P1']['P001001']), block1_pop_2010)\n self.assertAlmostEqual(float(block1['data']['delta']['P1']['P001001']), block1_pop_delta)\n self.assertAlmostEqual(float(block1['data']['pct_change']['P1']['P001001']), block1_pop_pct_change)",
"def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf",
"def split_blocks(b: bytes, k_len: int) -> tuple:\n assert len(b) >= k_len\n\n return tuple(\n bytes(\n b[j] for j in range(i, len(b), k_len)\n ) for i in range(0, k_len)\n )",
"def split_to_blocks(self,raw_data,token1='[MS Chromatogram]',token2 = '\\n\\n'):\n data = []\n \n pos = 0 # position in the raw data string\n flag = False # flag set if end of file reached \n while not flag:\n pos_A=raw_data.find(token1,pos) # searching for token1 - start of block\n if pos_A == -1: # if token1 not found - quit cycle\n flag = True\n else:\n pos_B=raw_data.find(token2,pos_A)\n \n if pos_B == -1: #if token2 (explicit block end) not found, take the rest of raw data string\n pos_B = len(raw_data)\n else:\n pos = pos_B\n \n block = raw_data[pos_A:pos_B]\n data.append(block)\n return data",
"def between_blocks(self, frame):\n return []",
"def CompactBlocks(blocks):\n if len(blocks) == 1:\n return blocks\n result = [blocks[0]]\n for block in blocks[1:-1]:\n last_start, last_len = result[-1]\n curr_start, curr_len = block\n if last_start + last_len == curr_start:\n result[-1] = last_start, last_len + curr_len\n else:\n result.append(block)\n result.append(blocks[-1])\n return result",
"def __split_node(self, cur_node):\n temp = self.Node(cur_node.data_list[len(cur_node.data_list) / 2:], cur_node.next_node)\n cur_node.data_list = cur_node.data_list[:len(cur_node.data_list) / 2]\n cur_node.next_node = temp\n\n if cur_node == self.tail:\n self.tail = cur_node.next_node",
"def nextSplit(self):\n pass",
"def parseBlock(self, text, prevLineData):\n return self.parser.parseBlock(text, prevLineData)",
"def split_blocks(strings):\n blocks = [StringList()]\n for item in strings.xitems(): # (source, offset, value)\n if item[2].strip():\n blocks[-1].append(item[2], source=item[0], offset=item[1])\n elif len(blocks[-1]):\n blocks.append(StringList())\n # remove the last block if empty\n if len(blocks[-1]) == 0:\n del blocks[-1]\n return blocks",
"def switch(self):\n base_block = self.base_block or self\n self.next_block = Block(self.parent, base_block=base_block, py3_wrapper=self.py3_wrapper)\n return self.next_block",
"def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)",
"def prevSplit(self):\n pass",
"def build_new_block(cls, data='', previous_block=None):\n if previous_block:\n new_index = previous_block.index+1\n previous_hash = previous_block.hash\n else:\n new_index = 0\n previous_hash = ''\n timestamp = int(time.time())\n block_hash = cls.build_block_hash(\n index=new_index,\n timestamp=timestamp,\n data=data,\n previous_hash=previous_hash\n )\n block = cls(\n index=new_index,\n previous_hash=previous_hash,\n data=data,\n timestamp=timestamp,\n block_hash=block_hash\n )\n\n return block",
"def _get_block_data(self, blocks: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]:\n larger_blocks = numpy.zeros(blocks.shape + numpy.array((2, 2, 2)), blocks.dtype)\n larger_blocks[1:-1, 1:-1, 1:-1] = blocks\n unique_blocks = numpy.unique(larger_blocks)\n return larger_blocks, unique_blocks",
"def _splitBucket(self, bucket):\n idx = self.buckets.index(bucket)\n self.buckets.pop(idx)\n middle = int(bucket.low + (bucket.high - bucket.low)/2)\n \n bucketLow = Bucket(bucket.low, middle, bucket.refreshed)\n bucketHigh = Bucket(middle+1, bucket.high, refreshed.refreshed)\n \n self.buckets.append(bucketLow)\n self.buckets.append(bucketHigh)\n \n for bucket in bucket.nodes:\n if bucketLow.inRange(bucket):\n bucketLow.addNode(bucket)\n else:\n bucketHigh.addNode(bucket)\n \n return (bucketLow, bucketHigh)",
"def split(self):\n left = BPlusNode(self.order)\n right = BPlusNode(self.order)\n mid = self.order // 2\n\n left.keys = self.keys[:mid]\n left.values = self.values[:mid]\n\n right.keys = self.keys[mid:]\n right.values = self.values[mid:]\n\n # When the node is split, set the parent key to the left-most key of the right child node.\n self.keys = [right.keys[0]]\n self.values = [left, right]\n self.leaf = False",
"def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]",
"def split_chunk(chunk, *a, **kw):\n return split_chunk(chunk, *a, **kw)",
"def convert_split(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.input(\"AxisTensor\")\n if axis:\n axis = g.get_node(axis[0])\n axis, infered = try_infer_value(axis, g.get_params())\n if infered:\n axis = axis.tolist()[0]\n else:\n axis = op.attr(\"axis\")\n\n sections = op.input(\"SectionsTensorList\")\n if sections:\n tmp_section = []\n for i in sections:\n i = g.get_node(i)\n i, infered = try_infer_value(i, g.get_params())\n if infered:\n i = i.tolist()\n else:\n raise ValueError(\"Dynamic Split not yet supported.\")\n tmp_section.extend(i)\n sections = tmp_section\n else:\n sections = op.attr(\"sections\")\n if sections:\n indices = []\n split_index = 0\n for i in sections[:-1]:\n if i == -1:\n input_shape = infer_shape(x)[axis]\n i = input_shape - np.sum(sections) - 1\n split_index += i\n indices.append(split_index)\n else:\n indices = op.attr(\"num\")\n\n out = _op.split(x, indices, axis)\n for i, out_i in enumerate(out):\n g.add_node(op.output(\"Out\")[i], out_i)",
"def IntraRegionDiff(old_lines, new_lines, diff_params):\n old_line, old_state = ConvertToSingleLine(old_lines)\n new_line, new_state = ConvertToSingleLine(new_lines)\n old_blocks, new_blocks, ratio = IntraLineDiff(old_line, new_line, diff_params)\n for begin, length in old_blocks:\n MarkBlock(old_state, begin, begin+length)\n old_blocks = GetBlocks(old_state)\n\n for begin, length in new_blocks:\n MarkBlock(new_state, begin, begin+length)\n new_blocks = GetBlocks(new_state)\n\n return (old_blocks, new_blocks, ratio)",
"def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)",
"def _split_block(block: PruningBlock, list_output_channels: List[int]) -> List[PruningBlock]:\n if len(list_output_channels) == 1:\n raise RuntimeError\n\n dot_product = reduce((lambda x, y: x * y), list_output_channels)\n\n current_size = dot_product\n new_blocks = []\n divided_shapes = filter(lambda x: x != 1, list_output_channels)\n for divided_shape in divided_shapes:\n offset = int(current_size % dot_product)\n current_size /= divided_shape\n new_block = copy.copy(block)\n new_block.size = int(current_size)\n new_block.offset = offset\n new_blocks.append(new_block)\n return new_blocks",
"def split(self, node, width, height):\r\n node.used = True\r\n node.down = SquareAlgorithmNode(x=node.x,\r\n y=node.y + height,\r\n width=node.width,\r\n height=node.height - height)\r\n node.right = SquareAlgorithmNode(x=node.x + width,\r\n y=node.y,\r\n width=node.width - width,\r\n height=height)\r\n return node",
"def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + str(counter))\n f = open(mount['parent'] + '/linddata.' + str(counter), 'r') \n except IOError, e:\n return STATUS['M_BD']\n else :\n fdatastring = f.next()\n fdata = deserializedata(fdatastring)\n blocks[counter] = fdata\n counter += 1\n \n return STATUS['OK']",
"def _buff_partition(self, upload_buffer):\n left_buff = UploadBuffer(upload_buffer.start_offset)\n buff_start = upload_buffer.start_offset\n for idx, (intent, fragment_end) in enumerate(upload_buffer.iter_items()):\n candidate_size = fragment_end - buff_start\n if candidate_size > self.recommended_upload_part_size:\n right_fragment_size = candidate_size - self.recommended_upload_part_size\n left_buff.append(intent, fragment_end - right_fragment_size)\n return left_buff, upload_buffer.get_slice(\n start_idx=idx, start_offset=left_buff.end_offset\n )\n else:\n left_buff.append(intent, fragment_end)\n if candidate_size == self.recommended_upload_part_size:\n return left_buff, upload_buffer.get_slice(start_idx=idx + 1)\n\n return left_buff, UploadBuffer(left_buff.end_offset)",
"def NormalizeBlocks(blocks, line):\n result = []\n prev_start, prev_len = blocks[0]\n for curr_start, curr_len in blocks[1:]:\n # Note: nm_ is a prefix for non matching and m_ is a prefix for matching.\n m_len, nm_len = prev_len, curr_start - (prev_start+prev_len)\n # This if condition checks if matching and non matching parts are greater\n # than zero length and are comprised of spaces ONLY. The last condition\n # deals with most of the observed cases of strange diffs.\n # Note: curr_start - prev_start == m_l + nm_l\n # So line[prev_start:curr_start] == matching_part + non_matching_part.\n text = line[prev_start:curr_start]\n if m_len > 0 and nm_len > 0 and text == ' ' * len(text):\n # Move the matching block towards the end i.e. normalize.\n result.append((prev_start + nm_len, m_len))\n else:\n # Keep the existing matching block.\n result.append((prev_start, prev_len))\n prev_start, prev_len = curr_start, curr_len\n result.append(blocks[-1])\n assert len(result) == len(blocks)\n return result",
"def block_split(stream, block_size=BLOCK_SIZE_IN_BYTES):\n # TODO: this could possibly be a generator\n return [stream[i:i + BLOCK_SIZE_IN_BYTES]\n for i in range(0, len(stream), BLOCK_SIZE_IN_BYTES)]"
] |
[
"0.7007891",
"0.6474244",
"0.63708407",
"0.6220532",
"0.5873715",
"0.5784748",
"0.57539546",
"0.5708441",
"0.5696605",
"0.5676573",
"0.56721663",
"0.56536955",
"0.55770147",
"0.5571765",
"0.55206585",
"0.5516312",
"0.5514029",
"0.55035573",
"0.54716635",
"0.5471263",
"0.54684216",
"0.54534125",
"0.54509443",
"0.54400325",
"0.5439961",
"0.5386099",
"0.53845954",
"0.5369845",
"0.5363693",
"0.5357401"
] |
0.72078073
|
0
|
Patch uses of the instructions in `ops` when a predecessor changes from `oldblock` to `newblock`
|
def _patch_phis(self, ops, oldblock, newblock):
    for op in ops:
        for use in self.func.uses[op]:
            if use.opcode == 'phi':
                # Update predecessor blocks
                preds, vals = use.args
                preds = [newblock if pred == oldblock else pred
                         for pred in preds]
                use.set_args([preds, vals])
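# --- illustrative standalone sketch, not part of the dataset entry ---
# The phi patch above is a pure substitution over the predecessor list; the
# block names used here are hypothetical.
def patch_preds(preds, oldblock, newblock):
    return [newblock if pred == oldblock else pred for pred in preds]

assert patch_preds(["entry", "loop.body"], "loop.body", "loop.body.split") == [
    "entry", "loop.body.split"]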
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def changed_block(self, old_block, new_block):",
"def _poputil_block_recompute_backward(op, grads):\n return grads",
"def replaceChain(self, newbc):\n if (Blockchain.validChain(newbc) == False):\n print(\"New Blockchain is invalid\")\n return\n elif (len(newbc.chain) < len(self.chain)):\n print(\"Not enough blocks on new Blockchain\")\n return\n \n print(\"Updating blockchain to newest version\")\n self.chain = newbc",
"def _blocks_changed_in_config(old_config, new_config, block_comparator=_blocks_changed):\n\n for block_name in new_config.blocks.keys():\n # Check to see if there are any new blocks\n if block_name not in old_config.blocks.keys() or \\\n block_comparator(old_config.blocks[block_name], new_config.blocks[block_name]):\n return True\n\n for block_name in old_config.blocks.keys():\n if block_name not in new_config.blocks.keys() \\\n or block_comparator(old_config.blocks[block_name], new_config.blocks[block_name]):\n return True\n\n return False",
"def update(self, old, new, blk_id):\n\n oldsrc = \"%s/%s\" % (self._dir, old)\n newsrc = \"%s/%s\" % (self._dir, new)\n olddst = \"%s/.%d.old\" % (self._tempdir, blk_id)\n newdst = \"%s/%d\" % (self._tempdir, blk_id)\n copyfile(oldsrc, olddst)\n copyfile(newsrc, newdst)\n result = self._run(\"%s --%d --block-size %d --bits %d --quiet --threads %d %s --mode %s --rehash %s %s\" %\n (self._ishakesumd, self._mode, self._block_size, self._output_bits, self._threads,\n self._profile, self._alg, self._hash, self._tempdir))\n\n os.remove(olddst)\n os.remove(newdst)\n return result",
"def stageordering(cls, instcls, old, new):\n if instcls == blade:\n ret, slot = cls.getSlotforObject(old)\n if ret is False and slot != '0':\n print(old)\n cblist = cls.getsleepcallback(400)\n if slot == '0':\n cblist += cls.getreinitsessioncallback(None)\n cblist += cls.getsleepcallback(30)\n if old.peek_extension_app_mode() == \"FCIP\" and\\\n new.peek_extension_app_mode() == \"hybrid\":\n cls.setClsStageCB(instcls, \"PRE_PATCH\", slot, cblist)\n cls.setClsStagePri(instcls, \"PRE_PATCH\", slot, 10)\n # elif is causing identation error in all cases\n # adding return for all cases for flake8\n return\n if new.peek_extension_app_mode() == \"FCIP\" and\\\n old.peek_extension_app_mode() == \"hybrid\":\n cls.setClsStageCB(instcls, \"PRE_PATCH\", slot, cblist)\n cls.setClsStagePri(instcls, \"PRE_PATCH\", slot, 1)\n return\n if new.peek_extension_ve_mode() == \"20VE\" and\\\n old.peek_extension_app_mode() == \"10VE\":\n cls.setClsStageCB(instcls, \"PRE_PATCH\", slot, cblist)\n cls.setClsStagePri(instcls, \"PRE_PATCH\", slot, 10)\n return\n if new.peek_extension_app_mode() == \"10VE\" and\\\n old.peek_extension_app_mode() == \"20VE\":\n cls.setClsStageCB(instcls, \"PRE_PATCH\", slot, cblist)\n cls.setClsStagePri(instcls, \"PRE_PATCH\", slot, 10)\n return\n elif instcls == fibrechannel_switch:\n ret, slot = cls.getSlotforObject(old)\n if ret is False and slot != '0':\n print(old)\n cblist = cls.getsleepcallback(400)\n if slot == '0':\n cblist += cls.getreinitsessioncallback(None)\n cblist += cls.getsleepcallback(30)\n if old.peek_ag_mode() != new.peek_ag_mode():\n cls.setClsStageCB(instcls, \"PRE_DELETE\", slot, cblist)\n # print(slot, cls.getClsStageCB(instcls, \"PRE_DELETE\", slot))\n elif instcls == chassis:\n ret, slot = cls.getSlotforObject(old)\n if ret is False and slot != '0':\n print(old)\n cblist = cls.getsleepcallback(500)\n if slot == '0':\n cblist += cls.getreinitsessioncallback(None)\n cblist += cls.getsleepcallback(30)\n if old.peek_vf_enabled() != new.peek_vf_enabled():\n cls.setClsStageCB(instcls, \"PRE_PATCH\", slot, cblist)\n # print(slot, cls.getClsStageCB(instcls, \"PRE_PATCH\", slot))",
"def update_blocks_closure(self, ln, block, fail_bool):\n\n if ln == Line.LINE_GREEN:\n # Check that block isnt already in that state\n if self.blocks_green_arr[block - 1].open == (not fail_bool):\n if fail_bool == True:\n self.blocks_green_arr[block - 1].num_faliures += 1\n else:\n self.blocks_green_arr[block - 1].num_faliures -= 1\n else:\n if fail_bool == True:\n self.blocks_green_arr[block - 1].num_faliures += 1\n else:\n self.blocks_green_arr[block - 1].num_faliures -= 1\n\n\n # Update block if fail\n if self.blocks_green_arr[block - 1].num_faliures > 0:\n if self.blocks_green_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_green_arr[block - 1].open = False\n else:\n if not self.blocks_green_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_green_arr[block - 1].open = True\n\n elif ln == Line.LINE_RED:\n # Check that block isnt already in that state\n if self.blocks_red_arr[block - 1].open == (not fail_bool):\n if fail_bool == True:\n self.blocks_red_arr[block - 1].num_faliures += 1\n else:\n self.blocks_red_arr[block - 1].num_faliures -= 1\n else:\n if fail_bool == True:\n self.blocks_red_arr[block - 1].num_faliures += 1\n else:\n self.blocks_red_arr[block - 1].num_faliures -= 1\n\n # Update block if fail\n if self.blocks_red_arr[block - 1].num_faliures > 0:\n if self.blocks_red_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_red_arr[block - 1].open = False\n else:\n if not self.blocks_red_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_red_arr[block - 1].open = True\n\n else:\n raise Exception(\"CTC : UPDATE BLOCK CLOSURES (maint. mode from SWTrack \\\n Cont. Send INVALID Line\")",
"def IntraRegionDiff(old_lines, new_lines, diff_params):\n old_line, old_state = ConvertToSingleLine(old_lines)\n new_line, new_state = ConvertToSingleLine(new_lines)\n old_blocks, new_blocks, ratio = IntraLineDiff(old_line, new_line, diff_params)\n for begin, length in old_blocks:\n MarkBlock(old_state, begin, begin+length)\n old_blocks = GetBlocks(old_state)\n\n for begin, length in new_blocks:\n MarkBlock(new_state, begin, begin+length)\n new_blocks = GetBlocks(new_state)\n\n return (old_blocks, new_blocks, ratio)",
"def optimize_states(old_state, committed_state, new_state):\n old = old_state['actions']\n committed = committed_state['actions']\n new = new_state['actions']\n\n old, new, committed = map(optimize_actions, [old, new, committed])\n\n old_state['actions'] = old\n committed_state['actions'] = committed\n new_state['actions'] = new",
"def deduce_new_block_origins(line, hints, block_origins):\n block_origins = copy(block_origins)\n # Storing information whether function deduced anything new\n sth_changed = False\n\n # forward loop\n i = 0\n while i < len(hints):\n # Situation when there is filled cell just before the block need not to\n # be checked, due to use of push_block_origins\n\n # check for empty space blocking placing\n changed1, block_origins = check_no_empty_cell_inside(\n line, hints, block_origins, i,\n )\n\n # check for filled space enforcing push of block origin\n changed2, block_origins = check_filled_cell_from_right(\n line, hints, block_origins, i,\n )\n\n if changed1 or changed2:\n sth_changed = True\n else:\n i += 1\n\n # backward loop analysis\n changed, block_origins = pull_block_origins(line, hints, block_origins)\n sth_changed = sth_changed or changed\n\n return sth_changed, block_origins",
"def _adjustBlock(self, b):\n raise NotImplementedError",
"def test_update_manifold(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator('testx', 'GreekHero', branch='draft'),\r\n 'problem', block_id='problem1'\r\n )\r\n original = modulestore().get_item(locator)\r\n # first add 2 children to the course for the update to manipulate\r\n locator = BlockUsageLocator(\r\n CourseLocator('guestx', 'contender', branch='draft'),\r\n 'course', block_id=\"head345679\"\r\n )\r\n category = 'problem'\r\n new_payload = \"<problem>empty</problem>\"\r\n modulestore().create_item(\r\n locator, category, 'test_update_manifold',\r\n fields={'display_name': 'problem 1', 'data': new_payload},\r\n )\r\n another_payload = \"<problem>not empty</problem>\"\r\n modulestore().create_item(\r\n locator, category, 'test_update_manifold',\r\n fields={'display_name': 'problem 2', 'data': another_payload},\r\n definition_locator=original.definition_locator,\r\n )\r\n # pylint: disable=W0212\r\n modulestore()._clear_cache()\r\n\r\n # now begin the test\r\n block = modulestore().get_item(locator)\r\n pre_def_id = block.definition_locator.definition_id\r\n pre_version_guid = block.location.version_guid\r\n\r\n self.assertNotEqual(block.grading_policy['GRADER'][0]['min_count'], 13)\r\n block.grading_policy['GRADER'][0]['min_count'] = 13\r\n block.children = block.children[1:] + [block.children[0]]\r\n block.advertised_start = \"Soon\"\r\n\r\n block.save() # decache model changes\r\n updated_block = modulestore().update_item(block, \"**replace_user**\")\r\n self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)\r\n self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)\r\n self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)\r\n self.assertEqual(updated_block.children[0].version_agnostic(), block.children[0].version_agnostic())\r\n self.assertEqual(updated_block.advertised_start, \"Soon\")",
"def lower_common_block_offset(self):\n if len(self.__changed_blocks) <= 1:\n return\n\n current = self.shrink_target\n\n blocked = [current.buffer[u:v] for u, v in current.all_block_bounds()]\n\n changed = [\n i\n for i in sorted(self.__changed_blocks)\n if not self.shrink_target.blocks[i].trivial\n ]\n\n if not changed:\n return\n\n ints = [int_from_bytes(blocked[i]) for i in changed]\n offset = min(ints)\n assert offset > 0\n\n for i in hrange(len(ints)):\n ints[i] -= offset\n\n def reoffset(o):\n new_blocks = list(blocked)\n for i, v in zip(changed, ints):\n new_blocks[i] = int_to_bytes(v + o, len(blocked[i]))\n return self.incorporate_new_buffer(hbytes().join(new_blocks))\n\n new_offset = Integer.shrink(offset, reoffset, random=self.random)\n if new_offset == offset:\n self.clear_change_tracking()",
"def eval_step_b_old_and_new(self, sess, task_b_data):\n raise NotImplemented()",
"def test_replace_chain_keep_original(self):\n import copy\n miner_address = 'miner_address'\n\n blockchain1 = Blockchain()\n blockchain1.mine(miner_address)\n\n blockchain2 = copy.deepcopy(blockchain1)\n blockchain1.mine(miner_address)\n\n # Now let's make sure that each blockchain has its own number of blocks\n self.assertEqual(3, len(blockchain1.full_chain))\n self.assertEqual(2, len(blockchain2.full_chain))\n\n # Then let's replace blockchain1 with blockchain2\n blockchain1.replace_chain(blockchain2.full_chain)\n\n self.assertEqual(3, len(blockchain1.full_chain))\n self.assertEqual(2, len(blockchain2.full_chain))",
"def test_use_new_and_old_inputs(self):\n deployment, modified_bp_path = self._deploy_and_get_modified_bp_path(\n 'use_new_and_old_inputs',\n inputs={'input_prop1': 'custom_input1',\n 'input_prop2': 'custom_input2'}\n )\n node_mapping = {'affected_node': 'site1'}\n\n base_nodes, base_node_instances = \\\n self._map_node_and_node_instances(deployment.id, node_mapping)\n base_node = base_nodes['affected_node'][0]\n self.client.blueprints.upload(modified_bp_path, BLUEPRINT_ID)\n wait_for_blueprint_upload(BLUEPRINT_ID, self.client)\n self._do_update(deployment.id, BLUEPRINT_ID,\n inputs={'input_prop3': 'custom_input3'})\n\n modified_nodes, modified_node_instances = \\\n self._map_node_and_node_instances(deployment.id, node_mapping)\n modified_node = modified_nodes['affected_node'][0]\n\n # Checking that get_property works correctly\n outputs_to_check = {\n 'output_prop1': {\n 'value': 'custom_input1'\n },\n 'output_prop2': {\n 'value': 'custom_input2'\n },\n 'output_prop3': {\n 'value': 'custom_input3'\n }\n }\n outputs = self.client.deployments.get(deployment.id).outputs\n self.assertEqual(outputs_to_check, outputs)\n\n # assert nothing else changed\n self._assert_equal_dicts(base_node,\n modified_node,\n excluded_items=['properties', 'blueprint_id'])\n self._assert_equal_dicts(\n base_node['properties'],\n modified_node['properties'],\n excluded_items=['prop1', 'prop2', 'prop3', 'blueprint_id']\n )",
"def moveBlock(self, block: ghidra.program.model.mem.MemoryBlock, newStartAddr: ghidra.program.model.address.Address, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...",
"def _blocks_changed(block1, block2):\n if block1.name != block2.name:\n return True\n\n # Check for any changed blocks (symmetric difference operation of sets)\n block_diff = set(block1.to_dict().items()) ^ set(block2.to_dict().items())\n if len(block_diff) > 0:\n return True\n\n return False",
"def apply_block(self, block_id, func=..., edges=..., inplace=...): # -> None:\n ...",
"def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)",
"def update_pre_block(pre_block):\r\n updated_block=\"\";\r\n count=0;\r\n for line in pre_block.splitlines():\r\n count+=1;\r\n if count<len(pre_block.splitlines()):\r\n line=update_line(line)+\"\\n\";\r\n updated_block=updated_block+line;\r\n else:\r\n line=update_line(line);\r\n updated_block=updated_block+line;\r\n return updated_block",
"def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_gru_node_gene(0, cfg)\n gene1_act = deepcopy(gene1.activation)\n gene1_bias = deepcopy(gene1.bias)\n gene1_bias_hh = deepcopy(gene1.bias_hh)\n gene1_bias_ih = deepcopy(gene1.bias_ih)\n gene1_weight_hh = deepcopy(gene1.weight_hh)\n gene1_weight_ih = deepcopy(gene1.weight_ih)\n gene1_weight_ih_full = deepcopy(gene1.weight_ih_full)\n gene2_act = deepcopy(gene2.activation)\n gene2_bias = deepcopy(gene2.bias)\n gene2_bias_hh = deepcopy(gene2.bias_hh)\n gene2_bias_ih = deepcopy(gene2.bias_ih)\n gene2_weight_hh = deepcopy(gene2.weight_hh)\n gene2_weight_ih = deepcopy(gene2.weight_ih)\n gene2_weight_ih_full = deepcopy(gene2.weight_ih_full)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.add_input_key(cfg=cfg, k=-1)\n gene3.update_weight_ih()\n gene3.activation = 'c'\n gene3.bias = -10\n gene3.bias_hh[0] = -10 # Make modifications directly on the vector\n gene3.bias_ih[0] = -10 # Make modifications directly on the vector\n gene3.weight_hh[0, 0] = -10 # Make modifications directly on the vector\n gene3.weight_ih[0, 0] = -10 # Make modifications directly on the vector\n gene3.weight_ih_full[0, 0] = -10 # Make modifications directly on the vector\n \n # Check for unchanged parents\n self.assertEqual(gene1.activation, gene1_act)\n self.assertEqual(gene1.bias, gene1_bias)\n self.assertEqual(np.linalg.norm(gene1.bias_hh - gene1_bias_hh), 0)\n self.assertEqual(np.linalg.norm(gene1.bias_ih - gene1_bias_ih), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_hh - gene1_weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_ih - gene1_weight_ih), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_ih_full - gene1_weight_ih_full), 0)\n self.assertEqual(gene2.activation, gene2_act)\n self.assertEqual(gene2.bias, gene2_bias)\n self.assertEqual(np.linalg.norm(gene2.bias_hh - gene2_bias_hh), 0)\n self.assertEqual(np.linalg.norm(gene2.bias_ih - gene2_bias_ih), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_hh - gene2_weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_ih - gene2_weight_ih), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_ih_full - gene2_weight_ih_full), 0)",
"def replace_predecessor(self, old_bb, new_bb):\n for i, (bb, value) in enumerate(self.pairs):\n if bb == old_bb:\n self.pairs[i] = (new_bb, value)\n break\n else:\n assert False",
"def rewire(self, new_node, near_indexes):\n\n for i in near_indexes:\n near_node = self.node_list[i]\n edge_node = self.steer(new_node, near_node)\n if not edge_node:\n continue\n edge_node.cost = self.calc_new_cost(new_node, near_node)\n\n no_collision = self.check_collision(edge_node, self.obstacle_list)\n improved_cost = near_node.cost > edge_node.cost\n\n if no_collision and improved_cost:\n self.node_list[i] = edge_node\n self.propagate_cost_to_leaves(new_node)",
"def notice_change(self, position, new_tags=None):\n\t\tif new_tags == None:\n\t\t\tnew_tags = self.get_tags_for_position(position)\n\t\tif __debug__:\n\t\t\tif random.random() < 0.001:\n\t\t\t\tassert new_tags == self.get_tags_for_position(position)\n\t\tbb = BinaryBox(0, position)\n\t\tif bb not in self._block_tag_cache:\n\t\t\treturn\n\t\t# get tags that this block previously had by looking up 1x1x1 BinaryBox, calculate difference for each tag (+1,0,-1) <=> added, kept, removed\n\t\tprevious_tags = self._block_tag_cache[bb]\n\t\ttag_difference = collections.Counter(new_tags)\n\t\ttag_difference.subtract(previous_tags)\n\n\t\t# apply that change to BinaryBox and all parents\n\t\twhile bb in self._block_tag_cache:\n\t\t\ttag_counter = self._block_tag_cache[bb]\n\t\t\ttag_counter.update(tag_difference)\n\t\t\tbb = bb.get_parent()",
"def test_replace_chain(self):\n import copy\n miner_address = 'miner_address'\n\n blockchain1 = Blockchain()\n blockchain1.mine(miner_address)\n\n blockchain2 = copy.deepcopy(blockchain1)\n blockchain2.mine(miner_address)\n\n # Now let's make sure that each blockchain has its own number of blocks\n self.assertEqual(2, len(blockchain1.full_chain))\n self.assertEqual(3, len(blockchain2.full_chain))\n\n # Then let's replace blockchain1 with blockchain2\n blockchain1.replace_chain(blockchain2.full_chain)\n\n self.assertEqual(3, len(blockchain1.full_chain))\n self.assertEqual(3, len(blockchain2.full_chain))\n self.assertEqual(blockchain1.last_block.hash, blockchain2.last_block.hash)",
"def makeReadBlocksStandard(solver, adjustment):\n #Correct CPU block\n timeSlice = slice(0,solver.sharedShape[0],1)\n solver.blocks = [(timeSlice,)+tuple(block) for block in solver.blocks]\n #Make CPU read blocks\n if solver.blocks: #Just in case blocks are empty\n readBlocks = list()\n for block in solver.blocks:\n readBlocks.append(block[:2]+tuple([slice(curr.start-adjustment,curr.stop+adjustment,1) for curr in block[2:]]))\n solver.blocks = list(zip(solver.blocks,readBlocks))",
"def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))",
"def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))",
"def _set_block(self, pos, block_):\n self._changes[deepcopy(pos)] = block"
] |
[
"0.7141038",
"0.5633429",
"0.5507244",
"0.546693",
"0.5414261",
"0.5385301",
"0.5373628",
"0.5321918",
"0.53146756",
"0.5311769",
"0.53000045",
"0.5256756",
"0.52212864",
"0.52187675",
"0.5158157",
"0.51488775",
"0.51406777",
"0.5132721",
"0.51189345",
"0.50957066",
"0.5091767",
"0.50888896",
"0.50820833",
"0.508141",
"0.5067552",
"0.5059189",
"0.5042955",
"0.5014464",
"0.5014464",
"0.50131077"
] |
0.76315385
|
0
|
Generate a loop given start, stop, step and the index variable type. The builder's position is set to the end of the body block. Returns (condition_block, body_block, exit_block).
|
def gen_loop(self, start=None, stop=None, step=None):
    assert isinstance(stop, Value), "Stop should be a Constant or Operation"
    ty = stop.type
    start = start or Const(0, ty)
    step = step or Const(1, ty)
    assert start.type == ty == step.type
    with self.at_front(self.func.startblock):
        var = self.alloca(types.Pointer(ty), [])
    prev, exit = self.splitblock('loop.exit')
    cond = self.func.new_block('loop.cond', after=prev)
    body = self.func.new_block('loop.body', after=cond)
    with self.at_end(prev):
        self.store(start, var)
        self.jump(cond)
    # Condition
    with self.at_front(cond):
        index = self.load(ty, [var])
        self.store(self.add(ty, [index, step]), var)
        self.cbranch(self.lt(types.Bool, [index, stop]), body, exit)
    with self.at_end(body):
        self.jump(cond)
    self.position_at_beginning(body)
    return cond, body, exit
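# --- illustrative standalone sketch, not part of the dataset entry ---
# The CFG produced by gen_loop above has the shape
#   prev -> cond,  cond -> body | exit (on index < stop),  body -> cond
# so the index takes the values start, start+step, ... while index < stop.
# A plain-Python rendering of those index values (no IR library involved):
def loop_index_values(start, stop, step):
    values = []
    index = start               # store(start, var) at the end of `prev`
    while index < stop:         # cbranch(lt(index, stop), body, exit) in `cond`
        values.append(index)    # `body` runs once per value
        index += step           # store(add(index, step), var) in `cond`
    return values

assert loop_index_values(0, 10, 3) == [0, 3, 6, 9]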
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gen_for(self, stmt: statements.For) -> None:\n condition_block = self.builder.new_block()\n body_block = self.builder.new_block()\n final_block = self.builder.new_block()\n iterator_block = self.builder.new_block()\n self.break_block_stack.append(final_block)\n self.continue_block_stack.append(iterator_block)\n\n # Initialization:\n if stmt.init:\n if isinstance(stmt.init, declarations.VariableDeclaration):\n self.gen_local_variable(stmt.init)\n else:\n self.gen_expr(stmt.init, rvalue=True)\n self.builder.emit_jump(condition_block)\n\n # Condition:\n self.builder.set_block(condition_block)\n if stmt.condition:\n self.gen_condition(stmt.condition, body_block, final_block)\n else:\n self.builder.emit_jump(body_block)\n\n # Body:\n self.builder.set_block(body_block)\n self.gen_stmt(stmt.body)\n self.builder.emit_jump(iterator_block)\n\n # Iterator part:\n self.builder.set_block(iterator_block)\n if stmt.post:\n self.gen_expr(stmt.post, rvalue=True)\n self.builder.emit_jump(condition_block)\n\n # Continue here:\n self.builder.set_block(final_block)\n self.break_block_stack.pop()\n self.continue_block_stack.pop()",
"def makeLoop( name, *structure ):\n return X12Loop( name, *structure )",
"def _visit_loop_body(self, node, if_block=None, is_for=None):\n loop_name = \"for\" if is_for else \"while\"\n if if_block:\n node.if_block = if_block\n else:\n node.if_block = self.flow.nextblock(label=\"%s_body\" % loop_name,\n pos=node.body[0])\n self.visitlist(node.body)\n self.flow.loops.pop()\n\n if self.flow.block:\n # Add back-edge\n self.flow.block.add_child(node.cond_block)\n\n # Else clause\n if node.orelse:\n node.else_block = self.flow.nextblock(\n parent=node.cond_block,\n label=\"else_clause_%s\" % loop_name,\n pos=node.orelse[0])\n self.visitlist(node.orelse)\n if self.flow.block:\n self.flow.block.add_child(node.exit_block)\n else:\n node.cond_block.add_child(node.exit_block)\n\n self.exit_block(node.exit_block, node)",
"def body_build(declaration, body):\r\n #scan for stencil templates\r\n stencil_grammar = Literal('for') +roundBracketedExpr(identifier + 'in' + identifier('stencil') + Optional(roundBracketedExpr(identifier)))\r\n for t,s,e in stencil_grammar.scanString(body):\r\n declaration.stencil_templates.add(t.stencil)\r\n #scan for type templates? cant think of use case, and not sure of possible grammar conflicts\r",
"def _define_loop(graph, logdir, train_steps, eval_steps, batch_env):\n\n default_external_action = np.zeros(batch_env.action_info[1])\n loop = tools.Loop(\n logdir, graph.step, graph.should_log, graph.do_report,\n graph.force_reset)\n loop.add_phase(\n 'train', graph.done, graph.score, graph.summary, train_steps,\n report_every=None,\n log_every=train_steps // 2,\n checkpoint_every=None,\n feed={\n graph.is_training: True, graph.should_step: True, graph.use_external_action: False,\n graph.external_action: default_external_action})\n loop.add_phase(\n 'eval', graph.done, graph.score, graph.summary, eval_steps,\n report_every=eval_steps,\n log_every=eval_steps // 2,\n checkpoint_every=10 * eval_steps,\n feed={graph.is_training: False, graph.should_step: True, graph.use_external_action: False,\n graph.external_action: default_external_action})\n return loop",
"def _extract_loops(self, pdb, loop_type, mapping, normalize):\n try:\n mlab = matlab.Matlab(self.config['locations']['fr3d_root'])\n [loops, count, err_msg] = mlab.extractLoops(pdb, loop_type, nout=3)\n except Exception as err:\n self.logger.exception(err)\n raise err\n\n if err_msg != '':\n raise core.MatlabFailed(err_msg)\n\n if loops == 0:\n self.logger.warning('No %s in %s', loop_type, pdb)\n loop_id = self._get_fake_loop_id(pdb, loop_type)\n return [mod.LoopInfo(loop_id=loop_id,\n type = 'NA',\n pdb_id=pdb,\n sequential_id='000',\n length=0,\n seq='',\n r_seq='',\n nwc_seq='',\n r_nwc_seq='',\n unit_ids='',\n loop_name='')]\n\n self.logger.info('Found %i %s loops', count, loop_type)\n\n data = []\n for index in xrange(count):\n loop = loops[index].AllLoops_table\n full_id = normalize(loop.full_id)\n loop_id = self._get_loop_id(full_id, pdb, loop_type, mapping)\n loops[index].Filename = loop_id\n\n data.append(mod.LoopInfo(\n loop_id=loop_id,\n type=loop_type,\n pdb_id=pdb,\n sequential_id=loop_id.split(\"_\")[-1],\n length=int(loops[index].NumNT[0][0]),\n seq=loop.seq,\n r_seq=loop.r_seq,\n nwc_seq=loop.nwc,\n r_nwc_seq=loop.r_nwc,\n unit_ids=','.join(full_id),\n loop_name=loop.loop_name))\n\n if self.save_loops:\n self.__save__(loops, self.config['locations']['loops_mat_files'])\n\n return data",
"def loop_counter(self, name = \"i\", start_val = syntax_helpers.zero_i64):\n\n start_val = syntax_helpers.wrap_if_constant(start_val)\n counter_type = start_val.type\n\n counter_before = self.assign_temp(start_val, name + \"_before\")\n counter = self.fresh_var(counter_type, name)\n counter_after = self.fresh_var(counter_type, name + \"_after\")\n merge = {counter.name:(counter_before, counter_after)}\n return counter, counter_after, merge",
"def _analyse_loop(\n self,\n statement: Union[ast.AsyncFor, ast.For, ast.While],\n *,\n next: CFNode,\n ) -> CFNode:\n # Node acting as target for the next iteration. We'll identify this\n # with the loop entry node, once that exists.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n loop_node = self._ast_node(\n statement,\n enter=body_node,\n else_=self._analyse_statements(statement.orelse, next=next),\n error=self._raise,\n )\n\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node",
"def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret",
"def internal_loop(p, i, j, k, l):\n return _RNAstructure_wrap.internal_loop(p, i, j, k, l)",
"def get_for_iterations(self, file, i):\n\n # Run super definition\n line = super().get_for_iterations(file, i)\n\n # Save required words\n variable, for_range = line[0], \"\".join(line[2:])\n\n # Strip ending semicolon\n for_range = for_range.split(\":\", 1)[0]\n\n # Create start and end for 'for loop' call\n start = []\n end = []\n\n # Set start and step to default\n begin = \"0\"\n step = \"1\"\n\n # Parse for_range\n if for_range.find(\"range\") != -1:\n\n # Dump unwanted portion\n for_range = for_range.strip(\"range(\").strip(\")\")\n\n # Parse variables in for_range\n variables = [var.strip() for var in for_range.split(\",\")]\n\n # Store variable values\n var_count = len(variables)\n\n # If only one variable is given,\n # Set stop variable with default begin and step\n if var_count == 1:\n stop = variables[0]\n # Else if two variable are given,\n # set begin and stop variable with default step\n else:\n begin = variables[0]\n stop = variables[1]\n # If three variables are given,\n # set all three begin, stop and step variables\n if var_count == 3:\n step = variables[2]\n\n # Set array to None\n array = None\n else:\n # If range not found, iterate over given array\n\n # Get array\n array = for_range\n\n # Set default stop\n stop = \"Array.length\"\n\n # Check of array slicing\n if array.find(\"[\") != -1 and array.find(\":\") != -1:\n array, begin, stop, step = self.get_list_slice_vars(array)\n\n # Get variable type for variable\n\n # If array is passed, get type of array\n if array:\n var_type = \"Array.data_type\"\n\n # Else get type of variable\n else:\n var_type = self.get_type(begin)\n\n # Append variable type to variable\n variable = var_type + \" \" + variable\n\n # Return all variables\n return variable, begin, stop, step, array, start, end",
"def _dynamic_rnn_loop(cell: RNNCellBase[State], inputs: torch.Tensor, initial_state: State, sequence_length: torch.LongTensor) ->Tuple[torch.Tensor, State]:\n state = initial_state\n time_steps = inputs.shape[0]\n all_outputs = []\n all_state = map_structure(lambda _: no_map(list), state)\n for i in range(time_steps):\n output, state = cell(inputs[i], state)\n all_outputs.append(output)\n map_structure_zip(lambda xs, x: xs.append(x), (all_state, state))\n final_outputs = torch.stack(all_outputs, dim=0)\n final_outputs = mask_sequences(final_outputs, sequence_length=sequence_length, time_major=True)\n final_state = map_structure(lambda _: no_map(list), state)\n for batch_idx, time_idx in enumerate(sequence_length.tolist()):\n if time_idx > 0:\n map_structure_zip(lambda xs, x: xs.append(x[time_idx - 1][batch_idx]), (final_state, all_state))\n else:\n map_structure_zip(lambda xs, x: xs.append(x[batch_idx]), (final_state, initial_state))\n final_state = map_structure(lambda x: torch.stack(x, dim=0), final_state)\n return final_outputs, final_state",
"def LoopBody(i, *input_arrays):\n # Outfeed ops execute on each JF node, so they must be located on the\n # nodes.\n outfeed_devices = []\n device_assignment = py_utils.GetTpuDeviceAssignment()\n assert device_assignment\n for replica in range(device_assignment.num_replicas):\n num_cores_per_replica = 1 if self.spmd else (\n device_assignment.num_cores_per_replica)\n for core in range(num_cores_per_replica):\n with tf.device(device_assignment.host_device(replica, core)):\n outfeed_devices.append(\n tpu_ops.outfeed_dequeue_tuple(\n tensor_types,\n tensor_shapes,\n device_ordinal=device_assignment.tpu_ordinal(replica,\n core)))\n offset = i * num_devices\n output_arrays = list(input_arrays)\n # Each output_array holds a different per-example tensor. We get results\n # for each tensor from each TPU for each TpuTrainStep call.\n for j in range(len(output_arrays)):\n for k in range(len(outfeed_devices)):\n output_arrays[j] = output_arrays[j].write(offset + k,\n outfeed_devices[k][j])\n\n return tuple([i + 1] + output_arrays)",
"def for_loop(num_iters, body, initial_args):\n for i in range(num_iters):\n if i == 0:\n outputs = body(*initial_args)\n else:\n outputs = body(*outputs)\n return outputs",
"def make_range_temp_K_prot(DB_version, DB_type, i1, i2):\n\n for index in range(i1, i2):\n print(index)\n make_temp_K_prot(DB_version, DB_type, index)",
"def __getMultiLevelTileLoop(self, tile_level, iter_names, st_exps, lbody):\n\n iter_names = iter_names[:]\n iter_names.reverse()\n st_exps = st_exps[:]\n st_exps.reverse()\n loop = lbody\n for level in range(1, tile_level + 1):\n if level == 1:\n for iname, st_exp in zip(iter_names, st_exps):\n n_tsize_name = self.__getTileSizeName(iname, level)\n lb = ast.IdentExp(self.__getTileIterName(iname, level))\n loop = self.__getIntraTileLoop(\n iname, n_tsize_name, lb, st_exp, loop\n )\n else:\n for iname in iter_names:\n c_iname = self.__getTileIterName(iname, level - 1)\n n_tsize_name = self.__getTileSizeName(iname, level)\n lb = ast.IdentExp(self.__getTileIterName(iname, level))\n st = ast.IdentExp(self.__getTileSizeName(iname, level - 1))\n loop = self.__getIntraTileLoop(c_iname, n_tsize_name, lb, st, loop)\n return loop",
"def build_input(self) -> None:\n possible_vars = {typ: names & self.undefined for typ, names in MAGIC_VARS.items()}\n\n if (possible_vars[\"loop\"] or possible_vars[\"index\"]) and possible_vars[\"input\"]:\n loop_names = \", \".join(possible_vars[\"loop\"] or possible_vars[\"index\"])\n input_names = \", \".join(possible_vars[\"input\"])\n raise PypError(\n f\"Candidates found for both loop variable ({loop_names}) and \"\n f\"input variable ({input_names})\"\n )\n\n for typ, names in possible_vars.items():\n if len(names) > 1:\n names_str = \", \".join(names)\n raise PypError(f\"Multiple candidates for {typ} variable: {names_str}\")\n\n if possible_vars[\"loop\"] or possible_vars[\"index\"]:\n # We'll loop over stdin and define loop / index variables\n idx_var = possible_vars[\"index\"].pop() if possible_vars[\"index\"] else None\n loop_var = possible_vars[\"loop\"].pop() if possible_vars[\"loop\"] else None\n\n if loop_var:\n self.define(loop_var)\n if idx_var:\n self.define(idx_var)\n if loop_var is None:\n loop_var = \"_\"\n\n if idx_var:\n for_loop = f\"for {idx_var}, {loop_var} in enumerate(sys.stdin): \"\n else:\n for_loop = f\"for {loop_var} in sys.stdin: \"\n for_loop += f\"{loop_var} = {loop_var}.rstrip('\\\\n')\"\n\n loop: ast.For = ast.parse(for_loop).body[0] # type: ignore\n loop.body.extend(self.tree.body)\n self.tree.body = [loop]\n elif possible_vars[\"input\"]:\n # We'll read from stdin and define the necessary input variable\n input_var = possible_vars[\"input\"].pop()\n self.define(input_var)\n\n if input_var == \"stdin\":\n input_assign = ast.parse(f\"{input_var} = sys.stdin\")\n else:\n input_assign = ast.parse(f\"{input_var} = [x.rstrip('\\\\n') for x in sys.stdin]\")\n\n self.tree.body = input_assign.body + self.tree.body\n self.use_pypprint_for_implicit_print()\n else:\n no_pipe_assertion = ast.parse(\n \"assert sys.stdin.isatty() or not sys.stdin.read(), \"\n \"\"\"\"The command doesn't process input, but input is present. \"\"\"\n '''Maybe you meant to use a magic variable like `stdin` or `x`?\"'''\n )\n self.tree.body = no_pipe_assertion.body + self.tree.body\n self.use_pypprint_for_implicit_print()",
"def make_loop_careduce(loop_orders, dtypes, loop_tasks, sub):\r\n\r\n def loop_over(preloop, code, indices, i):\r\n iterv = 'ITER_%i' % i\r\n update = \"\"\r\n suitable_n = \"1\"\r\n for j, index in enumerate(indices):\r\n var = sub['lv%i' % j]\r\n update += \"%(var)s_iter += %(var)s_jump%(index)s_%(i)s;\\n\" % locals()\r\n if index != 'x':\r\n suitable_n = \"%(var)s_n%(index)s\" % locals()\r\n return \"\"\"\r\n %(preloop)s\r\n for (int %(iterv)s = %(suitable_n)s; %(iterv)s; %(iterv)s--) {\r\n %(code)s\r\n %(update)s\r\n }\r\n \"\"\" % locals()\r\n\r\n preloops = {}\r\n for i, (loop_order, dtype) in enumerate(zip(loop_orders, dtypes)):\r\n for j, index in enumerate(loop_order):\r\n if index != 'x':\r\n preloops.setdefault(j, \"\")\r\n preloops[j] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n break\r\n else: # all broadcastable\r\n preloops.setdefault(0, \"\")\r\n preloops[0] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n\r\n if len(loop_tasks) == 1:\r\n s = preloops.get(0, \"\")\r\n else:\r\n s = \"\"\r\n for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):\r\n s = loop_over(preloops.get(i, \"\") + pre_task, s + task, indices, i)\r\n\r\n s += loop_tasks[-1]\r\n return \"{%s}\" % s",
"def parse(self):\n iterator = self.parseLowLevel()\n while True:\n item_type, item = next(iterator)\n if item_type is KEYWORD and item[0] == \"data\":\n yield item_type, item\n break\n\n state = LABEL_OR_KEYWORD\n while True:\n item_type, item = next(iterator)\n\n if state is LABEL_OR_KEYWORD:\n if item_type is DATA_LABEL:\n label1, label2 = item\n state = VALUE\n elif item_type is KEYWORD:\n if item[0] == \"loop\":\n loop_labels = []\n state = LOOP_LABELS\n else:\n yield item_type, item\n else:\n raise MMCIFSyntaxError(\"Expected data label or keyword\",\n self.line_number)\n\n elif state is VALUE:\n if item_type is DATA_VALUE:\n state = LABEL_OR_KEYWORD\n yield DATA, (label1, label2, item)\n else:\n raise MMCIFSyntaxError(\"Expected data value \"\n \"for label %s.%s\"\n % (label1, label2),\n self.line_number)\n\n elif state is LOOP_LABELS:\n if item_type is DATA_LABEL:\n if loop_labels and loop_labels[0][0] != item[0]:\n # The label does not belong to the loop category.\n # meaning that the loop is empty and terminated.\n label1, label2 = item\n state = VALUE\n else:\n loop_labels.append(item)\n elif item_type is DATA_VALUE:\n loop_data = [item]\n state = LOOP_VALUES\n yield TABLE_HEADER, loop_labels\n if len(loop_labels) == 1:\n yield TABLE_DATA, loop_data\n loop_data = []\n else:\n raise MMCIFSyntaxError(\"Expected label or value in loop\",\n self.line_number)\n\n elif state is LOOP_VALUES:\n if item_type is DATA_VALUE:\n loop_data.append(item)\n if len(loop_data) == len(loop_labels):\n yield TABLE_DATA, loop_data\n loop_data = []\n else:\n if len(loop_data) > 0:\n raise MMCIFSyntaxError(\"Extraneous data in loop:\" +\n str(loop_data),\n self.line_number)\n if item_type is DATA_LABEL:\n label1, label2 = item\n state = VALUE\n elif item_type is KEYWORD:\n if item[0] == \"loop\":\n loop_labels = []\n state = LOOP_LABELS\n else:\n yield item_type, item\n else:\n raise MMCIFSyntaxError(\"Expected data label or loop\",\n self.line_number)",
"def update_DLoop(self, astnode, path, pathidx):\n pathidx = self.update_until_STOP(astnode['_cond'], path, pathidx+1)\n if pathidx > 0:\n self.update_until_STOP(astnode['_body'], path, pathidx+1)",
"def convert_for(self, variable, start, stop, step, array):\n\n # Run super definition\n variable, start, stop, step, array = super().convert_for(\n variable, start, stop, step, array\n )\n\n # Remove data type from variable(duck typing in Python)\n variable = variable.split(\" \")[-1]\n\n # Create for template\n for_template = \"for {} in {}:\"\n\n # Define loop condition\n if array:\n # If array if given, loop through array\n loop_cond = array\n\n # Check if array slicing is required\n if step != \"1\" or stop != \"Array.length\" or start != \"0\":\n\n # Make template for array slicing\n loop_cond = \"{}[{{}}]\".format(array)\n\n if start == \"0\":\n start = \"\"\n\n if stop == \"Array.length\":\n stop = \"\"\n\n if step == \"1\":\n step = \"\"\n\n # If step is default, omit step\n if not step:\n\n # Else add start to range call\n loop_cond = loop_cond.format(start + \":\" + stop)\n else:\n # Add all three parameters if step is provided\n loop_cond = loop_cond.format(start + \":\" + stop + \":\" + step)\n\n else:\n # Else make range template\n loop_cond = \"range({})\"\n\n # If step if default, omit step\n if step == \"1\":\n\n # If start is default, omit start\n if start == \"0\":\n loop_cond = loop_cond.format(stop)\n\n else:\n # Else add start to range call\n loop_cond = loop_cond.format(start + \", \" + stop)\n else:\n # Add all three parameters if step is provided\n loop_cond = loop_cond.format(start + \", \" + stop + \", \" + step)\n\n # Return converted for statement\n return [for_template.format(variable, loop_cond)], []",
"def gen_goto(self, stmt: statements.Goto) -> None:\n block = self.get_label_block(stmt.label)\n self.builder.emit_jump(block)\n new_block = self.builder.new_block()\n self.builder.set_block(new_block)",
"def generate_multi_type2(self, num_data, velocity, num_step, num_dim, test=False, visualize=False):\n num_vel = len(velocity)\n if not test:\n # if pow(num_vel, num_step) < num_data:\n # vel_list = np.asarray(list(itertools.product(np.arange(num_vel), repeat=num_step)))\n # num_vel_list = len(vel_list)\n #\n # div, rem = num_data // num_vel_list, num_data % num_vel_list\n # vel_idx = np.vstack((np.tile(vel_list, [div, 1]), vel_list[np.random.choice(num_vel_list, size=rem)]))\n # np.random.shuffle(vel_idx)\n # else:\n vel_idx = np.random.choice(num_vel, size=[num_data, num_step])\n\n vel_grid = np.take(velocity, vel_idx, axis=0)\n vel = vel_grid * self.interval_length\n\n vel_grid_cumsum = np.cumsum(vel_grid, axis=1)\n mu_max = np.fmin(self.num_interval, np.min(self.num_interval - vel_grid_cumsum, axis=1))\n mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))\n mu_start = np.random.sample(size=[num_data, num_dim])\n mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)\n mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)\n else:\n if visualize:\n mu_start = np.reshape([4, 4], newshape=(1, 1, 2))\n vel_pool = np.where((velocity[:, 0] >= -1) & (velocity[:, 1] >= -1))\n vel_idx = np.random.choice(vel_pool[0], size=[num_data * 10, num_step])\n\n vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)\n mu_seq = np.concatenate((np.tile(mu_start, [num_data * 10, 1, 1]), vel_grid_cumsum + mu_start), axis=1)\n mu_seq_new, vel_idx_new = [], []\n for i in range(len(mu_seq)):\n mu_seq_sub = mu_seq[i]\n if len(np.unique(mu_seq_sub, axis=0)) == len(mu_seq_sub):\n mu_seq_new.append(mu_seq[i])\n vel_idx_new.append(vel_idx[i])\n mu_seq, vel_idx = np.stack(mu_seq_new, axis=0), np.stack(vel_idx_new, axis=0)\n mu_seq_rs = np.reshape(mu_seq, [-1, (num_step + 1) * 2])\n select_idx = np.where(np.sum(mu_seq_rs >= self.num_interval, axis=1) == 0)[0][:num_data]\n vel_idx = vel_idx[select_idx]\n mu_seq = mu_seq[select_idx]\n vel = np.take(velocity, vel_idx, axis=0) * self.interval_length\n else:\n vel_idx = np.random.choice(num_vel, size=[num_data * num_dim, num_step])\n vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)\n mu_max = np.fmin(self.num_interval, np.min(self.num_interval - vel_grid_cumsum, axis=1))\n mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))\n\n select_idx = np.where(np.sum(mu_max < mu_min, axis=1) == 0)[0][:num_data]\n vel_idx, vel_grid_cumsum = vel_idx[select_idx], vel_grid_cumsum[select_idx]\n vel_grid = np.take(velocity, vel_idx, axis=0)\n mu_max, mu_min = mu_max[select_idx], mu_min[select_idx]\n mu_start = np.random.sample(size=[num_data, num_dim])\n mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)\n mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)\n vel = vel_grid * self.interval_length\n\n # sns.distplot(vel, rug=True, hist=False)\n # plt.show()\n\n place_seq = {'seq': mu_seq, 'vel': vel, 'vel_idx': vel_idx}\n return place_seq",
"def __staticLoopBoundScanning(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # generate the lower and upper values of each inter-tile loop\n val_table = {}\n for iname in outer_loop_inames:\n _, _, _, st_exp, _ = loop_info_table[iname]\n lval = ast.IdentExp(self.__getTileIterName(iname, tile_level))\n t = ast.BinOpExp(\n ast.IdentExp(self.__getTileSizeName(iname, tile_level)),\n ast.ParenthExp(st_exp.replicate()),\n ast.BinOpExp.SUB,\n )\n uval = ast.BinOpExp(lval.replicate(), ast.ParenthExp(t), ast.BinOpExp.ADD)\n val_table[iname] = (lval, uval)\n\n # iterate over each statement to determine loop bounds that are affine functions\n # of outer loop iterators\n lb_exps_table = {}\n ub_exps_table = {}\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # determine the value of the new lower loop bound\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_prolog:\n t = self.__findMinMaxVal(\n \"max\", lb_exp.replicate(), lb_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # determine the value of the new upper loop bound\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_epilog:\n t = self.__findMinMaxVal(\n \"min\", ub_exp.replicate(), ub_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n ub_exps_table[str(ub_exp)] = ast.IdentExp(ub_name)\n 
scan_stmts.append(ast.ExpStmt(a))\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)",
"def __getLoopBoundScanningStmts(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # (optimization) generate code that determines the loop bounds of full tiles at compile time\n if self.affine_lbound_exps:\n return self.__staticLoopBoundScanning(\n stmts, tile_level, outer_loop_inames, loop_info_table\n )\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # iterate over each statement to find loop bounds that are functions of outer loop iterators\n min_int = ast.NumLitExp(-2147483648, ast.NumLitExp.INT)\n max_int = ast.NumLitExp(2147483647, ast.NumLitExp.INT)\n lb_exps_table = {}\n ub_exps_table = {}\n pre_scan_stmts = []\n post_scan_stmts = []\n scan_loops = SimpleLoops()\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # generate loop-bound scanning code for the prolog\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n post_scan_stmts.append(ast.ExpStmt(a))\n else:\n if need_prolog:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), min_int.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n a = ast.BinOpExp(\n ast.IdentExp(lb_name),\n ast.FunCallExp(\n ast.IdentExp(\"max\"),\n [ast.IdentExp(lb_name), lb_exp.replicate()],\n ),\n ast.BinOpExp.EQ_ASGN,\n )\n scan_loops.insertLoop(lb_inames, ast.ExpStmt(a))\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n\n # generate loop-bound scaning code for the epilog\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n post_scan_stmts.append(ast.ExpStmt(a))\n else:\n if need_epilog:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), max_int.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n a = 
ast.BinOpExp(\n ast.IdentExp(ub_name),\n ast.FunCallExp(\n ast.IdentExp(\"min\"),\n [ast.IdentExp(ub_name), ub_exp.replicate()],\n ),\n ast.BinOpExp.EQ_ASGN,\n )\n scan_loops.insertLoop(ub_inames, ast.ExpStmt(a))\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n ub_exps_table[str(ub_exp)] = ast.IdentExp(ub_name)\n\n # build a new loop information tabe for generating the loop-bound scanning code\n n_loop_info_table = {}\n for iname, linfo in loop_info_table.items():\n _, _, _, st_exp, _ = linfo\n n_loop_info_table[iname] = (\n self.__getTileSizeName(iname, tile_level),\n self.__getTileIterName(iname, tile_level),\n st_exp,\n )\n\n # convert the \"SimpleLoop\" abstractions into loop ASTs\n scan_loop_stmts = scan_loops.convertToASTs(tile_level, n_loop_info_table)\n\n # merge all scanning statements\n scan_stmts = pre_scan_stmts + scan_loop_stmts + post_scan_stmts\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)",
"def update_until_STOP(self, nodes, path, pathidx):\n nodeidx = 0\n\n while path[pathidx][0] != 'STOP':\n node, edge = path[pathidx]\n\n if nodeidx >= len(nodes):\n astnode = {}\n if node == 'DBranch':\n astnode['node'] = node\n astnode['_cond'] = []\n astnode['_then'] = []\n astnode['_else'] = []\n nodes.append(astnode)\n elif node == 'DExcept':\n astnode['node'] = node\n astnode['_try'] = []\n astnode['_catch'] = []\n nodes.append(astnode)\n elif node == 'DLoop':\n astnode['node'] = node\n astnode['_cond'] = []\n astnode['_body'] = []\n nodes.append(astnode)\n else:\n nodes.append({'node': 'DAPICall', '_call': node})\n nodeidx += 1\n pathidx += 1\n continue\n else:\n astnode = nodes[nodeidx]\n\n if edge == SIBLING_EDGE:\n nodeidx += 1\n pathidx += 1\n continue\n\n if node == 'DBranch':\n self.update_DBranch(astnode, path, pathidx)\n return -1\n elif node == 'DExcept':\n self.update_DExcept(astnode, path, pathidx)\n return -1\n elif node == 'DLoop':\n self.update_DLoop(astnode, path, pathidx)\n return -1\n else:\n raise ValueError('Invalid node/edge: ' + str((node, edge)))\n\n return pathidx",
"def pyramid_factory(motor, start, stop, step_size):\n if stop < start:\n start, stop = stop, start\n last_group = None\n last_pos = start\n\n def x_motion_per_step(dets, stream_name):\n nonlocal last_group\n nonlocal last_pos\n nonlocal step_size\n\n if last_group is not None:\n yield from bps.wait(last_group)\n\n yield from bps.trigger_and_read(dets, stream_name)\n\n last_group = short_uid()\n\n if not start < last_pos + step_size < stop:\n step_size *= -1\n last_pos += step_size\n\n yield from bps.abs_set(motor, last_pos, group=last_group)\n\n return x_motion_per_step",
"def _set_loops(loop_data):\n # Initiate a loop column with 1 for each loop tag\n loop_data[u'loop'] = 0\n loop_data.ix[loop_data.text.str.contains(u'loop_'),u'loop'] = 1\n\n # Ensure the index is linear, but set it up so the original\n # index can be replaced\n old_columns = loop_data.columns\n loop_data = loop_data.reset_index(drop=False)\n index_column = list(set(loop_data.columns) - set(old_columns))\n \n # Get the difference between index positions of the loop tags\n loop_index = loop_data.ix[loop_data.loop==1].index\n loop_diff = loop_index[1:] - loop_index[:-1]\n\n # For nested loops, the difference \n depth = 0\n for idx,diff in zip(loop_index[:-1],loop_diff):\n if diff <= 2:\n depth += 1\n loop_data.ix[idx,u'loop'] += depth\n else:\n depth = 0\n \n \n # Reset the index\n loop_data.set_index(index_column, drop=True)\n if u'index' in loop_data.columns:\n loop_data = loop_data.drop([u'index'], axis = 1)\n \n return loop_data",
"def visit_for_of(self, flags, scope, token, parent):\r\n\r\n label, expr, body = token.children\r\n\r\n if body.type != Token.T_BLOCK:\r\n # the parser should be run in python mode\r\n raise TransformError(body, \"expected block in for loop body\")\r\n\r\n # the extra block scope, and finalize, allows for declaring\r\n # variables inside of a for arg list\r\n\r\n scope.pushBlockScope(\"loop\")\r\n self._push_finalize(scope, token, parent)\r\n\r\n self._push_children(scope, body, flags)\r\n self._push_tokens(ST_VISIT | ST_STORE | (flags & ST_SCOPE_MASK), scope, [label], token)\r\n self._push_tokens(ST_VISIT, scope, [expr], token)",
"def step_index():\n return _ConditionBuilder(patterns_pb2.Condition(step_index=True))"
] |
[
"0.5881916",
"0.5810891",
"0.5258655",
"0.52414805",
"0.50282264",
"0.49179763",
"0.48754445",
"0.48476613",
"0.47897246",
"0.47825179",
"0.47693425",
"0.47301555",
"0.4664564",
"0.46524873",
"0.464208",
"0.46326712",
"0.46270207",
"0.4616576",
"0.4600605",
"0.458489",
"0.45702785",
"0.45476258",
"0.45184124",
"0.45148656",
"0.45126683",
"0.451176",
"0.4511404",
"0.45104402",
"0.4509234",
"0.45052525"
] |
0.73493594
|
0
|
Adds a weight variable to the layer.
|
def add_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
constraint=None):
if dtype is None:
dtype = K.floatx()
weight = self.add_variable(name, shape,
dtype=dtype,
initializer=initializers.get(initializer),
regularizer=regularizers.get(regularizer),
constraint=constraints.get(constraint),
trainable=trainable)
return weight
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_weight(self, weight, layer_name, tag):\n if tag != 'bias' and tag != 'wmat':\n raise Exception('tag must be bias or wmat')\n cxnlib.CXNNetSetWeight(self.handle,\n weight.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n weight.size,\n ctypes.c_char_p(layer_name.encode('utf-8')),\n ctypes.c_char_p(tag.encode('utf-8')))",
"def set_weight(self, weight):\n self.weight = weight # overwrite the existing weight with the input weight value",
"def weight(self, weight):\n\n self._weight = weight",
"def weight(self, weight):\n\n self._weight = weight",
"def weight(self, weight):\n\n self._weight = weight",
"def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):\n if use_xavier:\n # initializer = tf.contrib.layers.xavier_initializer()\n initializer = tf.initializers.glorot_uniform()\n else:\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = _variable_on_cpu(name, shape, initializer)\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n\n #var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))\n var = weight_variable(shape)\n if wd is not None:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def weight_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer()):\n variable = tf.get_variable(name, initializer=initializer(shape))\n return variable",
"def _add_weight_decay(self, var, wd):\n wd_loss = tf.multiply(tf.nn.l2_loss(var),\n wd,\n name='weight_loss')\n tf.add_to_collection(GKeys.LOSSES, wd_loss)",
"def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.get_variable(\n 'weight', initializer=initial, regularizer=tf.nn.l2_loss)",
"def weight_expr(self, t, w_plus, z, value):\n pass",
"def create_weight_variable(self, shape, name=\"W\"):\n import tensorflow as tf\n\n self.variables[name] = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)\n self.variables[name + \"_ph\"] = tf.placeholder(tf.float32, shape=shape, \n name=name+\"_ph\")\n self.variables[name + \"_assign\"] = tf.assign(self.variables[name], \n self.variables[name + \"_ph\"])",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape,\n initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n # add weight decay term to 'losses' collection, so the sum of all loss in 'losses' collection\n # will be the total/final loss\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name = 'weight_loss')\n tf.add_to_collection('losses', weight_decay)\n \n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = variable(\n name,\n shape,\n initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = _variable_on_cpu(name, shape,\n tf.truncated_normal_initializer(stddev=stddev))\n if wd:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _set_weight(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"weight\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"weight must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"weight\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__weight = t\n if hasattr(self, '_set'):\n self._set()",
"def add_weight(self, from_neuron, to_neuron, value):\n index = (to_neuron * self.neuron_count) + from_neuron\n if index >= len(self.weights):\n raise IndexError(\"Out of range: from_neuron: {}, to_neuron: {}\".format(from_neuron, to_neuron))\n self.weights[index] += value",
"def weight_variable_const(name, value):\n return tf.get_variable(\n name, dtype=tf.float32,\n initializer=value,\n )",
"def add_to_average(self, value, decay=1.0, weight=1.0):\n decay = tf.cast(decay, dtype=self.dtype)\n weight = tf.cast(weight, dtype=self.dtype)\n\n update_var = smart_assign(self._var, decay * self._var + weight * value)\n\n update_total_weight = smart_assign(self._total_weight,\n decay * self._total_weight + weight)\n\n return tf.group(update_var, update_total_weight)",
"def addEditVariable(\n self,\n variable: Variable,\n strength: float\n | Literal[\"weak\"]\n | Literal[\"medium\"]\n | Literal[\"strong\"]\n | Literal[\"required\"],\n /,\n ) -> None:\n ...",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, wd = 0.0):\n var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())\n # print(\"change var\")\n # var = tf.Variable(tf.truncated_normal(shape, mean= 0.0, stddev = 1.0), name = name)\n if wd != 0.0:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def setWeight(self, w):\n self._W = w",
"def add_variable(self, name, var):\n self.variables.append(_3ds_named_variable(name, var))",
"def add_variable(self, name, var):\n self.variables.append(_3ds_named_variable(name, var))",
"def new_weight_variable(self, shape):\n self.total_parameters += np.product(shape)\n # Scale down regular Xavier initialization because we're residual.\n stddev = 0.2 * (2.0 / np.product(shape[:-1]))**0.5\n var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))\n self.parameters.append(var)\n return var"
] |
[
"0.65926033",
"0.6463909",
"0.64615434",
"0.64615434",
"0.64615434",
"0.6434855",
"0.64340943",
"0.6415618",
"0.64081067",
"0.64048344",
"0.63897765",
"0.6330229",
"0.63140184",
"0.63023055",
"0.629226",
"0.6286121",
"0.6258184",
"0.62510973",
"0.6248951",
"0.6238682",
"0.6232904",
"0.6229782",
"0.6219793",
"0.6208264",
"0.6208264",
"0.6184801",
"0.6184688",
"0.6173019",
"0.6173019",
"0.6164154"
] |
0.75557965
|
0
|
Retrieves the input mask tensor(s) of a layer at a given node.
|
def get_input_mask_at(self, node_index):
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node['key']))\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n return input_masks",
"def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node[\"key\"]))\n input_masks = [nx_graph.nodes[input_node][\"output_mask\"] for input_node, _ in input_edges]\n return input_masks",
"def get_output_mask_at(self, node_index):\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)",
"def input_mask(self):\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)",
"def fill_input_masks(nx_node, nx_graph):\n input_edges = sorted(list(nx_graph.in_edges(nx_node['key'])), key=lambda edge: nx_graph.edges[edge]['in_port'])\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n\n filled_input_masks = []\n for i, mask in enumerate(input_masks):\n if mask is None:\n mask = torch.ones(nx_graph.edges[input_edges[i]]['activation_shape'][1])\n filled_input_masks.append(mask)\n return input_masks, filled_input_masks",
"def masked_softmax(input_layer,n_nodes, batch_size):\n mask_lower = K.theano.tensor.tril(K.ones((n_nodes, n_nodes)))\n mask_upper = \\\n K.theano.tensor.triu(-100. * K.ones((n_nodes, n_nodes)), 1)\n mask_layer = mask_lower * input_layer + mask_upper\n mask_layer = mask_layer + 0 * K.eye(n_nodes)[0:n_nodes, 0:n_nodes]\n mask_layer = \\\n K.reshape(mask_layer, (batch_size * n_nodes, n_nodes))\n softmax_layer = K.softmax(mask_layer)\n output_layer = K.reshape(softmax_layer, (batch_size, n_nodes, n_nodes))\n return output_layer",
"def layer_masks(self, module):\n pass\n # return masks",
"def compute_mask(self, input, input_mask=None):\n if input_mask is None:\n return [None for i in range(self.n)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask",
"def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node[\"input_masks\"] = input_masks\n nx_node[\"output_mask\"] = input_masks[0]",
"def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node['input_masks'] = input_masks\n nx_node['output_mask'] = input_masks[0]",
"def lstm_mask_layer(proj, mask):\n\n return proj * mask[:, :, None]",
"def attention_mask(nd, ns, dtype=tf.float32):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n out = tf.cast(m, dtype)\n return out",
"def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask",
"def _tf_mask(self, feats: th.Tensor) -> List[th.Tensor]:\n proj = self.dfsmn(feats, None)[0]\n # N x S*F x T\n masks = self.masks(proj)\n # [N x F x T, ...]\n return th.chunk(masks, self.num_branchs, 1)",
"def compute_mask(self, input, input_mask=None):\n n_inputs = len(input)\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return [None for _ in range(n_inputs * (n_inputs - 1) / 2)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def compute_mask(self, input, input_mask=None):\n n_inputs = len(input)\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return [None for _ in range(n_inputs * (n_inputs - 1) / 2)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n \n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n \n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n \n # We don't assume that `from_tensor` is a mask (although it could be). We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n \n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n \n return mask",
"def output_mask(self):\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)",
"def compute_mask(t, padding_idx=0):\n mask = torch.ne(t, padding_idx).float()\n return mask",
"def compute_mask(self, input, input_mask=None):\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return None\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n if not self.supports_masking:\n if mask is not None:\n if isinstance(mask, list):\n if any(m is not None for m in mask):\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n else:\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n # masking not explicitly supported: return None as mask\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask",
"def masked_tensor(tensor0, tensor1, mask):\n s = tensor0.size()\n assert s[0] == mask.size()[0]\n m = mask\n for i in range(len(s) - 1):\n m = mask.unsqueeze(-1)\n m = m.repeat(1, *s[1:])\n m = m.float()\n out = ((1.0 - m) * tensor0 + m * tensor1).type(tensor0.dtype)\n return out",
"def make_attn_mask(inp, inp_len, dtype=tf.float32):\n with tf.name_scope(\"encoder_mask\"):\n mask = tf.sequence_mask(inp_len, dtype=dtype, maxlen=tf.shape(inp)[1])\n return mask[:, None, None, :]",
"def compute_mask(self, inputs, mask=None):\n if self.padding != \"same\":\n raise ValueError(\"Padding mode '%s' not yet supported\" % (\n self.padding,))\n return mask",
"def get_source_inputs(tensor, layer=None, node_index=None):\n if not hasattr(tensor, '_keras_history'):\n return tensor\n\n if layer is None or node_index:\n layer, node_index, _ = tensor._keras_history\n if not layer._inbound_nodes:\n return [tensor]\n else:\n node = layer._inbound_nodes[node_index]\n if not node.inbound_layers:\n # Reached an Input layer, stop recursion.\n return node.input_tensors\n else:\n source_tensors = []\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n previous_sources = get_source_inputs(x, layer, node_index)\n # Avoid input redundancy.\n for x in previous_sources:\n if x not in source_tensors:\n source_tensors.append(x)\n return source_tensors",
"def mask_rnn_inputs(rnn_inputs, gamma):\n\t\twith torch.no_grad():\n\t\t\treturn rnn_inputs * gamma",
"def apply_mask(data, mask_func, seed=None):\n shape = np.array(data.shape)\n shape[:-3] = 1\n mask = mask_func(shape, seed)\n return torch.where(mask == 0, torch.Tensor([0]), data), mask",
"def compute_mask(self, inputs, mask=None):\n return None",
"def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, list):\n raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, list):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and `mask` '\n 'should have the same length.')\n if mask[0] is not None:\n raise ValueError('Attention mask should be None.')\n if mask[1] is None:\n return None\n return K.any(mask[1], axis=-1)"
] |
[
"0.7356327",
"0.7356168",
"0.70601845",
"0.686372",
"0.6512168",
"0.6421162",
"0.6359002",
"0.6270404",
"0.61987",
"0.6198212",
"0.6178",
"0.6079104",
"0.60541415",
"0.6017263",
"0.5996643",
"0.5978338",
"0.5978338",
"0.59334666",
"0.59316564",
"0.5927264",
"0.5876722",
"0.5857829",
"0.5820902",
"0.5818255",
"0.5806833",
"0.57908875",
"0.57882863",
"0.57744086",
"0.57605445",
"0.5735301"
] |
0.78300685
|
0
|
Retrieves the output mask tensor(s) of a layer at a given node.
|
def get_output_mask_at(self, node_index):
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def output_mask(self):\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)",
"def get_input_mask_at(self, node_index):\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)",
"def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node['key']))\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n return input_masks",
"def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node[\"key\"]))\n input_masks = [nx_graph.nodes[input_node][\"output_mask\"] for input_node, _ in input_edges]\n return input_masks",
"def masked_softmax(input_layer,n_nodes, batch_size):\n mask_lower = K.theano.tensor.tril(K.ones((n_nodes, n_nodes)))\n mask_upper = \\\n K.theano.tensor.triu(-100. * K.ones((n_nodes, n_nodes)), 1)\n mask_layer = mask_lower * input_layer + mask_upper\n mask_layer = mask_layer + 0 * K.eye(n_nodes)[0:n_nodes, 0:n_nodes]\n mask_layer = \\\n K.reshape(mask_layer, (batch_size * n_nodes, n_nodes))\n softmax_layer = K.softmax(mask_layer)\n output_layer = K.reshape(softmax_layer, (batch_size, n_nodes, n_nodes))\n return output_layer",
"def layer_masks(self, module):\n pass\n # return masks",
"def lstm_mask_layer(proj, mask):\n\n return proj * mask[:, :, None]",
"def attention_mask(nd, ns, dtype=tf.float32):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n out = tf.cast(m, dtype)\n return out",
"def input_mask(self):\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)",
"def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node[\"input_masks\"] = input_masks\n nx_node[\"output_mask\"] = input_masks[0]",
"def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node['input_masks'] = input_masks\n nx_node['output_mask'] = input_masks[0]",
"def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask",
"def get_layers_output(self, dataset):\n layers_out = []\n\n with self.tf_graph.as_default():\n with tf.Session() as self.tf_session:\n self.tf_saver.restore(self.tf_session, self.model_path)\n for l in self.layer_nodes:\n layers_out.append(l.eval({self.input_data: dataset,\n self.keep_prob: 1}))\n\n if layers_out == []:\n raise Exception(\"This method is not implemented for this model\")\n else:\n return layers_out",
"def get_dropout_mask(dropout_probability: float, tensor_for_masking: mx.ndarray.ndarray.NDArray):\n binary_mask = mx.nd.random.uniform(0, 1, tensor_for_masking.shape) > dropout_probability\n # Scale mask by 1/keep_prob to preserve output statistics.\n dropout_mask = binary_mask.float().div(1.0 - dropout_probability)\n return dropout_mask",
"def compute_mask(t, padding_idx=0):\n mask = torch.ne(t, padding_idx).float()\n return mask",
"def _tf_mask(self, feats: th.Tensor) -> List[th.Tensor]:\n proj = self.dfsmn(feats, None)[0]\n # N x S*F x T\n masks = self.masks(proj)\n # [N x F x T, ...]\n return th.chunk(masks, self.num_branchs, 1)",
"def get_all_node_outputs(node: Node):\n return [port.node for port in get_node_output_ports(node)]",
"def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")",
"def compute_mask(self, input, input_mask=None):\n if input_mask is None:\n return [None for i in range(self.n)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def fill_input_masks(nx_node, nx_graph):\n input_edges = sorted(list(nx_graph.in_edges(nx_node['key'])), key=lambda edge: nx_graph.edges[edge]['in_port'])\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n\n filled_input_masks = []\n for i, mask in enumerate(input_masks):\n if mask is None:\n mask = torch.ones(nx_graph.edges[input_edges[i]]['activation_shape'][1])\n filled_input_masks.append(mask)\n return input_masks, filled_input_masks",
"def attention_mask_future(nd, ns, dtype=tf.float32):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n out = tf.cast(m, dtype)\n return out",
"def get_dropout_mask(\n dropout_probability: float, tensor_for_masking: torch.Tensor\n): # pragma: no cover\n binary_mask = tensor_for_masking.new_tensor(\n torch.rand(tensor_for_masking.size()) > dropout_probability\n )\n # Scale mask by 1/keep_prob to preserve output statistics.\n dropout_mask = binary_mask.float().div(1.0 - dropout_probability)\n return dropout_mask",
"def masked_tensor(tensor0, tensor1, mask):\n s = tensor0.size()\n assert s[0] == mask.size()[0]\n m = mask\n for i in range(len(s) - 1):\n m = mask.unsqueeze(-1)\n m = m.repeat(1, *s[1:])\n m = m.float()\n out = ((1.0 - m) * tensor0 + m * tensor1).type(tensor0.dtype)\n return out",
"def get_mask(tensor, padding_idx=0):\n mask = torch.ones(size=list(tensor.size()), dtype=torch.bool)\n mask[tensor == padding_idx] = False \n\n return mask",
"def testMask2D(self):\n\n # This mask, applied on an image filled with 1, should result in an image\n # filled with 8 (since we sum 4 elements per channel and there are 2 input\n # channels).\n mask = np.array([[1, 1, 1],\n [1, 0, 0],\n [0, 0, 0]], dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.array([[8] * 3] * 3)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)",
"def generate_final_state(tensor, mask):\n tt = tf.transpose(tensor, [1, 0, 2])\n return tf.boolean_mask(tt, mask)",
"def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask",
"def get_mask(self, nodetype):\n print(\"Deprecation warning: Do it yourself.\")\n assert nodetype in [PAPER_TYPE, SUBJECT_TYPE, AUTHOR_TYPE], \"Unknown node type\"\n return (self.ndata.type == nodetype).values",
"def apply_mask(data, mask_func, seed=None):\n shape = np.array(data.shape)\n shape[:-3] = 1\n mask = mask_func(shape, seed)\n return torch.where(mask == 0, torch.Tensor([0]), data), mask",
"def mask(self):\n return list(self._mask_generator())"
] |
[
"0.70013404",
"0.67517936",
"0.6399421",
"0.6388399",
"0.6365272",
"0.62206274",
"0.5890313",
"0.58897936",
"0.58688754",
"0.584128",
"0.58091635",
"0.578257",
"0.57217216",
"0.56831867",
"0.5655766",
"0.5625976",
"0.56219614",
"0.5602559",
"0.5580995",
"0.5553757",
"0.5529019",
"0.5487441",
"0.547068",
"0.546916",
"0.5464576",
"0.54527736",
"0.54147786",
"0.5400914",
"0.5394756",
"0.53869396"
] |
0.80216116
|
0
|
Retrieves the input mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer.
|
def input_mask(self):
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_input_mask_at(self, node_index):\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)",
"def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node['key']))\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n return input_masks",
"def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node[\"key\"]))\n input_masks = [nx_graph.nodes[input_node][\"output_mask\"] for input_node, _ in input_edges]\n return input_masks",
"def compute_mask(self, input, input_mask=None):\n if input_mask is None:\n return [None for i in range(self.n)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def fill_input_masks(nx_node, nx_graph):\n input_edges = sorted(list(nx_graph.in_edges(nx_node['key'])), key=lambda edge: nx_graph.edges[edge]['in_port'])\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n\n filled_input_masks = []\n for i, mask in enumerate(input_masks):\n if mask is None:\n mask = torch.ones(nx_graph.edges[input_edges[i]]['activation_shape'][1])\n filled_input_masks.append(mask)\n return input_masks, filled_input_masks",
"def compute_mask(self, input, input_mask=None):\n n_inputs = len(input)\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return [None for _ in range(n_inputs * (n_inputs - 1) / 2)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def compute_mask(self, input, input_mask=None):\n n_inputs = len(input)\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return [None for _ in range(n_inputs * (n_inputs - 1) / 2)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n if not self.supports_masking:\n if mask is not None:\n if isinstance(mask, list):\n if any(m is not None for m in mask):\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n else:\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n # masking not explicitly supported: return None as mask\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask",
"def get_source_inputs(tensor, layer=None, node_index=None):\n if not hasattr(tensor, '_keras_history'):\n return tensor\n\n if layer is None or node_index:\n layer, node_index, _ = tensor._keras_history\n if not layer._inbound_nodes:\n return [tensor]\n else:\n node = layer._inbound_nodes[node_index]\n if not node.inbound_layers:\n # Reached an Input layer, stop recursion.\n return node.input_tensors\n else:\n source_tensors = []\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n previous_sources = get_source_inputs(x, layer, node_index)\n # Avoid input redundancy.\n for x in previous_sources:\n if x not in source_tensors:\n source_tensors.append(x)\n return source_tensors",
"def _source_mask(self, ilens):\n x_masks = make_non_pad_mask(ilens)\n return x_masks.unsqueeze(-2)",
"def compute_mask(self, input, input_mask=None):\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return None\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))",
"def get_output_mask_at(self, node_index):\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)",
"def layer_masks(self, module):\n pass\n # return masks",
"def compute_mask(self, inputs, mask=None):\n if self.padding != \"same\":\n raise ValueError(\"Padding mode '%s' not yet supported\" % (\n self.padding,))\n return mask",
"def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, list):\n raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, list):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and `mask` '\n 'should have the same length.')\n if mask[0] is not None:\n raise ValueError('Attention mask should be None.')\n if mask[1] is None:\n return None\n return K.any(mask[1], axis=-1)",
"def masked_softmax(input_layer,n_nodes, batch_size):\n mask_lower = K.theano.tensor.tril(K.ones((n_nodes, n_nodes)))\n mask_upper = \\\n K.theano.tensor.triu(-100. * K.ones((n_nodes, n_nodes)), 1)\n mask_layer = mask_lower * input_layer + mask_upper\n mask_layer = mask_layer + 0 * K.eye(n_nodes)[0:n_nodes, 0:n_nodes]\n mask_layer = \\\n K.reshape(mask_layer, (batch_size * n_nodes, n_nodes))\n softmax_layer = K.softmax(mask_layer)\n output_layer = K.reshape(softmax_layer, (batch_size, n_nodes, n_nodes))\n return output_layer",
"def compute_mask(self, inputs, mask=None):\n return None",
"def output_mask(self):\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)",
"def mask(self):\n return list(self._mask_generator())",
"def mask(self) -> list[int]:\n return self._mask",
"def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask",
"def apply_mask_to_inputs(self, inputs: tf.Tensor, schema: tf.Tensor) -> tf.Tensor:\n inputs = tf.where(\n tf.cast(tf.expand_dims(schema, -1), tf.bool),\n inputs,\n tf.cast(self.masked_item_embedding, dtype=inputs.dtype),\n )\n return inputs",
"def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node[\"input_masks\"] = input_masks\n nx_node[\"output_mask\"] = input_masks[0]",
"def make_attn_mask(inp, inp_len, dtype=tf.float32):\n with tf.name_scope(\"encoder_mask\"):\n mask = tf.sequence_mask(inp_len, dtype=dtype, maxlen=tf.shape(inp)[1])\n return mask[:, None, None, :]",
"def _tf_mask(self, feats: th.Tensor) -> List[th.Tensor]:\n proj = self.dfsmn(feats, None)[0]\n # N x S*F x T\n masks = self.masks(proj)\n # [N x F x T, ...]\n return th.chunk(masks, self.num_branchs, 1)",
"def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node['input_masks'] = input_masks\n nx_node['output_mask'] = input_masks[0]",
"def _get_input_layer_names(self, layer):\n if self._is_functional_model(self.model):\n inbound_nodes = layer['inbound_nodes']\n return [connection_info[0] for connection_info in inbound_nodes[0]]\n else: # Sequential model.\n layers = self._config['layers']\n i = layers.index(layer)\n if i == 0:\n # First layer has no inputs.\n return []\n else:\n return [layers[i - 1]['config']['name']]",
"def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask",
"def lstm_mask_layer(proj, mask):\n\n return proj * mask[:, :, None]",
"def get_out_seq_lens_nonmask_after_a_layer(self, in_seq_lens_tensor, i):\n out_lengths = in_seq_lens_tensor.clone()\n out_lengths = ((out_lengths.float() - (self.conv_layers_infos[i][1] - 1) - 1) / self.conv_layers_infos[i][-1] + 1).floor().long()\n out_nonmask = (~lengths_to_padding_mask(out_lengths)).float()\n return out_nonmask, out_lengths"
] |
[
"0.756858",
"0.72555685",
"0.7244732",
"0.66529214",
"0.6584047",
"0.6530159",
"0.6530159",
"0.6504658",
"0.64577276",
"0.64186",
"0.6376844",
"0.6373097",
"0.63566697",
"0.6205893",
"0.62017626",
"0.6168873",
"0.6005357",
"0.5990894",
"0.5963304",
"0.5865473",
"0.5858816",
"0.58379316",
"0.58321947",
"0.58179665",
"0.5813727",
"0.5810555",
"0.5796427",
"0.57761633",
"0.5711809",
"0.56836027"
] |
0.7356791
|
1
|