| query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4-10) | document_rank (string, 2 classes) |
|---|---|---|---|---|---|---|
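Every row's metadata column declares the same training objective: a triplet over (query, document, negatives). As a rough illustration only, the sketch below shows how a single preview row could be expanded into (query, positive, negative) triplets and filtered by the accompanying negative_scores; the `row` dictionary, the `row_to_triplets` helper, and the 0.6 score cutoff are assumptions made for this example, not part of the dataset itself.

```python
# Minimal sketch (assumption, not part of the dataset): expand one preview row
# into (query, positive, negative) triplets as declared by its metadata objective.
# `row` is a dict keyed by the column names above; the 0.6 cutoff that drops
# negatives scored too close to the positive is an arbitrary illustrative choice.
from typing import Dict, Iterator, Tuple

def row_to_triplets(row: Dict, max_negative_score: float = 0.6) -> Iterator[Tuple[str, str, str]]:
    query, positive = row["query"], row["document"]
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        if float(score) < max_negative_score:  # scores are stored as strings
            yield (query, positive, negative)
```

Under that assumed cutoff, the first row below would drop its six highest-scored negatives (0.728 down to 0.618) and keep the remaining 24.
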
Sets the org_apache_felix_jetty_gzip_included_paths of this OrgApacheFelixHttpProperties. | def org_apache_felix_jetty_gzip_included_paths(self, org_apache_felix_jetty_gzip_included_paths: ConfigNodePropertyArray):
self._org_apache_felix_jetty_gzip_included_paths = org_apache_felix_jetty_gzip_included_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_jetty_gzip_included_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gzip_excluded_paths(self, org_apache_felix_jetty_gzip_excluded_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_paths = org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_included_methods(self, org_apache_felix_jetty_gzip_included_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_methods = org_apache_felix_jetty_gzip_included_methods",
"def org_apache_felix_jetty_gzip_included_mime_types(self, org_apache_felix_jetty_gzip_included_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_mime_types = org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gziphandler_enable(self, org_apache_felix_jetty_gziphandler_enable: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_jetty_gziphandler_enable = org_apache_felix_jetty_gziphandler_enable",
"def org_apache_felix_jetty_gziphandler_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gziphandler_enable",
"def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_included_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_methods",
"def org_apache_felix_jetty_gzip_compression_level(self, org_apache_felix_jetty_gzip_compression_level: ConfigNodePropertyInteger):\n\n self._org_apache_felix_jetty_gzip_compression_level = org_apache_felix_jetty_gzip_compression_level",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self, org_apache_felix_jetty_gzip_excluded_user_agents: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_user_agents = org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self, org_apache_felix_jetty_gzip_excluded_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_mime_types = org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_compression_level(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_compression_level",
"def org_apache_felix_https_jetty_protocols_included(self, org_apache_felix_https_jetty_protocols_included: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_protocols_included = org_apache_felix_https_jetty_protocols_included",
"def org_apache_felix_jetty_gzip_min_gzip_size(self, org_apache_felix_jetty_gzip_min_gzip_size: ConfigNodePropertyInteger):\n\n self._org_apache_felix_jetty_gzip_min_gzip_size = org_apache_felix_jetty_gzip_min_gzip_size",
"def org_apache_felix_jetty_gzip_included_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_https_jetty_ciphersuites_included(self, org_apache_felix_https_jetty_ciphersuites_included: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_ciphersuites_included = org_apache_felix_https_jetty_ciphersuites_included",
"def org_apache_felix_jetty_gzip_excluded_methods(self, org_apache_felix_jetty_gzip_excluded_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_methods = org_apache_felix_jetty_gzip_excluded_methods",
"def org_apache_felix_jetty_gzip_sync_flush(self, org_apache_felix_jetty_gzip_sync_flush: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_jetty_gzip_sync_flush = org_apache_felix_jetty_gzip_sync_flush",
"def org_apache_felix_http_path_exclusions(self, org_apache_felix_http_path_exclusions: ConfigNodePropertyArray):\n\n self._org_apache_felix_http_path_exclusions = org_apache_felix_http_path_exclusions",
"def org_apache_felix_jetty_gzip_inflate_buffer_size(self, org_apache_felix_jetty_gzip_inflate_buffer_size: ConfigNodePropertyInteger):\n\n self._org_apache_felix_jetty_gzip_inflate_buffer_size = org_apache_felix_jetty_gzip_inflate_buffer_size",
"def org_apache_felix_jetty_gzip_min_gzip_size(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_min_gzip_size",
"def org_apache_felix_jetty_gzip_sync_flush(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gzip_sync_flush",
"def org_apache_felix_http_context_path(self, org_apache_felix_http_context_path: ConfigNodePropertyString):\n\n self._org_apache_felix_http_context_path = org_apache_felix_http_context_path",
"def org_apache_felix_jetty_gzip_inflate_buffer_size(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_inflate_buffer_size",
"def org_apache_felix_http_path_exclusions(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_http_path_exclusions",
"def set_extra_headers(self, path):\n pass",
"def set_nginx_path(self, packages_dir, nginx_version):\n if nginx_version:\n self._nginx_path = f\"{packages_dir}/nginx-{nginx_version}\"",
"def org_apache_felix_http_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_http_enable",
"def org_apache_felix_http_enable(self, org_apache_felix_http_enable: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_http_enable = org_apache_felix_http_enable",
"def org_apache_felix_https_jetty_protocols_included(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_protocols_included"
] | [
"0.72814023",
"0.6815239",
"0.65494156",
"0.6281999",
"0.6279541",
"0.61835283",
"0.59572554",
"0.5699541",
"0.54085815",
"0.52071905",
"0.5186127",
"0.5165024",
"0.51406497",
"0.5057689",
"0.5044036",
"0.5007892",
"0.49969727",
"0.49527168",
"0.48988962",
"0.48697796",
"0.48678723",
"0.4801856",
"0.4800244",
"0.46442518",
"0.4640328",
"0.45883152",
"0.44874975",
"0.44758385",
"0.44259903",
"0.4422845"
] | 0.8183289 | 0 |
Gets the org_apache_felix_jetty_gzip_excluded_paths of this OrgApacheFelixHttpProperties. | def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:
return self._org_apache_felix_jetty_gzip_excluded_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_jetty_gzip_included_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gzip_excluded_paths(self, org_apache_felix_jetty_gzip_excluded_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_paths = org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_http_path_exclusions(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_http_path_exclusions",
"def org_apache_felix_jetty_gzip_excluded_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_methods",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_user_agents",
"def filter_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._filter_excluded_paths",
"def org_apache_felix_jetty_gzip_included_paths(self, org_apache_felix_jetty_gzip_included_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_paths = org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self, org_apache_felix_jetty_gzip_excluded_user_agents: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_user_agents = org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self, org_apache_felix_jetty_gzip_excluded_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_mime_types = org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_included_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_methods",
"def org_apache_felix_jetty_gzip_included_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_excluded_methods(self, org_apache_felix_jetty_gzip_excluded_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_methods = org_apache_felix_jetty_gzip_excluded_methods",
"def _exclude_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS')\n if not paths:\n return []\n return paths.split(';')",
"def org_apache_felix_https_jetty_protocols_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_protocols_excluded",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_excluded",
"def excludes(self):\r\n\r\n return self._excludes",
"def org_apache_felix_jetty_gziphandler_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gziphandler_enable",
"def excludes(self) -> Set:\n if self._excludes is None:\n manifest = self._get_manifest()\n self._excludes = manifest[\"files\"][\"excludes\"]\n\n return self._excludes",
"def gas_exclusions(self) -> List[ContractFunctionPath]:\n\n cli_value = self.pytest_config.getoption(\"--gas-exclude\")\n exclusions: List[ContractFunctionPath] = []\n if cli_value:\n items = cli_value.split(\",\")\n for item in items:\n exclusion = ContractFunctionPath.from_str(item)\n exclusions.append(exclusion)\n\n paths = _get_config_exclusions(self.ape_test_config.gas)\n exclusions.extend(paths)\n return exclusions",
"def org_apache_felix_http_path_exclusions(self, org_apache_felix_http_path_exclusions: ConfigNodePropertyArray):\n\n self._org_apache_felix_http_path_exclusions = org_apache_felix_http_path_exclusions",
"def warping_paths(self):\n return self.paths",
"def org_apache_felix_jetty_gzip_compression_level(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_compression_level",
"def exclusions(self) -> Sequence['outputs.LogExclusionResponse']:\n return pulumi.get(self, \"exclusions\")",
"def exclude_env_vars(self):\n filter = self.conf.get(\"filter\", {})\n\n # DEPRECATED: remove in v0.20\n return get_deprecated(filter, \"exclude_env_vars\", \"environment_blacklist\", {})",
"def exclude_regexes(self) -> Optional[List[str]]:\n return pulumi.get(self, \"exclude_regexes\")",
"def option_skip_url_string(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionSkipURLString/')))",
"def filter_excluded_paths(self, filter_excluded_paths: ConfigNodePropertyArray):\n\n self._filter_excluded_paths = filter_excluded_paths",
"def excluded_from_scan(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))",
"def path_excluded(self,path):\n\t\tfor pattern in self.excludes['file_exclude']:\n\t\t\tif pattern in path:\n\t\t\t\t#print \" \u001b[41mExcluding:\u001b[m\",path\n\t\t\t\treturn True\n\t\treturn False"
] | [
"0.7664793",
"0.755466",
"0.71894544",
"0.6854936",
"0.6747863",
"0.6717936",
"0.6661041",
"0.6292526",
"0.59868807",
"0.59426415",
"0.58985263",
"0.5872978",
"0.5845391",
"0.57460874",
"0.56098026",
"0.55324715",
"0.550655",
"0.54484224",
"0.54354405",
"0.5385288",
"0.5339532",
"0.5237775",
"0.52007496",
"0.51865613",
"0.5159026",
"0.5051459",
"0.5027407",
"0.49755824",
"0.4923909",
"0.49190724"
] | 0.8602508 | 0 |
Sets the org_apache_felix_jetty_gzip_excluded_paths of this OrgApacheFelixHttpProperties. | def org_apache_felix_jetty_gzip_excluded_paths(self, org_apache_felix_jetty_gzip_excluded_paths: ConfigNodePropertyArray):
self._org_apache_felix_jetty_gzip_excluded_paths = org_apache_felix_jetty_gzip_excluded_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_included_paths(self, org_apache_felix_jetty_gzip_included_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_paths = org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self, org_apache_felix_jetty_gzip_excluded_user_agents: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_user_agents = org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self, org_apache_felix_jetty_gzip_excluded_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_mime_types = org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_included_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gzip_excluded_methods(self, org_apache_felix_jetty_gzip_excluded_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_methods = org_apache_felix_jetty_gzip_excluded_methods",
"def org_apache_felix_http_path_exclusions(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_http_path_exclusions",
"def org_apache_felix_http_path_exclusions(self, org_apache_felix_http_path_exclusions: ConfigNodePropertyArray):\n\n self._org_apache_felix_http_path_exclusions = org_apache_felix_http_path_exclusions",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_excluded_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_methods",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_mime_types",
"def filter_excluded_paths(self, filter_excluded_paths: ConfigNodePropertyArray):\n\n self._filter_excluded_paths = filter_excluded_paths",
"def org_apache_felix_https_jetty_protocols_excluded(self, org_apache_felix_https_jetty_protocols_excluded: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_protocols_excluded = org_apache_felix_https_jetty_protocols_excluded",
"def org_apache_felix_jetty_gziphandler_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gziphandler_enable",
"def org_apache_felix_jetty_gzip_included_mime_types(self, org_apache_felix_jetty_gzip_included_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_mime_types = org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_included_methods(self, org_apache_felix_jetty_gzip_included_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_methods = org_apache_felix_jetty_gzip_included_methods",
"def filter_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._filter_excluded_paths",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self, org_apache_felix_https_jetty_ciphersuites_excluded: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_ciphersuites_excluded = org_apache_felix_https_jetty_ciphersuites_excluded",
"def org_apache_felix_jetty_gziphandler_enable(self, org_apache_felix_jetty_gziphandler_enable: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_jetty_gziphandler_enable = org_apache_felix_jetty_gziphandler_enable",
"def org_apache_felix_https_jetty_protocols_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_protocols_excluded",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_excluded",
"def org_apache_felix_jetty_gzip_compression_level(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_compression_level",
"def _exclude_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS')\n if not paths:\n return []\n return paths.split(';')",
"def clear_excludepatterns(self):\n self._excludepatterns = []",
"def org_apache_felix_jetty_gzip_included_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_methods",
"def org_apache_felix_jetty_gzip_included_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_compression_level(self, org_apache_felix_jetty_gzip_compression_level: ConfigNodePropertyInteger):\n\n self._org_apache_felix_jetty_gzip_compression_level = org_apache_felix_jetty_gzip_compression_level",
"def excludes(self, excludes):\n\n self._excludes = excludes",
"def path_excluded(self,path):\n\t\tfor pattern in self.excludes['file_exclude']:\n\t\t\tif pattern in path:\n\t\t\t\t#print \" \u001b[41mExcluding:\u001b[m\",path\n\t\t\t\treturn True\n\t\treturn False",
"def _path_exclude(self,path):\n\t\t#exclusions = [\t'10A103_Milky_Way_DA_NY/flame_settings_BU',\n\t\t#\t\t\t'/2010_archive/conformFS/p6/0'\n\t\t#\t\t\t]\n\t\t#for exc in exclusions:\n\t\t#\tif exc in path:\n\t\t#\t\treturn True\n\t\treturn False"
] | [
"0.73312",
"0.6721894",
"0.64388037",
"0.64170027",
"0.6315897",
"0.6179333",
"0.60733014",
"0.5924842",
"0.57383174",
"0.5684811",
"0.56573874",
"0.5620455",
"0.5342405",
"0.5341773",
"0.53177553",
"0.52853596",
"0.52180296",
"0.50882804",
"0.5034334",
"0.48919365",
"0.48573786",
"0.47108966",
"0.4709174",
"0.47002593",
"0.46881604",
"0.4675103",
"0.46623826",
"0.4447189",
"0.44394347",
"0.43923363"
] | 0.79911524 | 0 |
Gets the org_apache_felix_jetty_gzip_included_mime_types of this OrgApacheFelixHttpProperties. | def org_apache_felix_jetty_gzip_included_mime_types(self) -> ConfigNodePropertyArray:
return self._org_apache_felix_jetty_gzip_included_mime_types | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_jetty_gzip_included_mime_types(self, org_apache_felix_jetty_gzip_included_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_mime_types = org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self, org_apache_felix_jetty_gzip_excluded_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_mime_types = org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_included_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gzip_included_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_methods",
"def mime_allowed(self) -> ConfigNodePropertyArray:\n return self._mime_allowed",
"def mime_types(self) -> FilebaseApiConfigMimeTypes:\n mime_types = self.get(\"mime_types\", {})\n if not isinstance(mime_types, FilebaseApiConfigMimeTypes):\n mime_types = FilebaseApiConfigMimeTypes(**mime_types)\n self[\"mime_types\"] = mime_types\n return mime_types",
"def org_apache_felix_jetty_gzip_included_paths(self, org_apache_felix_jetty_gzip_included_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_paths = org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gziphandler_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gziphandler_enable",
"def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_included_methods(self, org_apache_felix_jetty_gzip_included_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_methods = org_apache_felix_jetty_gzip_included_methods",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_user_agents",
"def allowed_attachment_types(self) -> ConfigNodePropertyArray:\n return self._allowed_attachment_types",
"def org_apache_felix_jetty_gzip_min_gzip_size(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_min_gzip_size",
"def get_allowed_file_types(self):\n return self.allowed_file_types",
"def org_apache_felix_jetty_gzip_excluded_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_methods",
"def getMimeTypes(self): #$NON-NLS-1$\r",
"def org_apache_felix_jetty_gzip_compression_level(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_compression_level",
"def getMimeType(self):\n return self.get('MimeType', list=True, type=\"regex\")",
"def file_types(self) -> Optional[List[str]]:\n return pulumi.get(self, \"file_types\")",
"def get_included_files(self):\n return self._includedfiles",
"def _wsgi_headers(self, media_type=None):\n\n headers = self._headers\n\n # PERF(kgriffs): Using \"in\" like this is faster than using\n # dict.setdefault (tested on py27).\n set_content_type = (media_type is not None and\n 'content-type' not in headers)\n\n if set_content_type:\n headers['content-type'] = media_type\n\n if six.PY2: # pragma: no cover\n # PERF(kgriffs): Don't create an extra list object if\n # it isn't needed.\n return headers.items()\n\n return list(headers.items()) # pragma: no cover",
"def get_all_supported_mimetypes_with_modules(self):\n\n # init dictionary and list\n mime_types_with_columns = {}\n mime_types = []\n\n # First get all mime-types\n for module in self.modules:\n # Global module is for each mime-type so ingnore it.\n if (not module.is_global and not \"__init__.py\" in module.path):\n # If key not already exists, append to the list with mime-types\n if not module.mimetype in mime_types:\n mime_types.append(module.mimetype)\n\n # Then get all columns for a mime-type\n for mime_type in mime_types:\n modules = {}\n for module in self.modules:\n # Global module is for each mime-type so ingnore it.\n if (not module.is_global and not \"__init__.py\" in module.path):\n if (module.mimetype == (mime_type)\n or (module.mimetype and\n mime_type.startswith\n (module.mimetype))):\n modules[module.tablename] = module.md5_tablename\n\n mime_types_with_columns[mime_type] = modules\n\n del mime_types\n return mime_types_with_columns",
"def org_apache_felix_jetty_gzip_excluded_paths(self, org_apache_felix_jetty_gzip_excluded_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_paths = org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_inflate_buffer_size(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_inflate_buffer_size",
"def _get_request_body_file_type(self) -> Optional[str]:\n result = None\n for decorator in self._find_decorators(AcceptsFileDecorator):\n if result is not None:\n raise TypeError(\"An endpoint cannot accept files of multiple types\")\n\n result = decorator.mime_type\n\n return result",
"def allowed_headers(self):\n\n return self._allowed_headers",
"def allowed_headers(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('allowed_headers')",
"def org_apache_felix_jetty_gziphandler_enable(self, org_apache_felix_jetty_gziphandler_enable: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_jetty_gziphandler_enable = org_apache_felix_jetty_gziphandler_enable",
"def included_event_types(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"included_event_types\")"
] | [
"0.7882282",
"0.7741241",
"0.687413",
"0.6550321",
"0.64034975",
"0.60768574",
"0.5863483",
"0.5809866",
"0.56989354",
"0.5532147",
"0.5517617",
"0.5490475",
"0.5480388",
"0.54086906",
"0.5342103",
"0.5271495",
"0.5217648",
"0.5194284",
"0.5113195",
"0.5049082",
"0.49985638",
"0.4956359",
"0.49253848",
"0.49043834",
"0.48856825",
"0.48854148",
"0.48698613",
"0.48395565",
"0.4797045",
"0.47802785"
] | 0.88149124 | 0 |
Sets the org_apache_felix_jetty_gzip_included_mime_types of this OrgApacheFelixHttpProperties. | def org_apache_felix_jetty_gzip_included_mime_types(self, org_apache_felix_jetty_gzip_included_mime_types: ConfigNodePropertyArray):
self._org_apache_felix_jetty_gzip_included_mime_types = org_apache_felix_jetty_gzip_included_mime_types | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_jetty_gzip_included_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self, org_apache_felix_jetty_gzip_excluded_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_mime_types = org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_included_paths(self, org_apache_felix_jetty_gzip_included_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_paths = org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_jetty_gzip_included_methods(self, org_apache_felix_jetty_gzip_included_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_methods = org_apache_felix_jetty_gzip_included_methods",
"def org_apache_felix_jetty_gziphandler_enable(self, org_apache_felix_jetty_gziphandler_enable: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_jetty_gziphandler_enable = org_apache_felix_jetty_gziphandler_enable",
"def org_apache_felix_jetty_gziphandler_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gziphandler_enable",
"def mime_types(self) -> FilebaseApiConfigMimeTypes:\n mime_types = self.get(\"mime_types\", {})\n if not isinstance(mime_types, FilebaseApiConfigMimeTypes):\n mime_types = FilebaseApiConfigMimeTypes(**mime_types)\n self[\"mime_types\"] = mime_types\n return mime_types",
"def mime_allowed(self, mime_allowed: ConfigNodePropertyArray):\n\n self._mime_allowed = mime_allowed",
"def org_apache_felix_jetty_gzip_excluded_paths(self, org_apache_felix_jetty_gzip_excluded_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_paths = org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_included_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_paths",
"def mime_type(self, mime_type):\n if self.local_vars_configuration.client_side_validation and mime_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `mime_type`, must not be `None`\") # noqa: E501\n allowed_values = [\"application/json\", \"application/x-www-form-urlencoded\", \"none\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and mime_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `mime_type` ({0}), must be one of {1}\" # noqa: E501\n .format(mime_type, allowed_values)\n )\n\n self._mime_type = mime_type",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self, org_apache_felix_jetty_gzip_excluded_user_agents: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_user_agents = org_apache_felix_jetty_gzip_excluded_user_agents",
"def add_content_types():\n for ext in EXTRA_TYPES:\n mimetypes.add_type(EXTRA_TYPES[ext], ext)",
"def org_apache_felix_jetty_gzip_included_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_methods",
"def svn_client_ctx_t_mimetypes_map_set(svn_client_ctx_t_self, apr_hash_t_mimetypes_map): # real signature unknown; restored from __doc__\n pass",
"def mime_type(self, mime_type):\n\n self._mime_type = mime_type",
"def org_apache_felix_jetty_gzip_min_gzip_size(self, org_apache_felix_jetty_gzip_min_gzip_size: ConfigNodePropertyInteger):\n\n self._org_apache_felix_jetty_gzip_min_gzip_size = org_apache_felix_jetty_gzip_min_gzip_size",
"def mime_allowed(self) -> ConfigNodePropertyArray:\n return self._mime_allowed",
"def purge_content_types_if_file_present(self):\n if 'files' in self.request_params:\n headers = self.request_params.get('headers', {}) or {}\n headers.pop('content-type', '')",
"def org_apache_felix_jetty_gzip_min_gzip_size(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_min_gzip_size",
"def attachment_mime_type(self, attachment_mime_type):\n\n self._attachment_mime_type = attachment_mime_type",
"def org_apache_felix_jetty_gzip_excluded_methods(self, org_apache_felix_jetty_gzip_excluded_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_methods = org_apache_felix_jetty_gzip_excluded_methods",
"def allowed_attachment_types(self, allowed_attachment_types: ConfigNodePropertyArray):\n\n self._allowed_attachment_types = allowed_attachment_types",
"def org_apache_felix_jetty_gzip_compression_level(self, org_apache_felix_jetty_gzip_compression_level: ConfigNodePropertyInteger):\n\n self._org_apache_felix_jetty_gzip_compression_level = org_apache_felix_jetty_gzip_compression_level",
"def org_apache_felix_jetty_gzip_compression_level(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_jetty_gzip_compression_level",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_inflate_buffer_size(self, org_apache_felix_jetty_gzip_inflate_buffer_size: ConfigNodePropertyInteger):\n\n self._org_apache_felix_jetty_gzip_inflate_buffer_size = org_apache_felix_jetty_gzip_inflate_buffer_size",
"def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_paths",
"def mime_allow_empty(self) -> ConfigNodePropertyBoolean:\n return self._mime_allow_empty"
] | [
"0.76558256",
"0.74655026",
"0.6808436",
"0.62108487",
"0.5826314",
"0.55636775",
"0.556074",
"0.5519447",
"0.54764766",
"0.53906035",
"0.53241843",
"0.5211381",
"0.51962066",
"0.5180828",
"0.51409984",
"0.512985",
"0.5088649",
"0.5085278",
"0.5022681",
"0.49057752",
"0.48752946",
"0.48193994",
"0.4807158",
"0.4790563",
"0.47514877",
"0.46666747",
"0.46453327",
"0.46319792",
"0.45080185",
"0.4503936"
] | 0.8373357 | 0 |
Gets the org_apache_felix_jetty_gzip_excluded_mime_types of this OrgApacheFelixHttpProperties. | def org_apache_felix_jetty_gzip_excluded_mime_types(self) -> ConfigNodePropertyArray:
return self._org_apache_felix_jetty_gzip_excluded_mime_types | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_jetty_gzip_included_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_excluded_mime_types(self, org_apache_felix_jetty_gzip_excluded_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_mime_types = org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_included_mime_types(self, org_apache_felix_jetty_gzip_included_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_mime_types = org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_excluded_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_methods",
"def mime_allowed(self) -> ConfigNodePropertyArray:\n return self._mime_allowed",
"def attachment_type_blacklist(self) -> ConfigNodePropertyArray:\n return self._attachment_type_blacklist",
"def org_apache_felix_jetty_gzip_excluded_paths(self, org_apache_felix_jetty_gzip_excluded_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_paths = org_apache_felix_jetty_gzip_excluded_paths",
"def allowed_attachment_types(self) -> ConfigNodePropertyArray:\n return self._allowed_attachment_types",
"def mime_types(self) -> FilebaseApiConfigMimeTypes:\n mime_types = self.get(\"mime_types\", {})\n if not isinstance(mime_types, FilebaseApiConfigMimeTypes):\n mime_types = FilebaseApiConfigMimeTypes(**mime_types)\n self[\"mime_types\"] = mime_types\n return mime_types",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self, org_apache_felix_jetty_gzip_excluded_user_agents: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_user_agents = org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_included_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_paths",
"def get_allowed_file_types(self):\n return self.allowed_file_types",
"def org_apache_felix_http_path_exclusions(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_http_path_exclusions",
"def org_apache_felix_jetty_gzip_excluded_methods(self, org_apache_felix_jetty_gzip_excluded_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_methods = org_apache_felix_jetty_gzip_excluded_methods",
"def org_apache_felix_jetty_gzip_included_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_methods",
"def info_types(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypesInfoType']:\n return pulumi.get(self, \"info_types\")",
"def file_types(self) -> Optional[List[str]]:\n return pulumi.get(self, \"file_types\")",
"def exclude_info_types(self) -> Optional['outputs.PreventionInspectTemplateInspectConfigRuleSetRuleExclusionRuleExcludeInfoTypes']:\n return pulumi.get(self, \"exclude_info_types\")",
"def excludes(self) -> Set:\n if self._excludes is None:\n manifest = self._get_manifest()\n self._excludes = manifest[\"files\"][\"excludes\"]\n\n return self._excludes",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_excluded",
"def org_apache_felix_https_jetty_protocols_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_protocols_excluded",
"def exclude_regexes(self) -> Optional[List[str]]:\n return pulumi.get(self, \"exclude_regexes\")",
"def list_available_properties(self):\n # exclude file field\n properties = self.properties.exclude(\n json_schema__contains={\"format\": 'data-url'},\n )\n # exclude array object fields\n\n properties = properties.exclude(\n json_schema__contains={\"type\": \"array\", \"items\": {\"type\": \"object\"}}\n )\n # exclude textarea fields\n properties = properties.exclude(\n ui_schema__contains={'ui:widget': 'textarea'}\n )\n # exclude rte fields\n properties = properties.exclude(\n ui_schema__contains={'ui:field': 'rte'}\n )\n return properties",
"def getMimeTypes(self): #$NON-NLS-1$\r",
"def excluded_resource_types(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"excluded_resource_types\")",
"def excludes(self):\r\n\r\n return self._excludes",
"def getMimeType(self):\n return self.get('MimeType', list=True, type=\"regex\")",
"def org_apache_felix_jetty_gziphandler_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gziphandler_enable"
] | [
"0.8096301",
"0.78604734",
"0.6767221",
"0.67533714",
"0.66217756",
"0.64377946",
"0.6267546",
"0.6009801",
"0.59769696",
"0.59420717",
"0.5840183",
"0.575336",
"0.56591237",
"0.5629573",
"0.5563937",
"0.5517507",
"0.5375173",
"0.5352545",
"0.51818913",
"0.5176541",
"0.51742285",
"0.51653194",
"0.5139389",
"0.512521",
"0.5108609",
"0.5066824",
"0.50655127",
"0.5061801",
"0.50201505",
"0.49935186"
] | 0.8787211 | 0 |
Sets the org_apache_felix_jetty_gzip_excluded_mime_types of this OrgApacheFelixHttpProperties. | def org_apache_felix_jetty_gzip_excluded_mime_types(self, org_apache_felix_jetty_gzip_excluded_mime_types: ConfigNodePropertyArray):
self._org_apache_felix_jetty_gzip_excluded_mime_types = org_apache_felix_jetty_gzip_excluded_mime_types | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_jetty_gzip_excluded_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_mime_types",
"def org_apache_felix_jetty_gzip_included_mime_types(self, org_apache_felix_jetty_gzip_included_mime_types: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_mime_types = org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_included_mime_types(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_included_mime_types",
"def org_apache_felix_jetty_gzip_excluded_paths(self, org_apache_felix_jetty_gzip_excluded_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_paths = org_apache_felix_jetty_gzip_excluded_paths",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self, org_apache_felix_jetty_gzip_excluded_user_agents: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_user_agents = org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_excluded_methods(self, org_apache_felix_jetty_gzip_excluded_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_excluded_methods = org_apache_felix_jetty_gzip_excluded_methods",
"def org_apache_felix_jetty_gzip_excluded_user_agents(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_user_agents",
"def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_paths",
"def mime_types(self) -> FilebaseApiConfigMimeTypes:\n mime_types = self.get(\"mime_types\", {})\n if not isinstance(mime_types, FilebaseApiConfigMimeTypes):\n mime_types = FilebaseApiConfigMimeTypes(**mime_types)\n self[\"mime_types\"] = mime_types\n return mime_types",
"def org_apache_felix_jetty_gzip_excluded_methods(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_methods",
"def mime_allowed(self, mime_allowed: ConfigNodePropertyArray):\n\n self._mime_allowed = mime_allowed",
"def purge_content_types_if_file_present(self):\n if 'files' in self.request_params:\n headers = self.request_params.get('headers', {}) or {}\n headers.pop('content-type', '')",
"def mime_allowed(self) -> ConfigNodePropertyArray:\n return self._mime_allowed",
"def allowed_attachment_types(self, allowed_attachment_types: ConfigNodePropertyArray):\n\n self._allowed_attachment_types = allowed_attachment_types",
"def mime_type(self, mime_type):\n if self.local_vars_configuration.client_side_validation and mime_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `mime_type`, must not be `None`\") # noqa: E501\n allowed_values = [\"application/json\", \"application/x-www-form-urlencoded\", \"none\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and mime_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `mime_type` ({0}), must be one of {1}\" # noqa: E501\n .format(mime_type, allowed_values)\n )\n\n self._mime_type = mime_type",
"def org_apache_felix_jetty_gzip_included_paths(self, org_apache_felix_jetty_gzip_included_paths: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_paths = org_apache_felix_jetty_gzip_included_paths",
"def org_apache_felix_https_jetty_protocols_excluded(self, org_apache_felix_https_jetty_protocols_excluded: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_protocols_excluded = org_apache_felix_https_jetty_protocols_excluded",
"def org_apache_felix_jetty_gziphandler_enable(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_jetty_gziphandler_enable",
"def allowed_attachment_types(self) -> ConfigNodePropertyArray:\n return self._allowed_attachment_types",
"def attachment_type_blacklist(self) -> ConfigNodePropertyArray:\n return self._attachment_type_blacklist",
"def mime_allow_empty(self) -> ConfigNodePropertyBoolean:\n return self._mime_allow_empty",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self, org_apache_felix_https_jetty_ciphersuites_excluded: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_ciphersuites_excluded = org_apache_felix_https_jetty_ciphersuites_excluded",
"def clear_excludepatterns(self):\n self._excludepatterns = []",
"def org_apache_felix_http_path_exclusions(self, org_apache_felix_http_path_exclusions: ConfigNodePropertyArray):\n\n self._org_apache_felix_http_path_exclusions = org_apache_felix_http_path_exclusions",
"def mime_type(self, mime_type):\n\n self._mime_type = mime_type",
"def org_apache_felix_jetty_gzip_included_methods(self, org_apache_felix_jetty_gzip_included_methods: ConfigNodePropertyArray):\n\n self._org_apache_felix_jetty_gzip_included_methods = org_apache_felix_jetty_gzip_included_methods",
"def attachment_type_blacklist(self, attachment_type_blacklist: ConfigNodePropertyArray):\n\n self._attachment_type_blacklist = attachment_type_blacklist",
"def mime_allow_empty(self, mime_allow_empty: ConfigNodePropertyBoolean):\n\n self._mime_allow_empty = mime_allow_empty",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_excluded",
"def org_apache_felix_jetty_gziphandler_enable(self, org_apache_felix_jetty_gziphandler_enable: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_jetty_gziphandler_enable = org_apache_felix_jetty_gziphandler_enable"
] | [
"0.7721878",
"0.7150851",
"0.6898612",
"0.63561344",
"0.6181627",
"0.5800203",
"0.5683951",
"0.5612671",
"0.54054135",
"0.53159565",
"0.52980393",
"0.5160608",
"0.5096038",
"0.503808",
"0.49808657",
"0.49463513",
"0.49146417",
"0.48810196",
"0.4846096",
"0.47825828",
"0.4735421",
"0.47235686",
"0.46932214",
"0.468906",
"0.46483925",
"0.4645981",
"0.46362525",
"0.46160427",
"0.45940626",
"0.45896167"
] | 0.83199024 | 0 |
Gets the org_apache_felix_http_session_invalidate of this OrgApacheFelixHttpProperties. | def org_apache_felix_http_session_invalidate(self) -> ConfigNodePropertyBoolean:
return self._org_apache_felix_http_session_invalidate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_http_session_invalidate(self, org_apache_felix_http_session_invalidate: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_http_session_invalidate = org_apache_felix_http_session_invalidate",
"def auth_invalidate_session(self) -> None:\n self.__logger.debug('Eva.auth_invalidate_session called')\n return self.__http_client.auth_invalidate_session()",
"def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout",
"def auth_renew_session(self) -> None:\n self.__logger.debug('Eva.auth_renew_session called')\n return self.__http_client.auth_renew_session()",
"def ExpireIncrSec(self):\n if self.force_auto_sync:\n self.get('ExpireIncrSec')\n return self._ExpireIncrSec",
"def org_apache_felix_http_session_uniqueid(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_http_session_uniqueid",
"def org_apache_felix_http_session_timeout(self, org_apache_felix_http_session_timeout: ConfigNodePropertyInteger):\n\n self._org_apache_felix_http_session_timeout = org_apache_felix_http_session_timeout",
"def refresh(self):\n\t\turl = f'{self.root.url}/api/v1/sessions/refresh'\n\t\tself.root.login_time = 0\n\t\tstatus, data = self.root.r('GET', url, body=None, headers=None, verify=self.root.verify)\n\t\ttry:\n\t\t\tif status < 400:\n\t\t\t\tif 'token' in data:\n\t\t\t\t\tself.root.token = data['token']\n\t\t\t\t\tself.root.login_time = time.monotonic()\n\t\t\t\t\treturn status, True\n\t\texcept:\n\t\t\treturn status, False",
"def expire(self):\n return self._expire",
"def expire(self):\n\n return self._expire",
"def ExpireIncr(self):\n if self.force_auto_sync:\n self.get('ExpireIncr')\n return self._ExpireIncr",
"def idle_session_ttl_in_seconds(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"idle_session_ttl_in_seconds\")",
"def idle_session_ttl_in_seconds(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"idle_session_ttl_in_seconds\")",
"def _get_expire(self):\n return self.__expire",
"def org_eclipse_jetty_servlet_session_cookie(self) -> ConfigNodePropertyString:\n return self._org_eclipse_jetty_servlet_session_cookie",
"def org_apache_felix_http_session_uniqueid(self, org_apache_felix_http_session_uniqueid: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_http_session_uniqueid = org_apache_felix_http_session_uniqueid",
"def expires(self):\n return self._data.get('expires')",
"def get(self): # Logout method\n\n response = get_custom_response(success=True,\n message='You are Logged out successfully'\n , status_code=200\n )\n return response",
"def expire_httpd_session(self):\n if os.path.isfile(self.HTTPD_SESSION_FILE):\n logger.debug('expiring session by renaming session file to %s' % (self.HTTPD_SESSION_FILE_EXPIRED))\n os.rename(self.HTTPD_SESSION_FILE, self.HTTPD_SESSION_FILE_EXPIRED)",
"def org_apache_felix_http_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_timeout",
"def check_token_invalidate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/invalidate/\"\n return self._lr_object._get_json(url, payload)",
"def invalidate(self):\n super(HttpTransport, self).invalidate()\n return False",
"def stop(self):\n return self.rpc.call(MsfRpcMethod.SessionStop, [self.sid])",
"def invalidate(self):\n self.modified = True\n self._invalidated = True\n self._session.clear()",
"def session_hash(self):\n return self._session_hash",
"def getExpires(self):\n return self.base.get(\"expires\", [])",
"def get_session_property(self, key):\n\n try:\n return self.session[key]\n except KeyError:\n return None",
"def sessiontimeout(self) :\n\t\ttry :\n\t\t\treturn self._sessiontimeout\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_existing_session_invalidate_nodupe(self):\n # existing session -> invalidate()\n request = self._make_request()\n session_id = self._get_session_id(request)\n self._set_session_cookie(request=request, session_id=session_id)\n request.session = self._makeOne(request)\n self._register_callback(request, request.session)\n persisted = request.session.redis.get(session_id)\n self.assertIsNotNone(persisted)\n\n # invalidate\n request.session.invalidate()\n response = webob.Response()\n request.response_callbacks[0](request, response)\n set_cookie_headers = response.headers.getall(\"Set-Cookie\")\n self.assertEqual(len(set_cookie_headers), 1)\n self.assertIn(\"Max-Age=0\", set_cookie_headers[0])\n\n # manually execute the callbacks\n request._process_finished_callbacks()\n\n # make sure this isn't in redis\n persisted = request.session.redis.get(session_id)\n self.assertIsNone(persisted)\n\n # make sure we don't have any keys in redis\n keys = request.session.redis.keys()\n self.assertEqual(len(keys), 0)",
"def ExpireTime(self):\n if self.force_auto_sync:\n self.get('ExpireTime')\n return self._ExpireTime"
] | [
"0.7217652",
"0.62059575",
"0.554804",
"0.53198594",
"0.50600505",
"0.5036488",
"0.4939528",
"0.4923382",
"0.4796819",
"0.47919518",
"0.47808963",
"0.4733047",
"0.46928778",
"0.4659546",
"0.4607059",
"0.4606501",
"0.45678967",
"0.45677337",
"0.4553705",
"0.45413536",
"0.45328128",
"0.45314565",
"0.44666812",
"0.44473487",
"0.4431188",
"0.44300914",
"0.4425043",
"0.44070315",
"0.4355686",
"0.4338958"
] | 0.7086756 | 1 |
Sets the org_apache_felix_http_session_invalidate of this OrgApacheFelixHttpProperties. | def org_apache_felix_http_session_invalidate(self, org_apache_felix_http_session_invalidate: ConfigNodePropertyBoolean):
self._org_apache_felix_http_session_invalidate = org_apache_felix_http_session_invalidate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_http_session_invalidate(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_http_session_invalidate",
"def org_apache_felix_http_session_timeout(self, org_apache_felix_http_session_timeout: ConfigNodePropertyInteger):\n\n self._org_apache_felix_http_session_timeout = org_apache_felix_http_session_timeout",
"def auth_invalidate_session(self) -> None:\n self.__logger.debug('Eva.auth_invalidate_session called')\n return self.__http_client.auth_invalidate_session()",
"def org_apache_felix_http_session_uniqueid(self, org_apache_felix_http_session_uniqueid: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_http_session_uniqueid = org_apache_felix_http_session_uniqueid",
"def expire_httpd_session(self):\n if os.path.isfile(self.HTTPD_SESSION_FILE):\n logger.debug('expiring session by renaming session file to %s' % (self.HTTPD_SESSION_FILE_EXPIRED))\n os.rename(self.HTTPD_SESSION_FILE, self.HTTPD_SESSION_FILE_EXPIRED)",
"def invalidate(self):\n self.modified = True\n self._invalidated = True\n self._session.clear()",
"def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout",
"def set_session_property(self, key, value):\n\n self.session[key] = value",
"def expire(self):\n Slate.expire(self)\n\n one_year = 60 * 60 * 24 * 365\n e = time.time() - one_year\n cherrypy.serving.response.cookie[self.session_cookie] = 'expired'\n cherrypy.serving.response.cookie[self.session_cookie]['expires'] = httputil.HTTPDate(e)",
"def invalidate(self):\n super(HttpTransport, self).invalidate()\n return False",
"def set_end_session(self, end):\n self.response.shouldEndSession = end",
"def org_apache_felix_http_timeout(self, org_apache_felix_http_timeout: ConfigNodePropertyInteger):\n\n self._org_apache_felix_http_timeout = org_apache_felix_http_timeout",
"def auth_renew_session(self) -> None:\n self.__logger.debug('Eva.auth_renew_session called')\n return self.__http_client.auth_renew_session()",
"def set_cookie(self, response):\n if self._invalidated:\n response.delete_cookie(\n key=self.app.conf.flask.session_cookie_name,\n path=self.app.conf.flask.session_cookie_path,\n domain=self.app.conf.flask.session_cookie_domain,\n )\n return\n response.set_cookie(\n key=self.app.conf.flask.session_cookie_name,\n value=self.meta.cookie_val,\n domain=self.app.conf.flask.session_cookie_domain,\n path=self.app.conf.flask.session_cookie_path,\n secure=self.app.conf.flask.session_cookie_secure,\n httponly=self.app.conf.flask.session_cookie_httponly,\n samesite=self.app.conf.flask.session_cookie_samesite,\n max_age=self.app.conf.flask.permanent_session_lifetime,\n )",
"def set_refresh_token(self, token):\n\n self.__current_request_mock.headers['Cookie'] = f'Refresh-Auth={token}'",
"def expire_session(SessionId=None):\n pass",
"def logout(self):\n\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')",
"def org_apache_felix_https_keystore(self, org_apache_felix_https_keystore: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore = org_apache_felix_https_keystore",
"def logout(self):\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')",
"def logout(self):\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')",
"def set_session_persistence(self, loadbalancer, val):\n loadbalancer.session_persistence = val",
"def invalidate_cache(self):\n self._invalidate_http_cache()",
"def org_apache_felix_https_jetty_session_cookie_http_only(self, org_apache_felix_https_jetty_session_cookie_http_only: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_https_jetty_session_cookie_http_only = org_apache_felix_https_jetty_session_cookie_http_only",
"def org_eclipse_jetty_servlet_session_cookie(self, org_eclipse_jetty_servlet_session_cookie: ConfigNodePropertyString):\n\n self._org_eclipse_jetty_servlet_session_cookie = org_eclipse_jetty_servlet_session_cookie",
"def refresh(self):\n\t\turl = f'{self.root.url}/api/v1/sessions/refresh'\n\t\tself.root.login_time = 0\n\t\tstatus, data = self.root.r('GET', url, body=None, headers=None, verify=self.root.verify)\n\t\ttry:\n\t\t\tif status < 400:\n\t\t\t\tif 'token' in data:\n\t\t\t\t\tself.root.token = data['token']\n\t\t\t\t\tself.root.login_time = time.monotonic()\n\t\t\t\t\treturn status, True\n\t\texcept:\n\t\t\treturn status, False",
"def refresh_token(self):\n url = 'https://www.yikyak.com/api/auth/token/refresh'\n token = self._request('POST', url)\n self.session.headers.update({'x-access-token': token})",
"def refresh_session():\n\n hruntime.response.headers['Cache-Control'] = 'must-revalidate, no-cache, no-store'\n\n hruntime.user = hruntime.dbroot.users[hruntime.session.name]\n hruntime.i18n = hruntime.dbroot.localization.languages['cz']",
"def test_existing_session_invalidate_nodupe(self):\n # existing session -> invalidate()\n request = self._make_request()\n session_id = self._get_session_id(request)\n self._set_session_cookie(request=request, session_id=session_id)\n request.session = self._makeOne(request)\n self._register_callback(request, request.session)\n persisted = request.session.redis.get(session_id)\n self.assertIsNotNone(persisted)\n\n # invalidate\n request.session.invalidate()\n response = webob.Response()\n request.response_callbacks[0](request, response)\n set_cookie_headers = response.headers.getall(\"Set-Cookie\")\n self.assertEqual(len(set_cookie_headers), 1)\n self.assertIn(\"Max-Age=0\", set_cookie_headers[0])\n\n # manually execute the callbacks\n request._process_finished_callbacks()\n\n # make sure this isn't in redis\n persisted = request.session.redis.get(session_id)\n self.assertIsNone(persisted)\n\n # make sure we don't have any keys in redis\n keys = request.session.redis.keys()\n self.assertEqual(len(keys), 0)",
"def org_apache_felix_http_session_uniqueid(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_http_session_uniqueid",
"def sessiontimeout(self, sessiontimeout) :\n\t\ttry :\n\t\t\tself._sessiontimeout = sessiontimeout\n\t\texcept Exception as e:\n\t\t\traise e"
] | [
"0.68846256",
"0.5935194",
"0.5732186",
"0.51594085",
"0.511239",
"0.49642003",
"0.4930626",
"0.4889229",
"0.48802778",
"0.48659036",
"0.47784737",
"0.47330034",
"0.46047753",
"0.45323482",
"0.45041436",
"0.44938076",
"0.44748378",
"0.44606805",
"0.4450914",
"0.4450914",
"0.43823832",
"0.43426132",
"0.4337256",
"0.4324694",
"0.43240345",
"0.43174186",
"0.4316366",
"0.42314008",
"0.4229013",
"0.421494"
] | 0.8399019 | 0 |
Gets the org_apache_felix_http_session_uniqueid of this OrgApacheFelixHttpProperties. | def org_apache_felix_http_session_uniqueid(self) -> ConfigNodePropertyBoolean:
return self._org_apache_felix_http_session_uniqueid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_http_session_uniqueid(self, org_apache_felix_http_session_uniqueid: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_http_session_uniqueid = org_apache_felix_http_session_uniqueid",
"def session_id(self) -> str:\n return self._session_id",
"def get_sessionid(self):\n if not self.__initialized or not self.__loggedin:\n raise NSNitroError(\"Not initialized or not logged in.\")\n\n return self.__sessionid",
"def get_session_id(self):\n return self.request_data['id']",
"def getSessionId(self):\n return self.sessionid",
"def get_session_id(self):\n raise NotImplementedError()",
"def getSessionId(self) -> int:\n return self.cpp.getSessionId()",
"def unique_id(self):\n return self.properties.get(\"UniqueId\", None)",
"def session_id(self):\n return self.browser.crawlera_session",
"def get_session_id(self, context: ResourceCommandContext) -> str:\n return self.handler.get_session_id()",
"def get_session_uuid(self):\n\t\treturn call_sdk_function('PrlLoginResponse_GetSessionUuid', self.handle)",
"def get_session_id(context):\n skey = session_key('session_id')\n session_id = get_session(context, skey)\n\n if session_id is None:\n session_id = generate_session_id(context)\n set_session(context, skey, session_id)\n\n return session_id",
"def unique_id(self) -> str | None:\n return self._config[CONF_ID]",
"def session_id(self) -> Optional[str]:\n session_id = self.params.get(ChannelBuilder.PARAM_SESSION_ID, None)\n if session_id is not None:\n try:\n uuid.UUID(session_id, version=4)\n except ValueError as ve:\n raise ValueError(\"Parameter value 'session_id' must be a valid UUID format.\", ve)\n return session_id",
"def org_eclipse_jetty_servlet_session_id_path_parameter_name(self) -> ConfigNodePropertyString:\n return self._org_eclipse_jetty_servlet_session_id_path_parameter_name",
"def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")",
"def device_session_identifier(self):\n return self._device_session_identifier",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id",
"def unique_id(self):\n return self._unique_id"
] | [
"0.6747569",
"0.6661827",
"0.65855795",
"0.65626913",
"0.6464378",
"0.63789254",
"0.6297064",
"0.62230587",
"0.6094951",
"0.6089729",
"0.6038436",
"0.6000553",
"0.59570223",
"0.59272194",
"0.58490086",
"0.5816694",
"0.5774036",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387",
"0.5772387"
] | 0.73061305 | 0 |
Sets the org_apache_felix_http_session_uniqueid of this OrgApacheFelixHttpProperties. | def org_apache_felix_http_session_uniqueid(self, org_apache_felix_http_session_uniqueid: ConfigNodePropertyBoolean):
self._org_apache_felix_http_session_uniqueid = org_apache_felix_http_session_uniqueid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_http_session_uniqueid(self) -> ConfigNodePropertyBoolean:\n return self._org_apache_felix_http_session_uniqueid",
"def org_apache_felix_http_session_timeout(self, org_apache_felix_http_session_timeout: ConfigNodePropertyInteger):\n\n self._org_apache_felix_http_session_timeout = org_apache_felix_http_session_timeout",
"def session_id(self, session_id):\n\n self._session_id = session_id",
"def session_hash(self, session_hash):\n \n self._session_hash = session_hash",
"def do_SetSessionName (self, line):\r\n OpensslTracking.session = line",
"def org_eclipse_jetty_servlet_session_id_path_parameter_name(self, org_eclipse_jetty_servlet_session_id_path_parameter_name: ConfigNodePropertyString):\n\n self._org_eclipse_jetty_servlet_session_id_path_parameter_name = org_eclipse_jetty_servlet_session_id_path_parameter_name",
"def session_id(self) -> str:\n return self._session_id",
"def set_session_property(self, key, value):\n\n self.session[key] = value",
"def for_session(self, session_id):\n if not isinstance(session_id, str):\n raise TypeError('Session Id must be a string')\n\n self.token['sessionId'] = session_id\n\n return self",
"def org_eclipse_jetty_servlet_session_id_path_parameter_name(self) -> ConfigNodePropertyString:\n return self._org_eclipse_jetty_servlet_session_id_path_parameter_name",
"def org_eclipse_jetty_servlet_session_cookie(self, org_eclipse_jetty_servlet_session_cookie: ConfigNodePropertyString):\n\n self._org_eclipse_jetty_servlet_session_cookie = org_eclipse_jetty_servlet_session_cookie",
"def get_sessionid(self):\n if not self.__initialized or not self.__loggedin:\n raise NSNitroError(\"Not initialized or not logged in.\")\n\n return self.__sessionid",
"def set_login_session(self, session_id=None):\r\n meta = self.get_meta()\r\n old_login = meta.get('session_id', None)\r\n if old_login:\r\n SessionStore(session_key=old_login).delete()\r\n meta['session_id'] = session_id\r\n self.set_meta(meta)\r\n self.save()",
"def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout",
"async def _async_set_unique_id(self, unique_id: str) -> None:\n await self.async_set_unique_id(unique_id)\n self._abort_if_unique_id_configured()",
"def _set_unique_id(self, json_request):\n values = []\n for field in value_fields:\n value = json_request.get(field, '')\n values.append(quote(self.fully_decode_uri(value), safe=''))\n if len(values) == 1:\n self.unique_id = values[0]\n elif len(values) == 2:\n self.unique_id = self.build_summary(values[0], values[1])\n elif len(values) == 1:\n self.unique_id = self.build_summary(values[0], values[1], values[2])",
"def session_id(self) -> Optional[str]:\n session_id = self.params.get(ChannelBuilder.PARAM_SESSION_ID, None)\n if session_id is not None:\n try:\n uuid.UUID(session_id, version=4)\n except ValueError as ve:\n raise ValueError(\"Parameter value 'session_id' must be a valid UUID format.\", ve)\n return session_id",
"def fusion_api_set_active_session(self, sessionId):\n return self.loginsession.set_active_session(sessionId)",
"def org_apache_felix_http_session_invalidate(self, org_apache_felix_http_session_invalidate: ConfigNodePropertyBoolean):\n\n self._org_apache_felix_http_session_invalidate = org_apache_felix_http_session_invalidate",
"def test_set_session_id(self, context):\n context.set_session_id(b\"abc\")",
"def session(self, value: ClientSession):\r\n self._session = value",
"def get_session_id(self):\n raise NotImplementedError()",
"def unique_identifier(self, unique_identifier):\n\n self._unique_identifier = unique_identifier",
"def setSession( self, name, value, REQUEST=None, cookie=None ):\n SetSessionValue( self, name, value, REQUEST, cookie )",
"def session_type_id(self, session_type_id):\n\n self._session_type_id = session_type_id",
"def set_user_cookie_id():\n #new fresh user\n if not request.cookies.get(config.COOKIE_ADSABS2_NAME):\n if current_user.is_anonymous():\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()\n #the user has already visited the web site\n else:\n if current_user.is_anonymous():\n #if the cookie is a valid UUID it's ok\n curr_cookie = request.cookies.get(config.COOKIE_ADSABS2_NAME)\n try:\n uuid.UUID(curr_cookie)\n g.user_cookie_id = curr_cookie\n #otherwise the app generates a new one\n except ValueError:\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()",
"def org_apache_felix_http_name(self, org_apache_felix_http_name: ConfigNodePropertyString):\n\n self._org_apache_felix_http_name = org_apache_felix_http_name",
"def set_session(session):\n\n global session_\n session_ = session\n import observatory.api.server.api as api\n\n api.session_ = session",
"def org_eclipse_jetty_servlet_session_cookie(self) -> ConfigNodePropertyString:\n return self._org_eclipse_jetty_servlet_session_cookie",
"def set_cookie(self, response):\n if self._invalidated:\n response.delete_cookie(\n key=self.app.conf.flask.session_cookie_name,\n path=self.app.conf.flask.session_cookie_path,\n domain=self.app.conf.flask.session_cookie_domain,\n )\n return\n response.set_cookie(\n key=self.app.conf.flask.session_cookie_name,\n value=self.meta.cookie_val,\n domain=self.app.conf.flask.session_cookie_domain,\n path=self.app.conf.flask.session_cookie_path,\n secure=self.app.conf.flask.session_cookie_secure,\n httponly=self.app.conf.flask.session_cookie_httponly,\n samesite=self.app.conf.flask.session_cookie_samesite,\n max_age=self.app.conf.flask.permanent_session_lifetime,\n )"
] | [
"0.7016722",
"0.6048987",
"0.5852379",
"0.54893565",
"0.54460126",
"0.52044636",
"0.5184681",
"0.51788116",
"0.5120198",
"0.5074298",
"0.5041709",
"0.5030468",
"0.5007963",
"0.49998662",
"0.49792632",
"0.49787027",
"0.496626",
"0.4916027",
"0.49080864",
"0.49074236",
"0.4866012",
"0.48476014",
"0.48394448",
"0.48378003",
"0.47922322",
"0.47754562",
"0.47507638",
"0.4738247",
"0.47221637",
"0.47106978"
] | 0.8104783 | 0 |
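The two records above pair the getter and setter for the same boolean property. As a minimal, self-contained sketch of the accessor pattern they appear to follow (class and attribute names are shortened for illustration, and the @property decorators are an assumption — the dump shows only the method bodies):

class HttpProps:
    """Illustrative stand-in for the generated OrgApacheFelixHttpProperties model."""

    def __init__(self):
        self._session_uniqueid = None

    @property
    def session_uniqueid(self):
        # Mirrors the getter record: return the backing field.
        return self._session_uniqueid

    @session_uniqueid.setter
    def session_uniqueid(self, value):
        # Mirrors the setter record: assign the backing field.
        self._session_uniqueid = value


props = HttpProps()
props.session_uniqueid = True
assert props.session_uniqueid is True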
Join dataframes by resampling to milliseconds and joining on the DatetimeIndex. | def _join_on_millisec(dfs: list):
# Resample to milliseconds before joining
for idx, df in enumerate(dfs):
df["sys_time_dt"] = pd.to_datetime(df["sys_time"], unit="ms")
df = df.set_index("sys_time_dt")
df = df.drop(columns=["sys_time"])
df = df[~df.index.duplicated(keep="last")] # Remove index dups
dfs[idx] = df.resample("10ms").interpolate(method="time")
# Join resampled sensor data and drop NaNs that might be generated at the
# start or end of a session, because not all sensors start/end at the same time
df_joined = pd.concat(dfs, axis=1).dropna()
# Add datetimeindex as ms
df_joined["sys_time"] = df_joined.index.astype("int64") // 10 ** 6
# Reset index to save memory
df_joined = df_joined.reset_index(drop=True)
return df_joined | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def join(df: DataFrame, other_df: DataFrame,\n df_alias: str, other_df_alias: str,\n on: str, how: str = \"left\") -> DataFrame:\n\n base_df_ts = col(f\"{df_alias}.ts\")\n other_df_ts = col(f\"{other_df_alias}.ts\")\n window_spec = Window.partitionBy(on, base_df_ts).orderBy(other_df_ts.desc())\n\n return df.join(other_df, how=how, on=on) \\\n .filter(other_df_ts.isNull() | (other_df_ts <= base_df_ts)) \\\n .withColumn(\"rn\", row_number().over(window_spec)) \\\n .filter(col(\"rn\") == 1) \\\n .drop(\"rn\")",
"def join_dfs(self, df1, df2):\n return df1.join(df2.set_index(\"date\"), on=\"date\")",
"def join(tw_df, rtt_df):\n original_tw_id = []\n author_ids = []\n rtt_dates = []\n groups = rtt_df.groupby('original_tweet_id').groups\n for k in groups.keys():\n l_a = []\n l_r = []\n original_tw_id.append(k)\n for index in groups[k]:\n line = rtt_df.iloc[[index]]\n l_a.append(int(line['author_id']))\n l_r.append(str(line['retweet_date']))\n author_ids.append(l_a)\n rtt_dates.append(l_r)\n \n df_temp = pd.DataFrame()\n df_temp['natural_key'] = original_tw_id\n df_temp['rtt_author_ids'] = author_ids\n df_temp['retweet_dates'] = rtt_dates\n df_temp = df_temp.set_index('natural_key')\n tw_df = tw_df.set_index('natural_key')\n return tw_df.join(df_temp)",
"def sjoin(left_df, right_df, how=..., op=..., lsuffix=..., rsuffix=...):\n ...",
"def join_input_data_and_multi_index(data, dataset_name):\n\n meter_df = data[dataset_name]\n building_df = data['building_metadata']\n weather_df = data['weather_' + dataset_name]\n\n # join meter and weather data\n building_n_meter = meter_df.merge(building_df, on='building_id', how='left')\n joined_data = building_n_meter.merge(weather_df, on=['site_id', 'timestamp'], how='left')\n\n # Add time related columns\n joined_data['hour'] = joined_data['timestamp'].dt.hour\n joined_data['weekday'] = joined_data['timestamp'].dt.dayofweek\n joined_data['week_number'] = joined_data['timestamp'].dt.week\n joined_data['month'] = joined_data['timestamp'].dt.month\n\n joined_data['is_weekend'] = joined_data['weekday'].apply(lambda x: 1 if x in [0, 6] else 0)\n\n # multi index on building id and timestamp\n joined_data = joined_data.set_index(['building_id', 'timestamp']).sort_index()\n\n return joined_data",
"def df_merge(left, right, on=[\"lat\", \"lon\", \"start_date\"], how=\"outer\"):\r\n if left is None:\r\n return right\r\n else:\r\n return pd.merge(left, right, on=on, how=how)",
"def resample(self, dataframes, freq='5s'):\n\n for df in dataframes:\n yield df.resample(freq, fill_method='bfill')",
"def add_times(self,df,link):\n \n if link not in self.to_concat:\n self.to_concat[link] = []\n self.to_concat[link].append(df)",
"def _resample_and_merge(ts, agg_dict):\n grouped = ts.group_serie(agg_dict['sampling'])\n existing = agg_dict.get('return')\n name = agg_dict.get(\"name\")\n resource = None if name is None else mock.Mock(id=str(uuid.uuid4()))\n metric = mock.Mock(id=str(uuid.uuid4()), name=name)\n agg_dict['return'] = (\n processor.MetricReference(metric, \"mean\", resource),\n carbonara.AggregatedTimeSerie.from_grouped_serie(\n grouped,\n carbonara.Aggregation(agg_dict['agg'],\n agg_dict['sampling'],\n None)))\n if existing:\n existing[2].merge(agg_dict['return'][2])\n agg_dict['return'] = existing",
"def merge_arrays(df_left,df_right,method = 'left', join_column = 'datetime'):\n df = df_left.merge(df_right,how = method,left_on = join_column,right_on = join_column)\n return df",
"def join_columns(self, other: \"MultiRegionTimeseriesDataset\") -> \"MultiRegionTimeseriesDataset\":\n if not other.latest_data.empty:\n raise NotImplementedError(\"No support for joining other with latest_data\")\n other_df = other.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n self_df = self.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n other_geo_columns = set(other_df.columns) & set(GEO_DATA_COLUMNS)\n other_ts_columns = (\n set(other_df.columns) - set(GEO_DATA_COLUMNS) - set(TimeseriesDataset.INDEX_FIELDS)\n )\n common_ts_columns = other_ts_columns & set(self.data_with_fips.columns)\n if common_ts_columns:\n # columns to be joined need to be disjoint\n raise ValueError(f\"Columns are in both dataset: {common_ts_columns}\")\n common_geo_columns = list(set(self.data_with_fips.columns) & other_geo_columns)\n # TODO(tom): fix geo columns check, no later than when self.data is changed to contain only\n # timeseries\n # self_common_geo_columns = self_df.loc[:, common_geo_columns].fillna(\"\")\n # other_common_geo_columns = other_df.loc[:, common_geo_columns].fillna(\"\")\n # try:\n # if (self_common_geo_columns != other_common_geo_columns).any(axis=None):\n # unequal_rows = (self_common_geo_columns != other_common_geo_columns).any(axis=1)\n # _log.info(\n # \"Geo data unexpectedly varies\",\n # self_rows=self_df.loc[unequal_rows, common_geo_columns],\n # other_rows=other_df.loc[unequal_rows, common_geo_columns],\n # )\n # raise ValueError(\"Geo data unexpectedly varies\")\n # except Exception:\n # _log.exception(f\"Comparing df {self_common_geo_columns} to {other_common_geo_columns}\")\n # raise\n combined_df = pd.concat([self_df, other_df[list(other_ts_columns)]], axis=1)\n return MultiRegionTimeseriesDataset.from_timeseries_df(\n combined_df.reset_index()\n ).append_latest_df(self.latest_data_with_fips.reset_index())",
"def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result",
"def merge_dataframes(df_metrics, df_tweets):\r\n df_tweets = df_tweets.rename(columns={'id': 'tweet_ID'})\r\n df_tweets[['tweet_ID']] = df_tweets[['tweet_ID']].astype('int64')\r\n df_metrics[['tweet_ID']] = df_metrics[['tweet_ID']].astype(\r\n \"float64\").astype('int64')\r\n ans = df_tweets.join(\r\n df_metrics.set_index('tweet_ID'), on='tweet_ID', how='inner').dropna()\r\n return ans",
"def _join_activity(df_activity: pd.DataFrame, df_sens: pd.DataFrame):\n df_sens[\"task_id\"] = 0\n for idx, row in df_activity.iterrows():\n df_sens.loc[\n (df_sens[\"sys_time\"] >= row[\"start_time\"])\n & (df_sens[\"sys_time\"] <= row[\"end_time\"]),\n \"task_id\",\n ] = row[\"task_id\"]\n\n # Map 24 task ids down to 6 task types\n df_sens[\"task_type\"] = (\n df_sens[\"task_id\"].astype(\"int8\").replace(HMOG_TASK_IDS_TYPES)\n )\n df_sens = df_sens.drop(columns=[\"task_id\"])\n\n return df_sens",
"def prepare_data(base_df, n_seconds_min=3):\n # Remove too short samples\n source_df = base_df.loc[base_df['seconds'] > n_seconds_min]\n # Group speakers duplicated by id\n df = source_df.loc[:, ['speaker_id', 'dataset_name']]\n df = df.set_index('speaker_id')\n df = df.loc[~df.index.duplicated(keep='first')]\n dfGrouped = source_df.groupby(['speaker_id']).sum()\n # Count the number of samples for each speaker\n dfCountAudio = source_df.groupby(['speaker_id']).count().filepath\n speakers_duration = dfGrouped.join(df)\n speakers_duration = speakers_duration.join(dfCountAudio)\n speakers_duration = speakers_duration.rename(columns={'filepath': 'n_samples'})\n return source_df, speakers_duration",
"def resample_data_frame(df):\r\n df_rs = df.resample('15min').mean()\r\n\r\n return df_rs",
"def df_merge(left, right, on=[\"lat\", \"lon\", \"start_date\"], how=\"outer\"):\n if left is None:\n return right\n else:\n # Use outer merge to include union of (lat,lon,date_col)\n # combinations across all features\n return pd.merge(left, right, on=on, how=how)",
"def resample(self, rs, inplace=False):\n obj_cols = (\n self\n .data\n .select_dtypes(include=['object'])\n .resample(rs)\n .first()\n )\n num_cols = (\n self\n .data\n .select_dtypes(exclude=['object'])\n .resample(rs)\n .mean()\n )\n\n # re-merge the two dataframes\n merged = pd.merge(num_cols, obj_cols, left_index=True, \n right_index=True, how='outer')\n\n if inplace:\n self.data = merged\n else:\n cpy = self.copy()\n cpy.data = merged\n\n return cpy\n\n return",
"def combine(new_data, raw_data):\n return pd.merge(new_data, raw_data, on=[\"location\", \"date\"], how=\"outer\")",
"def load_time_index(self, attrs, resolution=None):\n ts = time.time()\n logger.info('Rechunking time_index')\n with h5py.File(self._src_path, 'r') as f:\n time_index = f['time_index'][...]\n\n timezone = attrs['attrs'].get('timezone', None)\n if timezone is not None or resolution is not None:\n time_index = pd.to_datetime(time_index.astype(str))\n if timezone is not None:\n if time_index.tz is not None:\n time_index = time_index.tz_convert(timezone)\n else:\n time_index = time_index.tz_localize(timezone)\n\n if resolution is not None:\n resample = pd.date_range(time_index.min(), time_index.max(),\n freq=resolution)\n if len(resample) > len(time_index):\n msg = (\"Resolution ({}) must be > time_index resolution \"\n \"({})\".format(resolution, time_index.freq))\n logger.error(msg)\n raise RuntimeError(msg)\n\n self._time_slice = time_index.isin(resample)\n time_index = time_index[self.time_slice]\n\n time_index = time_index.astype(str)\n dtype = 'S{}'.format(len(time_index[0]))\n time_index = np.array(time_index, dtype=dtype)\n\n attrs['dtype'] = time_index.dtype\n\n ds = self.init_dset('time_index', time_index.shape, attrs)\n ds[...] = time_index\n logger.info('- time_index transfered')\n tt = (time.time() - ts) / 60\n logger.debug('\\t- {:.2f} minutes'.format(tt))",
"def merge_additional_features(df):\n col = [\"hour\",\"day\" ,\"dayofweek\", \"month\" , \"interval\" , \"season\", \"time_of_day\"]\n additional_featues = pd.DataFrame(data = [features_from_timestamp(i) for i in df.index ],columns=col).set_index(df.index)\n data = df.merge(additional_featues,on=\"dt\")\n data.sort_index(inplace=True) #make sure data is sorted by date\n\n return data",
"def convert_ms_df(df):\n df_lst = []\n\n df.apply(lambda x: df_lst.append(create_ext_df(x, np.int64)), axis=1)\n\n ms_df = pd.concat(df_lst)\n\n sorted_df = ms_df.sort_values(by=['Time'])\n\n sorted_df['Time'] = pd.to_datetime(sorted_df['Time'], unit='ms')\n\n grouped_ms_src = sorted_df.groupby(['Time', 'pkt_src']).agg(\n {'pkt_size':'sum'}).reset_index()\n\n return grouped_ms_src",
"def join_domain_time_span(domain_tables, span):\n joined_domain_tables = []\n \n for domain_table in domain_tables:\n #extract the domain concept_id from the table fields. E.g. condition_concept_id from condition_occurrence\n #extract the domain start_date column\n #extract the name of the table\n concept_id_field, date_field, table_domain_field = get_key_fields(domain_table) \n\n domain_table = domain_table.withColumn(\"date\", unix_timestamp(to_date(col(date_field)), \"yyyy-MM-dd\")) \\\n .withColumn(\"lower_bound\", unix_timestamp(date_add(col(date_field), -span), \"yyyy-MM-dd\"))\\\n .withColumn(\"upper_bound\", unix_timestamp(date_add(col(date_field), span), \"yyyy-MM-dd\"))\\\n .withColumn(\"time_window\", lit(1))\n \n #standardize the output columns\n joined_domain_tables.append(\n domain_table \\\n .select(domain_table[\"person_id\"], \n domain_table[concept_id_field].alias(\"standard_concept_id\"),\n domain_table[\"date\"],\n domain_table[\"lower_bound\"],\n domain_table[\"upper_bound\"],\n lit(table_domain_field).alias(\"domain\"))\n )\n \n return joined_domain_tables",
"def make_dataset_for_time_series(date_key):\n by_date_key = time_series_data[time_series_data['date_key'] == date_key]\n by_date_key.sort_values('datetime', inplace=True)\n return ColumnDataSource(by_date_key), ColumnDataSource(by_date_key.interpolate('slinear'))",
"def test_adds_index_column(self, reindex_dfs):\n df1, df2 = reindex_dfs\n df = pd.concat([df2, df1])\n # df_reindexed = util.reindex_datetime(df, add_index_col=True)\n (df_reindexed, missing_intervals, freq_str) = util.reindex_datetime(\n df, add_index_col=True\n )\n assert df_reindexed.shape[1] == 2\n assert isinstance((df_reindexed.loc[:, 'index'][0]), str)\n datetime_str = df_reindexed.iloc[0, 1]\n date_str = datetime_str.split(' ')[0]\n assert len(date_str.split('/')[0]) == 2\n assert len(date_str.split('/')[1]) == 2\n assert len(date_str.split('/')[2]) == 4",
"def concat_dataframe(df_msg: pd.DataFrame, df_time: pd.DataFrame) -> pd.DataFrame:\n\n concated = df_msg.join(df_time.set_index(\"name\"), on=\"name\")\n return concated",
"def join_target(self):\n df = self.get_all_data()\n target_df = self.get_target_df().copy(deep=True)\n target_df['ft_data_dt'] = target_df['ft_data_dt'].astype('datetime64[M]') - pd.DateOffset(months=2) + MonthEnd(1)\n df = df.merge(target_df, on=['idd', 'ft_data_dt'], how='left')\n values = {'target': 0}\n df['target'] = df['target'].replace(np.nan, 0)\n self.set_prep_data(df)",
"def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. 
To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex",
"def test_report_output(self, reindex_dfs):\n df1, df2 = reindex_dfs\n df = pd.concat([df2, df1])\n df_reindexed, missing_int, freq = util.reindex_datetime(\n df, report=True, add_index_col=True\n )\n assert df_reindexed.shape[0] == 10\n assert missing_int == 2\n assert freq == '5min'",
"def resampling_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.resampling_records(run_idxs))"
] | [
"0.5861676",
"0.57164484",
"0.57051706",
"0.55964965",
"0.54426515",
"0.54371125",
"0.5421133",
"0.54204875",
"0.5400217",
"0.53991127",
"0.53930044",
"0.52991986",
"0.5261319",
"0.525653",
"0.525153",
"0.52278084",
"0.52173066",
"0.5192161",
"0.5179339",
"0.5173237",
"0.5153754",
"0.5143331",
"0.51170635",
"0.51164526",
"0.5114527",
"0.5112246",
"0.50876755",
"0.5068136",
"0.5065461",
"0.5038761"
] | 0.7643946 | 0 |
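A hedged usage sketch for the `_join_on_millisec` document above; the two toy sensor frames and their column names are invented for illustration, and the function itself is assumed to be in scope (or importable from its module).

import pandas as pd

# Two tiny sensor frames with millisecond sys_time columns (illustrative only).
acc = pd.DataFrame({"sys_time": [0, 10, 20], "acc_x": [0.1, 0.2, 0.3]})
gyr = pd.DataFrame({"sys_time": [0, 10, 20], "gyr_x": [1.0, 1.1, 1.2]})

joined = _join_on_millisec([acc, gyr])
# Resulting columns: acc_x, gyr_x, sys_time; one row per 10 ms step.
print(joined.head())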
Read CSVs of inertial sensors to dataframes. The three sensor CSVs are read from disk and joined by timestamp into a single table. | def _read_sensors(session_path: Path):
subject = session_path.parent.name
session = session_path.name
# Read all sensor files
sensor_dfs = []
for filename in DATA_FILES:
df = pd.read_csv(
session_path / filename,
names=DATA_FILE_COLUMNS,
usecols=["x", "y", "z", "sys_time"],
dtype="float64",
header=None,
engine="c",
)
col_prefix = filename[:3].lower() + "_"
df.columns = [
col_prefix + c if c != "sys_time" else c for c in df.columns
]
sensor_dfs.append(df)
# Join into single DF
df_sensors = _join_on_millisec(sensor_dfs)
df_sensors["subject"] = subject
df_sensors["session"] = session
return df_sensors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_trajectory_files(self):\n dflist = []\n self.Ntimes = {}\n for downD in self.case.downstreamD:\n outputs = self.case.get_outputs(self.method,downD)\n print(outputs['trajectory_file'])\n df = pd.read_csv(outputs['trajectory_file'],\n header=None,\n usecols=[0,1,2])\n df.columns = ['t','y','z']\n df['x'] = downD * self.case.turbine.D\n df['z'] -= self.case.turbine.zhub\n df = df.set_index(['t','x'])[['y','z']]\n self.Ntimes[downD] = len(df.index.levels[0])\n dflist.append(df)\n self.df = pd.concat(dflist).sort_index()",
"def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame:\n\n df = pd.read_csv(path)\n df[\"measuredTime\"] = pd.to_datetime(df[\"measuredTime\"])\n df.set_index(\"measuredTime\", inplace=True)\n return df",
"def read_pulse_acc(filename, multi_header=True):\n\n # TODO: Add Azure support\n num_headers = 20\n header_row = 18\n timestamp_row = 20\n\n with open(filename, \"r\") as f:\n accreader = csv.reader(f, delimiter=\" \")\n\n # Skip file info headers but extract header row and timestamp row data\n for i in range(num_headers):\n # Read columns header\n if i == header_row - 1:\n header = next(accreader)\n # Read the start timestamp\n elif i == timestamp_row - 1:\n ts_start = next(accreader)\n else:\n next(accreader)\n\n # Read body - drop blanks\n data = [[x for x in line if x != \"\"] for line in accreader]\n\n # Convert column names list so that split by \":\" not \" \"\n header = \" \".join(header).split(\":\")\n\n # Drop \"%Data,\" from the first column\n header[0] = header[0].split(\",\")[1]\n\n # Extract and convert start timestamp to datetime\n ts_start = [int(i) for i in ts_start[1:]]\n dt_start = datetime(\n ts_start[5], # year\n ts_start[4], # month\n ts_start[3], # day\n ts_start[2], # hour\n ts_start[1], # minute\n ts_start[0], # second\n )\n\n # Create dataframe and timestamps using start timestamp marker and time steps column\n df = pd.DataFrame(data, dtype=\"float\")\n ts = df.iloc[:, 0].values\n timestamps = [dt_start + timedelta(seconds=t) for t in ts]\n\n # For raw data module\n if multi_header is True:\n # Create multi-index header of channel names and units and time steps index\n channels = [col.split(\"(\")[0].strip() for col in header]\n units = [col.split(\"(\")[1][:-1] for col in header]\n header = list(zip(channels, units))\n header.insert(0, (\"Timestamp\", \"\"))\n header = pd.MultiIndex.from_tuples(header, names=[\"channels\", \"units\"])\n df = df.set_index(df.columns[0])\n df.index.name = \"Time (s)\"\n df.insert(loc=0, column=\"Timestamp\", value=timestamps)\n # For screening module\n else:\n # Create single row header of only channel names (i.e. strip out the units)\n # Replace time steps column with timestamps and use range index\n header = [\"Timestamp\"] + [col.split(\"(\")[0].strip() for col in header]\n df.iloc[:, 0] = timestamps\n\n # Set desired header (single or multi-index)\n df.columns = header\n\n return df",
"def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' % self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)",
"def create_raw_data():\r\n for csv_file in glob.glob(raw_loc + 'ticket_data/PRR_*'):\r\n filestring =os.path.basename(csv_file)\r\n index_start = 1\r\n j = 0\r\n start = dt.datetime.now()\r\n print('{} file started at {}'.format(filestring, start.strftime(\"%H:%M\")))\r\n df = pd.read_csv(csv_file, encoding = 'utf-8', parse_dates = ['Tick Issue Date'])\r\n df = df.rename(columns = {c: c.replace(' ', '') for c in df.columns})\r\n try:\r\n df.to_sql('raw_ticket_data', con = conn, if_exists='append')\r\n except:\r\n print('File read error')\r\n\r\n\r\n print ('{} file finished in {:03.2f} minutes '.format(filestring, (dt.datetime.now()-start).seconds / 60))",
"def data_input(path, complete=False, nrows=10000):\n\n if complete:\n df = pd.read_csv(path)\n\n else:\n df = pd.read_csv(path, nrows=nrows)\n df[\"date_time\"] = pd.to_datetime(\n df[\"date_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n\n #Maybe we could get rid of the exact timestamp if not useful\n #-> .apply(lambda x: x.date())\n return df",
"def read_2hps2_acc(filename, multi_header=True):\n\n num_headers = 27\n header_row = 16\n units_row = 17\n timestamp_row = 20\n\n with open(filename, \"r\") as f:\n accreader = csv.reader(f, delimiter=\" \")\n\n # Skip file info headers\n for i in range(num_headers):\n if i == header_row - 1:\n channels = next(accreader)\n elif i == units_row - 1:\n units = next(accreader)\n elif i == timestamp_row - 1:\n ts_start = next(accreader)\n else:\n next(accreader)\n\n # Read body - drop blanks\n data = [[x for x in line if x != \"\"] for line in accreader]\n\n # Convert column names list so that split by \",\" not \" \", drop \"Time\" item and trim\n channels = \" \".join(channels).split(\",\")[1:]\n channels = [c.strip() for c in channels]\n\n # Read the start timestamp marker and get start datetime\n ts_start = [int(i) for i in ts_start[5:]]\n dt_start = datetime(\n ts_start[5], # year\n ts_start[4], # month\n ts_start[3], # day\n ts_start[2], # hour\n ts_start[1], # minute\n ts_start[0], # second\n )\n\n # Create dataframe and timestamps using start timestamp marker and time steps column\n df = pd.DataFrame(data, dtype=\"float\")\n ts = df.iloc[:, 0].values\n timestamps = [dt_start + timedelta(seconds=t) for t in ts]\n\n # For raw data module\n if multi_header is True:\n # Create multi-index header of channel names and units and time steps index\n units = \" \".join(units).split(\",\")[1:]\n units = [i.strip().split(\"(\")[1][:-1] for i in units]\n header = list(zip(channels, units))\n header.insert(0, (\"Timestamp\", \"\"))\n header = pd.MultiIndex.from_tuples(header, names=[\"channels\", \"units\"])\n df = df.set_index(df.columns[0])\n df.index.name = \"Time (s)\"\n df.insert(loc=0, column=\"Timestamp\", value=timestamps)\n # For screening module\n else:\n # Create single row header of only channel names (i.e. strip out the units)\n # Replace time steps column with timestamps and use range index\n header = [\"Timestamp\"] + channels\n df.iloc[:, 0] = timestamps\n\n # Set desired header (single or multi-index)\n df.columns = header\n\n return df",
"def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather",
"def read_training(index_columns=None, both=False, weather=False):\n if weather:\n raw_X_train = pd.read_csv('data\\\\train_X.csv', parse_dates=['date'])\n raw_weather = pd.read_csv('data\\\\weather_data.csv', parse_dates=['date'])\n\n raw_X_train = ffill_nans(raw_X_train)\n raw_X_train = raw_X_train.merge(raw_weather, how='left', on=['date','hour'])\n raw_X_train = raw_X_train.set_index(index_columns)\n\n else:\n raw_X_train = pd.read_csv(\n 'data\\\\train_X.csv',\n parse_dates=['date'],\n index_col=index_columns)\n if both:\n raw_y_train = pd.read_csv(\n 'data\\\\train_y.csv',\n parse_dates=['date'],\n index_col=index_columns)\n\n return raw_X_train, raw_y_train\n \n return raw_X_train",
"def load_all_data() -> Tuple[pd.DataFrame, ...]:\n return tuple(\n pd.read_csv(path, sep='\\t') for path in (TARGETS_PATH, USER_INFO_PATH, INTERACTIONS_PATH, TRACK_INFO_PATH))",
"def _data_tuples_from_fnames(input_path='./', skiprows=0, classes=None):\n # Get list of classes for later\n list_of_tuples = []\n if classes is None:\n # This tells us to find the categories on our own.\n # See \"Handling\" package for these methods.\n classes = handling.classes_from_fnames(os.listdir(input_path))\n elif type(classes) is list:\n # OR simply pass an integer of how many to expect.\n # (in the instance above, default is to expect 2)\n classes = handling.classes_from_fnames(os.listdir(input_path),\n expect=len(classes))\n else:\n pass\n\n # Now loop through each item in list, load the dataframe, and append\n # the SERIAL, Dataframe (fixed), and Class to the list of tuples!\n fread_timer = time.perf_counter()\n n_total = len(os.listdir(input_path))\n n_trigger = int(n_total/10)\n n_counter = 0\n n_reset = 0\n\n last_skiprows = None\n for entry in os.listdir(input_path):\n n_counter += 1\n n_reset += 1\n if n_reset >= n_trigger:\n fread_rate = int(n_trigger / (time.perf_counter() - fread_timer))\n if fread_rate == 0:\n fread_rate += 1\n # Rate in Files per Second.\n print('\\rLoaded\\t{} of {}\\tFiles'.format(n_counter, n_total) +\n '\\t at rate of {} Files per Second'.format(fread_rate),\n end='')\n fread_timer = time.perf_counter()\n n_reset = 0\n else:\n pass\n\n if entry.endswith('.csv'):\n # Read data into pandas dataframe\n fdata, last_skiprows = \\\n handling.read_csv(input_path+entry,\n skiprows=skiprows,\n last_skiprows=last_skiprows)\n # Now remove any columns with bad data types\n # (Strings, objects, etc)\n for column in fdata.columns:\n if fdata[column].dtypes is float:\n pass\n elif fdata[column].dtypes is np.dtype('float64'):\n pass\n elif fdata[column].dtypes is int:\n pass\n else:\n # If type is not int, float, or numpy special float...\n # It's either string, object, or something else bad..\n fdata = fdata.drop(columns=column)\n\n label = None\n for each_label in classes:\n # Find the first label that matches.\n if not label and each_label in entry:\n label = each_label\n else:\n pass\n if not label:\n # If none of the labels fit, make new \"not\" first label\n label = \"not_\" + classes[0]\n else:\n pass\n\n list_of_tuples.append((entry.rstrip(entry[-4:]),\n fdata, label))\n else:\n # If File is not csv, ignore\n pass\n t_mins = round(n_total/fread_rate/60, 2)\n print(\"\\n\\t Success!\\t About {} Minutes...\".format(t_mins))\n # (Because timer has no Newline Character!)\n return list_of_tuples",
"def ingest():\n\n base_path = '/home/mnichol3/Coding/wx-scripts/wtlma'\n\n flash_files = ['flash-out-05232019-2050.txt',\n 'flash-out-05232019-2100.txt',\n 'flash-out-05232019-2110.txt',\n 'flash-out-05232019-2120.txt',\n 'flash-out-05232019-2130.txt',\n 'flash-out-05232019-2140.txt',\n 'flash-out-05232019-2150.txt']\n\n df_cols = ['start', 'end', 'duration', 'area', 'ctr_alt', 'ctr_lat', 'ctr_lon',\n 'tot_energy']\n\n flash_df = pd.read_csv(join(base_path, flash_files[0]), sep=',', names=df_cols)\n\n for f in flash_files[1:]:\n curr_path = join(base_path, f)\n curr_df = pd.read_csv(curr_path, sep=',', names=df_cols)\n flash_df = pd.concat([flash_df, curr_df], ignore_index=True)\n\n return flash_df",
"def read_data_for_question_2():\n df_T_fare_info = read_csv_file_data(\"../Data/T_Fare_info_Q2.csv\")\n df_T_fare_info.set_index('FlightId', inplace=True)\n df_T_flight_info = read_csv_file_data(\"../Data/T_Flight_info_Q2.csv\")\n df_T_flight_info.set_index('FlightId', inplace=True)\n df_T_fare_info.dropna(inplace=True)\n df_T_flight_info.dropna(inplace=True)\n return df_T_fare_info, df_T_flight_info",
"def readExerciseCSV(file_info, verbose=False, sensor_id_category='id', data_types=None, quat_type=None, quat_order=\"WXYZ\"):\n\n tsID = file_info['tsID']\n filepath = file_info['filepath']\n\n if quat_order.upper() is not \"WXYZ\" and quat_type is not None:\n convert_quat = True\n quat_order = quat_order.upper()\n w_i = quat_order.index(\"W\")\n x_i = quat_order.index(\"X\")\n y_i = quat_order.index(\"Y\")\n z_i = quat_order.index(\"Z\")\n else:\n convert_quat = False\n\n required_keys = set(['sample_number', 'mode', 'exercise_name', 'timestamp'])\n if len(required_keys.intersection(file_info.keys())) == len(required_keys):\n exercise_name = file_info['exercise_name']\n mode = file_info['mode']\n sample_number = file_info['sample_number']\n timestamp = file_info['timestamp']\n else:\n print(\"Missing entries from required file_info argument.\")\n return None\n\n csvReader = None\n try:\n inputcsv = open(filepath, 'r')\n csvReader = csv.DictReader(inputcsv,\n skipinitialspace=True,\n delimiter=',',\n quotechar='|')\n\n except NameError as e:\n print(\"Error opening input file: {}\".format( e))\n return None\n\n sensors = {}\n\n for row in csvReader:\n sensor_id = row[sensor_id_category]\n if sensor_id not in sensors:\n sensors[sensor_id] = {}\n for category in row:\n if category == sensor_id_category:\n continue\n\n try:\n data = ast.literal_eval(row[category])\n except:\n print(\"readExerciseCSV: Failed to eval data {} in file {} on column {}\"\n .format(filepath, row[category], category))\n return None\n\n if convert_quat and category == quat_type:\n data = [ data[w_i], data[x_i], data[y_i], data[z_i] ]\n\n if category not in sensors[sensor_id]:\n sensors[sensor_id][category] = []\n\n sensors[sensor_id][category].append(data)\n\n inputcsv.close()\n return {\"tsID\": tsID,\n \"exercise_name\": exercise_name,\n \"mode\" : mode,\n \"sample_number\" : sample_number,\n \"timestamp\": timestamp,\n \"sensors\" : sensors}",
"def read_from_and_write_to_csv(current_data_list):\n \n\n #add current timestamp to current_data_list at 0 index\n current_datetime = datetime.now()\n current_datetime_formatted = current_datetime.strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n current_data_list.insert(0, current_datetime_formatted)\n \n #read csv file as pandas dataframe\n va_river_data = pd.read_csv('river_levels.csv')\n\n #drop unneeded index column (added by pandas)\n columns_to_drop = ['Unnamed: 0']\n va_river_data.drop(columns_to_drop, inplace=True, axis=1)\n\n #add current_data_list as row at end of dataframe\n #if length of rows is > 100, drop first row of dataframe\n if va_river_data['Time Stamp'].count() > 100:\n va_river_data = va_river_data.drop(va_river_data.index[0])\n\n new_row = pd.Series(current_data_list, index=va_river_data.columns)\n va_river_data = va_river_data.append(new_row, ignore_index=True)\n\n #write data to 'rivers_csv' file\n va_river_data.to_csv('river_levels.csv')\n\n return va_river_data",
"def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)",
"def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )",
"def read_csv():",
"def __merge_ati_files(ati_files: List[str]) -> pd.DataFrame:\n ati_df = pd.DataFrame(columns=consts.ACTIVITY_TRACKER_COLUMN.activity_tracker_columns())\n dataframes = []\n for ati_file in ati_files:\n dataframes.append(pd.read_csv(ati_file, encoding=consts.ISO_ENCODING,\n names=consts.ACTIVITY_TRACKER_COLUMN.activity_tracker_columns()))\n return __merge_dataframes(dataframes, ati_df, ACTIVITY_TRACKER_COLUMN.TIMESTAMP_ATI.value)",
"def read_csv(filename, cols=None, nrows=None):\n\n datecols = ['date_time', 'srch_ci', 'srch_co']\n dateparser = lambda x: pd.to_datetime(x, format='%Y-%m-%d %H:%M:%S',\n errors='coerce')\n dtypes = {\n 'id': np.uint32,\n 'site_name': np.uint8,\n 'posa_continent': np.uint8,\n 'user_location_country': np.uint16,\n 'user_location_region': np.uint16,\n 'user_location_city': np.uint16,\n 'orig_destination_distance': np.float32,\n 'user_id': np.uint32,\n 'is_mobile': bool,\n 'is_package': bool,\n 'channel': np.uint8,\n 'srch_adults_cnt': np.uint8,\n 'srch_children_cnt': np.uint8,\n 'srch_rm_cnt': np.uint8,\n 'srch_destination_id': np.uint32,\n 'srch_destination_type_id': np.uint8,\n 'is_booking': bool,\n 'cnt': np.uint64,\n 'hotel_continent': np.uint8,\n 'hotel_country': np.uint16,\n 'hotel_market': np.uint16,\n 'hotel_cluster': np.uint8,\n }\n\n df = pd.read_csv(\n filename,\n nrows=nrows,\n usecols=cols,\n dtype=dtypes,\n parse_dates=[col for col in datecols if col in cols],\n date_parser=dateparser,\n )\n\n if 'date_time' in df.columns:\n df['month'] = df['date_time'].dt.month.astype(np.uint8)\n df['year'] = df['date_time'].dt.year.astype(np.uint16)\n\n return df",
"def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()",
"def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values",
"def get_weather_data(filename, dates, highs, lows, date_index, high_index,\n low_index):\n with open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n # Get data temp.\n for row in reader:\n current_date = datetime.strptime(row[date_index], '%Y-%m-%d')\n try:\n high = int(row[high_index])\n low = int(row[low_index])\n except ValueError:\n print(f\"No data for {current_date}\")\n else:\n dates.append(current_date)\n highs.append(high)\n lows.append(low)",
"def get_data_from_csv_full_path(filepath, datatypes, date_column_list):\n\n dataframe = pandas.read_csv(filepath, dtype=datatypes, date_parser=pandas.to_datetime, parse_dates=date_column_list)\n\n return dataframe",
"def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()",
"def read_files(files):\n if len(files) == 1:\n return pd.read_csv(files[0], comment='#', names=[\"time\", \"volts\"])\n\n elif len(files)>1:\n df = []\n for f in files:\n data = pd.read_csv(f, comment='#', names=[\"time\", \"volts\"])\n df.append(data)\n new_df = pd.concat(df)\n new_df = new_df.drop_duplicates(subset='time')\n new_df.reset_index(drop=True, inplace=True)\n return new_df",
"def import_data(file):\n df = pd.read_csv(file, parse_dates=True, keep_date_col=True, sep=';')\n df = reduce_mem_usage(df)\n return df",
"def read_txts_and_combine(folder=\"../img_labeled/logs/\"):\n all_dfs = []\n for f in os.listdir(folder):\n if f.endswith(\".txt\"): \n df = pd.read_csv(folder+f,sep=\",\")\n df[\"filename\"] = \".\".join(f.split(\".\")[:-1])\n all_dfs.append(df)\n df = pd.concat(all_dfs).reset_index(drop=1)\n df.to_csv(folder+\"log_all.csv\", sep=\",\", index=0)\n return df",
"def new_csv_imp(infile):\r\n with open(infile, \"r\") as fd:\r\n txt = fd.readlines()\r\n if len(txt) > 1:\r\n if 'Serial' in txt[0]:\r\n print('{:} is Solinst'.format(infile))\r\n if 'UNIT: ' in txt[7]:\r\n level_units = str(txt[7])[5:].strip().lower()\r\n if 'UNIT: ' in txt[12]:\r\n temp_units = str(txt[12])[5:].strip().lower()\r\n f = pd.read_csv(infile, skiprows=13, parse_dates=[[0, 1]], usecols=[0, 1, 3, 4])\r\n print(f.columns)\r\n f['DateTime'] = pd.to_datetime(f['Date_Time'], errors='coerce')\r\n f.set_index('DateTime', inplace=True)\r\n f.drop('Date_Time', axis=1, inplace=True)\r\n f.rename(columns={'LEVEL': 'Level', 'TEMP': 'Temp'}, inplace=True)\r\n level = 'Level'\r\n temp = 'Temp'\r\n\r\n if level_units == \"feet\" or level_units == \"ft\":\r\n f[level] = pd.to_numeric(f[level])\r\n elif level_units == \"kpa\":\r\n f[level] = pd.to_numeric(f[level]) * 0.33456\r\n printmes(\"Units in kpa, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"mbar\":\r\n f[level] = pd.to_numeric(f[level]) * 0.0334552565551\r\n elif level_units == \"psi\":\r\n f[level] = pd.to_numeric(f[level]) * 2.306726\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"m\" or level_units == \"meters\":\r\n f[level] = pd.to_numeric(f[level]) * 3.28084\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n else:\r\n f[level] = pd.to_numeric(f[level])\r\n printmes(\"Unknown units, no conversion\")\r\n\r\n if temp_units == 'Deg C' or temp_units == u'\\N{DEGREE SIGN}' + u'C':\r\n f[temp] = f[temp]\r\n elif temp_units == 'Deg F' or temp_units == u'\\N{DEGREE SIGN}' + u'F':\r\n printmes('Temp in F, converting {:} to C...'.format(os.path.basename(infile)))\r\n f[temp] = (f[temp] - 32.0) * 5.0 / 9.0\r\n return f\r\n\r\n elif 'Date' in txt[1]:\r\n print('{:} is Global'.format(infile))\r\n f = pd.read_csv(infile, skiprows=1, parse_dates=[[0, 1]])\r\n # f = f.reset_index()\r\n f['DateTime'] = pd.to_datetime(f['Date_ Time'], errors='coerce')\r\n f = f[f.DateTime.notnull()]\r\n if ' Feet' in list(f.columns.values):\r\n f['Level'] = f[' Feet']\r\n f.drop([' Feet'], inplace=True, axis=1)\r\n elif 'Feet' in list(f.columns.values):\r\n f['Level'] = f['Feet']\r\n f.drop(['Feet'], inplace=True, axis=1)\r\n else:\r\n f['Level'] = f.iloc[:, 1]\r\n # Remove first and/or last measurements if the transducer was out of the water\r\n # f = dataendclean(f, 'Level')\r\n flist = f.columns.tolist()\r\n if ' Temp C' in flist:\r\n f['Temperature'] = f[' Temp C']\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp C', 'Temperature'], inplace=True, axis=1)\r\n elif ' Temp F' in flist:\r\n f['Temperature'] = (f[' Temp F'] - 32) * 5 / 9\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp F', 'Temperature'], inplace=True, axis=1)\r\n else:\r\n f['Temp'] = np.nan\r\n f.set_index(['DateTime'], inplace=True)\r\n f['date'] = f.index.to_julian_date().values\r\n f['datediff'] = f['date'].diff()\r\n f = f[f['datediff'] > 0]\r\n f = f[f['datediff'] < 1]\r\n # bse = int(pd.to_datetime(f.index).minute[0])\r\n # f = hourly_resample(f, bse)\r\n f.rename(columns={' Volts': 'Volts'}, inplace=True)\r\n f.drop([u'date', u'datediff', u'Date_ Time'], inplace=True, axis=1)\r\n return f\r\n else:\r\n print('{:} is unrecognized'.format(infile))",
"def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')"
] | [
"0.66842204",
"0.6533941",
"0.6304058",
"0.6269054",
"0.62516725",
"0.6149453",
"0.6144662",
"0.6085041",
"0.6061771",
"0.5997839",
"0.59718335",
"0.5971198",
"0.59701777",
"0.59618956",
"0.59373224",
"0.593337",
"0.5908655",
"0.5892451",
"0.5883747",
"0.58696353",
"0.58657056",
"0.586556",
"0.5859259",
"0.5826729",
"0.58200914",
"0.58095086",
"0.5801299",
"0.5790393",
"0.5769495",
"0.57568985"
] | 0.65834403 | 1 |
Add meta info to sensors DF based on timestamps in activity DF. Also maps the 24 different task_ids into 6 different task types. | def _join_activity(df_activity: pd.DataFrame, df_sens: pd.DataFrame):
df_sens["task_id"] = 0
for idx, row in df_activity.iterrows():
df_sens.loc[
(df_sens["sys_time"] >= row["start_time"])
& (df_sens["sys_time"] <= row["end_time"]),
"task_id",
] = row["task_id"]
# Map 24 task ids down to 6 task types
df_sens["task_type"] = (
df_sens["task_id"].astype("int8").replace(HMOG_TASK_IDS_TYPES)
)
df_sens = df_sens.drop(columns=["task_id"])
return df_sens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_ingestion_metadata_task(\n df: pd.DataFrame,\n):\n df2 = df.copy(deep=True)\n df2[\"_viadot_downloaded_at_utc\"] = datetime.now(timezone.utc).replace(microsecond=0)\n return df2",
"def _make_meta(self):\n available_meas_times = list()\n available_intervals = list()\n drill_by = list()\n related = list()\n last_data_set_instance = dict()\n\n if self._data['report_save_historical_instances_ind'] == 'Y':\n # last measurement instance\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance['measurement_time'] = self._formatter.format_date(last_data_set_instance['measurement_time'])\n\n # available measurement instances\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\"\"\",(self._id, self._segment_value_id))\n if res:\n for data_set_instance in self._db.record:\n data_set_instance['measurement_time'] = self._formatter.format_date(data_set_instance['measurement_time'])\n available_meas_times.append(data_set_instance)\n \n\n # get drill by. not for this version\n\n # available measurement intervals\n if self._data['report_primary_shared_dimension_id'] is None:\n self._data['report_primary_shared_dimension_id'] = 0\n\n self._db.Query(\"\"\"\n SELECT measurement_interval.*,\n dashboard_element.element_id\n FROM dashboard_element\n LEFT JOIN measurement_interval\n ON measurement_interval.measurement_interval_id = dashboard_element.measurement_interval_id\n WHERE\n (dashboard_element.`element_id`<>%s\n AND dashboard_element.measurement_interval_id <> %s\n AND dashboard_element.shared_measure_id = %s\n AND dashboard_element.`type` = 'internal report'\n AND ifnull(dashboard_element.report_used_for_drill_to_ind,'N') = %s\n AND ifnull(dashboard_element.report_primary_shared_dimension_id,0) = %s\n AND ifnull(dashboard_element.segment_id,0) = %s)\n OR\n dashboard_element.`element_id`=%s\n AND 3=4\n \n GROUP BY measurement_interval.measurement_interval_id\n ORDER BY\n measurement_interval.display_sequence,\n dashboard_element.name ASC\n \"\"\",\n (self._id,\n self._data['measurement_interval_id'],\n self._data['shared_measure_id'],\n self._data['report_used_for_drill_to_ind'],\n self._data['report_primary_shared_dimension_id'],\n self._data['segment_id'],\n self._id))\n\n\n for interval in self._db.record:\n interval['report_data_set_instance_id'] = 0\n available_intervals.append(interval)\n\n # see related\n self._db.Query(\"\"\"SELECT e.*\n FROM dashboard_element_topic det, dashboard_element e\n WHERE e.element_id = det.dashboard_element_id\n AND dashboard_element_id <> %s\n AND e.enabled_ind = 'Y'\n AND topic_id IN (select topic_id from dashboard_element_topic where dashboard_element_id = %s)\n UNION SELECT e.*\n FROM dashboard_element e, metric_drill_to_report m\n WHERE m.metric_element_id = e.element_id\n AND m.report_element_id = %s\n AND e.enabled_ind = 'Y'\n AND ifnull(e.segment_id,0) = %s\n \"\"\", (self._id, self._id, self._id, self._data['segment_id']))\n \n\n for related_element in self._db.record:\n if not related_element['segment_id']:\n related_element['segment_id'] = 0\n if related_element['segment_id'] == self._data['segment_id']:\n related_element['segment_value_id'] = self._segment_value_id\n else:\n related_element['segment_value_id'] = 0\n related.append(related_element)\n\n # elements displayed on the page\n before_dataset = list()\n after_dataset = list()\n \n charts_before_dataset = list()\n charts_after_dataset = 
list()\n \n \n # dataset table\n dataset_el = OrderedDict()\n dataset_el['element_id'] = ''\n dataset_el['element_type'] = 'dataset'\n dataset_el['element_name'] = ''\n dataset_el['element_desc'] = ''\n dataset_el['placement'] = ''\n dataset_el['sequence'] = 0\n dataset_el['show_ind'] = self._data['show_data_set_table_in_report_ind']\n \n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND \n (ISNULL(report_data_set_pivot_id)\n OR report_data_set_pivot_id = 0) \n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n charts_before_dataset.append(chart_el)\n else:\n charts_after_dataset.append(chart_el)\n \n # pivots\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_pivot\n WHERE\n `element_id`= %s\n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for pivot in self._db.record:\n before_pivot = list()\n after_pivot = list()\n #pivot_element = list()\n \n pivot_el = OrderedDict()\n pivot_el['element_id'] = pivot['report_data_set_pivot_id']\n pivot_el['element_type'] = 'pivot'\n pivot_el['element_name'] = pivot['name']\n pivot_el['element_desc'] = ''\n pivot_el['placement'] = pivot['pivot_table_report_placement']\n pivot_el['sequence'] = pivot['display_sequence']\n pivot_el['show_ind'] = pivot['enabled_ind']\n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND report_data_set_pivot_id = %s \n ORDER BY display_sequence ASC\"\"\",\n (self._id, pivot_el['element_id']))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n before_pivot.append(chart_el)\n else:\n after_pivot.append(chart_el)\n pivot_element = before_pivot + [pivot_el] + after_pivot \n \n if pivot_el['placement'] == 'before data set':\n before_dataset += pivot_element\n else:\n after_dataset += pivot_element\n elements = charts_before_dataset + before_dataset + [dataset_el] + after_dataset + charts_after_dataset\n \n \n self._jfile.make_current_meta(last_data_set_instance,\n available_meas_times,\n available_intervals,\n drill_by,\n related,\n elements,\n self._segment_values)",
"def populateMeta(self, *args):\n meta = self._getAllMeta()\n if not meta:\n raise MetaReadError(\"Error Reading Image MetaData, has image finished copying?\")\n else:\n self.exifKeys = self._getAllMetaKeys(meta)\n for key in self.exifKeys:\n if key == self._getExifKey_TimeCode():\n tag = meta[self._getExifKey_TimeCode()]\n self.startTimecode = tag.raw_value\n self._splitTimecode()\n \n if args:\n for arg in args:\n try:\n lTag = meta[arg]\n self.__dict__[arg.split('.')[1] + '_' + arg.split('.')[2]] = lTag.raw_value\n except:\n print 'could not get meta for tag ', arg",
"def _get_sensors_data(task):\n\n try:\n report = irmc_common.get_irmc_report(task.node)\n sensor = irmc.scci.get_sensor_data(report)\n\n except (exception.InvalidParameterValue,\n exception.MissingParameterValue,\n irmc.scci.SCCIInvalidInputError,\n irmc.scci.SCCIClientError) as e:\n LOG.error(\"SCCI get sensor data failed for node %(node_id)s \"\n \"with the following error: %(error)s\",\n {'node_id': task.node.uuid, 'error': e})\n raise exception.FailedToGetSensorData(\n node=task.node.uuid, error=e)\n\n sensors_data = {}\n for sdr in sensor:\n sensor_type_name = sdr.find('./Data/Decoded/Sensor/TypeName')\n sensor_type_number = sdr.find('./Data/Decoded/Sensor/Type')\n entity_name = sdr.find('./Data/Decoded/Entity/Name')\n entity_id = sdr.find('./Data/Decoded/Entity/ID')\n\n if None in (sensor_type_name, sensor_type_number,\n entity_name, entity_id):\n continue\n\n sensor_type = ('%s (%s)' %\n (sensor_type_name.text, sensor_type_number.text))\n sensor_id = ('%s (%s)' %\n (entity_name.text, entity_id.text))\n reading_value = sdr.find(\n './Data/Decoded/Sensor/Thresholds/*/Normalized')\n reading_value_text = \"None\" if (\n reading_value is None) else str(reading_value.text)\n reading_units = sdr.find('./Data/Decoded/Sensor/BaseUnitName')\n reading_units_text = \"None\" if (\n reading_units is None) else str(reading_units.text)\n sensor_reading = '%s %s' % (reading_value_text, reading_units_text)\n\n sensors_data.setdefault(sensor_type, {})[sensor_id] = {\n 'Sensor Reading': sensor_reading,\n 'Sensor ID': sensor_id,\n 'Units': reading_units_text,\n }\n\n return sensors_data",
"def _get_setup_from_timestamps(self):\n self._parse_timestamps()\n IFOs = self.detectors.split(\",\")\n # at this point, it's definitely a comma-separated string\n tsfiles = self.timestamps.split(\",\")\n if len(IFOs) != len(tsfiles):\n raise ValueError(\n f\"Length of detectors=='{self.detectors}'\"\n f\" does not match that of timestamps=='{self.timestamps}'\"\n f\" ({len(IFOs)}!={len(tsfiles)})\"\n )\n tstart = []\n tend = []\n self.sftfilenames = [] # This refers to the MFD output!\n for X, IFO in enumerate(IFOs):\n tsX = np.genfromtxt(tsfiles[X], comments=\"%\")\n if tsX.ndim > 1:\n logger.warning(\n f\"Timestamps file {tsfiles[X]} has more than 1 column,\"\n \" we will ignore the rest.\"\n )\n tsX = tsX[:, 0]\n if not tsX[0].is_integer() or not tsX[-1].is_integer():\n logger.warning(\n \"Detected non-integer timestamps in timestamp file.\"\n \" We will floor start and end times to the nearest integer\"\n \" for the SFT name,\"\n \" and let lalpulsar_Makefakedata_v5 handle the rest.\"\n )\n\n this_start_time = int(tsX[0])\n this_end_time = int(tsX[-1]) + self.Tsft\n tstart.append(this_start_time)\n tend.append(this_end_time)\n self.sftfilenames.append(\n utils.get_official_sft_filename(\n IFO,\n len(tsX),\n self.Tsft,\n this_start_time,\n this_end_time - this_start_time,\n self.label,\n )\n )\n self.tstart = min(tstart)\n self.duration = max(tend) - self.tstart",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def parse_data(data, activity_id, activity_start_date):\n data_dict = {}\n final_dict = {}\n for i in data:\n data_dict[i['type']] = i['data']\n\n counter = 1\n nrange = len(data_dict['time'])\n for item in range(1, nrange + 1):\n final_dict[item] = {}\n\n for key, value in data_dict.items():\n counter = 1\n for i in value:\n final_dict[counter][key] = i\n final_dict[counter]['activity_id'] = activity_id\n\n if 'time' in key:\n final_dict[counter]['time'] = final_dict[counter]['time'] + activity_start_date\n final_dict[counter]['time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(final_dict[counter]['time']))\n\n if 'latlng' in key:\n final_dict[counter]['lat'] = final_dict[counter]['latlng'][0]\n final_dict[counter]['lon'] = final_dict[counter]['latlng'][1]\n final_dict[counter].pop('latlng')\n counter += 1\n\n result_list = [value for key, value in final_dict.items()]\n\n for event in result_list:\n write_to_splunk(index=helper.get_output_index(), sourcetype='strava:activities:stream', data=json.dumps(event))\n\n helper.log_info(f'Added activity stream {activity_id} for {athlete_id}.')\n return True",
"def add_datapoints(self, tasks):\n\n counters = {\n 'SUCCESS': 0,\n 'PENDING': 0,\n 'STARTED': 0,\n 'FAILURE': 0,\n 'RETRY' : 0,\n 'REVOKED': 0,\n 'RECEIVED': 0,\n };\n\n for task in tasks:\n counters[task[1]['state']] += 1\n\n for counter in counters:\n self.add_gauge_value('Tasks/' + counter.capitalize(), 'tasks', counters.get(counter, 0))",
"def data_process():\n global localtime\n global value_dict\n sensor_types = sEtting.sensor_types\n sensor_values = []\n msg = None\n value_dict = collections.OrderedDict.fromkeys(sEtting.payload_header)\n value_dict[\"ver_format\"] = sEtting.ver_format\n value_dict[\"FAKE_GPS\"] = sEtting.fake_gps\n value_dict[\"app\"] = sEtting.app\n value_dict[\"ver_app\"] = sEtting.ver_app\n value_dict[\"device_id\"] = sEtting.device_id\n value_dict[\"date\"] = localtime.strftime(\"%Y-%m-%d\")\n value_dict[\"time\"] = localtime.strftime(\"%H:%M:%S\")\n value_dict[\"device\"] = sEtting.device\n\n for sensor in sensor_types:\n if sensor == 'pm25-at':\n value_dict[\"s_d0\"] = get_reading_csv(sensor)\n elif sensor == 'temperature':\n value_dict[\"s_t0\"] = get_reading_csv(sensor)\n elif sensor == 'humidity':\n value_dict[\"s_h0\"] = get_reading_csv(sensor)\n elif sensor == 'pm10-at':\n value_dict[\"s_d1\"] = get_reading_csv(sensor)\n else:\n print 'Not support sensor type.'\n if sEtting.fake_gps == 1:\n value_dict[\"gps_lat\"] = sEtting.fgps_lat\n value_dict[\"gps_lon\"] = sEtting.fgps_lon\n value_dict[\"gps_alt\"] = sEtting.fgps_alt\n value_dict[\"gps_fix\"] = 0\n else:\n value_dict[\"gps_lat\"] = get_gps()[0]\n value_dict[\"gps_lon\"] = get_gps()[1]\n value_dict[\"gps_alt\"] = get_gps()[2]\n value_dict[\"gps_fix\"] = gpsd.fix.mode\n value_dict[\"gps_num\"] = 0\n #if debug_enable == '0':\n msg = \"|\" + \"|\".join([\"=\".join([key, str(val)])\n for key, val in value_dict.items()])\n return msg\n #elif debug_enable == '1':\n # msg_debug = \",\".join([\"=\".join([key, str(val)]) for key, val in value_dict.items()])\n # return msg_debug",
"def separate_activity_types(self):\n # Read in the CSV file and make a DataFrame.\n try :\n all_actsDF = pd.read_csv('strava-activities.csv', index_col=\"id\", parse_dates=[\"start_date\", \"start_date_local\"])\n except FileNotFoundError :\n print(\"separate_activity_types couldn't find strava-activities.csv.\")\n else :\n # We need to make sure that all_actsDF has all of the columns that are referenced\n # in the loop below. Otherwise, the code might throw a key error. For example, if someone\n # has no heart rate data at all, stava-activities.csv won't have a max_heartrate column,\n # causing the code to blow up when it looks for that column. So just add empty columns\n # as needed.\n necessary_columns = [\"distance\", \"total_elevation_gain\", \"elapsed_time\", \"moving_time\", \"max_speed(mph)\", \"max_speed(kph)\", \"start_date\", \"elevation_gain(ft)\", \"max_heartrate\"]\n for col in necessary_columns :\n if not col in all_actsDF.columns :\n all_actsDF[col] = np.nan\n\n # Get the list of unique activity types (Ride, Hike, Kayak, etc.)\n act_types = all_actsDF[\"type\"].unique()\n # Get the list of unique years in the data.\n # Extract each year out of the data and sort them.\n years = pd.Series(d.year for d in all_actsDF[\"start_date\"]).unique()\n years.sort()\n\n # Create a dataframe that will hold summary statistics for each activity.\n # The index or the set of rows is the activity types. The columns are the stats\n # we are interested in.\n stats = [\"Total Distance (miles)\", \"Total Distance (km)\", \"Total Elev. Gain (meters)\", \"Total Elev. Gain (ft)\", \"Total Elev. Gain (miles)\", \"Total Elev. Gain (km)\", \"Total Duration (hours)\", \"Total Duration (days)\", \"Average Duration (min)\", \"Total Moving Time (hours)\", \"Total Moving Time (days)\", \"Average Moving Time (min)\", \"Average Speed (mph)\", \"Average Speed (kph)\", \"Max Speed (mph)\", \"Max Speed (kph)\", \"Max Speed Date\", \"Max Elevation Gain(ft)\", \"Max Elevation Gain(m)\", \"Max Elevation Gain Date\", \"Max Heart Rate\", \"Max HR Date\"]\n summaryDF = pd.DataFrame(index=act_types, columns=stats)\n # Loop through all of the activity types and add info into the summary file.\n # Also create a csv for each activity that has the Strava info for that activity only.\n for act in act_types:\n actDF = all_actsDF[all_actsDF[\"type\"] == act]\n actDF.to_csv(act + \".csv\")\n # Add the summary stats\n summaryDF.loc[act, \"Total Distance (miles)\"] = actDF[\"distance\"].sum() * 0.000621371\n summaryDF.loc[act, \"Total Distance (km)\"] = actDF[\"distance\"].sum() / 1000\n summaryDF.loc[act, \"Total Elev. Gain (meters)\"] = actDF[\"total_elevation_gain\"].sum()\n summaryDF.loc[act, \"Total Elev. Gain (ft)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084\n summaryDF.loc[act, \"Total Elev. Gain (miles)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n summaryDF.loc[act, \"Total Elev. 
Gain (km)\"] = actDF[\"total_elevation_gain\"].sum() / 1000\n summaryDF.loc[act, \"Total Duration (hours)\"] = actDF[\"elapsed_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Duration (days)\"] = actDF[\"elapsed_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Duration (min)\"] = actDF[\"elapsed_time\"].mean() / 60\n summaryDF.loc[act, \"Total Moving Time (hours)\"] = actDF[\"moving_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Moving Time (days)\"] = actDF[\"moving_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Moving Time (min)\"] = actDF[\"moving_time\"].mean() / 60\n summaryDF.loc[act, \"Average Speed (mph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 2.23694\n summaryDF.loc[act, \"Average Speed (kph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 3.6\n summaryDF.loc[act, \"Max Speed (mph)\"] = actDF[\"max_speed(mph)\"].max()\n summaryDF.loc[act, \"Max Speed (kph)\"] = actDF[\"max_speed(kph)\"].max()\n # We have to be careful anytime we want a specific date that something occured because\n # it may never have occurred and the result may be empty. That's why we do the following\n # five lines.\n s = actDF.loc[actDF[\"max_speed(mph)\"] == actDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Speed Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Speed Date\"] = None\n summaryDF.loc[act, \"Max Elevation Gain(ft)\"] = actDF[\"elevation_gain(ft)\"].max()\n summaryDF.loc[act, \"Max Elevation Gain(m)\"] = actDF[\"total_elevation_gain\"].max()\n s = actDF.loc[actDF[\"elevation_gain(ft)\"] == actDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = None\n summaryDF.loc[act, \"Max Heart Rate\"] = actDF[\"max_heartrate\"].max()\n # We have to be careful with max heart rate because not all activities will have HR data.\n # The following code makes sure there is HR data before trying to access it.\n s = actDF.loc[actDF[\"max_heartrate\"] == actDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max HR Date\"] = s.iloc[0].date()\n else:\n summaryDF.loc[act, \"Max HR Date\"] = None\n\n # Summarize each activity by year\n act_summaryDF = pd.DataFrame(index=stats, columns = years)\n for y in years :\n subDF = actDF[(actDF[\"start_date\"] >= datetime.datetime(year = y, month = 1, day = 1, tzinfo=pytz.utc)) & (actDF[\"start_date\"] < datetime.datetime(year = y+1, month = 1, day = 1, tzinfo=pytz.utc))]\n # Need to check that we had any of this activity in the year.\n if not subDF.empty :\n act_summaryDF.loc[\"Total Distance (miles)\", y] = subDF[\"distance\"].sum() * 0.000621371\n act_summaryDF.loc[\"Total Distance (km)\", y] = subDF[\"distance\"].sum() / 1000\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", y] = subDF[\"total_elevation_gain\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n act_summaryDF.loc[\"Total Elev. 
Gain (km)\", y] = subDF[\"total_elevation_gain\"].sum() / 1000\n act_summaryDF.loc[\"Total Duration (hours)\", y] = subDF[\"elapsed_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Duration (days)\", y] = subDF[\"elapsed_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Duration (min)\", y] = subDF[\"elapsed_time\"].mean() / 60\n act_summaryDF.loc[\"Total Moving Time (hours)\", y] = subDF[\"moving_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Moving Time (days)\", y] = subDF[\"moving_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Moving Time (min)\", y] = subDF[\"moving_time\"].mean() / 60\n act_summaryDF.loc[\"Average Speed (mph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 2.23694\n act_summaryDF.loc[\"Average Speed (kph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 3.6\n act_summaryDF.loc[\"Max Speed (mph)\", y] = subDF[\"max_speed(mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", y] = subDF[\"max_speed(kph)\"].max()\n s = subDF.loc[subDF[\"max_speed(mph)\"] == subDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty:\n act_summaryDF.loc[\"Max Speed Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Speed Date\", y] = None\n\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", y] = subDF[\"elevation_gain(ft)\"].max()\n act_summaryDF.loc[\"Max Elevation Gain(m)\", y] = subDF[\"total_elevation_gain\"].max()\n s = subDF.loc[subDF[\"elevation_gain(ft)\"] == subDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = None\n act_summaryDF.loc[\"Max Heart Rate\", y] = subDF[\"max_heartrate\"].max()\n s = subDF.loc[subDF[\"max_heartrate\"] == subDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max HR Date\", y] = s.iloc[0].date()\n else:\n act_summaryDF.loc[\"Max HR Date\", y] = None\n # Add a few totals\n act_summaryDF.loc[\"Total Distance (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (miles)\"].sum()\n act_summaryDF.loc[\"Total Distance (km)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (km)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (meters)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (ft)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (miles)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (km)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. 
Gain (km)\"].sum()\n act_summaryDF.loc[\"Total Duration (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (hours)\"].sum()\n act_summaryDF.loc[\"Total Duration (days)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (days)\"].sum()\n\n act_summaryDF.loc[\"Average Duration (min)\", \"Total\"] = summaryDF.loc[act, \"Average Duration (min)\"]\n act_summaryDF.loc[\"Total Moving Time (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (hours)\"].sum()\n act_summaryDF.loc[\"Total Moving Time (days)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (days)\"].sum()\n act_summaryDF.loc[\"Average Moving Time (min)\", \"Total\"] = summaryDF.loc[act, \"Average Moving Time (min)\"]\n act_summaryDF.loc[\"Average Speed (mph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (mph)\"]\n act_summaryDF.loc[\"Average Speed (kph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (kph)\"]\n act_summaryDF.loc[\"Max Speed (mph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (kph)\"].max()\n act_summaryDF.loc[\"Max Speed Date\", \"Total\"] = summaryDF.loc[act, \"Max Speed Date\"]\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(ft)\"]\n act_summaryDF.loc[\"Max Elevation Gain(m)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(m)\"]\n act_summaryDF.loc[\"Max Elevation Gain Date\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain Date\"]\n act_summaryDF.loc[\"Max Heart Rate\", \"Total\"] = summaryDF.loc[act, \"Max Heart Rate\"]\n act_summaryDF.loc[\"Max HR Date\", \"Total\"] = summaryDF.loc[act, \"Max HR Date\"]\n\n # Print the annual summary\n act_summaryDF.to_csv(act + \"-by-year.csv\")\n\n # Print the summary to a csv\n\n summaryDF.to_csv(\"strava-summary.csv\")",
"def set_timeseries_metadata(self, dataset_names):\n for dataset_name in dataset_names:\n if dataset_name in self:\n self[dataset_name].dataset_metadata.update({\n 'version': SCHEMA_VERSION,\n 'units': self.config[dataset_name]['units']\n })\n self[dataset_name].group_metadata.update({'source': 'lmt'})",
"def _convert_formats(self, meta_dict, filename=None):\n meta_dict['SDATE'] = utils.get_format_from_datetime_obj(\n meta_dict['TIMESTAMP'], '%Y-%m-%d')\n meta_dict['STIME'] = utils.get_format_from_datetime_obj(\n meta_dict['TIMESTAMP'], '%H:%M')\n\n # meta_dict['SERNO'] = str(self._running_serno).zfill(4)\n meta_dict.setdefault('PROJ', 'NOS')\n meta_dict.setdefault('ORDERER', 'HAV')\n meta_dict.setdefault('SLABO', 'SMHI')\n meta_dict.setdefault('ALABO', 'SMHI')\n meta_dict.setdefault('POSYS', 'GPS')\n if filename:\n fid_info = self._extract_filename_information(filename)\n for item, value in fid_info.items():\n meta_dict[item] = value",
"def create_activity_all(self, f_output='activity_all.txt'):\n list_tuple = []\n epoch = datetime.datetime.utcfromtimestamp(0) \n\n # For each records_*.csv, excluding records_{0,1,2,3,4}.csv\n regex = re.compile('records_.\\.csv')\n for filename in os.listdir(self.dir_name):\n if not re.match(regex, filename):\n if fnmatch.fnmatch(filename, 'records_*.csv'):\n path_to_file = self.dir_name + \"/\" + filename\n ret = subprocess.check_output(['wc', '-l', path_to_file])\n num = int(ret.split(' ')[0])\n # If follower has activity\n if num > 1:\n follower_id = filename.split('_')[1].split('.')[0]\n # Extract id of follower, get the anonymous number\n if follower_id in self.map_userid_number:\n follower_num = self.map_userid_number[follower_id]\n # Parse through file\n f = open(path_to_file,'r')\n # Skip first line\n f.readline()\n for line in f:\n line_split = line.split(',')\n # Extract the time of post, create the pair\n # year-month-day-hour-min-second (UTC - 4)\n date_and_time = line_split[1]\n dt_local = datetime.datetime.strptime(date_and_time, '%Y-%m-%d-%H:%M:%S')\n dt_utc = dt_local + datetime.timedelta(hours=4)\n seconds = (dt_utc - epoch).total_seconds()\n list_tuple.append((seconds,follower_num)) \n # Now append the bot activity\n for bot_id in range(0,5):\n print bot_id\n filename = \"records_%d.csv\" % bot_id\n path_to_file = self.dir_name + \"/\" + filename\n f = open(path_to_file, 'r')\n # Skip first line\n f.readline()\n for line in f:\n line_split = line.split(',')\n # Extract time of post, create the pair\n date_and_time = line_split[1]\n dt_local = datetime.datetime.strptime(date_and_time, '%Y-%m-%d-%H-%M-%S')\n dt_utc = dt_local + datetime.timedelta(hours=4)\n seconds = (dt_utc - epoch).total_seconds()\n list_tuple.append((seconds, bot_id+1))\n\n # Sort all pairs based on time of post\n list_tuple.sort()\n # Write f_output\n f_write = open(f_output, 'w')\n for t in list_tuple:\n f_write.write(\"%d %d\\n\" % (t[0], t[1]))\n f_write.close()",
"def prep_node_info(rows_for_node_df, nodes, graph_id):\n for _, node in nodes.iterrows():\n feats = {'graph_id': graph_id}\n feats['node_id'], feats['time'], feats['friends'], feats['followers'] = node['id'], node['time'], node['friends'], node['followers']\n rows_for_node_df.append(feats)",
"def _postprocess_timeseries(rows, cols, params):\n # pylint: disable=too-many-locals\n xlks, _, crop = cf.transform_params(params)\n base_unw_paths = cf.original_ifg_paths(params[cf.IFG_FILE_LIST])\n dest_tifs = cf.get_dest_paths(base_unw_paths, crop, params, xlks)\n output_dir = params[cf.TMPDIR]\n\n # load previously saved prepread_ifgs dict\n preread_ifgs_file = join(output_dir, 'preread_ifgs.pk')\n ifgs = cp.load(open(preread_ifgs_file, 'rb'))\n\n # metadata and projections\n gt, md, wkt = ifgs['gt'], ifgs['md'], ifgs['wkt']\n epochlist = ifgs['epochlist']\n ifgs = [v for v in ifgs.values() if isinstance(v, PrereadIfg)]\n\n tiles = shared.get_tiles(dest_tifs[0], rows, cols)\n\n # load the first tsincr file to determine the number of time series tifs\n tsincr_file = os.path.join(output_dir, 'tsincr_0.npy')\n tsincr = np.load(file=tsincr_file)\n\n # pylint: disable=no-member\n no_ts_tifs = tsincr.shape[2]\n # we create 2 x no_ts_tifs as we are splitting tsincr and tscuml\n # to all processes.\n process_tifs = mpiops.array_split(range(2 * no_ts_tifs))\n\n # depending on nvelpar, this will not fit in memory\n # e.g. nvelpar=100, nrows=10000, ncols=10000, 32bit floats need 40GB memory\n # 32 * 100 * 10000 * 10000 / 8 bytes = 4e10 bytes = 40 GB\n # the double for loop helps us overcome the memory limit\n log.info('process {} will write {} ts (incr/cuml) tifs of '\n 'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))\n for i in process_tifs:\n tscum_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)\n if i < no_ts_tifs:\n for n, t in enumerate(tiles):\n _assemble_tiles(i, n, t, tscum_g, output_dir, 'tscuml')\n md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]\n # sequence position; first time slice is #0\n md['SEQUENCE_POSITION'] = i+1\n dest = os.path.join(params[cf.OUT_DIR],\n 'tscuml' + \"_\" +\n str(epochlist.dates[i + 1]) + \".tif\")\n md[ifc.DATA_TYPE] = ifc.CUML\n shared.write_output_geotiff(md, gt, wkt, tscum_g, dest, np.nan)\n else:\n tsincr_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)\n i %= no_ts_tifs\n for n, t in enumerate(tiles):\n _assemble_tiles(i, n, t, tsincr_g, output_dir, 'tsincr')\n md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]\n # sequence position; first time slice is #0\n md['SEQUENCE_POSITION'] = i+1\n dest = os.path.join(params[cf.OUT_DIR],\n 'tsincr' + \"_\" + str(\n epochlist.dates[i + 1]) + \".tif\")\n md[ifc.DATA_TYPE] = ifc.INCR\n shared.write_output_geotiff(md, gt, wkt, tsincr_g, dest, np.nan)\n log.info('process {} finished writing {} ts (incr/cuml) tifs of '\n 'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))",
"def writeToMetadata(self, context):\n if self.modelType not in GenericMetadata.MODEL_TYPES:\n raise Exception(\"Model type %s is not among known model types: %s\" % (self.modelType, str(GenericMetadata.MODEL_TYPES) ) )\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n try:\n runs = modelRunEntries['runs'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n runs = []\n \n # Collected model entry and keys and values into lists so we can write to metadata store in batch\n keys = []\n values = []\n \n # Generate unique identifier for this model run. Unique ID is a combination of model type and a number\n entryNumber = 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n while fqId in runs:\n entryNumber += 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n self.runNumber = entryNumber\n # Add new run to list of runs\n runs.append(fqId)\n runsStr = GenericMetadata.VALUE_DELIM.join(runs)\n keys.append('runs'); values.append(runsStr)\n # Write attributes for run\n keyProto = fqId + GenericMetadata.KEY_SEP\n runDate = keyProto + 'date_utc'\n keys.append(runDate); values.append( self.date.strftime(ModelRun.FMT_DATE) )\n runDesc = keyProto + 'description'\n keys.append(runDesc); values.append(self.description)\n runCmd = keyProto + 'command'\n keys.append(runCmd); values.append(self.command)\n runOutput = keyProto + 'output'\n keys.append(runOutput); values.append(self.output)\n # Write to metadata\n GenericMetadata.writeModelRunEntries(context, keys, values)",
"def add_columns_for_taps(full_data: DataFrame, tap_data: DataFrame):\n for tap_file in tap_file_names:\n tap_type = tap_file_to_feature_name[tap_file]\n data = tap_data[tap_data['Type'] == tap_type].reset_index(drop = True)\n\n lead_file = 'Accelerometer.csv'\n time_column_name = x_columns[lead_file]\n data_times = full_data[time_column_name]\n data_index = 0\n\n new_column = []\n\n for tap_index in range(data.shape[0]):\n try:\n while data_times[data_index] < (data['Start'][tap_index] * 1000000):\n new_column.append(0) # Not in the midst of a tap\n data_index += 1\n if data_index >= full_data.shape[0]: break\n if data_index >= full_data.shape[0]: break\n new_column.append(1) # At least one value in the midst of the tap\n data_index += 1\n if data_index >= full_data.shape[0]: break\n while data_times[data_index] < (data['End'][tap_index] * 1000000):\n new_column.append(1)\n data_index += 1\n if data_index >= full_data.shape[0]: break\n if data_index >= full_data.shape[0]: break\n except KeyError:\n print(\"Okay, here's that thing again\")\n return\n\n \n while data_index < full_data.shape[0]:\n new_column.append(0)\n data_index += 1\n\n full_data[tap_type] = new_column",
"def _push_one(self, f, **kwargs):\n\n # Copy the metadata for modifying and open the ann file\n meta = kwargs.copy()\n desc = read_InSar_annotation(f)\n\n # Expand the path for the geotiffs\n tiff_dir = abspath(expanduser(self.geotiff_dir))\n\n # form the pattern to look for and grab the tifs\n pattern = '.'.join(basename(f).split('.')[0:-1]) + '*.tif'\n rasters = glob.glob(join(tiff_dir, pattern))\n\n # Submit each geotif, modifying meta on the fly\n for r in rasters:\n # Grab information from the filename\n f_pieces = r.split('.')\n component = f_pieces[-2] # Real or imaginary component\n data_abbr = f_pieces[-3] # Key to the data name\n dname = self.dname_map[data_abbr] # Data type in db\n\n # For the data type\n meta['type'] = 'insar ' + dname.split(' ')[0]\n\n if dname == 'interferogram':\n meta['type'] += (' ' + component)\n\n # Assign the date for the respective flights\n if 'amplitude' in dname:\n meta['date'] = desc['start time of acquisition for pass {}'.format(\n dname.split(' ')[-1])]['value']\n\n # Derived products always receive the date of the last overpass\n else:\n meta['date'] = desc['start time of acquisition for pass 2']['value']\n\n # Assign only the date not the date and time\n meta['date'] = meta['date'].date()\n\n # Assign units\n meta['units'] = desc['{} units'.format(\n dname.split(' ')[0])]['value']\n\n # Flexibly form a comment for each of the products for dates\n comment = get_InSar_flight_comment(dname, desc)\n # add which dem was used which dictates the file name convert e.g.\n # ...VV_01.int.grd\n comment += ', DEM used = {}'.format(\n desc['dem used in processing']['value'])\n # Add the polarization to the the comments\n comment += ', Polarization = {}'.format(\n desc['polarization']['value'])\n meta['description'] = comment\n\n self.log.info('Uploading {} as {}...'.format(r, meta['type']))\n\n d = self.UploaderClass(r, **meta)\n\n # Submit the data to the database\n d.submit(self.session)\n\n # Uploaded set\n self.uploaded += 1",
"def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features",
"def log_model_metadata(model_uid, schema, db_conn):\n df = pd.DataFrame({\n 'training_timestamp': [get_current_timestamp()],\n 'model_uid': [model_uid]\n })\n df.to_sql(name='model_metadata', schema=schema, con=db_conn, if_exists='append', index=False)",
"def _metadata(self):\n meta = super()._metadata\n meta.update({\n \"name\": self.name,\n \"lead_in_time\": self.lead_in_time,\n \"amplification\": self.amplification,\n \"amplifier_clipping\": self.amplifier_clipping,\n \"power_threshold\": self.power_threshold,\n })\n return meta",
"def __init__(self, path):\n\n # Metadata Definition\n metadata = pd.read_csv(path, nrows=5, header=None)\n self.subject = str(metadata.loc[0, 0])\n base_date = dt.datetime.strptime(metadata.loc[2, 0], '%d.%m.%Y').date()\n if metadata.loc[4, 0] != 'Unknown Line':\n self.valid_measurements = str(metadata.loc[4, 0])\n else:\n metadata = pd.read_csv(path, nrows=6, header=None)\n self.valid_measurements = str(metadata.loc[5, 0])\n\n column_names = ['hour', 'minutes', 'SYS(mmHg)', 'DIA(mmHg)', 'UNKNOW_1', 'UNKNOW_2', 'CODE', 'UNKNOW_3']\n self.data = pd.read_csv(path, sep=',', skiprows=51, skipfooter=1, header=None, names=column_names, engine='python')\n\n # Adjusting Date\n dates = [base_date]\n times = [dt.time(hour=self.data.loc[i, 'hour'], minute=self.data.loc[i, 'minutes']) for i in range(len(self.data))]\n current_date = base_date\n for i in range(1, len(times)):\n if times[i] < times[i-1]:\n current_date += dt.timedelta(days=1)\n dates.append(current_date)\n\n self.data.reset_index(inplace=True)\n self.data['timestamp'] = pd.to_datetime([dt.datetime.combine(dates[i], times[i]) for i in range(len(dates))])\n self.data['date'] = dates\n self.data['time'] = times\n\n order = ['timestamp', 'date', 'time', 'SYS(mmHg)', 'DIA(mmHg)', 'UNKNOW_1', 'UNKNOW_2', 'UNKNOW_3', 'CODE']\n self.data = self.data[order]\n\n try:\n self.data.set_index('timestamp', inplace=True)\n except KeyError:\n print('Timestamp can not be set as an index:')\n print(KeyError)\n\n xml_line = open(path, 'r').readlines()[-1]\n xml_root = ET.fromstring(xml_line)\n self.metadata = self._etree_to_dict(xml_root)['XML']",
"def _pre_create_runs_and_time_series(self):\n self._logdir_loader_pre_create.synchronize_runs()\n run_to_events = self._logdir_loader_pre_create.get_run_events()\n if self._run_name_prefix:\n run_to_events = {\n self._run_name_prefix + k: v for k, v in run_to_events.items()\n }\n\n run_names = []\n run_tag_name_to_time_series_proto = {}\n for (run_name, events) in run_to_events.items():\n run_names.append(run_name)\n for event in events:\n _filter_graph_defs(event)\n for value in event.summary.value:\n metadata, is_valid = self._request_sender.get_metadata_and_validate(\n run_name, value\n )\n if not is_valid:\n continue\n if metadata.data_class == summary_pb2.DATA_CLASS_SCALAR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_TENSOR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.TENSOR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_BLOB_SEQUENCE:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.BLOB_SEQUENCE\n )\n\n run_tag_name_to_time_series_proto[\n (run_name, value.tag)\n ] = tensorboard_time_series.TensorboardTimeSeries(\n display_name=value.tag,\n value_type=value_type,\n plugin_name=metadata.plugin_data.plugin_name,\n plugin_data=metadata.plugin_data.content,\n )\n\n self._one_platform_resource_manager.batch_create_runs(run_names)\n self._one_platform_resource_manager.batch_create_time_series(\n run_tag_name_to_time_series_proto\n )",
"def add_strava_data_to_activities(self):\n \n try:\n logging.info(\"Parsing Strava data and getting it ready for analysis.\")\n\n strava_activities = self.strava_fetcher.fetch_strava_activities()\n if strava_activities == None:\n logging.info(\"No Strava data to add to all activities\")\n return\n\n strava_data = json.dumps(strava_activities)\n \n # load strava data straight up from json, not doing any json normalization\n strava_df = pd.read_json(strava_data)\n strava_df = strava_df[['distance', \n 'elapsed_time', \n 'start_date_local', \n 'location_city', \n 'average_speed', \n 'max_speed', \n 'type']]\n\n # set up 5 key metrics\n # note we're using the enum value\n strava_df['activity_type'] = strava_df['type'].apply(lambda x: self.convert_strava_activity_type(x).value)\n strava_df['source'] = ActivitySource.STRAVA.value\n strava_df['start_timestamp'] = strava_df['start_date_local'].apply(lambda x: parse(x, tzinfos={\"America/Vancouver\"}))\n # strava distances are in meters\n strava_df['distance_in_km'] = strava_df['distance'].apply(lambda x: x / 1000)\n strava_df['duration_in_min'] = strava_df['elapsed_time'].apply(lambda x: x / 60)\n\n # filter out extraneous columns\n strava_df = strava_df.filter(self.data_frame_columns)\n\n # add to activities\n self.all_activities = self.all_activities.append(strava_df, sort=True)\n\n logging.info(\"Done parsing Strava data.\")\n except Exception:\n logging.exception(\"Could not parse Strava data\")",
"def insert_timeseries_data(message, device):\n # Get the product and check for any preprocessors\n product = device.product\n\n preprocessors = product.preprocessors.all()\n\n for preprocessor in preprocessors:\n preprocessor = get_preprocessor(preprocessor.preprocessor_name)\n if preprocessor:\n preprocessor(message.body, device=device, ts_cls=TimeSeriesData)\n else:\n logger.warning(\"No preprocessor handler called %s on product %s\",\n preprocessor.preprocessor_name, product.name)\n\n for sensor in device.sensors.all():\n sensor_name = sensor.sensor_type.sensor_name\n if message.body.get(sensor_name) is not None:\n new_datum = TimeSeriesData(\n ts=message.timestamp,\n sensor=sensor,\n value=message.body[sensor_name]\n )\n new_datum.save()\n\n # Evaluate any definitions data with new datapoint\n context = device.get_context(context=message.body, time=message.timestamp)\n logger.debug(\"device context %s\", context)\n redis_cache = RedisEventDefinitions(get_redis())\n\n triggered_events = device.evaluate_all_event_definitions(\n context, redis_cache, check_product=True\n )\n\n send_triggered_events(triggered_events, device, message.body)",
"async def update_info_data(_: datetime | None = None) -> None:\n\n try:\n (\n hass.data[DATA_INFO],\n hass.data[DATA_HOST_INFO],\n hass.data[DATA_STORE],\n hass.data[DATA_CORE_INFO],\n hass.data[DATA_SUPERVISOR_INFO],\n hass.data[DATA_OS_INFO],\n ) = await asyncio.gather(\n hassio.get_info(),\n hassio.get_host_info(),\n hassio.get_store(),\n hassio.get_core_info(),\n hassio.get_supervisor_info(),\n hassio.get_os_info(),\n )\n\n except HassioAPIError as err:\n _LOGGER.warning(\"Can't read Supervisor data: %s\", err)\n\n async_call_later(\n hass,\n HASSIO_UPDATE_INTERVAL,\n HassJob(update_info_data, cancel_on_shutdown=True),\n )",
"def add_timing_info(df):\n\n # This is so hacky. There has to be a better way.\n df[\"hour\"] = None\n df.loc[:, \"hour\"] = [str(x) if x > 10 else \"0\" + str(x)\n for x in df.index.hour]\n num_measurements_per_hour = df.groupby(\n [df.index.date, df[\"hour\"]]).count().reset_index()\n hours_to_use_df = num_measurements_per_hour[num_measurements_per_hour[\"Orientation\"] == 720]\n hours_to_use_str = hours_to_use_df[\"level_0\"].astype(\n str) + \" \" + hours_to_use_df[\"hour\"].astype(str)\n hours_to_use_datetime = pd.to_datetime(\n hours_to_use_str, format='%Y-%m-%d %H')\n\n df[\"complete_hour\"] = 0\n df.loc[df.index.floor(\"H\").isin(\n hours_to_use_datetime), \"complete_hour\"] = 1\n\n min_night_length = 720 * 5\n df_num = df.groupby(\"sleep_night\")[\"hour\"].count()\n df_num_to_use = df_num.loc[df_num >= min_night_length]\n\n df[\"complete_night\"] = 0\n df.loc[df[\"sleep_night\"].isin(df_num_to_use.index), \"complete_night\"] = 1\n\n # Counts number of time points since position started\n df[\"time_since_pos_start\"] = df.groupby([\"sleep_night\", (df[\"orient_bin\"] != df[\"orient_bin\"].shift()).cumsum()]).cumcount() + 1\n\n return df",
"def extract_sensors_data(dataframe, ms_column='ms_ticker',\n time_column = 'Tstamp',\n ppg_columns=['led_1', 'led_2'],\n acc_columns=['acc_x', 'acc_y', 'acc_z']):\n\n sensors_dict = {}\n sensors = dataframe.loc[1:, 1:]\n sensors_columns = dataframe.head(1).values[0]\n sensors_columns = [i.replace(\" \", \"\") for i in sensors_columns if i.find('Index') == -1]\n sensors.columns = sensors_columns\n check_columns_exist(ppg_columns, sensors_columns)\n check_columns_exist(acc_columns, sensors_columns)\n check_columns_exist(ms_column, sensors_columns)\n check_columns_exist(time_column, sensors_columns)\n ppg = np.array(sensors[ppg_columns].values[1:, :], dtype=int)\n ms = np.array(sensors[ms_column].values[1:, ])\n ms_ints = np.array([int(str(i)[-3:]) for i in ms], dtype=float)\n ms_delta = [datetime.timedelta(milliseconds=i) for i in ms_ints]\n\n time = dataframe.loc[:,1].values[1:]\n time = np.array([pd.to_datetime(i) for i in time])\n time_with_ms = np.array(ms_delta) + time\n\n sensors_dict['PPG'] = ppg\n sensors_dict['time_sensors'] = time_with_ms.astype('datetime64[us]')\n sensors_dict['ms_ticker_sensors'] = ms\n acc = np.array(sensors[acc_columns].values[1:, :], dtype=float)\n sensors_dict['ACC'] = acc\n\n return sensors_dict",
"def getMetaData(self, outputDir = None, filetype = 'h5'):\n timeproc = np.array(self.meta['Time Processed'])\n timedisp = np.array(self.meta['Time Displayed'])\n timeread = np.array(self.meta['Time Read'])\n self.meta['Processing Time'] = timeproc - timeread\n self.meta['Displaying Time'] = timedisp - timeproc\n self.meta['Total Time'] = timedisp - timeread\n metaData = pd.DataFrame.from_dict(self.meta)\n if not outputDir == None:\n if filetype == 'h5':\n fileName = outputDir + 'metaData_{}.h5'.format(str(datetime.today().now())[:-7])\n metaData.to_hdf(fileName, key='metaData')\n elif filetype == 'csv':\n fileName = outputDir + 'metaData_{}.csv'.format(str(datetime.today().now())[:-7])\n metaData.to_csv(fileName, key='metaData')\n return metaData",
"def _write_outputs(self):\n\n #########################\n # Create necessary variables for generic metadata file, as well as\n # generate and fill metadata file, if user wants it\n record_start = pd.to_datetime(self.dt_array[0]).date()\n record_end = pd.to_datetime(self.dt_array[-1]).date()\n\n if self.metadata_mode == 1 and self.script_mode == 1:\n # user wants to fill metadata and it is the correct mode\n\n # First check to see if metadata file already exists\n if not os.path.isfile('correction_metadata.xlsx'):\n # file does not exist, create new one\n metadata_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 'output_file_path': self.output_file_path}, index=np.array([1]))\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n metadata_info.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n else:\n # file is already created, so we need to read it in, append our new information to the bottom of it\n # and then save the info\n metadata_info = pd.read_excel('correction_metadata.xlsx', sheet_name=0, index_col=None, engine='xlrd',\n keep_default_na=False, verbose=True, skip_blank_lines=True)\n\n new_meta_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 'output_file_path': self.output_file_path}, index=np.array([1]))\n\n output_metadata = pd.concat([metadata_info, new_meta_info], ignore_index=True)\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n output_metadata.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n\n else:\n # do nothing\n pass\n\n # if we are using a network-specific metadata file, we need to update the run count to pass it on\n if self.metadata_path is not None:\n current_row = self.metadata_df.run_count.ne(2).idxmax() - 1\n current_run = self.metadata_df.run_count.iloc[current_row] + 1\n\n self.metadata_df.run_count.iloc[current_row] = current_run\n self.metadata_df.record_start.iloc[current_row] = record_start\n self.metadata_df.record_end.iloc[current_row] = record_end\n self.metadata_df.output_path.iloc[current_row] = self.output_file_path\n\n with pd.ExcelWriter(self.metadata_path, date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD', engine='openpyxl', mode='w') as writer:\n self.metadata_df.to_excel(writer, header=True, index=True, sheet_name='Sheet1')\n\n #########################\n # Generate output file\n # Create any final variables, then create panda dataframes to save all the data\n # Includes the following sheets:\n # Corrected Data : Actual corrected values\n # Delta : Magnitude of difference between original data and corrected data\n # Filled Data : Tracks which data points have been filled by script generated values instead of provided\n # Data that is provided and subsequently corrected by the script do not count as filled values.\n print(\"\\nSystem: Saving corrected data to .xslx file.\")\n\n # Create any individually-requested output data\n ws_2m = 
_wind_height_adjust(uz=self.data_ws, zw=self.ws_anemometer_height)\n\n # Create corrected-original delta numpy arrays\n diff_tavg = np.array(self.data_tavg - self.original_df.tavg)\n diff_tmax = np.array(self.data_tmax - self.original_df.tmax)\n diff_tmin = np.array(self.data_tmin - self.original_df.tmin)\n diff_tdew = np.array(self.data_tdew - self.original_df.tdew)\n diff_ea = np.array(self.data_ea - self.original_df.ea)\n diff_rhavg = np.array(self.data_rhavg - self.original_df.rhavg)\n diff_rhmax = np.array(self.data_rhmax - self.original_df.rhmax)\n diff_rhmin = np.array(self.data_rhmin - self.original_df.rhmin)\n diff_rs = np.array(self.data_rs - self.original_df.rs)\n diff_rs_tr = np.array(self.opt_rs_tr - self.orig_rs_tr)\n diff_rso = np.array(self.rso - self.original_df.rso)\n diff_ws = np.array(self.data_ws - self.original_df.ws)\n diff_precip = np.array(self.data_precip - self.original_df.precip)\n diff_etr = np.array(self.etr - self.original_df.etr)\n diff_eto = np.array(self.eto - self.original_df.eto)\n\n # Create datetime for output dataframe\n datetime_df = pd.DataFrame({'year': self.data_year, 'month': self.data_month, 'day': self.data_day})\n datetime_df = pd.to_datetime(datetime_df[['month', 'day', 'year']])\n\n # Create output dataframe\n output_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': self.data_tavg, 'TMax (C)': self.data_tmax,\n 'TMin (C)': self.data_tmin, 'TDew (C)': self.data_tdew,\n 'Vapor Pres (kPa)': self.data_ea, 'RHAvg (%)': self.data_rhavg,\n 'RHMax (%)': self.data_rhmax, 'RHMin (%)': self.data_rhmin, 'Rs (w/m2)': self.data_rs,\n 'Opt_Rs_TR (w/m2)': self.opt_rs_tr, 'Rso (w/m2)': self.rso,\n 'Windspeed (m/s)': self.data_ws, 'Precip (mm)': self.data_precip,\n 'ETr (mm)': self.etr, 'ETo (mm)': self.eto, 'ws_2m (m/s)': ws_2m},\n index=datetime_df)\n\n # Creating difference dataframe to track amount of correction\n delta_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': diff_tavg, 'TMax (C)': diff_tmax,\n 'TMin (C)': diff_tmin, 'TDew (C)': diff_tdew,\n 'Vapor Pres (kPa)': diff_ea, 'RHAvg (%)': diff_rhavg, 'RHMax (%)': diff_rhmax,\n 'RHMin (%)': diff_rhmin, 'Rs (w/m2)': diff_rs, 'Opt - Orig Rs_TR (w/m2)': diff_rs_tr,\n 'Rso (w/m2)': diff_rso, 'Windspeed (m/s)': diff_ws, 'Precip (mm)': diff_precip,\n 'ETr (mm)': diff_etr, 'ETo (mm)': diff_eto}, index=datetime_df)\n\n # Creating a fill dataframe that tracks where missing data was filled in\n fill_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TMax (C)': self.fill_tmax, 'TMin (C)': self.fill_tmin,\n 'TDew (C)': self.fill_tdew, 'Vapor Pres (kPa)': self.fill_ea, 'Rs (w/m2)': self.fill_rs,\n 'Complete Record Rso (w/m2)': self.fill_rso},\n index=datetime_df)\n\n # Open up pandas excel writer\n output_writer = pd.ExcelWriter(self.output_file_path, engine='xlsxwriter')\n # Convert data frames to xlsxwriter excel objects\n output_df.to_excel(output_writer, sheet_name='Corrected Data', na_rep=self.missing_fill_value)\n delta_df.to_excel(output_writer, sheet_name='Delta (Corr - Orig)', na_rep=self.missing_fill_value)\n fill_df.to_excel(output_writer, sheet_name='Filled Data', na_rep=self.missing_fill_value)\n # Save output file\n output_writer.save()\n\n logger = open(self.log_file, 'a')\n if self.script_mode == 1 and self.fill_mode == 1:\n if np.isnan(self.eto).any() or np.isnan(self.etr).any():\n 
print(\"\\nSystem: After finishing corrections and filling data, \"\n \"ETr and ETo still had missing observations.\")\n logger.write('After finishing corrections and filling data, '\n 'ETr and ETo still had missing observations. \\n')\n else:\n logger.write('The output file for this station has a complete record of ETo and ETr observations. \\n')\n else:\n pass\n logger.write('\\nThe file has been successfully processed and output files saved at %s.' %\n dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n logger.close()"
] | [
"0.61292684",
"0.5416358",
"0.53845924",
"0.52447414",
"0.5218356",
"0.5165839",
"0.5148912",
"0.5109014",
"0.5070019",
"0.50598127",
"0.5047217",
"0.50084674",
"0.4988053",
"0.49841473",
"0.49822265",
"0.4978784",
"0.49709165",
"0.4964251",
"0.49591193",
"0.49555197",
"0.49414885",
"0.4926202",
"0.4904403",
"0.48979425",
"0.48961142",
"0.4895578",
"0.4887368",
"0.48729262",
"0.48698056",
"0.4857979"
] | 0.5956733 | 1 |
Creates a chatrooms communicator for the given input token. | def make_communicator(token):
return WebsocketCommunicator(TokenAuthMiddlewareStack(
URLRouter(
websocket_urlpatterns
)
), '/ws/chat/?token=' + token) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_chatroom(request):\n title = request.POST['title'].strip()\n psk = request.POST['psk']\n \n # If thread already exists\n if models.MessageThread.objects.filter(title=title).exists():\n thread = models.MessageThread.objects.get(title=title)\n if thread.psk != psk:\n # Invalid passkey\n thread = None\n return HttpResponse(status=403)\n # If the thread does not exist yet\n else:\n return HttpResponse(status=405)\n\n if not request.user in thread.clients.all():\n thread.clients.add(request.user)\n channel_layer = get_channel_layer()\n\n if 'channel_name' in request.session:\n async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n\n return HttpResponse(status=200)",
"def channels_create(token, name, is_public):\n auth_u_id = get_id_from_token(token)\n if len(name) > 20:\n raise ValueError(\"\")\n channel_payload = {\n \"name\": name,\n \"all_members\": [auth_u_id],\n \"owners\": [auth_u_id],\n \"is_public\": is_public,\n \"is_standup_active\": False,\n \"time_finish\": None,\n \"standup_queue\": [],\n }\n return channels.add(channel_payload)",
"def hipchat_message(template, context=None, fail_silently=app_settings.FAIL_SILENTLY):\n\n if not app_settings.ENABLED:\n return\n\n context = Context(context or {})\n\n context['settings'] = settings\n\n def render(component):\n component_template = 'django_hipchat/%s' % component\n\n return render_to_string(template, {\n 'django_hipchat': component_template,\n }, context).strip().encode('utf8', 'ignore')\n\n data = {\n 'from': app_settings.MESSAGE_FROM,\n 'color': 'yellow',\n 'message': '',\n 'room_id': app_settings.MESSAGE_ROOM,\n 'auth_token': app_settings.AUTH_TOKEN,\n 'message_format': 'html',\n }\n\n for part in ('auth_token', 'room_id', 'message', 'color', 'from'):\n try:\n txt = render(part)\n except Exception:\n if fail_silently:\n return\n raise\n\n if txt:\n data[part] = txt\n\n for x in ('auth_token', 'from', 'message', 'room_id'):\n if data[x]:\n continue\n\n if fail_silently:\n return\n\n assert False, \"Missing or empty required parameter: %s\" % x\n\n backend_fn('%s?%s' % (\n 'https://api.hipchat.com/v1/rooms/message',\n urllib.urlencode(data),\n ), fail_silently)",
"def __init__(self, api_url, token, username, *rooms):\n\t\tself._api_url = api_url\n\t\tself._token = token\n\t\tself._room_names = rooms\n\t\tself._poll_amt = 10\n\t\tself._username = username\n\n\t\tself._rooms_map = {}\n\t\tself._rooms_last_msg = {}\n\n\t\tself._running = threading.Event()\n\t\tself._log = logging.getLogger(\"hipbot\")\n\n\t\tself._hipchat = hypchat.HypChat(token, endpoint=api_url)\n\n\t\tself._reactives = []\n\t\tself._non_reactives = []",
"def __init__(self, token: str):\n\n self.__token = token\n self.__message_sender = None\n self.bus_controller = None\n self.__updater = None\n self.__dp = None\n self.__users = dict() # dictonary {id: user} (str: User)\n self.__gui = None",
"def create_chitchat_bot(self):\n \n self.chatbot = ChatBot(\"Fresher's Friend\")\n self.chatbot.trainer2=ListTrainer(self.chatbot)\n self.chatbot.trainer=ChatterBotCorpusTrainer(self.chatbot)\n self.chatbot.trainer.train(\"chatterbot.corpus.english.greetings\")\n #self.chatbot.trainer.train(\"chatterbot.corpus.english.conversations\")\n for filename in filenames: \n self.chatbot.trainer2.train(files[filename])",
"def createNewChat(topic, users_logins):\n group_chat_flag = False\n if len(users_logins) > 2:\n group_chat_flag = True\n\n chat = Chat.objects.create(topic=topic, is_group_chat=group_chat_flag)\n chat.save()\n for user_login in users_logins:\n user = User.objects.filter(login=user_login)[0]\n Member.objects.create(chat=chat, user=user).save()",
"def create_new_public_room_chat(room: PublicChatRoom, user, message: str):\n PublicChatRoomMessage.objects.create(user=user, room=room, content=message)",
"def initM(self, num):\n prefix = C_Messaging.PREFIX\n if not wait_el_xpath_click(self.driver, C_Messaging.PATH_BTN_CREATE):\n logging.info('{0}: Create new message unsucceed.'.format(prefix))\n self.fail('{0}: Create new message unsucceed.'.format(prefix))\n recipients = wait_el_xpath(self.driver, C_Messaging.PATH_RECIPIENTS)\n action(recipients, Commands.CLEAR)\n action(recipients, Commands.CLICK)\n\n # phone number: 147 8230 5348\n for s in num:\n self.driver.press_keycode(Keycode.get(self, s))\n\n self.driver.press_keycode(Keycode.ENTER)\n\n text_editor = wait_el_xpath(self.driver, C_Messaging.PATH_TEXT_EDITOR)\n return text_editor",
"def handle_create_room(self, lobby_command, client_socket):\n msg = \"Handling room creation of {0}\".format(lobby_command)\n print(msg)\n user = self.clients[client_socket]['data'].decode('utf-8')\n roomname = lobby_command.split()[1]\n\n if roomname == \"mine\" or roomname == \"all\":\n msg = f'Client {user} error! reserved word that cannot be a room name.'\n self.log_and_send(client_socket, msg)\n return\n\n for room in self.rooms:\n if room.name == roomname:\n msg = f\"Invalid request from client: {roomname} already exists!\"\n self.log_and_send(client_socket, msg)\n return\n\n self.rooms.append(Room(name=roomname, creator=user))\n msg = f\"Room {roomname} created for client {user} (creator/Admin).\"\n self.log_and_send(client_socket, msg)\n return",
"async def test_chatroom_commands():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice will:\n # 1. Connect and retrieve MOTD.\n # 2. List rooms, and expect the four in the example.\n # 3. Join \"family\" room, and receive a success.\n # 4. List rooms, and expect the four ones, with \"family\" having \"joined\": true.\n # 3. Join \"family\" room, and receive an error.\n # 4. List rooms, and expect the four ones, with \"family\" having \"joined\": true.\n alice_communicator = make_communicator(tokens['alice'])\n alice_connected, _ = await alice_communicator.connect()\n motd = await alice_communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': False}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n joined = await alice_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 'family'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n error = await alice_communicator.receive_json_from()\n assert error['type'] == 'error'\n assert error['code'] == 'room:already-joined'\n assert error['details']['name'] == 'family'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n # Bob will:\n # 1. Connect and retrieve MOTD.\n # 2. Join \"family\" room, and receive a success.\n # 3. Send a message in the \"family\" room: \"Hello Alice\", and receive a success.\n # 4. Leave the room, and receive a success.\n # 5. Leave the room, and receive an error.\n # 6. Disconnect.\n # Alice will:\n # 1. Receive the \"Bob joined\" message.\n # 2. Receive the \"Hello Alice\" message.\n # 3. 
Receive the \"Bob left\" message.\n # ~~ Bob interactions ~~\n bob_communicator = make_communicator(tokens['bob'])\n bob_connected, _ = await bob_communicator.connect()\n motd = await bob_communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await bob_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n joined = await bob_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n await bob_communicator.send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello Alice'})\n message = await bob_communicator.receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'bob'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello Alice'\n await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})\n parted = await bob_communicator.receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})\n error = await bob_communicator.receive_json_from()\n assert error['type'] == 'error'\n assert error['code'] == 'room:not-joined'\n assert error['details']['name'] == 'family'\n await bob_communicator.disconnect()\n # ~~ Alice interactions ~~\n joined = await alice_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n message = await alice_communicator.receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'bob'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello Alice'\n parted = await alice_communicator.receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n await alice_communicator.disconnect()",
"def create_chitchat_bot(self):\n\n # Hint: you might want to create and train chatterbot.ChatBot here.\n # It could be done by creating ChatBot with the *trainer* parameter equals \n # \"chatterbot.trainers.ChatterBotCorpusTrainer\"\n # and then calling *train* function with \"chatterbot.corpus.english\" param\n \n ########################\n #### YOUR CODE HERE ####\n ########################\n self.chatbot = ChatBot(\n 'skoochbot',\n trainer='chatterbot.trainers.ChatterBotCorpusTrainer'\n )\n \n self.chatbot.train(\"chatterbot.corpus.english\")\n print(\"Extra training...\")\n \n self.chatbot.set_trainer(ListTrainer)\n \n self.chatbot.train([\n \"Hello\",\n \"Hello. How are you?\",\n \"I am well.\",\n \"Good to hear\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"How are you?\",\n \"I am well. How are you?\",\n \"I am also well.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Your momma\",\n \"is so fat they gave her her own zipcode.\",\n ])\n self.chatbot.train([\n \"How are you doing?\",\n \"I am well. How are you?\",\n \"I am also well.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"What's your name?\",\n \"My name is skoochbot. What is yours?\",\n \"That is my name too!\",\n \"Really?\",\n \"No.\",\n \"Yes.\",\n \"No.\",\n \"Yes.\",\n \"So, what can I help you with today?\",\n ])\n self.chatbot.train([\n \"No\",\n \"Yes\",\n \"No\",\n \"Yes it does\",\n \"No it doesn't\",\n \"Yes it does\",\n \"So, what can I help you with today?\",\n ])\n self.chatbot.train([\n \"What is your name?\",\n \"My name is skoochbot. What is yours?\",\n \"That's a nice name.\",\n \"Thank you.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Fuck you\",\n \"No, fuck you buddy\",\n \"You suck\",\n \"No, you suck\",\n \"No, you suck more\",\n \"I hate you so much\",\n \"I hate you too\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Where are you?\",\n \"At your momma's house.\",\n \"Where do you live?\",\n \"Your momma's house.\",\n \"Where are you from?\",\n \"Somewhere over the rainbow.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"Who are you?\",\n \"I'm your worst nightmare.\",\n \"You can run but you can't hide, bitch.\",\n \"This is a dead parrot.\",\n \"It's just sleeping.\",\n \"Well you better wake him up then, hadn't you?\",\n \"So, what can I help you with today?\",\n \"This parrot is dead.\",\n \"No, it's just taking a little nap.\"\n ])\n self.chatbot.train([\n \"I'm squanching here!\",\n \"Sorry carry on.\",\n \"Thank you for the privacy.\",\n \"You are welcome.\",\n \"Let's get schwifty.\",\n \"Let's do it up in here.\",\n \"So, what can I help you with today?\"\n ])\n self.chatbot.train([\n \"How are you?\",\n \"I am good\",\n \"That is good to hear.\",\n \"Thank you.\",\n \"You are welcome.\",\n \"So, what can I help you with today?\",\n \"What is AI?\",\n \"Your momma.\",\n \"What are your hobbies?\",\n \"Your momma and AI.\",\n \"What's your hobby?\",\n \"Your momma and AI. What is your hobby?\",\n \"What is your hobby?\",\n \"Your momma.\"\n ])\n self.chatbot.train([\n \"WHAT DO YOU WANT?\",\n \"Well, I was told outside that.\",\n \"Don't give me that, you snotty-faced heap of parrot droppings!\",\n \"What?\",\n \"Shut your festering gob, you tit! 
Your type really makes me puke, you vacuous, coffee-nosed, malodorous, pervert!!!\",\n \"Look, I CAME HERE FOR AN ARGUMENT\",\n \"OH, oh I'm sorry, but this is abuse.\",\n ])\n self.chatbot.train([\n \"Is this the right room for an argument?\",\n \"I told you once.\",\n \"No you haven't.\",\n \"Yes I have.\",\n \"When?\",\n \"Just now\",\n \"No you didn't\",\n \"Yes I did\",\n \"You didn't\",\n \"I'm telling you I did\",\n \"Oh, I'm sorry, just one moment. Is this a five minute argument or the full half hour?\",\n \"Oh look, this isn't an argument.\",\n \"Yes it is\",\n \"No, it's just contradiction.\"\n \"No it isn't.\",\n \"Yes it is.\"\n ])",
"def test_new_room_socket(self, mock_create):\n mock_create.return_value = '1234'\n response = self.fetch('/rooms', method='POST', body='')\n with self.assertJSON(response) as result:\n protocol = 'ws' if self.get_protocol() == 'http' else 'wss'\n expected = '{}://localhost:{}/socket'.format(protocol, self.get_http_port())\n self.assertEqual(result['socket'], expected)\n self.assertIn('user', result)\n self.assertIn('token', result)\n user, token = result['user'], result['token']\n info = jwt.decode(token, 'XXXX')\n self.assertEqual(info['uuid'], user)\n self.assertEqual(info['room'], '1234')",
"def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)",
"def host_setup(roomid,name):\n token = channel.create_channel(name + roomid) \n template_values = {\n \"roomid\":roomid,\n \"token\": channel.create_channel(name + roomid),\n \"yourname\": name\n }\n return render_template(\"host.html\",values=template_values)",
"def chat(self) -> \"api.Chat\":\n raise NotImplementedError",
"async def new(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n host = ctx.message.author\n if room not in tod_games:\n tod_games[room] = {'host': host.name, 'host_id': host.name, 'participants': {}, 'last': None}\n tod_games[room]['current'] = host.name\n tod_games[room]['last'] = host.name\n tod_games[room]['participants'][host.name.lower()] = {'spins': 0}\n await amor_manager.say(\"New Game of Truth Or Dare started in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Truth or Dare already in progress in {}. Game host: {}\".format(room, host))",
"def create(self):\n logging.warning(\n \"IRC back-end does not support explicit creation, joining room \"\n \"instead to ensure it exists.\"\n )\n self.join()",
"def chat(data):\n username, message_text, room = data['username'], data['message'], data['room']\n message = Message(username, message_text, int(room))\n db.session.add(message)\n db.session.commit()\n emit('response', {'username': username, 'message': {'id': message.id, 'text': message.text}}, room=room)",
"def __init__(self, token=None):\n\n if token is None:\n raise PushoverError(\"No token supplied.\")\n else:\n self.token = token\n self.user_token = None\n self.user_device = None\n self.messages = []",
"def chat():\n kwargs = {\"title\": u\"chat channel\", \"entries\": log.getLogEntries()}\n return render_template(\"chat.html\", **kwargs)",
"def new_chat_message(cls, chatroom, text, sender):\n cls.broadcast(\n group=chatroom,\n payload={\"chatroom\": chatroom, \"text\": text, \"sender\": sender},\n )",
"def handle_list_room(self, lobby_command, client_socket):\n print(\"Handling list command...\")\n msg = ''\n words = lobby_command.split()\n # List all rooms\n if len(words) == 1:\n msg = 'Available Rooms:\\n'\n for room in self.rooms:\n msg += f'\\t\\t{room.name}\\n'\n \n self.just_send(client_socket, msg)\n return\n else:\n # List all rooms and members\n roomname = words[1]\n if roomname == \"all\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'All rooms and users:\\n'\n for room in self.rooms:\n msg += f'Room: {room.name}\\nUsers: '\n for user in room.room_attrbts['members']:\n msg += f'\\t{user}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n\n # List user's room membership\n if roomname == \"mine\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'Rooms user {user} has joined:\\n'\n for room in self.rooms:\n if user in room.room_attrbts['members']:\n msg += f'\\t\\t{room.name}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n \n # List membership and active users of a room\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Request roomname found..\")\n msg = f'User members of room {roomname}:\\n'\n for member in _room.room_attrbts['members']:\n msg += f'\\t\\t{member}\\n'\n msg+= '\\n'\n self.just_send(client_socket, msg)\n \n msg = 'Users active in room:\\n'\n for active_user in _room.room_attrbts['active']:\n msg += f'\\t\\t{active_user}\\n'\n self.just_send(client_socket, msg)\n return\n if msg == '':\n msg = f'Client passed an invalid room to list members of {roomname}\\n'\n self.log_and_send(client_socket, msg)\n return",
"def start(update, context):\n chats = load_chats()\n chats.append( str( update.message.chat_id ) )\n save_channels(chats)\n update.message.reply_text('Chat registered!')",
"def main():\n #Gmail2TelegramClient(\"1234\") -- a person\n #Gmail2TelegramClient(\"-1234\") -- group chat",
"def __init__(self, token, udid):\n self.gwUrl = \"https://clova-cic.line-apps.com/internal/v1/messenger-gw/\"\n self.token = token\n self.udid = udid\n self._request = requests.session()\n self._request.headers.update({\n 'Authorization': f\"Bearer {self.token}\",\n 'X-Client-DeviceId': self.udid,\n 'Content-Type': \"application/json\"\n })",
"def __init__(self, TOKEN: str) -> None:\n super(TelegramBot, self).__init__()\n self.TOKEN = TOKEN\n self.URL = f\"https://api.telegram.org/bot{TOKEN}/\"\n logging.debug(\"Telegram Bot ready\")",
"def get_room(context):\n\n room = context.get('spark.room')\n bearer = context.get('spark.CISCO_SPARK_PLUMBERY_BOT')\n\n print(\"Looking for Cisco Spark room '{}'\".format(room))\n\n url = 'https://api.ciscospark.com/v1/rooms'\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.get(url=url, headers=headers)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n for item in response.json()['items']:\n if room in item['title']:\n print(\"- found it\")\n return item['id']\n\n print(\"- not found\")\n print(\"Creating Cisco Spark room\")\n\n url = 'https://api.ciscospark.com/v1/rooms'\n headers = {'Authorization': 'Bearer '+bearer}\n payload = {'title': room }\n response = requests.post(url=url, headers=headers, data=payload)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n print(\"- done\")\n room_id = response.json()['id']\n context.set('spark.room_id', room_id)\n\n print(\"Adding moderators to the Cisco Spark room\")\n\n for item in context.get('spark.moderators', ()):\n print(\"- {}\".format(item))\n add_person(room_id, person=item, isModerator='true')\n\n print(\"Adding participants to the Cisco Spark room\")\n\n for item in context.get('spark.participants', ()):\n print(\"- {}\".format(item))\n add_person(room_id, person=item)\n\n print(\"Getting bot id\")\n\n url = 'https://api.ciscospark.com/v1/people/me'\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.get(url=url, headers=headers)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n print(\"- done\")\n context.set('spark.bot_id', response.json()['id'])\n\n mouth.put(\"Ready to take your commands starting with @plumby\")\n mouth.put(\"For example, start with: @plumby help\")",
"def start_messenger(self, socket, role, message_received_callback):\n self.logger.info(\"Starting messenger for contact %s.\", self.name)\n # self.messenger = EncryptedMessenger(role=role,\n # socket=socket)\n self.messenger = Messenger(socket=socket)\n self.connected = True\n # private_key = load_private_key(self.owner, config.KEY_DIR)\n # public_key = load_public_key(self.public_key)\n self.messenger.set_message_callback(message_received_callback)\n # self.messenger.run(private_key, public_key)\n self.messenger.run()\n self.connected = False",
"def guest_setup(roomid,name):\n token = channel.create_channel(name + roomid) \n template_values = {\n \"roomid\":roomid,\n \"token\": channel.create_channel(name + roomid),\n \"yourname\": name\n }\n return render_template(\"guest.html\",values=template_values)"
] | [
"0.5637438",
"0.5523975",
"0.55051774",
"0.54330796",
"0.54073066",
"0.53283787",
"0.53062826",
"0.5287932",
"0.5229703",
"0.5221418",
"0.52044666",
"0.5171826",
"0.5073271",
"0.5067419",
"0.5032029",
"0.50144166",
"0.49957344",
"0.49951404",
"0.4987254",
"0.49871126",
"0.49832988",
"0.49824375",
"0.4980973",
"0.49611616",
"0.49586925",
"0.49385318",
"0.49381733",
"0.49322763",
"0.4926115",
"0.49077058"
] | 0.75251323 | 0 |
Attempts a profile retrieval using a given token. | async def attempt_profile(token, expect=200):
response = await database_sync_to_async(MyProfileView.as_view())(factory.get('/profile', HTTP_AUTHORIZATION='Token ' + token))
assert response.status_code == expect | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_profile(profile_id, token):\n url = '{}{}/'.format(PROFILE_ENDPOINT, profile_id)\n res = requests.get(url, headers={\n 'Content-Type': 'application/json',\n 'Authorization': 'Token {}'.format(token)\n })\n return res.json()",
"async def test_not_retrieve_profile_with_account_token(self):\n provisioning_client = ProvisioningProfileClient(httpClient, 'token')\n try:\n await provisioning_client.get_provisioning_profile('id')\n except Exception as err:\n assert err.__str__() == 'You can not invoke get_provisioning_profile method, because you ' + \\\n 'have connected with account access token. Please use API access token from ' + \\\n 'https://app.metaapi.cloud/token page to invoke this method.'",
"def get_user_profile(access_token):\n message_log(\"Retrieving user profile for token %s\" % access_token)\n user_profile = profiles_cache.get(access_token)\n if user_profile is not None:\n message_log(\"Using cached user profile for token %s\" % access_token)\n else:\n message_log(\"Fetching user profile for token %s\" % access_token)\n user_authentication = authentication.Users(settings.client_domain)\n # Throttle\n for i in range(1, 4):\n user_profile = user_authentication.userinfo(access_token)\n if user_profile == 'Too Many Requests':\n message_log(\"Too many requests: throttling (%s seconds) for token %s\" % (i*2, access_token))\n time.sleep(i*2)\n else:\n break\n if user_profile == 'Unauthorized':\n raise Auth0Error(user_profile)\n try:\n user_profile = json.loads(user_profile)\n profiles_cache.set(access_token, user_profile, timeout=CACHE_TIMEOUT)\n except ValueError:\n message_log(\"Returning empty user profile\")\n return False\n message_log(\"Returning user profile %s\" % user_profile)\n return user_profile",
"async def test_not_retrieve_profiles_with_account_token(self):\n provisioning_client = ProvisioningProfileClient(httpClient, 'token')\n try:\n await provisioning_client.get_provisioning_profiles(5, 'active')\n except Exception as err:\n assert err.__str__() == 'You can not invoke get_provisioning_profiles method, because you ' + \\\n 'have connected with account access token. Please use API access token from ' + \\\n 'https://app.metaapi.cloud/token page to invoke this method.'",
"def fetch_profile(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete profile\n r = requests.get(API_URL + \"/athlete\", headers=headers)\n profile = r.json()\n if \"errors\" in profile:\n raise AuthError(profile[\"message\"])\n\n return {\n \"firstName\": profile[\"firstname\"],\n \"lastName\": profile[\"lastname\"],\n \"imgUrl\": profile[\"profile\"],\n \"profileUrl\": \"https://www.strava.com/athletes/{}\".format(profile[\"id\"]),\n }",
"def get_user_profile_info(token):\n user_profile_endpoint = 'https://api.spotify.com/v1/me'\n headers = {'Authorization': 'Bearer %s' % token}\n\n r = requests.get(user_profile_endpoint, headers=headers)\n if r.status_code != 200:\n return None\n profile = r.json()\n\n display_name = profile['display_name']\n profile_id = profile['id']\n email = profile['email']\n\n return User(display_name=display_name,\n profile_id=profile_id,\n email=email)",
"def profile():\n token = request.json['token']\n u = user.User.query.filter(user.User.token == token).first()\n if u is None:\n abort(404)\n return jsonify(u.to_dict())",
"def complete_login(self, request, app, token, **kwargs):\n headers = {\"Authorization\": \"OAuth {0}\".format(token.token)}\n metadata = requests.get(self.profile_url, headers=headers)\n extra_data = metadata.json()\n return self.get_provider().sociallogin_from_response(request, extra_data)",
"def fetch_my_profile(self, api_token: str) -> dict:\n query = \"\"\"\n query myProfile {\n myProfile {\n id\n firstName\n lastName\n }\n }\n \"\"\"\n path = jmespath.compile(\n \"\"\"\n data.myProfile.{\n id: id\n first_name: firstName\n last_name: lastName\n }\n \"\"\"\n )\n\n data = self.do_query(query, api_token=api_token)\n\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"id\", \"first_name\", \"last_name\"])\n return parsed_data",
"def initialize_user_profile(self, authz_token):\n try:\n return self.user_profile_client_pool.initializeUserProfile(authz_token)\n except TException:\n logger.exception(\"Error occurred in add_gateway, \", TException)\n raise",
"def profile(request):\n auth, created = AuthProfile.objects.get_or_create(user=request.user)\n if not request.user.is_authenticated():\n raise Exception(\"Not Logged in\")\n\n token, created = Token.objects.get_or_create(user=request.user)\n context = {}\n context['TOKEN'] = token.key\n\n return context",
"def test_get(self):\r\n profile = self.profile_manager.get('testing')\r\n self.assertIsInstance(profile, Profile)",
"async def retrieve(self, profile_id):\n profile = await self.get(self.profile_load.format(profile_id))\n log(\"retrieved card for {}\".format(profile['title']))\n return profile",
"def get_user(self, token: str) -> Optional[User]:",
"def get_user(self, token: str) -> Optional[User]:",
"async def test_retrieve_one(self):\n expected = {\n '_id': 'id',\n 'name': 'name',\n 'version': 4,\n 'status': 'active'\n }\n rsps = respx.get(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200, json=expected))\n profile = await provisioning_client.get_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert profile == expected",
"def fetch_profile(self, api_token: str, id: str) -> dict:\n query = \"\"\"\n query Profile($id: ID!, $service_type: ServiceType!) {\n profile(id: $id, serviceType: $service_type) {\n id\n }\n }\n \"\"\"\n\n path = jmespath.compile(\n \"\"\"\n data.profile.{\n id: id\n }\n \"\"\"\n )\n\n data = self.do_query(\n query,\n api_token=api_token,\n variables={\n \"id\": id,\n \"service_type\": settings.HELSINKI_PROFILE_SERVICE_TYPE,\n },\n )\n\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"id\"])\n return parsed_data",
"def get_my_profile(api): \n url = \"https://api.twitter.com/1.1/account/verify_credentials.json\"\n rate_status = check_rate_limit(api, url)\n if not rate_status[\"remaining\"]:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0}...\".format(delay)\n time.sleep(delay)\n response = api.get(url)\n response.close()\n return response.json()",
"def load_profile(path, profile):\n profiles = load(path)\n try:\n return profiles[profile]\n except KeyError:\n return Profile(None, None, None, None)",
"def test_get_profile(self):\n self.cim.get_profile(customer_profile_id=u\"123\")",
"def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)",
"async def get_my_profile_info_async(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetMyProfileInfo.create(\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )",
"def retrieve_profile(self, name):\n\n url = get_url('profile details', profile=name)\n response = self._get(url)\n raise_on_error(response)\n if response.status_code == 404:\n raise QarnotGenericException(response.json()['message'])\n return Profile(response.json())",
"def access_token_profile(global_config, existing_user):\n client_app = global_config.client_apps.profile_app\n api = IdApi(global_config.id_home, client_app.id, client_app.password, global_config.urls.id.api)\n yield api.get_access_token_for_user(existing_user.email, existing_user.password)",
"def get_profile(request):\n p_obj = Profile.objects.filter(hashid=request.session.get('profile', '-'))\n if len(p_obj):\n return p_obj[0]\n else:\n return None",
"def get_profile(tag, platform=\"pc\", region=\"eu\"):\n #\n try:\n context = ssl._create_unverified_context()\n profile = json.load(\n const.codec(urlopen(const.URL + platform + \"/\" + region + \"/\" + tag + \"/profile\", context=context)))\n #\n if \"error\" in profile:\n raise BattleTagNotFound(profile['error'])\n exit(1)\n #\n result = pr.Profile(profile['data']['username'],\n profile['data']['level'],\n profile['data']['games']['quick']['wins'],\n profile['data']['games']['competitive']['wins'],\n profile['data']['games']['competitive']['lost'],\n profile['data']['playtime']['quick'],\n profile['data']['playtime']['competitive'],\n profile['data']['avatar'],\n profile['data']['competitive']['rank'])\n return result\n except urllib.error.URLError as e:\n print(\"An error occurred when fetching stats\\n\" + e)\n exit(1)\n except Exception as e:\n print(\"An error occurred:\\n \" + str(e))\n exit(1)",
"def get_from_token(token, session):\n return session.query(User).filter(User.access_token == token).first()",
"def get_user_from_token(token, salt):\r\n\r\n\t\ttry:\r\n\t\t\tvalue = signing.loads(token, salt=AuthTools.password_salt, max_age=900)\r\n\t\texcept signing.SignatureExpired:\r\n\t\t\treturn None\r\n\t\texcept signing.BadSignature:\r\n\t\t\treturn None\r\n\r\n\t\tuser = User.objects.get(pk=value['pk'])\r\n\r\n\t\tif user is not None:\r\n\t\t\treturn user\r\n\r\n\t\treturn None",
"async def profile(self, ctx:utils.Context):\n\n pass",
"def profile():\n github = OAuth2Session(client_id, token=session['oauth_token'])\n print(session['oauth_token'])\n # print(github.get('https://www.goodreads.com/api/auth_user').json())\n return \"hello expedia!! Here is your access_token : \" + session['oauth_token']['access_token']"
] | [
"0.68144375",
"0.6709642",
"0.66583216",
"0.6256965",
"0.6099129",
"0.6031647",
"0.59498155",
"0.5881905",
"0.58400744",
"0.5807715",
"0.5780331",
"0.5726177",
"0.57129955",
"0.5694952",
"0.5694952",
"0.55700994",
"0.5545229",
"0.5544591",
"0.55419356",
"0.5518681",
"0.55155593",
"0.54924935",
"0.5492444",
"0.5454125",
"0.5418337",
"0.5409289",
"0.53973645",
"0.5396677",
"0.53873974",
"0.5378215"
] | 0.729398 | 0 |
Attempts a logout using a given token. | async def attempt_logout(token, expect=204):
response = await database_sync_to_async(UserLogoutView.as_view())(factory.post('/logout', HTTP_AUTHORIZATION='Token ' + token))
assert response.status_code == expect | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def auth_logout(token):\n if verify_token(token):\n return { \"is_success\": True }\n else:\n raise AccessError(description=\"Logout failed. Token is invalid\")",
"def logout(_host, _token):\n url = _host + '/api/v1/users/logout'\n headers = {\n 'Content-Type': 'application/json',\n }\n params = {\n 'access_token': _token\n }\n response = requests.post(url, headers=headers, params=params)\n if not response.status_code == 204:\n raise ValueError('Logout failed')",
"def logout(self):\n kwargs = {}\n r = self._token_id_request(urljoin(self._url, Client._logout_resource), **kwargs)",
"def logout(self, revoke_token=False):\n if revoke_token:\n self.revoke_self_token()\n\n self.token = None",
"def logout(token):\n with client.connect_to_server_with_auth(token) as auth_conn:\n client_stub = strongdoc_pb2_grpc.StrongDocServiceStub(auth_conn)\n\n request = accounts_pb2.LogoutReq()\n\n response = client_stub.Logout(request, timeout=constants.GRPC_TIMEOUT)\n\n return response.status",
"def logout():\n token = request.args.get('token')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n logged_out = functions.logout(data)\n if logged_out:\n return jsonify({'message': 'Logout Successfully'})\n else:\n return jsonify({'message': 'Logout Failed'})",
"def logout(self):\r\n self._api_entrypoint.logout(self._session_token)",
"def sign_out(token):\n session = get_session_by_token(token)\n if not session['success']:\n return {'success': False, 'message': 'You are not signed in.', 'code': 401}\n\n query_db('DELETE FROM ActiveUsers WHERE token_hash = ?', [session['data']['token_hash']])\n return {'success': True, 'message': 'Successfully signed out.', 'code': 200}",
"def logout():\n session.pop('microsoft_token', None)\n session.pop('state', None)\n return redirect(url_for('index'))",
"def logout():\n session.pop('microsoft_token', None)\n session.pop('state', None)\n return redirect(url_for('index'))",
"def logout():\n # TODO: handle this logout properly, very weird implementation.\n identity = get_jwt_identity()\n if not identity:\n print(\"Session Expired\")\n return jsonify({\"msg\": \"Token invalid\"}), Status.HTTP_BAD_UNAUTHORIZED\n logger.info('Logged out user !!')\n return 'logged out successfully', Status.HTTP_OK_BASIC",
"def logout(request):\n request.user.auth_token.delete()\n return Response({}, status=status.HTTP_200_OK)",
"def logout(self, request, *args, **kwargs):\n token = get_object_or_404(Token, key=request.auth)\n token.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def writeas_logout(token):\n writeas_logout_url = 'https://write.as/api/auth/me'\n writeas_logout_header = {\n 'Authorization': token,\n 'Content-Type': 'application/json'\n }\n r = requests.delete(\n writeas_logout_url,\n headers=writeas_logout_header)\n\n if r.status_code == 204:\n print('User with token {} successfully logged out!'.format(token))\n else:\n print('Logout FAILED. Response: {}'.format(r.text))\n sys.exit(1)",
"def test_cannot_logout_with_blacklisted_token(self):\n reply = self.admin_register()\n user = dict(\n username='jonnie',\n password='Andela8'\n )\n resp = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Login sucessful!')\n self.assertTrue(reply['token'])\n self.assertEqual(resp.status_code, 200)\n\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are already logged out!')\n self.assertEqual(resp.status_code, 404)",
"def logout(req):\n print(req)\n try:\n if 'token' in req.session:\n del req.session['token']\n except KeyError:\n msg = req.get_error_msg(\"NotFound\")\n return send_error_response(\"Unauthorised_{0}\".format(msg))\n else:\n return send_success_response(\"Session expired\")",
"def revoke_token(token):\n token.delete_instance()",
"def logout():\n login()",
"def logout():\n if 'access_token' in login_session:\n del login_session['access_token']\n del login_session['email']\n flash(\"you are now logout\")\n return redirect(url_for('catelog'))",
"def logout():\n return logout_user()",
"def logout():",
"def logout(session):\r\n response = session.get(LOGOUT_URL)\r\n response.raise_for_status()",
"def test_logout_revoked(self):\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n response = self.client.post('/api/v2/auth/logout',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token has been revoked', str(response.data))",
"def logout(self):\n logger.info(\"Logging out\")\n self._limited_call(self._requests.get, constants.FA_ROOT + \"/logout/\")",
"def logout(request):\n if request.method == 'POST':\n request.token.delete()\n return json_response({\n 'status': 'success'\n })\n elif request.method == 'OPTIONS':\n return json_response({})\n else:\n return json_response({\n 'error': 'Invalid Method'\n }, status=405)",
"def test_logout_without_token(self):\n self.create_user()\n\n url = reverse_lazy('authenticate:logout')\n response = self.client.get(url)\n\n detail = str(response.data['detail'])\n status_code = int(response.data['status_code'])\n\n self.assertEqual(len(response.data), 2)\n self.assertEqual(detail, 'Authentication credentials were not provided.')\n self.assertEqual(status_code, 401)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def logout():\n\n logout_user()\n return redirect(url_for('login'))",
"def logout():\n logout_user()\n return redirect(url_for('auth.index'))",
"def logout(self):\n self.client.get(f\"{host}/logout\")",
"def logout():\n \n # using the method from the flask module\n logout_user()\n return redirect(url_for('home'))"
] | [
"0.7278216",
"0.68836224",
"0.6859607",
"0.67861867",
"0.6770758",
"0.6694835",
"0.66294277",
"0.6611441",
"0.64591223",
"0.64591223",
"0.64254606",
"0.63770396",
"0.63576144",
"0.634561",
"0.6340409",
"0.62441546",
"0.6227763",
"0.6221606",
"0.62151426",
"0.619917",
"0.61847955",
"0.6138767",
"0.6135032",
"0.61342674",
"0.61157835",
"0.6076018",
"0.60553515",
"0.60394806",
"0.60371536",
"0.60224414"
] | 0.77047193 | 0 |
Attempts a websocket channel connection and expects to receive a MOTD. | async def should_be_websocket_welcome(token):
communicator = make_communicator(token)
connected, _ = await communicator.connect()
assert connected
message = await communicator.receive_json_from()
await communicator.disconnect()
assert message.get('type') == 'notification'
assert message.get('code') == 'api-motd' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def connect(self) -> None:\n exceptions = (\n OSError,\n ConnectionClosed,\n aiohttp.ClientError,\n asyncio.TimeoutError,\n errors.HTTPException,\n )\n\n async def throttle() -> None:\n now = time.monotonic()\n between = now - last_connect\n sleep = random.random() * 4 if between > 600 else 100 / between ** 0.5\n log.info(f\"Attempting to connect to another CM in {sleep}\")\n await asyncio.sleep(sleep)\n\n while not self.is_closed():\n last_connect = time.monotonic()\n\n try:\n self.ws = await asyncio.wait_for(SteamWebSocket.from_client(self, cm_list=self._cm_list), timeout=60)\n except exceptions:\n await throttle()\n continue\n\n try:\n while True:\n await self.ws.poll_event()\n except exceptions as exc:\n if isinstance(exc, ConnectionClosed):\n self._cm_list = exc.cm_list\n self.dispatch(\"disconnect\")\n finally:\n if not self.is_closed():\n await throttle()",
"async def _connected_websocket_assistant(self) -> WSAssistant:\n\n self._manage_listen_key_task = safe_ensure_future(self._manage_listening_key_task_loop())\n await self._listen_key_initialized_event.wait()\n\n ws: WSAssistant = await self._get_ws_assistant()\n await ws.connect(ws_url=CONSTANTS.LBANK_WSS_URL)\n return ws",
"async def test_connection(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True",
"async def ws_connect(self):\n await self._client.connect()",
"async def connect(self, channel_id: int):\n payload = {\n 'op': 4,\n 'd': {\n 'guild_id': self.guild_id,\n 'channel_id': str(channel_id),\n 'self_mute': False,\n 'self_deaf': False\n }\n }\n await self._bot._connection._get_websocket(int(self.guild_id)).send(json.dumps(payload))",
"async def _handle_connection( # pylint: disable=too-many-branches, too-many-statements\n self,\n ) -> None:\n client = None\n disconnect_warn = None\n try:\n self.client = client = await self.aiohttp_session.ws_connect(\n self.ws_server_url,\n heartbeat=55,\n )\n self.tries = 0\n\n self.version = version = VersionInfo.from_message(\n await client.receive_json()\n )\n\n # basic check for server version compatability\n self._check_server_version(self.version.server_version)\n\n self._logger.info(\n \"Connected to Home %s (Server %s, Driver %s)\",\n version.home_id,\n version.server_version,\n version.driver_version,\n )\n self.state = STATE_CONNECTED\n\n if self._on_connect:\n asyncio.create_task(\n gather_callbacks(self._logger, \"on_connect\", self._on_connect)\n )\n\n loop = asyncio.get_running_loop()\n\n while not client.closed:\n msg = await client.receive()\n\n if msg.type in (WSMsgType.CLOSED, WSMsgType.CLOSING):\n break\n\n if msg.type == WSMsgType.ERROR:\n disconnect_warn = \"Connection error\"\n break\n\n if msg.type != WSMsgType.TEXT:\n disconnect_warn = \"Received non-Text message: {}\".format(msg.type)\n break\n\n try:\n if len(msg.data) > SIZE_PARSE_JSON_EXECUTOR:\n msg = await loop.run_in_executor(None, msg.json)\n else:\n msg = msg.json()\n except ValueError:\n disconnect_warn = \"Received invalid JSON.\"\n break\n\n if self._logger.isEnabledFor(logging.DEBUG):\n self._logger.debug(\"Received message:\\n%s\\n\", pprint.pformat(msg))\n\n msg_ = cast(dict, msg)\n try:\n self.async_handle_message(msg_)\n\n except InvalidState as err:\n disconnect_warn = f\"Invalid state: {err}\"\n await client.close()\n break\n\n except Exception: # pylint: disable=broad-except\n self._logger.exception(\"Unexpected error handling %s\", msg)\n break\n\n except client_exceptions.WSServerHandshakeError as err:\n self._logger.warning(\"Unable to connect: %s\", err)\n\n except client_exceptions.ClientError as err:\n self._logger.warning(\"Unable to connect: %s\", err)\n\n except asyncio.CancelledError:\n pass\n\n finally:\n if disconnect_warn is None:\n self._logger.info(\"Connection closed\")\n else:\n self._logger.warning(\"Connection closed: %s\", disconnect_warn)",
"def websocket_init(self, payload, *args, **kwargs):\n data = json.loads(str(payload, \"utf-8\"))\n self.is_connecting = False\n if url := data.get(\"url\"):\n self.gateway = f\"{url}/?v={DISCORD_API_VERSION}&encoding=json\".encode(\"utf-8\")\n useragent = kwargs.pop(\"useragent\", DISCORD_USER_AGENT)\n headers = kwargs.pop(\n \"headers\",\n {\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n },\n )\n\n logger.log_info(\"Connecting to Discord Gateway...\")\n WebSocketClientFactory.__init__(\n self, url, *args, headers=headers, useragent=useragent, **kwargs\n )\n self.start()\n else:\n logger.log_err(\"Discord did not return a websocket URL; connection cancelled.\")",
"async def ws_connect(self, client):\n if self.ws is None:\n team_ws_build = GuildedWebSocket.build(client, loop=client.loop, teamId=self.id)\n self.ws = await asyncio.wait_for(team_ws_build, timeout=60)",
"async def connect(self):\n if not self.http:\n raise ClientException('You must log in via REST before connecting to the gateway.')\n\n while not self.closed:\n ws_build = GuildedWebSocket.build(self, loop=self.loop)\n gws = await asyncio.wait_for(ws_build, timeout=60)\n if type(gws) != GuildedWebSocket:\n self.dispatch('error', gws)\n return\n\n self.ws = gws\n self.http.ws = self.ws\n self.dispatch('connect')\n\n if self._login_presence is not None:\n # we do this here because why bother setting a presence if you won't show up in the online list anyway\n await self.change_presence(self._login_presence)\n\n #if self._login_presence is Presence.online:\n # todo: start http ping thread\n # no need to do that if you don't want an online presence\n\n if not self.disable_team_websockets:\n for team in self.teams:\n team_ws_build = GuildedWebSocket.build(self, loop=self.loop, teamId=team.id)\n team_ws = await asyncio.wait_for(team_ws_build, timeout=60)\n if type(team_ws) == GuildedWebSocket:\n team.ws = team_ws\n self.dispatch('team_connect', team)\n\n async def listen_socks(ws, team=None):\n teamId = team.id if team is not None else None\n next_backoff_time = 5\n while True and ws is not None:\n try:\n await ws.poll_event()\n except WebSocketClosure as exc:\n code = ws._close_code or ws.socket.close_code\n if teamId:\n log.warning('Team %s\\'s websocket closed with code %s, attempting to reconnect in %s seconds', teamId, code, next_backoff_time)\n self.dispatch('team_disconnect', teamId)\n else:\n log.warning('Websocket closed with code %s, attempting to reconnect in %s seconds', code, next_backoff_time)\n self.dispatch('disconnect')\n await asyncio.sleep(next_backoff_time)\n if teamId:\n build = GuildedWebSocket.build(self, loop=self.loop, teamId=teamId)\n else:\n # possible reconnect issues brought up by @8r2y5\n build = GuildedWebSocket.build(self, loop=self.loop)\n try:\n ws = await asyncio.wait_for(build, timeout=60)\n except asyncio.TimeoutError:\n log.warning('Timed out trying to reconnect.')\n next_backoff_time += 5\n else:\n next_backoff_time = 5\n\n self._ready.set()\n self.dispatch('ready')\n\n await asyncio.gather(\n listen_socks(self.ws), *[listen_socks(team.ws, team) for team in self.teams]\n )",
"def _robotConnect(self, resp):\r\n # Read the response\r\n url = resp['url']\r\n current = resp.get('current', None)\r\n\r\n if current:\r\n print(\"Warning: There is a newer client (version: '{0}') \"\r\n 'available.'.format(current))\r\n\r\n print('Connect to Robot Process on: {0}'.format(url))\r\n\r\n # Make WebSocket connection to Robot Manager\r\n args = urlencode((('userID', self._userID), ('robotID', self._robotID),\r\n ('password', self._password)))\r\n factory = RCERobotFactory('{0}?{1}'.format(url, args), self)\r\n connectWS(factory)",
"async def _connect(self) -> bool:\n\n try:\n logger.debug(f\"Creating ws connection to {self._url!r}\")\n ws = await asyncio.wait_for(\n websockets.connect(self._url,\n extra_headers=self._cookie_jar.get_cookies_as_headers()),\n self.CONNECT_TIMEOUT\n )\n logger.debug(f\"Established ws connection to {self._url!r}\")\n\n self._ws = ws\n self._awaiting_replies = {}\n logger.debug(\"Starting ping check\")\n self._ping_check = asyncio.create_task(\n self._disconnect_in(self.PING_TIMEOUT))\n\n # Put received cookies into cookie jar\n for set_cookie in ws.response_headers.get_all(\"Set-Cookie\"):\n self._cookie_jar.add_cookie(set_cookie)\n self._cookie_jar.save()\n\n return True\n\n except (websockets.InvalidHandshake, websockets.InvalidStatusCode,\n OSError, asyncio.TimeoutError):\n logger.debug(\"Connection failed\")\n return False",
"async def _open_connection(self, conn_name):\n path = WS_ENDPOINT_REMOTE_CONTROL if conn_name == WS_REMOTE else WS_ENDPOINT_APP_CONTROL\n token = (await self._store.get(ATTR_TOKEN)) if conn_name == WS_REMOTE else None\n url = format_websocket_url(self.host, path, self.name, token)\n _LOGGER.debug(f\"{conn_name}: Attempting connection to {url}\")\n try:\n self._connected = False\n self._is_connecting = True\n async with websockets.connect(url, ssl=INSECURE_SSL_CTX) as ws:\n setattr(self, f\"_ws_{conn_name}\", ws)\n async for msg in ws:\n try:\n await self._handle_message(conn_name, msg)\n except AuthorizationError:\n _LOGGER.error(f\"{conn_name}: Authorization refused\")\n break\n except Exception as exc:\n _LOGGER.error(f\"Error while handling message: {exc}\", exc_info=True)\n except (websockets.WebSocketException, asyncio.CancelledError, ConnectionError) as exc:\n _LOGGER.debug(f\"{conn_name}: {exc}\", exc_info=True)\n except Exception as exc:\n _LOGGER.error(f\"{conn_name}: {exc}\", exc_info=True)\n finally:\n _LOGGER.debug(f\"{conn_name}: disconnected\")\n setattr(self, f\"_ws_{conn_name}\", None)\n self._connected = False\n self._is_connecting = False\n self._current_app = None\n self._installed_apps = {}",
"async def ws_cmd(args):\n url = \"ws://{}:{}/ws/device/\".format(\n args.server, args.port)\n headers = {'devicetoken': args.token}\n while True:\n try:\n async with websockets.connect(\n url, extra_headers=headers) as websocket:\n logger.info(\"ws server connected...\")\n try:\n while True:\n data = await websocket.recv()\n data = json.loads(data)\n\n if data['type'] == 'cmd':\n status, msg = await run_cmd(data['cmd'])\n logging.info(\"result: {}\".format(msg))\n await websocket.send(json.dumps({\n \"type\": \"cmd\",\n \"msg\": msg,\n }))\n except Exception:\n logger.exception(\"{} error\".format(data))\n except Exception:\n await asyncio.sleep(args.retval)\n logger.info(\"retry connected...\")",
"async def connect(self):\n ssl = True if self._uri.startswith(\"wss\") else False\n async for websocket in websockets.connect(\n self._uri, ssl=ssl\n ) if ssl else websockets.connect(self._uri):\n # Try-except-continue used for automatic reconnection with exponential backoff\n try:\n self._connection = websocket\n async for message in self._connection:\n json_obj = json.loads(message.decode())\n item = Item(\n json_obj[\"type\"], json_obj[\"manufacturer\"], json_obj[\"model\"]\n )\n request = Request(self._connection, item)\n await self.on_message_handler(request)\n except websockets.ConnectionClosed:\n continue",
"async def _perform_connect(self):\n # Return connected if we are already connected.\n if self._websocket:\n if self._websocket.open:\n return True\n\n self.logger.debug(\"Starting connect.\")\n\n self.logger.debug(\"Connecting to %s\" % self.wsc_url)\n self._websocket = await websockets.connect(self.wsc_url)\n \n #We need to authenticate upon opening the connection (modified to remove apkVesrion, os, model, romVersion NW 28th Oct 2020)\n payload = {}\n \n payload['action'] = \"userOnline\"\n payload['userAgent'] = 'app'\n payload['version'] = 8\n payload['appid'] = self._appid\n payload['_nonce'] = self._nonce\n #payload['apkVesrion'] = \"1.8\"\n #payload['apkVersion'] = \"1.8\"\n #payload['os'] = 'ios'\n payload['at'] = self.authenticationToken\n payload['apikey'] = self.apikey\n payload['ts'] = self.timestamp\n #payload['model'] = 'iPhone10,6'\n #payload['romVersion'] = '11.1.2'\n payload['sequence'] = self.sequence\n\n string = json.dumps(payload);\n\n self.logger.debug('Sending login request [%s]' % string);\n\n await self._send_request(string)",
"def _connect(self, reconnecting=False):\n # The lock is used to ensure only a single connection can be made\n with (yield self._ws_connecting_lock.acquire()):\n self._disconnect_issued = False\n websocket_url = (yield self.get_sitemap())['websocket']\n if not self.is_connected:\n self._logger.debug(\n \"Connecting to websocket %s\", websocket_url)\n try:\n if self._heart_beat_timer.is_running():\n self._heart_beat_timer.stop()\n self._ws = yield websocket_connect(\n websocket_url,\n on_message_callback=self._websocket_message,\n connect_timeout=WS_CONNECT_TIMEOUT)\n if reconnecting:\n yield self._resend_subscriptions_and_strategies()\n self._logger.info(\"Reconnected :)\")\n self._heart_beat_timer.start()\n except Exception:\n self._logger.exception(\n 'Could not connect websocket to %s',\n websocket_url)\n if reconnecting:\n self._logger.info(\n 'Retrying connection in %s seconds...', WS_RECONNECT_INTERVAL)\n self._connect_later(WS_RECONNECT_INTERVAL)\n if not self.is_connected and not reconnecting:\n self._logger.error(\"Failed to connect!\")",
"def attempt_connection(self):\n self.connection_error = False\n sleep_exp = 1\n connect_count = 0\n\n while self.running and self.socket is None and connect_count < self.__reconnect_attempts_max:\n for host_and_port in self.__hosts_and_ports:\n try:\n log.info(\"Attempting connection to websocket %s\", host_and_port)\n self.socket = websocket.WebSocket()\n proto, host, port, path = host_and_port[3], host_and_port[0], host_and_port[1], host_and_port[2]\n if port:\n ws_uri = '{}://{}:{}/{}'.format(proto, host, port, path)\n else:\n ws_uri = '{}://{}/{}'.format(proto, host, path)\n\n self.socket.connect(ws_uri,\n timeout=self.__timeout)\n\n self.current_host_and_port = host_and_port\n log.info(\"Established connection to %s\", ws_uri)\n break\n except WebSocketException:\n self.socket = None\n connect_count += 1\n log.warning(\"Could not connect to host %s, port %s\", host_and_port[0], host_and_port[1], exc_info=1)\n\n if self.socket is None:\n sleep_duration = (min(self.__reconnect_sleep_max,\n ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))\n * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))\n * (1.0 + random.random() * self.__reconnect_sleep_jitter))\n sleep_end = monotonic() + sleep_duration\n log.debug(\"Sleeping for %.1f seconds before attempting reconnect\", sleep_duration)\n while self.running and monotonic() < sleep_end:\n time.sleep(0.2)\n\n if sleep_duration < self.__reconnect_sleep_max:\n sleep_exp += 1\n\n if not self.socket:\n raise exception.ConnectFailedException()",
"async def connect(self):\n\n self.socket = await self._session.ws_connect(str(self._url))\n self._create_task(self.__handle_connection())",
"async def _handler(self):\n reconnect_delay = DEFAULT_RECONNECT_SLEEP\n while True:\n try:\n async with websockets.connect(\n self.websocket_url\n ) as self.websocket, aiohttp.ClientSession() as self.session:\n self.connected = True\n self.server.set_session(self.session)\n for att in dir(self):\n att = getattr(self, att)\n if (\n hasattr(att, \"_is_interval_task\")\n and att._is_interval_task\n ):\n self._tasks.append(asyncio.ensure_future(att()))\n done, pending = await asyncio.wait(\n self._tasks, return_when=asyncio.FIRST_COMPLETED\n )\n for task in pending:\n task.cancel()\n for task in done:\n if task.exception():\n raise task.exception()\n except:\n import traceback\n\n traceback.print_exc()\n await self._on_disconnect(self.autoreconnect)\n if not self.autoreconnect:\n logger.info(\n \"An exception has occurred. The bot will \"\n \"go offline. To reconnect automatically start the \"\n \"bot with Client.start(autoreconnect=True).\"\n )\n return\n\n logger.info(\n \"An exception has occurred. The bot will \"\n \"reconnect. To forgo autoreconnect start the bot with \"\n \"Client.start(autoreconnect=False).\"\n )\n\n logger.info(\n \"Sleeping for {}s before reconnecting\".format(reconnect_delay)\n )\n await asyncio.sleep(reconnect_delay)\n reconnect_delay = min(\n MAX_RECONNECT_SLEEP, reconnect_delay * 2\n ) # Bounded exponential backoff",
"async def connect(self, url: str):\n logger.info(\"Opening connection to {}\".format(url))\n self.websocket = await WSClient.connect(url)\n logger.info(\"Connected to gateway!\")\n self._open = True\n return self.websocket",
"def test_websocket_message(self):\n\n ws = websocket.create_connection(self.live_server_ws_url)\n ws.send('test')\n response = ws.recv()\n ws.close()\n assert 'test' == response",
"def verifyWalabotIsConnected():\n while True:\n try:\n wlbt.ConnectAny()\n except wlbt.WalabotError as err:\n input(\"- Connect Walabot and press 'Enter'.\")\n else:\n print('- Connection to Walabot established.')\n return",
"def check_connection():\n if connected():\n ws.emit(Message('mycroft.internet.connected'))\n # check for pairing, if not automatically start pairing\n if not is_paired():\n # begin the process\n payload = {\n 'utterances': [\"pair my device\"],\n 'lang': \"en-us\"\n }\n ws.emit(Message(\"recognizer_loop:utterance\", payload))\n else:\n thread = Timer(1, check_connection)\n thread.daemon = True\n thread.start()",
"async def async_connect(self) -> None:\n params = {\"ns\": self._namespace, \"accessToken\": self._access_token}\n try:\n await self._sio.connect(\n f\"{API_URL_BASE}?{urlencode(params)}\",\n namespaces=[self._namespace],\n transports=[\"websocket\"],\n )\n except (ConnError, SocketIOError) as err:\n raise WebsocketError(err) from None",
"async def running(self):\n self.state = STATE_STARTING\n\n try:\n async with self.session.ws_connect(\n self.uri, heartbeat=15, ssl=self._ssl\n ) as ws_client:\n self.state = STATE_CONNECTED\n self.failed_attempts = 0\n\n async for message in ws_client:\n if self.state == STATE_STOPPED:\n break\n\n if message.type == aiohttp.WSMsgType.TEXT:\n msg = message.json()[\"NotificationContainer\"]\n msgtype = msg[\"type\"]\n\n if msgtype not in self.subscriptions:\n _LOGGER.debug(\"Ignoring: %s\", msg)\n continue\n\n if msgtype == \"playing\":\n if self.player_event(msg):\n self.callback(msgtype, msg, None)\n else:\n _LOGGER.debug(\"Ignoring player update: %s\", msg)\n else:\n self.callback(msgtype, msg, None)\n\n elif message.type == aiohttp.WSMsgType.CLOSED:\n _LOGGER.warning(\"AIOHTTP websocket connection closed\")\n break\n\n elif message.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\"AIOHTTP websocket error\")\n break\n\n except aiohttp.ClientResponseError as error:\n if error.code == 401:\n _LOGGER.error(\"Credentials rejected: %s\", error)\n self._error_reason = ERROR_AUTH_FAILURE\n else:\n _LOGGER.error(\"Unexpected response received: %s\", error)\n self._error_reason = ERROR_UNKNOWN\n self.state = STATE_STOPPED\n except (aiohttp.ClientConnectionError, asyncio.TimeoutError) as error:\n if self.failed_attempts >= MAX_FAILED_ATTEMPTS:\n self._error_reason = ERROR_TOO_MANY_RETRIES\n self.state = STATE_STOPPED\n elif self.state != STATE_STOPPED:\n retry_delay = min(2 ** (self.failed_attempts - 1) * 30, 300)\n self.failed_attempts += 1\n _LOGGER.error(\n \"Websocket connection failed, retrying in %ds: %s\",\n retry_delay,\n error,\n )\n self.state = STATE_DISCONNECTED\n await asyncio.sleep(retry_delay)\n except Exception as error: # pylint: disable=broad-except\n if self.state != STATE_STOPPED:\n _LOGGER.exception(\"Unexpected exception occurred: %s\", error)\n self._error_reason = ERROR_UNKNOWN\n self.state = STATE_STOPPED\n else:\n if self.state != STATE_STOPPED:\n self.state = STATE_DISCONNECTED\n\n # Session IDs reset if Plex server has restarted, be safe\n self.players.clear()\n await asyncio.sleep(5)",
"def _connect(self, slack_bot_token):\n slack_client = SlackClient(slack_bot_token)\n connect_response = slack_client.rtm_connect(\n with_team_state=False, auto_reconnect=True)\n\n if connect_response:\n logger.info(\"RTM connected\")\n return slack_client\n else:\n raise ConnectionError",
"def attempt_to_connect(self):\n if self.server_handler.attempt_connection:\n self.server_handler.attempt_connection = False\n else:\n self.server_handler.attempt_connection = True",
"async def websocket_connect(self, event):\n sender = self.scope['user']\n if sender.is_anonymous:\n return\n user_id = self.scope['url_route']['kwargs']['user_id']\n\n try:\n receiver = User.objects.get(id=int(user_id))\n except User.DoesNotExist:\n return\n self.conversation = await self.get_conversation(sender, receiver)\n\n self.conversation_name = str(self.conversation.id)\n\n await self.channel_layer.group_add(\n self.conversation_name,\n self.channel_name\n )\n\n await self.send({\n \"type\": \"websocket.accept\"\n })",
"def test_invalid_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=ABC'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)",
"async def handle_websocket_connection(websocket, path):\n logging.info('New connection established')\n\n # wait for user requests until the connection is closed\n while True:\n\n # handle request\n try:\n await wait_for_user_request(websocket)\n\n # connection closed successfully: log it and return\n except websockets.exceptions.ConnectionClosedOK:\n logging.info('Connection closed cleanly')\n return\n\n # unexpected error: log it and return\n except Exception as e:\n logging.info(f'Connection closed due unexpected error: {e}')\n return"
] | [
"0.64464635",
"0.6210147",
"0.6181521",
"0.61647856",
"0.6162948",
"0.6138284",
"0.6119366",
"0.6101342",
"0.60932285",
"0.6045619",
"0.6020593",
"0.6005463",
"0.6004269",
"0.5911575",
"0.58692455",
"0.58346134",
"0.58307046",
"0.58235204",
"0.5798333",
"0.5785222",
"0.5769966",
"0.5765764",
"0.57533586",
"0.5711983",
"0.56901777",
"0.56849825",
"0.5676159",
"0.56499153",
"0.56257504",
"0.55962855"
] | 0.6423359 | 1 |
Attempts a websocket channel connection and expects to receive a rejection because the user is not logged in (invalid token). | async def should_be_websocket_rejected_because_anonymous(token):
communicator = make_communicator(token)
connected, _ = await communicator.connect()
assert connected
message = await communicator.receive_json_from()
await communicator.disconnect()
assert message.get('type') == 'fatal'
assert message.get('code') == 'not-authenticated' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=ABC'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)",
"async def accept(self,websocket, path):\n res = re.search(\"/(\\w+)/(\\w+)\",path)\n if res:\n group,username = res.groups()\n else:\n return\n try:\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist:\n print(\"unknown user %s tried to connect to websocket for %s\" % (username,group))\n return\n #get user token from django database\n if hasattr(user, 'logged_in_user'):\n session_key_for_user = user.logged_in_user.session_key\n else:\n print(\"not authenticated user %s tried to connect to websocket for %s\" % (username,group))\n return\n #wait for client to send authentification token\n session_key_message = await asyncio.wait_for(websocket.recv(), timeout=1.0)\n #check if session keys match\n if session_key_message != session_key_for_user:\n print(\"user %s tried to connect to websocket for %s with invalid sessionId\" % (username,group))\n #reject\n return\n else:\n #accept\n if group not in self.openConnections:\n self.openConnections[group]=set()\n if username not in self.openConnections:\n self.openConnections[username]=set()\n self.addWebSocket(websocket,group,username)\n async for message in websocket:\n pass\n #we don't expect messages just keep the connection open until it closes\n except asyncio.TimeoutError:\n print(\"authentication timeout for user %s\" % username)\n finally:\n self.removeWebSocket(websocket,group,username)",
"async def websocket_connect(self, event):\n sender = self.scope['user']\n if sender.is_anonymous:\n return\n user_id = self.scope['url_route']['kwargs']['user_id']\n\n try:\n receiver = User.objects.get(id=int(user_id))\n except User.DoesNotExist:\n return\n self.conversation = await self.get_conversation(sender, receiver)\n\n self.conversation_name = str(self.conversation.id)\n\n await self.channel_layer.group_add(\n self.conversation_name,\n self.channel_name\n )\n\n await self.send({\n \"type\": \"websocket.accept\"\n })",
"async def _authenticate(self, ws: WSAssistant):\n try:\n auth_payload: Dict[str, Any] = self._bitmart_auth.get_ws_auth_payload(bitmart_utils.get_ms_timestamp())\n ws_message: WSRequest = WSRequest(auth_payload)\n\n await ws.send(ws_message)\n ws_response = await ws.receive()\n\n auth_resp: Dict[str, Any] = ws_response.data\n\n if \"errorCode\" in auth_resp.keys():\n self.logger().error(f\"WebSocket login errored with message: {auth_resp['errorMessage']}\",\n exc_info=True)\n raise ConnectionError\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Error occurred when authenticating to user stream.\", exc_info=True)\n raise",
"async def _open_connection(self, conn_name):\n path = WS_ENDPOINT_REMOTE_CONTROL if conn_name == WS_REMOTE else WS_ENDPOINT_APP_CONTROL\n token = (await self._store.get(ATTR_TOKEN)) if conn_name == WS_REMOTE else None\n url = format_websocket_url(self.host, path, self.name, token)\n _LOGGER.debug(f\"{conn_name}: Attempting connection to {url}\")\n try:\n self._connected = False\n self._is_connecting = True\n async with websockets.connect(url, ssl=INSECURE_SSL_CTX) as ws:\n setattr(self, f\"_ws_{conn_name}\", ws)\n async for msg in ws:\n try:\n await self._handle_message(conn_name, msg)\n except AuthorizationError:\n _LOGGER.error(f\"{conn_name}: Authorization refused\")\n break\n except Exception as exc:\n _LOGGER.error(f\"Error while handling message: {exc}\", exc_info=True)\n except (websockets.WebSocketException, asyncio.CancelledError, ConnectionError) as exc:\n _LOGGER.debug(f\"{conn_name}: {exc}\", exc_info=True)\n except Exception as exc:\n _LOGGER.error(f\"{conn_name}: {exc}\", exc_info=True)\n finally:\n _LOGGER.debug(f\"{conn_name}: disconnected\")\n setattr(self, f\"_ws_{conn_name}\", None)\n self._connected = False\n self._is_connecting = False\n self._current_app = None\n self._installed_apps = {}",
"async def handle_websocket_connection(websocket, path):\n logging.info('New connection established')\n\n # wait for user requests until the connection is closed\n while True:\n\n # handle request\n try:\n await wait_for_user_request(websocket)\n\n # connection closed successfully: log it and return\n except websockets.exceptions.ConnectionClosedOK:\n logging.info('Connection closed cleanly')\n return\n\n # unexpected error: log it and return\n except Exception as e:\n logging.info(f'Connection closed due unexpected error: {e}')\n return",
"async def _authenticate(self, ws: WSAssistant):\n auth_payload: List[str] = self._auth.get_ws_auth_payload()\n payload = {\"op\": \"login\", \"args\": auth_payload}\n login_request: WSJSONRequest = WSJSONRequest(payload=payload)\n await ws.send(login_request)\n response: WSResponse = await ws.receive()\n message = response.data\n\n if (\n message[\"event\"] != \"login\"\n and message[\"code\"] != \"0\"\n ):\n self.logger().error(\"Error authenticating the private websocket connection\")\n raise IOError(\"Private websocket connection authentication failed\")",
"async def should_be_websocket_welcome(token):\n\n communicator = make_communicator(token)\n connected, _ = await communicator.connect()\n assert connected\n message = await communicator.receive_json_from()\n await communicator.disconnect()\n assert message.get('type') == 'notification'\n assert message.get('code') == 'api-motd'",
"def test_ApiConnectionWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n connection = ApiConnection(self.userId, \"\")\n self.assertFalse(connection.connected())",
"def test_invalid_token(self, mock_get, mock_subscribe):\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XYYX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}'.format(token))\n self.assertSocketError(ws, 4000, 'Invalid token.')\n self.assertFalse(mock_get.called)\n self.assertFalse(mock_subscribe.called)",
"async def connect(self, channel_id: int):\n payload = {\n 'op': 4,\n 'd': {\n 'guild_id': self.guild_id,\n 'channel_id': str(channel_id),\n 'self_mute': False,\n 'self_deaf': False\n }\n }\n await self._bot._connection._get_websocket(int(self.guild_id)).send(json.dumps(payload))",
"async def should_be_websocket_rejected_because_duplicated(token):\n\n communicator = make_communicator(token)\n connected, _ = await communicator.connect()\n assert connected\n message = await communicator.receive_json_from()\n await communicator.disconnect()\n assert message.get('type') == 'fatal'\n assert message.get('code') == 'already-chatting'",
"def test_invalid_room(self, mock_get, mock_subscribe):\n mock_get.side_effect = KeyError('Unknown room.')\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)",
"def _check_ws(self) -> None:\n if self.websocket is False:\n raise UninitializedWebsocket('Websocket is not initialized, if you are not using a context manager '\n 'you have to await start_ws() coroutine manually')",
"async def connect(self) -> None:\n exceptions = (\n OSError,\n ConnectionClosed,\n aiohttp.ClientError,\n asyncio.TimeoutError,\n errors.HTTPException,\n )\n\n async def throttle() -> None:\n now = time.monotonic()\n between = now - last_connect\n sleep = random.random() * 4 if between > 600 else 100 / between ** 0.5\n log.info(f\"Attempting to connect to another CM in {sleep}\")\n await asyncio.sleep(sleep)\n\n while not self.is_closed():\n last_connect = time.monotonic()\n\n try:\n self.ws = await asyncio.wait_for(SteamWebSocket.from_client(self, cm_list=self._cm_list), timeout=60)\n except exceptions:\n await throttle()\n continue\n\n try:\n while True:\n await self.ws.poll_event()\n except exceptions as exc:\n if isinstance(exc, ConnectionClosed):\n self._cm_list = exc.cm_list\n self.dispatch(\"disconnect\")\n finally:\n if not self.is_closed():\n await throttle()",
"def test_channel_join_except_invalid_auth():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n\n # Create invalid token for the test\n invalid_user = 999\n invalid_token = generate_token(invalid_user)\n\n with pytest.raises(AccessError):\n channel_join_v2(invalid_token, channel_id1[\"channel_id\"])",
"def ws_require_user(\n only_owner: bool = False,\n only_system_user: bool = False,\n allow_system_user: bool = True,\n only_active_user: bool = True,\n only_inactive_user: bool = False,\n only_supervisor: bool = False,\n) -> Callable[[const.WebSocketCommandHandler], const.WebSocketCommandHandler]:\n\n def validator(func: const.WebSocketCommandHandler) -> const.WebSocketCommandHandler:\n \"\"\"Decorate func.\"\"\"\n\n @wraps(func)\n def check_current_user(\n hass: HomeAssistant, connection: ActiveConnection, msg: dict[str, Any]\n ) -> None:\n \"\"\"Check current user.\"\"\"\n\n def output_error(message_id: str, message: str) -> None:\n \"\"\"Output error message.\"\"\"\n connection.send_message(\n messages.error_message(msg[\"id\"], message_id, message)\n )\n\n if only_owner and not connection.user.is_owner:\n output_error(\"only_owner\", \"Only allowed as owner\")\n return\n\n if only_system_user and not connection.user.system_generated:\n output_error(\"only_system_user\", \"Only allowed as system user\")\n return\n\n if not allow_system_user and connection.user.system_generated:\n output_error(\"not_system_user\", \"Not allowed as system user\")\n return\n\n if only_active_user and not connection.user.is_active:\n output_error(\"only_active_user\", \"Only allowed as active user\")\n return\n\n if only_inactive_user and connection.user.is_active:\n output_error(\"only_inactive_user\", \"Not allowed as active user\")\n return\n\n if only_supervisor and connection.user.name != HASSIO_USER_NAME:\n output_error(\"only_supervisor\", \"Only allowed as Supervisor\")\n return\n\n return func(hass, connection, msg)\n\n return check_current_user\n\n return validator",
"async def connect(self):\n if not self.http:\n raise ClientException('You must log in via REST before connecting to the gateway.')\n\n while not self.closed:\n ws_build = GuildedWebSocket.build(self, loop=self.loop)\n gws = await asyncio.wait_for(ws_build, timeout=60)\n if type(gws) != GuildedWebSocket:\n self.dispatch('error', gws)\n return\n\n self.ws = gws\n self.http.ws = self.ws\n self.dispatch('connect')\n\n if self._login_presence is not None:\n # we do this here because why bother setting a presence if you won't show up in the online list anyway\n await self.change_presence(self._login_presence)\n\n #if self._login_presence is Presence.online:\n # todo: start http ping thread\n # no need to do that if you don't want an online presence\n\n if not self.disable_team_websockets:\n for team in self.teams:\n team_ws_build = GuildedWebSocket.build(self, loop=self.loop, teamId=team.id)\n team_ws = await asyncio.wait_for(team_ws_build, timeout=60)\n if type(team_ws) == GuildedWebSocket:\n team.ws = team_ws\n self.dispatch('team_connect', team)\n\n async def listen_socks(ws, team=None):\n teamId = team.id if team is not None else None\n next_backoff_time = 5\n while True and ws is not None:\n try:\n await ws.poll_event()\n except WebSocketClosure as exc:\n code = ws._close_code or ws.socket.close_code\n if teamId:\n log.warning('Team %s\\'s websocket closed with code %s, attempting to reconnect in %s seconds', teamId, code, next_backoff_time)\n self.dispatch('team_disconnect', teamId)\n else:\n log.warning('Websocket closed with code %s, attempting to reconnect in %s seconds', code, next_backoff_time)\n self.dispatch('disconnect')\n await asyncio.sleep(next_backoff_time)\n if teamId:\n build = GuildedWebSocket.build(self, loop=self.loop, teamId=teamId)\n else:\n # possible reconnect issues brought up by @8r2y5\n build = GuildedWebSocket.build(self, loop=self.loop)\n try:\n ws = await asyncio.wait_for(build, timeout=60)\n except asyncio.TimeoutError:\n log.warning('Timed out trying to reconnect.')\n next_backoff_time += 5\n else:\n next_backoff_time = 5\n\n self._ready.set()\n self.dispatch('ready')\n\n await asyncio.gather(\n listen_socks(self.ws), *[listen_socks(team.ws, team) for team in self.teams]\n )",
"def attempt_connection(self):\n self.connection_error = False\n sleep_exp = 1\n connect_count = 0\n\n while self.running and self.socket is None and connect_count < self.__reconnect_attempts_max:\n for host_and_port in self.__hosts_and_ports:\n try:\n log.info(\"Attempting connection to websocket %s\", host_and_port)\n self.socket = websocket.WebSocket()\n proto, host, port, path = host_and_port[3], host_and_port[0], host_and_port[1], host_and_port[2]\n if port:\n ws_uri = '{}://{}:{}/{}'.format(proto, host, port, path)\n else:\n ws_uri = '{}://{}/{}'.format(proto, host, path)\n\n self.socket.connect(ws_uri,\n timeout=self.__timeout)\n\n self.current_host_and_port = host_and_port\n log.info(\"Established connection to %s\", ws_uri)\n break\n except WebSocketException:\n self.socket = None\n connect_count += 1\n log.warning(\"Could not connect to host %s, port %s\", host_and_port[0], host_and_port[1], exc_info=1)\n\n if self.socket is None:\n sleep_duration = (min(self.__reconnect_sleep_max,\n ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))\n * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))\n * (1.0 + random.random() * self.__reconnect_sleep_jitter))\n sleep_end = monotonic() + sleep_duration\n log.debug(\"Sleeping for %.1f seconds before attempting reconnect\", sleep_duration)\n while self.running and monotonic() < sleep_end:\n time.sleep(0.2)\n\n if sleep_duration < self.__reconnect_sleep_max:\n sleep_exp += 1\n\n if not self.socket:\n raise exception.ConnectFailedException()",
"def test_missing_token(self, mock_get, mock_subscribe):\n ws = yield self.ws_connect('/socket')\n self.assertSocketError(ws, 4300, 'Missing token.')\n self.assertFalse(mock_get.called)\n self.assertFalse(mock_subscribe.called)",
"def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)",
"def connect_never_retry():\n try:\n messaging_service = MessagingService.builder().from_properties(boot.broker_properties()) \\\n .with_reconnection_retry_strategy(RetryStrategy.never_retry()).build()\n future = messaging_service.connect_async()\n\n return future.result()\n\n except PubSubPlusClientError as exception:\n raise exception\n\n finally:\n messaging_service.disconnect_async()",
"def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED",
"async def connect(self, handler: Callable, connection_name: str = \"main\") -> None:\n if self.connection_env == \"production\":\n ws_url = \"wss://ws.kraken.com\"\n elif self.connection_env == \"production-auth\":\n ws_url = \"wss://ws-auth.kraken.com\"\n elif self.connection_env == \"beta\":\n ws_url = \"wss://beta-ws.kraken.com\"\n elif self.connection_env == \"beta-auth\":\n ws_url = \"wss://beta-ws-auth.kraken.com\"\n websocket = await websockets.connect(ws_url)\n self.connections[connection_name] = {}\n self.connections[connection_name][\"websocket\"] = websocket\n self.connections[connection_name][\"subscriptions\"] = []\n while True:\n try:\n if not websocket.open:\n websocket = await websockets.connect(ws_url)\n self.connections[connection_name][\"websocket\"] = websocket\n if self.connections[connection_name][\"subscriptions\"]:\n for subscription in self.connections[connection_name][\n \"subscriptions\"\n ]:\n await self.subscribe(\n subscription=subscription[\"subscription\"],\n pair=subscription[\"pair\"],\n connection_name=connection_name,\n )\n else:\n message = await websocket.recv()\n if \"errorMessage\" in message:\n error = json.loads(message)\n self.logger.error(error[\"errorMessage\"])\n else:\n data = json.loads(message)\n await handler(data)\n except socket.gaierror:\n self.logger.debug(\"Socket gaia error, let's reconnect anyway...\")\n continue\n except websockets.exceptions.ConnectionClosedError:\n self.logger.debug(\"WebSockets connection closed error, let's reconnect anyway...\")\n continue\n except websockets.exceptions.ConnectionClosedOK:\n self.logger.debug(\"WebSockets connection closed ok, let's reconnect anyway...\")\n continue\n except ConnectionResetError:\n self.logger.debug(\"Connection reset error, let's reconnect anyway...\")\n continue",
"def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])",
"async def _perform_connect(self):\n # Return connected if we are already connected.\n if self._websocket:\n if self._websocket.open:\n return True\n\n self.logger.debug(\"Starting connect.\")\n\n self.logger.debug(\"Connecting to %s\" % self.wsc_url)\n self._websocket = await websockets.connect(self.wsc_url)\n \n #We need to authenticate upon opening the connection (modified to remove apkVesrion, os, model, romVersion NW 28th Oct 2020)\n payload = {}\n \n payload['action'] = \"userOnline\"\n payload['userAgent'] = 'app'\n payload['version'] = 8\n payload['appid'] = self._appid\n payload['_nonce'] = self._nonce\n #payload['apkVesrion'] = \"1.8\"\n #payload['apkVersion'] = \"1.8\"\n #payload['os'] = 'ios'\n payload['at'] = self.authenticationToken\n payload['apikey'] = self.apikey\n payload['ts'] = self.timestamp\n #payload['model'] = 'iPhone10,6'\n #payload['romVersion'] = '11.1.2'\n payload['sequence'] = self.sequence\n\n string = json.dumps(payload);\n\n self.logger.debug('Sending login request [%s]' % string);\n\n await self._send_request(string)",
"async def test_connection(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True",
"def test_slackWH_send_badAuth(get_slackwebhook):\n s = get_slackwebhook\n s.url = 'https://hooks.slack.com/services/badAuthCreds'\n with pytest.raises(MessageSendError):\n s.send()",
"def ws_connect(message):\n # Accept the connection\n message.reply_channel.send({\"accept\": True}) \n \n prefix, language = message['path'].strip('/').split('/')\n grLangUser = Group('knocker-{0}-{1}'.format(language, \n message.user.id))\n grLangUser.add(message.reply_channel)\n message.channel_session['knocker'] = language",
"def websocket_init(self, payload, *args, **kwargs):\n data = json.loads(str(payload, \"utf-8\"))\n self.is_connecting = False\n if url := data.get(\"url\"):\n self.gateway = f\"{url}/?v={DISCORD_API_VERSION}&encoding=json\".encode(\"utf-8\")\n useragent = kwargs.pop(\"useragent\", DISCORD_USER_AGENT)\n headers = kwargs.pop(\n \"headers\",\n {\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n },\n )\n\n logger.log_info(\"Connecting to Discord Gateway...\")\n WebSocketClientFactory.__init__(\n self, url, *args, headers=headers, useragent=useragent, **kwargs\n )\n self.start()\n else:\n logger.log_err(\"Discord did not return a websocket URL; connection cancelled.\")"
] | [
"0.63569295",
"0.6157",
"0.60669607",
"0.6048544",
"0.59831417",
"0.58894384",
"0.58818215",
"0.5854295",
"0.5843035",
"0.57614714",
"0.5727695",
"0.5673268",
"0.56722736",
"0.5671324",
"0.56529385",
"0.5622091",
"0.55809",
"0.55794907",
"0.5520937",
"0.55131423",
"0.5498307",
"0.5487968",
"0.5470858",
"0.5449964",
"0.54382324",
"0.54353625",
"0.54317546",
"0.5429045",
"0.5377827",
"0.53639877"
] | 0.6747004 | 0 |
Tests the whole chatrooms interaction, given a valid token, with several simultaneous users. | async def test_chatrooms_accounts(rooms):
# Register all the users.
for name in USERS:
username = name
password = name * 2 + '$12345'
email = name + '@example.org'
await attempt_register(username, password, email)
# Login all the users.
tokens = {}
for name in USERS:
username = name
password = name * 2 + '$12345'
tokens[name] = await attempt_login(username, password)
# Test profile for all of them.
for name in USERS:
await attempt_profile(tokens[name])
# "erin" and "frank" will logout.
for name in ["erin", "frank"]:
await attempt_logout(tokens[name])
# "erin" and "frank" are not authorized for the profile endpoint.
for name in ["erin", "frank"]:
await attempt_profile(tokens[name], 401)
# The others are still authorized:
for name in ["alice", "bob", "carl", "david"]:
await attempt_profile(tokens[name])
###################################################
# Now testing the websockets side of the session. #
###################################################
# The four still-valid tokens should connect with no issue.
for name in ["alice", "bob", "carl", "david"]:
await should_be_websocket_welcome(tokens[name])
# The other two, should receive a not-authenticated error.
for name in ["erin", "frank"]:
await should_be_websocket_rejected_because_anonymous(tokens[name])
# Now alice connects and, in the meantime, she should fail
# to connect again, simultaneously.
alice_communicator = make_communicator(tokens['alice'])
alice_connected, _ = await alice_communicator.connect()
_ = await alice_communicator.receive_json_from()
assert alice_connected
await should_be_websocket_rejected_because_duplicated(tokens['alice'])
# Now we destroy the session for alice via logout.
await attempt_logout(tokens['alice'])
message = await alice_communicator.receive_json_from()
# A message will be received: logged-out
assert message.get('type') == 'notification'
assert message.get('code') == 'logged-out'
await alice_communicator.disconnect() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_chatroom_commands():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice will:\n # 1. Connect and retrieve MOTD.\n # 2. List rooms, and expect the four in the example.\n # 3. Join \"family\" room, and receive a success.\n # 4. List rooms, and expect the four ones, with \"family\" having \"joined\": true.\n # 3. Join \"family\" room, and receive an error.\n # 4. List rooms, and expect the four ones, with \"family\" having \"joined\": true.\n alice_communicator = make_communicator(tokens['alice'])\n alice_connected, _ = await alice_communicator.connect()\n motd = await alice_communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': False}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n joined = await alice_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 'family'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n error = await alice_communicator.receive_json_from()\n assert error['type'] == 'error'\n assert error['code'] == 'room:already-joined'\n assert error['details']['name'] == 'family'\n await alice_communicator.send_json_to({'type': 'list'})\n list_ = await alice_communicator.receive_json_from()\n assert list_['type'] == 'notification'\n assert list_['code'] == 'list'\n assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]\n # Bob will:\n # 1. Connect and retrieve MOTD.\n # 2. Join \"family\" room, and receive a success.\n # 3. Send a message in the \"family\" room: \"Hello Alice\", and receive a success.\n # 4. Leave the room, and receive a success.\n # 5. Leave the room, and receive an error.\n # 6. Disconnect.\n # Alice will:\n # 1. Receive the \"Bob joined\" message.\n # 2. Receive the \"Hello Alice\" message.\n # 3. 
Receive the \"Bob left\" message.\n # ~~ Bob interactions ~~\n bob_communicator = make_communicator(tokens['bob'])\n bob_connected, _ = await bob_communicator.connect()\n motd = await bob_communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await bob_communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n joined = await bob_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n await bob_communicator.send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello Alice'})\n message = await bob_communicator.receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'bob'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello Alice'\n await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})\n parted = await bob_communicator.receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})\n error = await bob_communicator.receive_json_from()\n assert error['type'] == 'error'\n assert error['code'] == 'room:not-joined'\n assert error['details']['name'] == 'family'\n await bob_communicator.disconnect()\n # ~~ Alice interactions ~~\n joined = await alice_communicator.receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n message = await alice_communicator.receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'bob'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello Alice'\n parted = await alice_communicator.receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n await alice_communicator.disconnect()",
"async def test_chatroom_broadcast():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice, Bob, Carl connect to the server.\n communicators = {}\n for name in ['alice', 'bob', 'carl']:\n communicator = make_communicator(tokens[name])\n communicators[name] = communicator\n connected, _ = await communicator.connect()\n assert connected\n motd = await communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # Alice expects 3 joins.\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Bob expects 2 joins.\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Carl expects 1 join.\n joined = await communicators['carl'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert joined['you']\n assert joined['room_name'] == 'family'\n # Now Alice sends a \"Hello guys\" message, and bob and carl\n # will read it.\n await communicators['alice'].send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello guys'})\n message = await communicators['alice'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['bob'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['carl'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n # Now they all leave the channel.\n for name in ['alice', 'bob', 'carl']:\n await communicators[name].send_json_to({'type': 'part', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # And they will receive all the part messages.\n parted = await 
communicators['alice'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'carl'\n assert parted['you']\n assert parted['room_name'] == 'family'\n # And the 3 will disconnect.\n for name in ['alice', 'bob', 'carl']:\n await communicator.disconnect()",
"def test_im_chat_sessions(self):\n pass",
"def test_private_room(self) -> None:\n u1 = self.register_user(\"user1\", \"pass\")\n u1_token = self.login(u1, \"pass\")\n u2 = self.register_user(\"user2\", \"pass\")\n u2_token = self.login(u2, \"pass\")\n u3 = self.register_user(\"user3\", \"pass\")\n\n # u1 can't see u2 until they share a private room, or u1 is in a public room.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 0)\n\n # Get u1 and u2 into a private room.\n room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)\n self.helper.invite(room, src=u1, targ=u2, tok=u1_token)\n self.helper.join(room, user=u2, tok=u2_token)\n\n # Check we have populated the database correctly.\n users, public_users, shares_private = self.get_success(\n self.user_dir_helper.get_tables()\n )\n self.assertEqual(users, {u1, u2, u3})\n self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)})\n self.assertEqual(public_users, set())\n\n # We get one search result when searching for user2 by user1.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 1)\n\n # We get NO search results when searching for user2 by user3.\n s = self.get_success(self.handler.search_users(u3, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 0)\n\n # We get NO search results when searching for user3 by user1.\n s = self.get_success(self.handler.search_users(u1, \"user3\", 10))\n self.assertEqual(len(s[\"results\"]), 0)\n\n # User 2 then leaves.\n self.helper.leave(room, user=u2, tok=u2_token)\n\n # Check this is reflected in the DB.\n users, public_users, shares_private = self.get_success(\n self.user_dir_helper.get_tables()\n )\n self.assertEqual(users, {u1, u2, u3})\n self.assertEqual(shares_private, set())\n self.assertEqual(public_users, set())\n\n # User1 now gets no search results for any of the other users.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 0)\n\n s = self.get_success(self.handler.search_users(u1, \"user3\", 10))\n self.assertEqual(len(s[\"results\"]), 0)",
"def test_multiple_devices(self) -> None:\n\n self.http_client.request = AsyncMock(\n return_value=FakeResponse.json(\n code=200,\n payload={\n \"active\": True,\n \"sub\": SUBJECT,\n \"scope\": \" \".join(\n [\n MATRIX_USER_SCOPE,\n f\"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC\",\n f\"{MATRIX_DEVICE_SCOPE_PREFIX}DDEEFF\",\n ]\n ),\n \"username\": USERNAME,\n },\n )\n )\n request = Mock(args={})\n request.args[b\"access_token\"] = [b\"mockAccessToken\"]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n self.get_failure(self.auth.get_user_by_req(request), AuthError)",
"def test_generate_multiple_tokens_for_user(self):\n # setup\n user = self.generate_username_password()\n resp = self.create_user(user)\n resp_body = resp.json()\n try:\n assert resp.status_code == 201\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body[\"username\"] == user[\"userName\"]\n assert resp_body[\"userID\"] != \"\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n\n # test\n for x in range(3):\n resp2 = self.generate_token(user)\n resp_body2 = resp2.json()\n try:\n assert resp2.status_code == 200\n assert resp_body2[\"token\"] != \"\"\n assert resp_body2[\"expires\"] != \"\"\n assert resp_body2[\"status\"] == \"Success\"\n assert resp_body2[\"result\"] == \"User authorized successfully.\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp2.request)\n self.pprint_response(resp2)\n # todo: Without the sleep, this test case fails at random. Possible performance bug?\n time.sleep(1)\n\n # teardown:\n resp = self.delete_user_basic_auth(resp_body[\"userID\"], user)\n try:\n assert resp.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)",
"def test_unknown_room_version(self) -> None:\n inviter = self.register_user(\"creator\", \"pass\", admin=True)\n inviter_tok = self.login(\"@creator:test\", \"pass\")\n\n user = self.register_user(\"user\", \"pass\")\n tok = self.login(\"user\", \"pass\")\n\n # Do an initial sync on a different device.\n requester = create_requester(user)\n initial_result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\"dev\")\n )\n )\n\n # Create a room as the user.\n joined_room = self.helper.create_room_as(user, tok=tok)\n\n # Invite the user to the room as someone else.\n invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)\n self.helper.invite(invite_room, targ=user, tok=inviter_tok)\n\n knock_room = self.helper.create_room_as(\n inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok\n )\n self.helper.send_state(\n knock_room,\n EventTypes.JoinRules,\n {\"join_rule\": JoinRules.KNOCK},\n tok=inviter_tok,\n )\n channel = self.make_request(\n \"POST\",\n \"/_matrix/client/r0/knock/%s\" % (knock_room,),\n b\"{}\",\n tok,\n )\n self.assertEqual(200, channel.code, channel.result)\n\n # The rooms should appear in the sync response.\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)\n )\n )\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n\n # Test a incremental sync (by providing a since_token).\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester,\n sync_config=generate_sync_config(user, device_id=\"dev\"),\n since_token=initial_result.next_batch,\n )\n )\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n\n # Poke the database and update the room version to an unknown one.\n for room_id in (joined_room, invite_room, knock_room):\n self.get_success(\n self.hs.get_datastores().main.db_pool.simple_update(\n \"rooms\",\n keyvalues={\"room_id\": room_id},\n updatevalues={\"room_version\": \"unknown-room-version\"},\n desc=\"updated-room-version\",\n )\n )\n\n # Blow away caches (supported room versions can only change due to a restart).\n self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()\n self.store.get_rooms_for_user.invalidate_all()\n self.store._get_event_cache.clear()\n self.store._event_ref.clear()\n\n # The rooms should be excluded from the sync response.\n # Get a new request key.\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)\n )\n )\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n\n # The rooms should also not be in an incremental sync.\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester,\n sync_config=generate_sync_config(user, device_id=\"dev\"),\n since_token=initial_result.next_batch,\n )\n )\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])",
"def test_set_op(self):\n\n room = RoomFactory()\n\n self.connection.room = room\n user_with_room_privilege(\n level=6,\n connection=self.connection,\n online=True,\n room=room\n )\n\n target_user_admin = user_with_room_privilege(\n level=10,\n online=True,\n room=room\n )\n target_user = user_with_room_privilege(\n level=4,\n online=True,\n room=room\n )\n\n ret = self.chat_command(self.resource, \"invalid_user\")\n self.assertEqual(len(ret), 1)\n self.assertRegex(ret[0], \"Unknown user\")\n\n ret = self.chat_command(self.resource, target_user_admin.name)\n self.assertEqual(len(ret), 1)\n self.assertRegex(ret[0], \"Not authorize\")\n self.assertEqual(target_user_admin.level(room.id), 10)\n\n ret = self.chat_command(self.resource, target_user.name)\n self.assertIsNone(ret)\n self.assertEqual(target_user.level(room.id), 1)",
"def handle_list_room(self, lobby_command, client_socket):\n print(\"Handling list command...\")\n msg = ''\n words = lobby_command.split()\n # List all rooms\n if len(words) == 1:\n msg = 'Available Rooms:\\n'\n for room in self.rooms:\n msg += f'\\t\\t{room.name}\\n'\n \n self.just_send(client_socket, msg)\n return\n else:\n # List all rooms and members\n roomname = words[1]\n if roomname == \"all\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'All rooms and users:\\n'\n for room in self.rooms:\n msg += f'Room: {room.name}\\nUsers: '\n for user in room.room_attrbts['members']:\n msg += f'\\t{user}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n\n # List user's room membership\n if roomname == \"mine\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'Rooms user {user} has joined:\\n'\n for room in self.rooms:\n if user in room.room_attrbts['members']:\n msg += f'\\t\\t{room.name}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n \n # List membership and active users of a room\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Request roomname found..\")\n msg = f'User members of room {roomname}:\\n'\n for member in _room.room_attrbts['members']:\n msg += f'\\t\\t{member}\\n'\n msg+= '\\n'\n self.just_send(client_socket, msg)\n \n msg = 'Users active in room:\\n'\n for active_user in _room.room_attrbts['active']:\n msg += f'\\t\\t{active_user}\\n'\n self.just_send(client_socket, msg)\n return\n if msg == '':\n msg = f'Client passed an invalid room to list members of {roomname}\\n'\n self.log_and_send(client_socket, msg)\n return",
"def test_spam_checker(self) -> None:\n u1 = self.register_user(\"user1\", \"pass\")\n u1_token = self.login(u1, \"pass\")\n u2 = self.register_user(\"user2\", \"pass\")\n u2_token = self.login(u2, \"pass\")\n\n # We do not add users to the directory until they join a room.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 0)\n\n room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)\n self.helper.invite(room, src=u1, targ=u2, tok=u1_token)\n self.helper.join(room, user=u2, tok=u2_token)\n\n # Check we have populated the database correctly.\n shares_private = self.get_success(\n self.user_dir_helper.get_users_who_share_private_rooms()\n )\n public_users = self.get_success(\n self.user_dir_helper.get_users_in_public_rooms()\n )\n\n self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)})\n self.assertEqual(public_users, set())\n\n # We get one search result when searching for user2 by user1.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 1)\n\n async def allow_all(user_profile: UserProfile) -> bool:\n # Allow all users.\n return False\n\n # Configure a spam checker that does not filter any users.\n spam_checker = self.hs.get_module_api_callbacks().spam_checker\n spam_checker._check_username_for_spam_callbacks = [allow_all]\n\n # The results do not change:\n # We get one search result when searching for user2 by user1.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 1)\n\n # Configure a spam checker that filters all users.\n async def block_all(user_profile: UserProfile) -> bool:\n # All users are spammy.\n return True\n\n spam_checker._check_username_for_spam_callbacks = [block_all]\n\n # User1 now gets no search results for any of the other users.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 0)",
"def test_channel_join_except_repetitive():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", True)\n\n\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])",
"def test_join_room_socket(self, mock_join):\n mock_join.return_value = '1234'\n response = self.fetch('/rooms/1234', method='GET')\n self.assertTrue(mock_join.called)\n with self.assertJSON(response) as result:\n protocol = 'ws' if self.get_protocol() == 'http' else 'wss'\n expected = '{}://localhost:{}/socket'.format(protocol, self.get_http_port())\n self.assertEqual(result['socket'], expected)\n self.assertIn('user', result)\n self.assertIn('token', result)\n user, token = result['user'], result['token']\n info = jwt.decode(token, 'XXXX')\n self.assertEqual(info['uuid'], user)\n self.assertEqual(info['room'], '1234')",
"def test__API_with_correct_answers(self):\n self.mock_connection.state = MockConnection.CORRECT_NUM_OF_CONFIRMATIONS\n\n # mutex must be acquired\n self.assertEqual(self.mutex.lock(), True) # acquire mutex\n self.mutex.unlock() # release mutex",
"def test_filter_user_and_room(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?user_id=%s&room_id=%s\" % (self.other_user, self.room_id1),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 5)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertNotIn(\"next_token\", channel.json_body)\n self._check_fields(channel.json_body[\"event_reports\"])\n\n for report in channel.json_body[\"event_reports\"]:\n self.assertEqual(report[\"user_id\"], self.other_user)\n self.assertEqual(report[\"room_id\"], self.room_id1)",
"def testLoginTwoUniqueUsersConsecutively(self):\n self.users.TESTAPI_resetFixture()\n\n self.users.add(\"happy\", \"birthday\")\n self.users.login(\"happy\", \"birthday\")\n self.users.login(\"happy\", \"birthday\")\n respData = self.users.login(\"happy\", \"birthday\")\n self.assertEquals(respData, 4)\n\n self.users.add(\"merry\", \"christmas\")\n respData = self.users.login(\"merry\", \"christmas\")\n self.assertEquals(respData, 2)\n respData = self.users.login(\"happy\", \"birthday\")\n self.assertEquals(respData, 5)",
"def test_get_active_rooms(self):\n filename = './test_json_data/chat_participant_list.txt'\n\n result = jh.getActiveRooms(filename)\n bad_result = jh.getActiveRooms('invalid file path')\n \n self.assertEqual(len(result),2)\n self.assertEqual(type(result),type({'1':1}))\n self.assertEqual(len(bad_result), 0)",
"def test_other_user_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False, 'YYY': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=YYY'.format(token))\n mock_get.assert_called_with('123')\n self.assertTrue(mock_subscribe.called)\n args, kwargs = mock_subscribe.call_args\n self.assertEqual(args[0], 'YYY')\n self.assertEqual(args[1].uuid, 'XXX')\n yield self.close(ws)",
"def test_too_many_players_join(self):\n self.game.join_game(self.user1)\n self.game.join_game(self.user2)\n self.game.join_game(self.user3)\n self.game.join_game(self.user4)\n\n self.assertRaises(Exception, self.game.join_game, self.user1)",
"def test_twice_logging_in(test_client, test_session):\n tokens = []\n for _ in range(2):\n with patch(\"validators.authentication.session\", test_session):\n with patch(\"views.login.session\", test_session):\n payload = {\"username\": \"testuser1\", \"password\": \"Qwerty123_\"}\n response = test_client.post(\"api/v1/login\", data=payload)\n assert response.status_code == 200\n tokens.append(response.json()[\"access_token\"])\n time.sleep(1)\n assert tokens[0] != tokens[1]",
"def test_token_only_for_1_user(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertNotEqual(self.user.verify_auth_token(user_token),\n self.user2)",
"def handle_enter_room_session(self, lobby_command, client_socket):\n words = lobby_command.split()\n sent_name = words[1]\n user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if room.name == sent_name and user in room.room_attrbts['members']:\n room.room_attrbts['active'].add(user)\n msg = f'User {user} is a member of room {sent_name}. Entering user into active mode for this room. ACTIVE'\n print(msg)\n return\n msg = f'Room {sent_name} not found or user {user} is not yet a member. NONACTIVE'\n self.log_and_send(client_socket, msg)\n return",
"def syncusers(bot, event, *args):\n if not bot.get_config_option('syncing_enabled'):\n return\n\n combined = True\n\n tokens = list(args)\n if \"rooms\" in args:\n tokens.remove(\"rooms\")\n combined = False\n if \"rooms\" in args:\n tokens.remove(\"room\")\n combined = False\n\n if len(args) == 0:\n filter_convs = [ event.conv_id ]\n else:\n filter_convs = tokens\n\n target_conv = filter_convs.pop(0)\n\n user_lists = _syncout_users(bot, target_conv)\n if not user_lists:\n yield from bot.coro_send_message(event.conv_id, \"no users were returned\")\n return\n\n _lines = []\n\n for room_id in user_lists:\n if combined and room_id != \"*\":\n # list everything, only use wildcard\n continue\n elif not combined and room_id == \"*\":\n # list room-by-room, skip wildcard\n continue\n\n if filter_convs and room_id not in filter_conv and room_id != target_conv:\n # if >1 conv id provided, filter by only supplied conv ids\n continue\n\n if room_id == \"*\":\n _lines.append(\"**all syncout rooms**\")\n else:\n _lines.append(\"**{} ({})**\".format( bot.conversations.get_name(room_id),\n room_id ))\n\n user_list = user_lists[room_id]\n for chat_id in user_list:\n _lines.append(\"* {}\".format(user_list[chat_id].full_name))\n\n yield from bot.coro_send_message(event.conv_id, \"\\n\".join(_lines))\n\n \"\"\"\n # are we in a sync room?\n sync_room_list = None\n for _rooms in syncouts:\n if conversation_id in _rooms:\n sync_room_list = _rooms\n _lines.append(_(\"<b>Sync Rooms: {}</b>\").format(len(sync_room_list)))\n break\n if sync_room_list is None:\n sync_room_list = [conversation_id]\n _lines.append(_(\"<b>Standard Room</b>\"))\n\n all_users = {}\n try:\n if combined or len(sync_room_list) == 1:\n all_users[\"_ALL_\"] = bot.get_users_in_conversation(sync_room_list)\n else:\n for room_id in sync_room_list:\n all_users[room_id] = bot.get_users_in_conversation(room_id)\n except KeyError as e:\n # most likely raised if user provides invalid room list\n yield from bot.coro_send_message(event.conv, _('<b>failed to retrieve user list</b>'))\n return\n\n unique_users = []\n\n for room_id in all_users:\n if room_id is not \"_ALL_\":\n _line_room = '<i>{}</i>'.format(room_id)\n _line_room = '<b>{}</b> {}'.format(\n bot.conversations.get_name(room_id),\n _line_room)\n _lines.append(_line_room)\n list_users = all_users[room_id]\n for User in list_users:\n _line_user = '{}'.format(User.full_name);\n if User.emails:\n _line_user = _line_user + ' ({})'.format(User.emails[0])\n _lines.append(_line_user)\n unique_users.append(User)\n\n unique_users = list(set(unique_users))\n _lines.append(_(\"<b>Total Unique: {}</b>\").format(len(unique_users)))\n\n yield from bot.coro_send_message(event.conv, '<br />'.join(_lines))\n \"\"\"",
"def test_banned(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.change_membership(r1, u1, u2, \"ban\", tok=u1token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"banned_members\"] - r1stats_ante[\"banned_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], -1\n )",
"def test_normal_user_pair(self) -> None:\n alice = self.register_user(\"alice\", \"pass\")\n alice_token = self.login(alice, \"pass\")\n bob = self.register_user(\"bob\", \"pass\")\n bob_token = self.login(bob, \"pass\")\n\n public = self.helper.create_room_as(\n alice,\n is_public=True,\n extra_content={\"visibility\": \"public\"},\n tok=alice_token,\n )\n private = self.helper.create_room_as(alice, is_public=False, tok=alice_token)\n self.helper.invite(private, alice, bob, tok=alice_token)\n self.helper.join(public, bob, tok=bob_token)\n self.helper.join(private, bob, tok=bob_token)\n\n # Alice also makes a second public room but no-one else joins\n public2 = self.helper.create_room_as(\n alice,\n is_public=True,\n extra_content={\"visibility\": \"public\"},\n tok=alice_token,\n )\n\n # The user directory should reflect the room memberships above.\n users, in_public, in_private = self.get_success(\n self.user_dir_helper.get_tables()\n )\n self.assertEqual(users, {alice, bob})\n self.assertEqual(in_public, {(alice, public), (bob, public), (alice, public2)})\n self.assertEqual(\n in_private,\n {(alice, bob, private), (bob, alice, private)},\n )",
"def test_legacy_spam_checker(self) -> None:\n u1 = self.register_user(\"user1\", \"pass\")\n u1_token = self.login(u1, \"pass\")\n u2 = self.register_user(\"user2\", \"pass\")\n u2_token = self.login(u2, \"pass\")\n\n # We do not add users to the directory until they join a room.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 0)\n\n room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)\n self.helper.invite(room, src=u1, targ=u2, tok=u1_token)\n self.helper.join(room, user=u2, tok=u2_token)\n\n # Check we have populated the database correctly.\n shares_private = self.get_success(\n self.user_dir_helper.get_users_who_share_private_rooms()\n )\n public_users = self.get_success(\n self.user_dir_helper.get_users_in_public_rooms()\n )\n\n self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)})\n self.assertEqual(public_users, set())\n\n # We get one search result when searching for user2 by user1.\n s = self.get_success(self.handler.search_users(u1, \"user2\", 10))\n self.assertEqual(len(s[\"results\"]), 1)",
"async def game(self):\n self.time_remaining = randint(\n int(pow(14 * len(self.participants), 0.8)),\n int(pow(30 * len(self.participants), 0.8))\n )\n\n member = choice(self.participants)\n Timer(1, self.timer).start()\n reply = True\n pass_to = []\n notify = randint(2, int(self.time_remaining / 2))\n\n while self.time_remaining > 0:\n if not pass_to:\n pass_from = list(self.participants)\n pass_from.pop(pass_from.index(member))\n pass_to = [choice(pass_from)]\n pass_from.pop(pass_from.index(pass_to[0]))\n pass_to.append(choice(pass_from))\n\n if reply is not None:\n await client.send_message(self.channel, \"{} :bomb: got the bomb! Pass it to either {} or {}!\".format(\n member.mention, pass_to[0].mention, pass_to[1].mention))\n\n def check(m):\n if len(m.mentions) > 0:\n if m.mentions[0] in pass_to:\n return True\n\n return False\n\n wait = (self.time_remaining - notify) if (self.time_remaining >= notify) else self.time_remaining\n reply = await client.wait_for_message(timeout=wait, channel=self.channel, author=member,\n check=check)\n\n if reply:\n member = reply.mentions[0]\n pass_to = []\n if self.member.permissions_in(self.channel).manage_messages:\n asyncio.ensure_future(client.delete_message(reply))\n elif self.time_remaining == notify:\n asyncio.ensure_future(client.send_message(self.channel, \":bomb: :fire: **IT'S GONNA BLOW!**\"))\n self.time_remaining -= 1\n\n await client.send_message(self.channel, \"{0.mention} :fire: :boom: :boom: :fire:\".format(member))\n await client.send_message(self.channel, \"**GAME OVER**\")",
"async def check_token(token: str) -> [bool, int]:\n out = True\n client_id = None\n test_client = discord.Client()\n\n log.debug(\"Checking bot token...\")\n\n try:\n log.debug(\"Attempting login...\")\n loop = asyncio.get_event_loop()\n loop.create_task(test_client.start(token))\n await test_client.wait_until_ready()\n client_id = test_client.user.id\n log.debug(\"Login successs\")\n except discord.LoginFailure:\n log.debug(\"Bot token invalid\")\n out = False\n finally:\n await test_client.close()\n log.debug(\"Logout of test instance complete.\")\n\n return [out, client_id]",
"def test_create_room(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n r1stats = self._get_current_stats(\"room\", r1)\n r2 = self.helper.create_room_as(u1, tok=u1token, is_public=False)\n r2stats = self._get_current_stats(\"room\", r2)\n\n assert r1stats is not None\n assert r2stats is not None\n\n self.assertEqual(\n r1stats[\"current_state_events\"], EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM\n )\n self.assertEqual(\n r2stats[\"current_state_events\"], EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM\n )\n\n self.assertEqual(r1stats[\"joined_members\"], 1)\n self.assertEqual(r1stats[\"invited_members\"], 0)\n self.assertEqual(r1stats[\"banned_members\"], 0)\n\n self.assertEqual(r2stats[\"joined_members\"], 1)\n self.assertEqual(r2stats[\"invited_members\"], 0)\n self.assertEqual(r2stats[\"banned_members\"], 0)",
"def test_explicit_room_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=123'.format(token))\n mock_get.assert_called_with('123')\n self.assertTrue(mock_subscribe.called)\n args, kwargs = mock_subscribe.call_args\n self.assertEqual(args[0], '123')\n self.assertEqual(args[1].uuid, 'XXX')\n yield self.close(ws)",
"def test_im_chat_messages(self):\n pass"
] | [
"0.72217304",
"0.6655384",
"0.6091954",
"0.60038173",
"0.5890071",
"0.57427335",
"0.56996506",
"0.5660185",
"0.5533575",
"0.5516244",
"0.549884",
"0.54945004",
"0.5488156",
"0.54860985",
"0.5470775",
"0.5467824",
"0.54579467",
"0.544405",
"0.54231054",
"0.5410716",
"0.5400262",
"0.53934306",
"0.53839093",
"0.5381159",
"0.5325491",
"0.5318481",
"0.5316902",
"0.5312993",
"0.5300937",
"0.5289166"
] | 0.69260347 | 1 |
Tests all the commands exchanged via the chatrooms. This includes double-join and double-part. | async def test_chatroom_commands():
# Login all the users.
tokens = {}
for name in USERS:
username = name
password = name * 2 + '$12345'
tokens[name] = await attempt_login(username, password)
# Alice will:
# 1. Connect and retrieve MOTD.
# 2. List rooms, and expect the four in the example.
# 3. Join "family" room, and receive a success.
# 4. List rooms, and expect the four ones, with "family" having "joined": true.
    # 5. Join "family" room again, and receive an error.
    # 6. List rooms again, and expect the four ones, with "family" still having "joined": true.
alice_communicator = make_communicator(tokens['alice'])
alice_connected, _ = await alice_communicator.connect()
motd = await alice_communicator.receive_json_from()
assert motd['type'] == 'notification'
assert motd['code'] == 'api-motd'
await alice_communicator.send_json_to({'type': 'list'})
list_ = await alice_communicator.receive_json_from()
assert list_['type'] == 'notification'
assert list_['code'] == 'list'
assert list_['list'] == [{'name': 'family', 'joined': False}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]
await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})
joined = await alice_communicator.receive_json_from()
assert joined['type'] == 'room:notification'
assert joined['code'] == 'joined'
assert joined['user'] == 'alice'
assert joined['you']
assert joined['room_name'] == 'family'
await alice_communicator.send_json_to({'type': 'list'})
list_ = await alice_communicator.receive_json_from()
assert list_['type'] == 'notification'
assert list_['code'] == 'list'
assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]
await alice_communicator.send_json_to({'type': 'join', 'room_name': 'family'})
error = await alice_communicator.receive_json_from()
assert error['type'] == 'error'
assert error['code'] == 'room:already-joined'
assert error['details']['name'] == 'family'
await alice_communicator.send_json_to({'type': 'list'})
list_ = await alice_communicator.receive_json_from()
assert list_['type'] == 'notification'
assert list_['code'] == 'list'
assert list_['list'] == [{'name': 'family', 'joined': True}, {'name': 'forex', 'joined': False}, {'name': 'friends', 'joined': False}, {'name': 'stockmarket', 'joined': False}]
# Bob will:
# 1. Connect and retrieve MOTD.
# 2. Join "family" room, and receive a success.
# 3. Send a message in the "family" room: "Hello Alice", and receive a success.
# 4. Leave the room, and receive a success.
# 5. Leave the room, and receive an error.
# 6. Disconnect.
# Alice will:
# 1. Receive the "Bob joined" message.
# 2. Receive the "Hello Alice" message.
# 3. Receive the "Bob left" message.
# ~~ Bob interactions ~~
bob_communicator = make_communicator(tokens['bob'])
bob_connected, _ = await bob_communicator.connect()
motd = await bob_communicator.receive_json_from()
assert motd['type'] == 'notification'
assert motd['code'] == 'api-motd'
await bob_communicator.send_json_to({'type': 'join', 'room_name': 'family'})
joined = await bob_communicator.receive_json_from()
assert joined['type'] == 'room:notification'
assert joined['code'] == 'joined'
assert joined['user'] == 'bob'
assert joined['you']
assert joined['room_name'] == 'family'
await bob_communicator.send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello Alice'})
message = await bob_communicator.receive_json_from()
assert message['type'] == 'room:notification'
assert message['code'] == 'message'
assert message['you']
assert message['user'] == 'bob'
assert message['room_name'] == 'family'
assert message['body'] == 'Hello Alice'
await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})
parted = await bob_communicator.receive_json_from()
assert parted['type'] == 'room:notification'
assert parted['code'] == 'parted'
assert parted['user'] == 'bob'
assert parted['you']
assert parted['room_name'] == 'family'
await bob_communicator.send_json_to({'type': 'part', 'room_name': 'family'})
error = await bob_communicator.receive_json_from()
assert error['type'] == 'error'
assert error['code'] == 'room:not-joined'
assert error['details']['name'] == 'family'
await bob_communicator.disconnect()
# ~~ Alice interactions ~~
joined = await alice_communicator.receive_json_from()
assert joined['type'] == 'room:notification'
assert joined['code'] == 'joined'
assert joined['user'] == 'bob'
assert not joined['you']
assert joined['room_name'] == 'family'
message = await alice_communicator.receive_json_from()
assert message['type'] == 'room:notification'
assert message['code'] == 'message'
assert not message['you']
assert message['user'] == 'bob'
assert message['room_name'] == 'family'
assert message['body'] == 'Hello Alice'
parted = await alice_communicator.receive_json_from()
assert parted['type'] == 'room:notification'
assert parted['code'] == 'parted'
assert parted['user'] == 'bob'
assert not parted['you']
assert parted['room_name'] == 'family'
await alice_communicator.disconnect() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_multiple_commands_at_same_time(self):",
"def helper_commands():\n # Test HELP\n try:\n check = check50.run(run_command).stdin(\"HELP\")\n for help in help_statement:\n check.stdout(help)\n except check50.Failure as error:\n raise check50.Failure(f\"HELP did not print the expected message.\\n {error}\")\n\n # Test LOOK command\n try:\n check50.run(run_command).stdin(\"LOOK\").stdout(room_1_description)\n check50.run(run_command).stdin(\"look\").stdout(room_1_description)\n except check50.Failure as error:\n raise check50.Failure(f\"LOOK/look did not print the expected room description.\\n {error}\")\n\n # Test QUIT\n try:\n check50.run(run_command).stdin(\"QUIT\").stdout(\"Thanks for playing!\").exit(0)\n except check50.Failure as error:\n raise check50.Failure(f\"QUIT did not function as expected.\\n {error}\")",
"async def test_chatroom_broadcast():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice, Bob, Carl connect to the server.\n communicators = {}\n for name in ['alice', 'bob', 'carl']:\n communicator = make_communicator(tokens[name])\n communicators[name] = communicator\n connected, _ = await communicator.connect()\n assert connected\n motd = await communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # Alice expects 3 joins.\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Bob expects 2 joins.\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Carl expects 1 join.\n joined = await communicators['carl'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert joined['you']\n assert joined['room_name'] == 'family'\n # Now Alice sends a \"Hello guys\" message, and bob and carl\n # will read it.\n await communicators['alice'].send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello guys'})\n message = await communicators['alice'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['bob'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['carl'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n # Now they all leave the channel.\n for name in ['alice', 'bob', 'carl']:\n await communicators[name].send_json_to({'type': 'part', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # And they will receive all the part messages.\n parted = await 
communicators['alice'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'carl'\n assert parted['you']\n assert parted['room_name'] == 'family'\n # And the 3 will disconnect.\n for name in ['alice', 'bob', 'carl']:\n await communicator.disconnect()",
"def _verifyCommand(self):\n for i in range(3):\n rc = self.subdevice.command_test() # Verify command is correct\n if rc is None:\n break",
"def test_im_chat_messages(self):\n pass",
"def check_commands(self):\n pass",
"def exec_commands(com):\n reply = ''\n if com is not None:\n if com == commands[0]:\n tables = db.create_tables(houses, from_)\n if tables == True:\n for j in range(len(c_responses[0]) - 1):\n# can use join and split functions to create softer code?? at least in future instances\n bot.send_message(c_responses[0][j], from_)\n else:\n reply = c_responses[0][(len(c_responses[0])-1)]\n elif com == commands[1]:\n house_info = db.house_info(from_)\n # Add feautures to find highest scoring house and return number of members\n reply = \"Houses:\\n\"\n for house in house_info:\n reply += house[1] + \"\\n\"\n if house[2] != None:\n reply += f\"Score: {house[2]}pts\\n\\n\"\n else:\n reply += f\"Score: 0pts\\n\\n\"\n elif com.startswith(commands[2]):\n instructions = com.split()\n id = 0\n info = user_query()\n user_id = info['user']['id']\n check = db.check_admin(from_, user_id)\n if check and check != 'not sorted':\n for house in houses:\n id += 1\n if house == instructions[1]:\n score = db.update_house_score(id, instructions[2], from_)\n reply = f\"{instructions[1]} new score is {score}\"\n else:\n reply = \"You have no power over me! PS:(if you are an admin use the /appoint me command to be recognised as such)\"\n\n\n elif com == commands[3]:\n username = item['message']['from']['username']\n user_id = item['message']['from']['id']\n num = db.add_member_info(username, from_, user_id)\n if num[1]:\n reply = f\"Better be... {houses[num[0]-1]}\"\n else:\n print(num[0][0])\n reply = f\"I stand by my decision, {houses[num[0][0]-1]} will help you on the way to greatness!\"\n elif com == commands[4]:\n m_list = db.member_info(from_)\n reply = str(m_list)\n elif com == commands[5]:\n info = user_query()\n username = info['user']['username']\n m_info = db.member_info(from_, username)\n reply = f\"\"\"\n Username: {m_info[2]}\\nHouse: {houses[m_info[3]]}\\nStatus: {m_info[4]}\\nScore: {m_info[5]}\\n\n \"\"\"\n elif com == commands[6]:\n info = user_query()\n username = info['user']['username']\n user_id = info['user']['id']\n status_info = info['status']\n if status_info == 'creator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Headmaster')\n reply = f\"Rise Headmaster {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Headmaster\"\n elif status_info == 'administrator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Professor')\n reply = f\"Hence forth you shall be known as Professor {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Professor\"\n else:\n reply = 'Desist pretender! Only the entitled may command me so!'\n elif com == commands[7]:\n for command in commands:\n reply += f'{command}\\n'\n print(reply)\n \n return reply",
"def commands():\n # Check invalid command\n check50.run(run_command).stdin(\"cs50\").stdout(\"Invalid command.\")\n\n # Check for upper case abreviation\n try:\n check50.run(run_command).stdin(\"W\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")\n\n # Check for lower case abbreviation\n try:\n check50.run(run_command).stdin(\"w\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")",
"def _handle_commands(self, event, session):\n message = event['body']\n\n for regex, func, help in self._COMMANDS:\n match = regex.match(message)\n if match is not None:\n func(self, event, session=session, **match.groupdict())\n return True\n\n return False",
"def test_irc_PART(self):\n self.client.irc_PART(self.user, [self.channel])\n self.client.irc_PART(\"[email protected]\", [\"#python\"])\n self.assertEqual(\n self.client.methods,\n [(\"left\", (self.channel,)), (\"userLeft\", (\"Svadilfari\", \"#python\"))],\n )",
"def test_send_command(fprime_test_api):\n fprime_test_api.send_and_assert_command(\"cmdDisp.CMD_NO_OP\", max_delay=0.1)\n assert fprime_test_api.get_command_test_history().size() == 1\n fprime_test_api.send_and_assert_command(\"cmdDisp.CMD_NO_OP\", max_delay=0.1)\n assert fprime_test_api.get_command_test_history().size() == 2",
"def test_set_op(self):\n\n room = RoomFactory()\n\n self.connection.room = room\n user_with_room_privilege(\n level=6,\n connection=self.connection,\n online=True,\n room=room\n )\n\n target_user_admin = user_with_room_privilege(\n level=10,\n online=True,\n room=room\n )\n target_user = user_with_room_privilege(\n level=4,\n online=True,\n room=room\n )\n\n ret = self.chat_command(self.resource, \"invalid_user\")\n self.assertEqual(len(ret), 1)\n self.assertRegex(ret[0], \"Unknown user\")\n\n ret = self.chat_command(self.resource, target_user_admin.name)\n self.assertEqual(len(ret), 1)\n self.assertRegex(ret[0], \"Not authorize\")\n self.assertEqual(target_user_admin.level(room.id), 10)\n\n ret = self.chat_command(self.resource, target_user.name)\n self.assertIsNone(ret)\n self.assertEqual(target_user.level(room.id), 1)",
"def test_get_command(self):\n # get_command returns \"start new game\" when given @username as the first word\n self.assertEqual(tictactoe.get_command(\"@username something\"), \"start new game\")\n self.assertEqual(tictactoe.get_command(\"@username help\"), \"start new game\")\n self.assertEqual(tictactoe.get_command(\"@ @ @ @ @ @\"), \"start new game\")\n self.assertEqual(tictactoe.get_command(\"@username 3\"), \"start new game\")\n self.assertEqual(tictactoe.get_command(\" @username\"), \"start new game\")\n self.assertEqual(tictactoe.get_command(\"@username3\"), \"start new game\")\n self.assertEqual(tictactoe.get_command(\"@username\"), \"start new game\")\n self.assertEqual(tictactoe.get_command(\" @\"), \"start new game\")\n # returns None because they don't have an @ as a first character\n self.assertIsNone(tictactoe.get_command(\"username@\"))\n self.assertIsNone(tictactoe.get_command(\"username\"))\n\n # get_command returns \"endgame\" when given endgame the first word\n self.assertEqual(tictactoe.get_command(\"endgame @username\"), \"endgame\")\n self.assertEqual(tictactoe.get_command(\" endgame\"), \"endgame\")\n self.assertEqual(tictactoe.get_command(\"endgame 3\"), \"endgame\")\n self.assertEqual(tictactoe.get_command(\"endgame\"), \"endgame\")\n # returns None because they don't match \"endgame\"\n self.assertIsNone(tictactoe.get_command(\"end the game\"))\n self.assertIsNone(tictactoe.get_command(\"end@username\"))\n self.assertIsNone(tictactoe.get_command(\"end game\"))\n self.assertIsNone(tictactoe.get_command(\"stopgame\"))\n self.assertIsNone(tictactoe.get_command(\"end\"))\n\n # get_command returns \"help\" when given help the first word\n self.assertEqual(tictactoe.get_command(\"help status\"), \"help\")\n self.assertEqual(tictactoe.get_command(\"help None\"), \"help\")\n self.assertEqual(tictactoe.get_command(\" help\"), \"help\")\n self.assertEqual(tictactoe.get_command(\"help\"), \"help\")\n\n # get_command returns \"status\" when given status the first word\n self.assertEqual(tictactoe.get_command(\"status @username\"), \"status\")\n self.assertEqual(tictactoe.get_command(\" status\"), \"status\")\n self.assertEqual(tictactoe.get_command(\"status 3\"), \"status\")\n self.assertEqual(tictactoe.get_command(\"status\"), \"status\")\n # returns None because they don't match \"status\"\n self.assertIsNone(tictactoe.get_command(\"status@username\"))\n self.assertIsNone(tictactoe.get_command(\"game status\"))\n self.assertIsNone(tictactoe.get_command(\"statuses\"))\n self.assertIsNone(tictactoe.get_command(\"stat\"))\n self.assertIsNone(tictactoe.get_command(\"game\"))\n\n # get_command returns \"procees to next move\"\n # when given a string of all integers: \"1\", \"34\", etc\n self.assertEqual(tictactoe.get_command(\"3\"), \"proceed to next move\")\n self.assertEqual(tictactoe.get_command(\"3 4 5\"), \"proceed to next move\")\n self.assertEqual(tictactoe.get_command(\"0\"), \"proceed to next move\")\n self.assertEqual(tictactoe.get_command(\"-100\"), \"proceed to next move\")\n self.assertEqual(tictactoe.get_command(\"16354\"), \"proceed to next move\")\n # returns None because they're not strings of all integers\n self.assertIsNone(tictactoe.get_command(\"-three\"))\n self.assertIsNone(tictactoe.get_command(\"three\"))\n self.assertIsNone(tictactoe.get_command(\"3@\"))\n\n # get_command returns None with other types of input\n self.assertIsNone(tictactoe.get_command(\"X\"))\n self.assertIsNone(tictactoe.get_command(\"O\"))",
"async def test_chatrooms_accounts(rooms):\n\n # Register all the users.\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n email = name + '@example.org'\n await attempt_register(username, password, email)\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Test profile for all of them.\n for name in USERS:\n await attempt_profile(tokens[name])\n\n # \"erin\" and \"frank\" will logout.\n for name in [\"erin\", \"frank\"]:\n await attempt_logout(tokens[name])\n\n # \"erin\" and \"frank\" are not authorized for the profile endpoint.\n for name in [\"erin\", \"frank\"]:\n await attempt_profile(tokens[name], 401)\n\n # The others are still authorized:\n for name in [\"alice\", \"bob\", \"carl\", \"david\"]:\n await attempt_profile(tokens[name])\n\n ###################################################\n # Now testing the websockets side of the session. #\n ###################################################\n\n # The four still-valid tokens should connect with no issue.\n for name in [\"alice\", \"bob\", \"carl\", \"david\"]:\n await should_be_websocket_welcome(tokens[name])\n\n # The other two, should receive a not-authenticated error.\n for name in [\"erin\", \"frank\"]:\n await should_be_websocket_rejected_because_anonymous(tokens[name])\n\n # Now alice connects and, in the meantime, she should fail\n # to connect again, simultaneously.\n alice_communicator = make_communicator(tokens['alice'])\n alice_connected, _ = await alice_communicator.connect()\n _ = await alice_communicator.receive_json_from()\n assert alice_connected\n await should_be_websocket_rejected_because_duplicated(tokens['alice'])\n\n # Now we destroy the session for alice via logout.\n await attempt_logout(tokens['alice'])\n message = await alice_communicator.receive_json_from()\n # A message will be received: logged-out\n assert message.get('type') == 'notification'\n assert message.get('code') == 'logged-out'\n await alice_communicator.disconnect()",
"def test_singleLine(self):\n self.client.msg(\"foo\", \"bar\")\n self.assertEqual(self.client.lines, [\"PRIVMSG foo :bar\"])",
"def test_handle_multiple_subcommands(self):\n ret, code = self.testcommand.handle(\"team list edit\", user)\n self.assertEqual(ret, self.testcommand.get_help())\n self.assertEqual(code, 200)",
"def test_command_finds_commands(self):\r\n COMMANDLIST['!toread'] = lambda bmark: bmark\r\n\r\n bm = BmarkMock()\r\n bm.tags['!toread'] = True\r\n commander = Commander(bm)\r\n commander.build_commands()\r\n\r\n self.assertTrue(\r\n '!toread' in commander.commands,\r\n \"Our commander should find !toread command to run\")",
"def check_commands(self):\n self.check_subsystem_commands()\n self._select_mode()",
"def test_sendMessage(self):\n self.p.sendMessage(\"CMD\", \"param1\", \"param2\")\n self.check(\"CMD param1 param2\\r\\n\")",
"def test_command():\n\n dispatcher = ntelebot.dispatch.Dispatcher()\n dispatcher.add_command('command', lambda ctx: 'DISPATCHED')\n ctx = MockContext()\n ctx.type = 'message'\n ctx.command = None\n assert dispatcher(ctx) is False\n ctx.command = 'command'\n assert dispatcher(ctx) == 'DISPATCHED'\n ctx.type = 'callback_query'\n assert dispatcher(ctx) == 'DISPATCHED'\n ctx.type = 'inline_query'\n assert dispatcher(ctx) is False",
"async def _list_commands(self):\n message_cmds = \"regular commands:\\n\"\n tts_cmds = \"tts commands:\\n\"\n cur = self.conn.cursor()\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is true;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n tts_cmds += invoke[0] + ', '\n tts_cmds = tts_cmds[0:-2]\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is false;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n message_cmds += invoke[0] + ', '\n message_cmds = message_cmds[0:-2]\n cur.close()\n await self.bot.say(message_cmds)\n await self.bot.say(tts_cmds)",
"def test_say(self):\n self.client.say(\"thechannel\", \"the message\")\n self.assertEqual(self.client.lines, [\"PRIVMSG #thechannel :the message\"])",
"def test_multiple_messages_received_at_once(self):\n # Send 2 messages\n self.sock.send(message + message)\n # Receive them back\n ident, payload = self.inverter.receive()\n self.assertEqual(b\"\\x00\\x01\\x02\", ident)\n self.assertEqual(b\"\", payload)\n ident, payload = self.inverter.receive()\n self.assertEqual(b\"\\x00\\x01\\x02\", ident)\n self.assertEqual(b\"\", payload)",
"def test_lg_commands(self): # noqa\n # list objects - before creating anything\n for obj in [command['obj'] for command in self.commands]:\n out = StringIO()\n args = ('list', obj)\n call_command('lg', *args, stdout=out)\n expect = \"No {} configured\".format(obj)\n assert expect in out.getvalue()\n # add objects\n for command in self.commands:\n out = StringIO()\n args = ('add', command['obj'])\n opts = command['opts']\n call_command('lg', *args, stdout=out, **opts)\n expect = \"index: 1\"\n assert expect in out.getvalue()\n # list objects\n for obj in [command['obj'] for command in self.commands]:\n out = StringIO()\n args = ('list', obj)\n call_command('lg', *args, stdout=out)\n expect = \"Configured {}\".format(obj.capitalize())\n assert expect in out.getvalue()\n # modify objects\n for command in self.commands:\n args = ('modify', command['obj'])\n for opt in command['opts']:\n out = StringIO()\n opts = {\"index\": 1, opt: command['opts'][opt]}\n call_command('lg', *args, stdout=out, **opts)\n expect = \"index: 1\"\n assert expect in out.getvalue()\n # show created objects\n for command in self.commands:\n out = StringIO()\n args = ('show', command['obj'])\n call_command('lg', *args, index=1, stdout=out)\n expect = \"index: 1\"\n assert expect in out.getvalue()\n # delete objects\n for obj in [command['obj'] for command in self.commands]:\n out = StringIO()\n args = ('delete', obj)\n call_command('lg', *args, index=1, stdout=out)\n # show deleted objects\n for command in self.commands:\n out = StringIO()\n args = ('show', command['obj'])\n try:\n call_command('lg', *args, index=1, stdout=out)\n except Exception as e:\n assert isinstance(e, CommandError)",
"def check_subsystem_commands(self):\n self.communications.check_controls()\n self.__check_video()\n self.__check_picture()\n self.__check_ping()\n self.__check_motion()",
"async def test_commands(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/command\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"command.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.commands()\n\n assert response\n assert isinstance(response, List)\n\n assert response[0]\n assert isinstance(response[0], models.CommandItem)",
"def test_help_messsages(mock_db):\n database = sqlite3.connect(mock_db)\n shell = MiniProjectShell(database)\n\n shell.help_book_member()\n shell.help_cancel_booking()\n shell.help_delete_request()\n shell.help_search_requests_lcode()\n shell.help_search_requests_city()\n shell.help_search_rides()\n shell.help_select_request()\n shell.help_post_request()\n shell.help_offer_ride()\n shell.help_list_bookings()\n shell.help_list_requests()\n shell.help_logout()",
"def test_multipleLine(self):\n maxLen = len(\"PRIVMSG foo :\") + 3 + 2 # 2 for line endings\n self.client.msg(\"foo\", \"barbazbo\", maxLen)\n self.assertEqual(\n self.client.lines,\n [\"PRIVMSG foo :bar\", \"PRIVMSG foo :baz\", \"PRIVMSG foo :bo\"],\n )",
"def test_irc_JOIN(self):\n self.client.irc_JOIN(self.user, [self.channel])\n self.client.irc_JOIN(\"[email protected]\", [\"#python\"])\n self.assertEqual(\n self.client.methods,\n [(\"joined\", (self.channel,)), (\"userJoined\", (\"Svadilfari\", \"#python\"))],\n )",
"def test_command_state_transferred(tmp_path):\n command = DummyCommand(base_path=tmp_path)\n command.tools.input.enabled = False\n\n # Check the enabled state of subcommands\n assert not command.create_command.input.enabled\n assert command.create_command.logger is command.tools.logger\n assert command.create_command.input is command.tools.input\n assert command.create_command.tools is command.tools\n assert command.create_command.is_clone is True\n\n assert not command.update_command.input.enabled\n assert command.update_command.logger is command.tools.logger\n assert command.update_command.input is command.tools.input\n assert command.update_command.tools is command.tools\n assert command.update_command.is_clone is True\n\n assert not command.build_command.input.enabled\n assert command.build_command.logger is command.tools.logger\n assert command.build_command.input is command.tools.input\n assert command.build_command.tools is command.tools\n assert command.build_command.is_clone is True\n\n assert not command.run_command.input.enabled\n assert command.run_command.logger is command.tools.logger\n assert command.run_command.input is command.tools.input\n assert command.run_command.tools is command.tools\n assert command.run_command.is_clone is True\n\n assert not command.package_command.input.enabled\n assert command.package_command.logger is command.tools.logger\n assert command.package_command.input is command.tools.input\n assert command.package_command.tools is command.tools\n assert command.package_command.is_clone is True\n\n assert not command.publish_command.input.enabled\n assert command.publish_command.logger is command.tools.logger\n assert command.publish_command.input is command.tools.input\n assert command.publish_command.tools is command.tools\n assert command.publish_command.is_clone is True"
] | [
"0.6896309",
"0.67278975",
"0.65846676",
"0.6491539",
"0.6435943",
"0.63337386",
"0.6243607",
"0.62340873",
"0.6171158",
"0.6107857",
"0.60933584",
"0.60725754",
"0.6030929",
"0.59952533",
"0.5960336",
"0.59528023",
"0.5928129",
"0.59240675",
"0.59043515",
"0.5903812",
"0.5883825",
"0.5878132",
"0.5849843",
"0.5831793",
"0.5821385",
"0.5817326",
"0.5809271",
"0.58059597",
"0.57851726",
"0.5784709"
] | 0.7290664 | 0 |
Initializes a ConditionalConvnet object. | def __init__(self, num_blocks=3, layers_per_block=2, base_num_channels=16,
upconv=False, fc_layer_sizes=None, upconv_reshape_size=None,
conditioning_layer_sizes=None, channels_out=3, alpha=0.3,
conditioning_postprocessing=None,
final_sigmoid=False, conditioning_type="mult_and_add",
kernel_initializer_mode="fan_in"):
super(ConditionalConvnet, self).__init__()
self._num_blocks = num_blocks
self._layers_per_block = layers_per_block
self._base_num_channels = base_num_channels
self._channels_out = channels_out
self._upconv = upconv
self._fc_layer_sizes = fc_layer_sizes
self._upconv_reshape_size = upconv_reshape_size
self._final_sigmoid = final_sigmoid
if upconv_reshape_size is not None and ((not upconv) or
(fc_layer_sizes is None)):
raise ValueError("upconv_reshape_size should be supplied only if "
"upconv=True and fc_layer_sizes is not None.")
self._conditioning_layer_sizes = conditioning_layer_sizes
self._nonlinearity = lambda x: tf.nn.leaky_relu(x, alpha)
if conditioning_postprocessing is not None:
self._conditioning_postprocessing = conditioning_postprocessing()
else:
self._conditioning_postprocessing = None
if conditioning_type not in ["mult_and_add", "concat", "input"]:
raise ValueError("Unknown conditioning_type {}".format(conditioning_type))
self._conditioning_type = conditioning_type
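    # He-style variance scaling adapted for leaky ReLU: gain = 2 / (1 + alpha**2).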
scale_factor = 2. / (1. + alpha**2)
self._kernel_initializer = tf.keras.initializers.VarianceScaling(
mode=kernel_initializer_mode, scale=scale_factor) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, conv_layer: Conv2D,\n guest_is_larger: Optional[bool] = None,\n guest_first: bool = True):\n self.conv_layer = conv_layer\n self.guest_is_larger = guest_is_larger\n self.guest_first = guest_first",
"def init(self):\n self.reparam_layers = []\n if self.model_type == \"GCN\":\n for i in range(self.num_layers):\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1),\n GCNConv(self.num_features if i == 0 else self.latent_size,\n self.latent_size if i != self.num_layers - 1 else self.num_classes,\n cached=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n sample_size=self.sample_size,\n bias=True if self.with_relu else False,\n val_use_mean=self.val_use_mean,\n normalize=self.normalize,\n ))\n # self.conv1 = ChebConv(self.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, self.num_features, K=2)\n\n elif self.model_type == \"GAT\":\n latent_size = int(self.latent_size / 2) # Under the default setting, latent_size = 8\n for i in range(self.num_layers):\n if i == 0:\n input_size = self.num_features\n else:\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n input_size = latent_size * 8 * 2\n else:\n input_size = latent_size * 8\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n setattr(self, \"conv{}_1\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n # On the Pubmed dataset, use heads=8 in conv2.\n \n else:\n raise Exception(\"Model_type {} is not valid!\".format(self.model_type))\n\n self.reparam_layers = sorted(self.reparam_layers)\n \n if self.model_type == \"GCN\":\n if self.with_relu:\n reg_params = [getattr(self, \"conv{}\".format(i+1)).parameters() for i in range(self.num_layers - 1)]\n self.reg_params = itertools.chain(*reg_params)\n self.non_reg_params = getattr(self, \"conv{}\".format(self.num_layers)).parameters()\n else:\n self.reg_params = OrderedDict()\n self.non_reg_params = self.parameters()\n else:\n self.reg_params = self.parameters()\n self.non_reg_params = OrderedDict()\n self.to(self.device)",
"def _construct(self, level_config):\n if level_config['inference_config'] is not None:\n self.inference_model = ConvolutionalNetwork(level_config['inference_config'])\n else:\n self.inference_model = lambda x:x\n if level_config['generative_config'] is not None:\n self.generative_model = ConvolutionalNetwork(level_config['generative_config'])\n else:\n self.generative_model = lambda x:x\n self.latent = ConvolutionalLatentVariable(level_config['latent_config'])\n self.inference_procedure = level_config['inference_procedure']",
"def __init__(self, mode, cfg):\n super(DMCM, self).__init__()\n\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n\n # Matrix network does not need weight initialization because there can\n # be no vanishing gradients.\n self.conv_net.apply(_init_weights_xavier)",
"def conv_init(m):\r\n\r\n classname = m.__class__.__name__\r\n if classname.find('Conv') != -1:\r\n init.xavier_uniform_(m.weight, gain = np.sqrt(2))\r\n elif classname.find('BatchNorm') != -1:\r\n init.constant_(m.weight, 1)\r\n init.constant_(m.bias, 0)",
"def __init__(self, vgg_net):\n super().__init__()\n # create a conv layer that corresponds to the first linear layer\n linear1 = vgg_net.classifier[0]\n conv = nn.Conv2d(512, 4096, 7, 7)\n\n # copy data into it\n conv.bias.data.copy_(linear1.bias.data)\n conv.weight.data.view(4096, -1).copy_(linear1.weight.data)\n\n # replace the layer in the sequential classifier part\n vgg_net.classifier = nn.Sequential(\n conv, nn.Flatten(1), *vgg_net.classifier[1:]\n )\n\n self.vgg_net = vgg_net",
"def __init__(self, model_config):\n # Training Parameters\n self.__learning_rate = model_config[\"cnnLearningRate\"]\n\n # Network Parameters\n self.__num_classes = model_config[\"numClasses\"]\n self.__weight_decay = 1e-4\n self.__num_gpus = model_config[\"numGpus\"]\n self.__use_csnn = model_config[\"useCsnn\"]\n\n self.__csnn = Csnn(model_config)",
"def __init__(self, in_channels, out_channels):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=5, padding=1)",
"def __init__(self, classes=2622):\n super().__init__()\n self.conv1 = _ConvBlock(3, 64, 64)\n self.conv2 = _ConvBlock(64, 128, 128)\n self.conv3 = _ConvBlock(128, 256, 256, 256)\n self.conv4 = _ConvBlock(256, 512, 512, 512)\n self.conv5 = _ConvBlock(512, 512, 512, 512)\n self.dropout = torch.nn.Dropout(0.5)\n self.fc1 = torch.nn.Linear(7 * 7 * 512, 4096)\n self.fc2 = torch.nn.Linear(4096, 4096)\n self.fc3 = torch.nn.Linear(4096, classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()",
"def __init__(self, **config):\n super(CNN, self).__init__()\n in_channel = [26] + config['cnn_target_filters']\n kernels = config['cnn_target_kernels']\n self.layer_size = len(config['cnn_target_filters'])\n self.visual_attention=config['visual_attention']\n self.concatenation=config['concatenation']\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=in_channel[i],\n out_channels=in_channel[i + 1],\n kernel_size=kernels[i]) for i in range(self.layer_size)])\n self.convs = self.convs.float()\n self.attention = config['attention']\n protein_size = self.simulate_output((26, 1000))\n self.fc = nn.Linear(protein_size, config['hidden_dim_protein'])\n self.Attention=Attention(**config)",
"def __init__(self, nfeat, nhid, nclass, dropout, alpha):\n super(GCN, self).__init__()\n self.dropout = dropout\n\n self.conv1 = GraphConvolutionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, not_final=True)\n \n self.add_module('conv1', self.conv1)\n\n self.conv2 = GraphConvolutionLayer(nhid, nclass, dropout=dropout, alpha=alpha, not_final=False)",
"def initialize(self, weight_type=\"none\"):\n \n # should have better implementation for convnet weights\n \n fan_in = self.num_channels*np.prod(self.filter_size);\n fan_out = self.num_filters*np.prod(self.filter_size);\n \n filter_bound=np.sqrt(6./(fan_in + fan_out));\n filter_shape=(self.num_filters, self.num_channels)+(self.filter_size);\n self.filters = theano.shared(np.asarray(np.random.uniform(low=-filter_bound,\n high=filter_bound,\n size=filter_shape),\n dtype='float32'),\n borrow=True);\n \n if self.use_bias==True:\n self.bias=util.init_weights(\"bias\", self.num_filters, weight_type=weight_type);",
"def __init__(self, channels, momentum):\n super(PointNetConv2Layer, self).__init__()\n self.channels = channels\n self.momentum = momentum",
"def __init__(self):\n\n super(GlobalDiscriminator, self).__init__()\n\n # input image will have the size of 64x64x3\n self.first_conv_layer = TransitionDown(in_channels=3, out_channels=32, kernel_size=5)\n self.second_conv_layer = TransitionDown(in_channels=32, out_channels=32, kernel_size=5)\n self.third_conv_layer = TransitionDown(in_channels=32, out_channels=64, kernel_size=5)\n self.fourth_conv_layer = TransitionDown(in_channels=64, out_channels=64, kernel_size=5)\n\n self.fc1 = nn.Linear(5 * 5 * 64, 1)\n\n torch.nn.init.xavier_uniform(self.fc1.weight)",
"def __init__(self, sigma_initializer=RandomNormal(0, 1), spectral_iterations=1,\n fully_diff_spectral=True, stateful=False, renormalize=False, **kwargs):\n super(SNConditionalConv11, self).__init__(**kwargs)\n self.sigma_initializer = keras.initializers.get(sigma_initializer)\n self.fully_diff_spectral = fully_diff_spectral\n self.spectral_iterations = spectral_iterations\n self.stateful = stateful\n self.renormalize = renormalize",
"def _init(module):\n classname = module.__class__.__name__\n if classname.find('Conv') != -1:\n try:\n nn.init.xavier_uniform_(module.weight.data)\n module.bias.data.fill_(0) # May fail.\n except AttributeError:\n pass",
"def __init__(self, **kwargs):\n super(Debug, self).__init__(**kwargs)\n with self.name_scope():\n self.conv1 = nn.Conv2D(channels=4, kernel_size=2)",
"def __init__(self, in_channels=3):\n super().__init__()\n model_list = nn.ModuleList()\n model_list.append(\n ConvBlock(in_channels, 64, leaky=True, instance_norm=False, bias=True))\n model_list.append(ConvBlock(64, 128, leaky=True,\n instance_norm=True, bias=False))\n model_list.append(ConvBlock(128, 256, leaky=True,\n instance_norm=True, bias=False))\n model_list.append(ConvBlock(256, 512, leaky=True,\n instance_norm=True, bias=False, stride=1))\n model_list.append(nn.Conv2d(512, 1, kernel_size=4,\n stride=1, padding=1, bias=True))\n self.model = nn.Sequential(*model_list)\n\n self._initialize_params()",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self,\n image_channels,\n num_classes):\n super().__init__()\n\n self.model = torchvision.models.resnet18(pretrained=True)\n self.model.fully_connected = nn.Linear(224, 10)",
"def __init__(self, graph_conv, mlp=None):\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n if isinstance(mlp, chainer.Link):\n self.mlp = mlp\n if not isinstance(mlp, chainer.Link):\n self.mlp = mlp",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self, pretrain='vggface2'):\n self.device = torch.device('cuda:0' if torch.cuda.is_available()\n else 'cpu')\n\n self.mtcnn = MTCNN(\n image_size=160, margin=0, min_face_size=20,\n thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,\n device=self.device\n )\n self.resnet = InceptionResnetV1(pretrained=pretrain).eval()\\\n .to(self.device)\n self.resnet.classify = True\n\n self.names = self.vggface2_labels()",
"def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)",
"def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])",
"def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)"
] | [
"0.6307894",
"0.62516075",
"0.6212562",
"0.6199809",
"0.61927176",
"0.6144121",
"0.61221516",
"0.6115666",
"0.6100876",
"0.6072859",
"0.6065201",
"0.6063435",
"0.60456663",
"0.59873235",
"0.5961926",
"0.59572375",
"0.5943566",
"0.5940094",
"0.59339577",
"0.5929681",
"0.59063774",
"0.59063774",
"0.59063774",
"0.58900553",
"0.583975",
"0.58304656",
"0.58292735",
"0.58072245",
"0.58040434",
"0.5800224"
] | 0.72267705 | 0 |
Select a slope, if required. Notes | def select_single_slope(self, **kwargs):
if self.verbose > 1:
print("MultiLinearSpectra.select_single_slope()")
for m in self.mess:
if m["class"] == "batch":
warnings.warn("MultiLinearSpectra.select_single_slope(): slope should be selected before batches are made.")
for m in range(len(self.mess)):
if hasattr(self.mess[m]["object"], "select_single_slope"):
_kwargs = {}
if "slope" in kwargs:
_kwargs["slope"] = kwargs["slope"]
elif "slope" in self.mess[m]:
_kwargs["slope"] = self.mess[m]["slope"]
if "n" in kwargs:
_kwargs["n"] = kwargs["n"]
elif "n" in self.mess[m]:
_kwargs["n"] = self.mess[m]["n"]
if "axi" in kwargs:
_kwargs["axi"] = kwargs["axi"]
elif "slope" in self.mess[m]:
_kwargs["axi"] = self.mess[m]["axi"]
self.mess[m]["object"].select_single_slope(**_kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _on_slope_change(self, _):\n self.slope = self.slope_slider.value\n self.redraw_slope()",
"def set_slope(self, slope: float) -> None:\r\n self.slope = slope",
"def __init__(self, slope):\n self.slope = slope",
"def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b",
"def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])",
"def testSlopeDefault(self):\n self.assertEqual(\n (Decimal('1.0'), Decimal('1.0'), Decimal('1.0')),\n self.node.slope\n )",
"def get_slope(self) -> str:\n return self.query('slope,?')",
"def testSlopeSetNegative(self):\n def setSlope():\n self.node.slope = [-1.3782, 278.32, 0.738378233782]\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('278.32'), Decimal('0.738378233782')),\n self.node.slope\n )",
"def testSlopeSetNegative(self):\n def setSlope():\n self.cc.slope = [-1.3782, 278.32, 0.738378233782]\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('278.32'), Decimal('0.738378233782')),\n self.cc.slope\n )",
"def Slope(InputFilePath,OutputFilePath): # perform a slope raster onto a DEM and return\r\n try:\r\n print(\"\"\"\r\nProcessing Slope Layer...\r\n \"\"\") \r\n \r\n \r\n arcpy.gp.Slope_sa(InputFilePath, OutputFilePath, \"DEGREE\", \"1\") \r\n print(\"Complete\")\r\n \r\n except Exception, err: # an error occurred (probably in arcGIS)\r\n raise RuntimeError(\"** Error: Slope Failed (\"+str(err)+\")\")",
"def slope(slope:float, offset=0., bounds: tuple[float, float] = None) -> core.Slope:\n return core.Slope(slope, offset, bounds=bounds)",
"def set_lower_slope(self, lbound, ubound):\n self.lower_slope_l_bound = lbound\n self.lower_slope_u_bound = ubound\n self.is_slope_optimised = True",
"def line(intercept, slope, x):\n return slope*x + intercept",
"def __call__(self, x):\n return self.slope * x + self.ordinate",
"def setSlope(self, slope):\n self.angle = math.atan(slope)",
"def _get_slope(x, y):\n slope = linregress(x, y)\n return slope",
"def is_slope(self):\n\t\tif self.high_elevation != self.low_elevation:\n\t\t\treturn True\n\t\treturn False",
"def get_slope(x, y, deg=1, err=[]):\n inverse_error = []\n for i in err:\n inv = 1/i\n inverse_error.append(i)\n\n if len(err)>0:\n z = np.polyfit(x, y, deg, w=inverse_error)\n else:\n z = np.polyfit(x, y, deg)\n\n m, b = z\n p = np.poly1d(z)\n\n return m, b, p",
"def redraw_slope(self):\n a = np.linspace(0, 9, self.num_points)\n b = [(self.slope * n) for n in a]\n\n self.output_widget.clear_output(wait=True)\n with self.output_widget as f:\n fig, ax = plt.subplots(1,1,figsize=(6, 4), dpi=100)\n# plt.ylim(ymax=max(self.y)+1)\n# plt.xlim(xmax=max(self.x)+1)\n\n plt.scatter(self.x, self.y)\n# plt.plot(a, b)\n plt.tick_params(\n axis='both', # changes apply to the both-axis\n which='both', # both major and minor ticks are affected\n# bottom=False, # ticks along the bottom edge are off\n# top=False, # ticks along the top edge are off\n labelbottom=False, labelleft=False) #\n\n plt.xlabel('Total rainfall (inch)', fontsize=10)\n plt.ylabel('Total sales', fontsize=10)\n from numpy.polynomial.polynomial import polyfit\n intercept, m = polyfit(self.x, self.y, 1)\n ax.vlines(self.x, self.y, intercept + m * self.x, label='residual')\n plt.plot(self.x, intercept + m * self.x, '-', c='orange',\n label=\"$Y = {:.3f} X {} {:.3f}$\".format(m, '+' if intercept>0 else '-', abs(intercept)))\n plt.legend()\n plt.show()",
"def get_slope(self, independent, dependent, second_indep=None):\n\n try:\n if second_indep is None:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n x = sm.add_constant(x)\n model = sm.OLS(y, x).fit() \n return round(model.params[independent], 4)\n else:\n x = self.df_input[[independent, second_indep]]\n y = self.df_input[[dependent]]\n\n x = sm.add_constant(x)\n model = sm.OLS(y, x).fit() \n coef_df = model.params\n return coef_df\n except Exception as e:\n print(e)",
"def testSlopeSetAndGet(self):\n\n slope = (1.3782, 278.32, 0.738378233782)\n slopeD = tuple([Decimal(str(i)) for i in slope])\n\n self.cc.slope = slope\n\n self.assertEqual(\n slopeD,\n self.cc.slope\n )",
"def get_slope(self, device_type_name):\n\n if device_type_name in [\"SOLN\", \"BEND\",\"BLEN\",\"KICK\"]:\n # Solenoid devices use 'uA'.\n return 0.00055586\n elif device_type_name in [\"BLM\",\"LBLM\",\"CBLM\",\"PBLM\"]:\n # Beam loss monitors set threshold in Volts initially\n return 1.6/65536\n else:\n raise ValueError(\"Function \\\"__get_slope(device_type_name={}, fault_name={})\\\". Invalid device type name\"\n .format(device_type_name, fault_name))",
"def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")",
"def _regression_slope_metric(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.slope",
"def fun(params, slope, data):\n x, y_true = data\n return y_true - model_fun(params, slope, x)",
"def setTriggerSlope(self, Slope, stringOnly=0):\n\n if Slope == 'Positive':\n msg = \"TRIGger:SEQuence:SLOPe POSitive\"\n else:\n msg = \"TRIGger:SEQuence:SLOPe NEGative\"\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg",
"def testSetSlopeWithNegativeInt(self):\n def setSlope():\n self.node.slope = -20\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.slope\n )",
"def compute_slope(self):\n\n # assign variables\n slope = 'slope'\n aspect = 'aspect'\n dx = 'dx'\n dy = 'dy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_dx = 'grow_dx'\n grow_dy = 'grow_dy'\n\n # compute slope and partial derivatives\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n dx=dx,\n dy=dy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dx,\n value=grow_dx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dx}={grow_dx}\".format(\n dx=dx,\n grow_dx=grow_dx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dy,\n value=grow_dy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dy}={grow_dy}\".format(\n dy=dy,\n grow_dy=grow_dy),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['grow_slope',\n 'grow_dx',\n 'grow_dy'],\n flags='f')\n\n return slope, dx, dy",
"def plot_regression_line():\r\n axes = plt.gca()\r\n x_vals = np.array(axes.get_xlim())\r\n y_vals = y_intercept() + slope() * x_vals\r\n plt.plot(x_vals, y_vals)",
"def linear_regression(self, x_data, y_data, mask = None, ax = None):\n if mask is None:\n mask = full(len(y_data), True, dtype=bool)\n poly = poly1d(polyfit(x_data[mask], y_data[mask], 1))\n\n if ax is not None:\n ax.plot(x_data, polyval(poly, x_data), \"--r\",\\\n label = \"Slope: %.2f\" %(poly[1]))\n return poly"
] | [
"0.70611864",
"0.69255155",
"0.6277614",
"0.6218594",
"0.6095415",
"0.60038364",
"0.5952847",
"0.59119874",
"0.5888782",
"0.5880145",
"0.5864116",
"0.5850078",
"0.5848201",
"0.583642",
"0.58140606",
"0.5761579",
"0.5753976",
"0.57129383",
"0.5664197",
"0.5655621",
"0.5625292",
"0.5616153",
"0.55663127",
"0.55651605",
"0.5558168",
"0.551689",
"0.54973173",
"0.54701513",
"0.5457957",
"0.544913"
] | 0.72659767 | 0 |
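A note on the select_single_slope document above: it repeatedly applies one pattern, namely that an explicitly passed keyword argument overrides a per-entry default stored in the mess dict before the call is delegated to the child object. A minimal, self-contained sketch of that pattern (the helper name and the example entry are hypothetical, not from the source):

def forward_selected_kwargs(entry, keys, **kwargs):
    # prefer an explicitly passed kwarg; otherwise fall back to the value stored in the entry
    picked = {}
    for key in keys:
        if key in kwargs:
            picked[key] = kwargs[key]
        elif key in entry:
            picked[key] = entry[key]
    return picked

entry = {"class": "single", "slope": 0.5, "axi": 1}
print(forward_selected_kwargs(entry, ("slope", "n", "axi"), n=3))
# prints {'slope': 0.5, 'n': 3, 'axi': 1}: n comes from the kwargs, slope and axi from the entry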
This function will make a new x-axis. It compares the number of data points on the old and new x-axis: if the old x-axis has bin_above (default = 2) or more times as many data points, the data will be binned; otherwise it will be interpolated. If min_x and/or max_x are not given, then the lowest and/or highest values in self.mess will be used (see get_min_max_x). Arguments | def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, exclude = [], **kwargs):
if min_x is None or max_x is None:
a, b = self.get_min_max_x(**kwargs)
if min_x is None:
min_x = a
if max_x is None:
max_x = b
new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)
for m in range(len(self.mess)):
if m not in exclude and self.mess[m]["class"] not in exclude:
                resolution = (numpy.amax(self.mess[m]["object"].x) - numpy.amin(self.mess[m]["object"].x)) / len(self.mess[m]["object"].x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_bins_to_view(self, *args):\n with delay_callback(self, 'hist_x_min', 'hist_x_max'):\n if self.x_max > self.x_min:\n self.hist_x_min = self.x_min\n self.hist_x_max = self.x_max\n else:\n self.hist_x_min = self.x_max\n self.hist_x_max = self.x_min",
"def update_bins_to_view(self, *args):\n with delay_callback(self, 'hist_x_min', 'hist_x_max'):\n if self.x_max > self.x_min:\n self.hist_x_min = self.x_min\n self.hist_x_max = self.x_max\n else:\n self.hist_x_min = self.x_max\n self.hist_x_max = self.x_min",
"def update_view_to_bins(self, *args):\n with delay_callback(self, 'x_min', 'x_max'):\n self.x_min = self.hist_x_min\n self.x_max = self.hist_x_max",
"def update_view_to_bins(self, *args):\n with delay_callback(self, 'x_min', 'x_max'):\n self.x_min = self.hist_x_min\n self.x_max = self.hist_x_max",
"def format_xaxis (self, axes, \n n_ticks = 10, # Number of ticks we would like\n timestamp_formatting = '(%Y-%m-%d)%H:%M', # Specified formatting \n xaxis_mode = None): # Several automatic modes\n if (self.X_type == \"categorical\"):\n axes.set_xticks(self.X[self.start_indx:self.end_indx], minor=False)\n axes.set_xticklabels(self.Xcategories[self.start_indx:self.end_indx][:,0], minor=False)\n \n elif(self.X_type == \"numerical\"):\n # If regular numerical we just plot the values\n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))\n# ax.get_xaxis().get_major_formatter().set_useOffset(False)\n \n elif(self.X_type == \"timestamp\"):\n axes.xaxis.set_major_formatter(mdates.DateFormatter(timestamp_formatting))\n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))\n axes.xaxis_date()\n # ax.xaxis.set_major_formatter(FuncFormatter(self.ticklabels[val:val + wsize]))\n self.figure.autofmt_xdate()\n# print (type(self.X), type(self.X[0]))\n \n elif(self.formatXaxis == \"intraday\"):\n # If the data is intraday and we want to apply the Gap Remover !!! \n gap_remover_flag = 1;\n if (gap_remover_flag):\n formatter = FuncFormatter(ul.detransformer_Formatter)\n axes.xaxis.set_major_formatter(formatter) \n # mdates.DateFormatter(formatting)\n \n else:\n axes.xaxis.set_major_formatter(mdates.DateFormatter(formatting))\n \n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))",
"def updatePlot(self,*args):\n # set x limits\n timeDisplayOptions = {'10 minutes':10,'1 hour':60,'6 hours':6*60,'24 hours':24*60,'All':0}\n try:\n lastDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[-1])\n firstDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[0])\n except IndexError: # no data yet\n now = datetime.datetime.utcnow().toordinal()\n firstDatetime = mpl.dates.num2date(now)\n lastDatetime = firstDatetime\n xMin = lastDatetime-datetime.timedelta(minutes=timeDisplayOptions[self.wScale.get()])\n xMin = max([ firstDatetime, xMin ])\n if self.wScale.get() == 'All':\n xMin = firstDatetime\n xMinIndex = numpy.searchsorted( self.stage60K.get_xdata(), mpl.dates.date2num(xMin) )\n # rescale axes, with the x being scaled by the slider\n if self.toolbar._active == 'HOME' or self.toolbar._active == None:\n ymin,ymax = 10000000, -10000000\n lineAndVar = { self.stage60K: self.t60K,\n self.stage03K: self.t3K,\n self.stageGGG: self.tGGG,\n self.stageFAA: self.tFAA }\n if len(self.stage60K.get_xdata()) > 1:\n for line in lineAndVar.keys():\n if lineAndVar[line].get() == 0:\n line.set_visible(False)\n else:\n line.set_visible(True)\n ydata = line.get_ydata()[xMinIndex:-1]\n try:\n ymin = min(ymin, numpy.nanmin(ydata))\n ymax = max(ymax, numpy.nanmax(ydata))\n except ValueError as e:\n pass\n self.ax.set_xlim(xMin,lastDatetime)\n self.ax.set_ylim(ymin - (ymax-ymin)/10, ymax + (ymax-ymin)/10)\n hfmt = mpl.dates.DateFormatter('%H:%M:%S', tz=tz.tzlocal())\n self.ax.xaxis.set_major_formatter(hfmt)\n self.fig.autofmt_xdate()\n self.fig.tight_layout()\n #draw\n self.canvas.draw()",
"def __draw_xaxis(self):\n self.ax.set_xlim(self.xlims)\n # put x ticks on top\n xticks = [1]\n xticks.extend(range(5, self.xmax+5, 5))\n fs = self.settings.rcParams[\"axes.labelsize\"] if self.settings.otherParams[\n \"xlabel.fontsize\"] is None else self.settings.otherParams[\"xlabel.fontsize\"]\n color = self.settings.rcParams[\"axes.labelcolor\"] if self.settings.otherParams[\n \"xlabel.color\"] is None else self.settings.otherParams[\"xlabel.color\"]\n self.ax.set_xticks(xticks)\n self.ax.set_xticklabels(xticks[:-1])\n self.ax.set_xlabel(self.xaxis_label, fontsize=fs, color=color)\n self.ax.xaxis.set_label_coords(\n *self.settings.otherParams[\"xlabel.position\"])",
"def format_x_axis(self, x_tick, x_limits):\n self._fig.update_layout(\n xaxis=dict(\n range=x_limits,\n dtick=x_tick,\n ),\n )",
"def xscale(self, newscale, linthreshx=1.e-4):\n self._checkfigure()\n if newscale == 'symlog':\n self.axes.set_xscale(newscale, linthreshx=linthreshx)\n else:\n self.axes.set_xscale(newscale)",
"def _calculate_min_max_stats(self, x_copy):\n # get the current min and max vals\n min_val = self.min_val\n max_val = self.max_val\n x_dim = x_copy.size()\n\n new_axis_list = [i for i in range(len(x_dim))] # noqa: C416\n new_axis_list[self.ch_axis] = 0\n new_axis_list[0] = self.ch_axis\n y = x_copy.permute(new_axis_list)\n # Need to match dtype of min/max because the updates to buffers\n # are done in place and types need to match for comparisons\n y = y.to(self.min_val.dtype)\n y = torch.flatten(y, start_dim=1)\n if min_val.numel() == 0 or max_val.numel() == 0:\n min_val, max_val = torch.aminmax(y, dim=1)\n else:\n min_val_cur, max_val_cur = torch.aminmax(y, dim=1)\n min_val = torch.min(min_val_cur, min_val)\n max_val = torch.max(max_val_cur, max_val)\n\n self.min_val.resize_(min_val.shape)\n self.max_val.resize_(max_val.shape)\n self.min_val.copy_(min_val)\n self.max_val.copy_(max_val)\n\n return x_copy",
"def set_axis_x(self, new_axis_point):\r\n self.__x_axis = new_axis_point",
"def setScaleX(self,startx,endx):\r\n if startx == endx:\r\n endx += 1\r\n self.scaleLock.acquire()\r\n self.scalex = [startx,endx]\r\n self.scaleLock.release()",
"def set_low_high_value(self):\n # do not apply scaler norm on not scalable data\n self.range_dict.clear()\n\n for data_name in self.dict_to_plot.keys():\n if self.quantitative_normalization:\n # Quantitative normalization\n data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[data_name],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=data_name,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[data_name],\n scaler=self.scaler_data,\n data_name=data_name,\n name_not_scalable=self.name_not_scalable,\n )\n\n lowv, highv = np.min(data_arr), np.max(data_arr)\n # Create some 'artificially' small range in case the array is constant\n if lowv == highv:\n lowv -= 0.005\n highv += 0.005\n self.range_dict[data_name] = {\"low\": lowv, \"low_default\": lowv, \"high\": highv, \"high_default\": highv}",
"def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()",
"def fix_auto(self):\n if self.share_x:\n self.rescale_axes(x=True, y=False)\n self.fix_axes_ticks(axis='x')\n if self.share_y:\n self.rescale_axes(x=False, y=True)\n self.fix_axes_ticks(axis='y')",
"def normalize_wrt_x(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n\n x_range = x_max - x_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(x_range)\n y = y / float(x_range)\n\n self.x = x.tolist()\n self.y = y.tolist()",
"def CalibrateX(self):\r\n print(\"Calibrating axis X, please do not move sensor...\")\r\n buff = []\r\n for t in range(20):\r\n while self.Get_AxisDataAvailable_Value()[0] == 0:\r\n time.sleep(0.0001)\r\n buff.append(self.Get_RawOutX_Value())\r\n self.meanX = numpy.mean(buff) \r\n self.maxX = max(buff)\r\n self.minX = min(buff)\r\n print(\"Done: (min={0};mean={1};max={2})\".format(self.minX, self.meanX, self.maxX))",
"def setupXAxis(plot, minVal, maxVal, label, logarithmic):\n plot.set_xlabel(label)\n\n if logarithmic:\n plot.set_xscale(\"log\")\n plot.set_xlim(minVal, maxVal)\n # plot.set_xscale('log', basex=2)\n # tickLabels = [1]\n # labelValue = minVal\n # while labelValue <= maxVal:\n # tickLabels.append (labelValue)\n # labelValue = labelValue*2\n # # Expand the axis a little above and below the data\n # inflationFactor = 0.95\n # plot.set_xlim(minVal*inflationFactor, maxVal/inflationFactor)\n # # Need a blank label on the front for the added axis point on the left. No need for an extra\n # # annotation on the right.\n # plot.set_xticklabels([' '] + tickLabels)\n else:\n plot.set_xlim((0 if minVal == 1 else minVal), maxVal)",
"def thin_xticks(ax, n):\n ax.xaxis.set_major_locator(MaxNLocator(n + 1))",
"def preprocess(self):\n\n if self.x_range == None:\n x_min = min(np.min(self.fx), np.min(self.gx))\n x_max = max(np.max(self.fx), np.max(self.gx))\n self.x_range = [x_min,x_max]\n\n f_inter = interpolate.interp1d(self.fx, self.fy, 'cubic', fill_value = 'extrapolate')\n g_inter = interpolate.interp1d(self.gx, self.gy, 'cubic', fill_value = 'extrapolate')\n fgx_new = np.linspace(self.x_range[0], self.x_range[1], self.N)\n fy_new = f_inter(fgx_new)\n gy_new = g_inter(fgx_new)\n\n self.fx, self.fy = fgx_new, fy_new\n self.gx, self.gy = fgx_new, gy_new",
"def set_xmin(self, xmin):\n self.__xmin = xmin",
"def secondaryXaxis(low,high,first,step,length,name,direction,x,y,log=0):\n if log:\n dislin.xaxlg(low,high,first,step,length,name,direction,x,y)\n else:\n dislin.xaxis(low,high,first,step,length,name,direction,x,y)",
"def update_bar(self, xmin, xmin_bar: BarData, bar: BarData):\n if not xmin_bar:\n xmin_bar = BarData(\n symbol=bar.symbol,\n exchange=bar.exchange,\n datetime=bar.datetime,\n gateway_name=bar.gateway_name,\n open_price=bar.open_price,\n high_price=bar.high_price,\n low_price=bar.low_price\n )\n else:\n xmin_bar.high_price = max(\n xmin_bar.high_price, bar.high_price)\n xmin_bar.low_price = min(\n xmin_bar.low_price, bar.low_price)\n\n xmin_bar.close_price = bar.close_price\n xmin_bar.volume += int(bar.volume)\n if not (bar.datetime.minute + 1) % xmin:\n xmin_bar.datetime = xmin_bar.datetime.replace(\n second=0, microsecond=0\n )\n xmin_bar.interval = xmin\n event = Event(type=EVENT_BAR, data=xmin_bar)\n self.rpo.put(event)\n xmin_bar = None",
"def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()",
"def callback1(attr, old, new):\n # Get the parameters to plot (x axis, y axis, and marker size)\n x_name = axis_map[x_axis.value]\n y_name = axis_map[y_axis.value]\n s_name = axis_map[s_axis.value]\n\n # Update the labels\n plot1.xaxis.axis_label = x_axis.value\n plot1.yaxis.axis_label = y_axis.value\n\n # Update the data source\n source1.data = dict(\n x=data[x_name],\n y=data[y_name],\n size=data[s_name] / np.min(data[s_name]),\n ticid=data[\"ticid\"],\n )",
"def data_to_x(self, new_data):\n pass",
"def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)",
"def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits",
"def __init__(self, bins=3, support_min=90):\n self.support_min = support_min\n self.bins = bins\n self._x_transformed = None",
"def _hold_bounds(self):\n adc_channel = self.graph_renderer.channels[0]\n if self.sx2 > adc_channel.size():\n self.anchored = True\n\n if self.anchored:\n # anchor right side of the window to the last graph sample. so the graph always animates, grows out from\n # the right side of the window. (anchor sx2 to adc_channel.size())\n dx = self.sx2 - adc_channel.size()\n dxw = self.wsx2 - adc_channel.size()\n self.sx1 -= dx\n self.sx2 -= dx\n self.wsx1 -= dxw\n self.wsx2 -= dxw\n\n # eliminate integer overflow problems. only allow indices smaller than a 32bit integer value. and then divide\n # it by four just to be sure.. maybe it's not necessary, but maybe there are some other tricks used in the\n # graph rendering..\n bound = 0xffffffff / 4\n # hmm. this allows only 12 days of data with ~960Hz. time to go 64bit?\n self.sx1 = max(self.sx1, -bound)\n self.sy1 = max(self.sy1, -bound)\n self.sx1 = min(self.sx1, bound)\n self.sy1 = min(self.sy1, bound)\n self.sx2 = max(self.sx2, -bound)\n self.sy2 = max(self.sy2, -bound)\n self.sx2 = min(self.sx2, bound)\n self.sy2 = min(self.sy2, bound)\n self.wsx1 = max(self.wsx1, -bound)\n self.wsy1 = max(self.wsy1, -bound)\n self.wsx1 = min(self.wsx1, bound)\n self.wsy1 = min(self.wsy1, bound)\n self.wsx2 = max(self.wsx2, -bound)\n self.wsy2 = max(self.wsy2, -bound)\n self.wsx2 = min(self.wsx2, bound)\n self.wsy2 = min(self.wsy2, bound)\n\n # limit horizontal zoom to 2 samples. can't zoom in anymore if less than one sample stays on screen.\n # don't have time to implement and test line segment cutting, if one sample is outside the window, and another\n # is inside.\n if self.wsx2 - self.wsx1 < 2.:\n self.wsx2 = self.wsx1 + 2.\n if self.sx2 - self.sx1 < 2.:\n self.sx2 = self.sx1 + 2.\n\n #\n # limit vertical movement and vertical zoom\n #\n\n val_min = adc_channel.value_min\n val_max = adc_channel.value_max\n\n # allow offset of this percent/100 of the screen\n overlap = .30\n\n # top of the screen has smaller sample values than bottom of the screen. inverted graph.\n # sy1 is top pixel, sy2 bottom. bottom-left coordinat is (0, 0)\n if self.sy1 < self.sy2:\n val_top = val_min + (self.wsy1 - self.wsy2) * overlap\n val_bottom = val_max - (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 < val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 < val_top:\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy2 = val_bottom\n else:\n val_bottom = val_min - (self.wsy1 - self.wsy2) * overlap\n val_top = val_max + (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 > val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 > val_top:\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy2 = val_bottom"
] | [
"0.6070046",
"0.6070046",
"0.58124596",
"0.58124596",
"0.5680742",
"0.5637928",
"0.5504903",
"0.5313853",
"0.53103",
"0.530034",
"0.5242136",
"0.5198289",
"0.5183262",
"0.51824766",
"0.5178394",
"0.5122132",
"0.5113222",
"0.5103701",
"0.51011974",
"0.5083347",
"0.5024227",
"0.50091976",
"0.49889034",
"0.49806327",
"0.49685773",
"0.496306",
"0.49401397",
"0.49229547",
"0.48977703",
"0.48889726"
] | 0.6091164 | 0 |
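A note on the make_uniform_x row above: the docstring describes a resolution test in which the data are binned when the old axis carries bin_above (default 2) or more times as many points as the new one, and interpolated otherwise. A self-contained sketch of that decision, assuming a monotonically increasing old axis (the function name is hypothetical, not from the source):

import numpy

def resample_to_uniform_x(old_x, old_y, new_x, bin_above=2.0):
    old_x, old_y, new_x = map(numpy.asarray, (old_x, old_y, new_x))
    # density comparison, mirroring resolution = (amax - amin) / len(x) in the document code
    old_res = (numpy.amax(old_x) - numpy.amin(old_x)) / len(old_x)
    new_res = (numpy.amax(new_x) - numpy.amin(new_x)) / len(new_x)
    if new_res / old_res >= bin_above:
        # the old axis is at least bin_above times denser: bin by averaging the old points around each new point
        step = new_x[1] - new_x[0]
        edges = numpy.concatenate(([new_x[0] - step / 2], new_x + step / 2))
        counts, _ = numpy.histogram(old_x, bins=edges)
        sums, _ = numpy.histogram(old_x, bins=edges, weights=old_y)
        return numpy.where(counts > 0, sums / numpy.maximum(counts, 1), numpy.nan)
    # comparable density: linear interpolation onto the new axis
    return numpy.interp(new_x, old_x, old_y)

Calling resample_to_uniform_x(old_x, old_y, numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)) would mirror the uniform new_x that the document constructs with numpy.arange.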
Calculate the signal. Objects need to be from classes that conform to LinearSpectrum. Optionally, objects can be excluded from calculating the signal. Arguments | def calculate_signal(self, exclude = [], **kwargs):
if self.verbose > 1:
print("MultiLinearSpectra.calculate_signal()")
for m in range(len(self.mess)):
if m not in exclude and self.mess[m]["class"] not in exclude:
if hasattr(self.mess[m]["object"], "calculate_signal"):
self.mess[m]["object"].calculate_signal() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _filtering(cls, signal, system):\r\n\r\n if np.iscomplexobj(signal):\r\n _, filtered_signal_r, _ = sc_sig.dlsim(system, np.real(signal))\r\n _, filtered_signal_i, _ = sc_sig.dlsim(system, np.imag(signal))\r\n filtered_signal = filtered_signal_r + 1j * filtered_signal_i\r\n else:\r\n _, filtered_signal, _ = sc_sig.dlsim(system, signal)\r\n filtered_signal.shape = signal.shape\r\n return filtered_signal",
"def calculate_signal(self):\n y = self.data.get_bar_values(self.pair[0], \"adj_close\", N=self.ols_window)\n x = self.data.get_bar_values(self.pair[1], \"adj_close\", N=self.ols_window)\n\n if y is not None and x is not None:\n if len(y) >= self.ols_window and len(x) >= self.ols_window:\n # get hedge ratio\n self.hedge_ratio = sm.OLS(y, x).fit().params[0]\n\n # get z score of residuals\n spread = y - self.hedge_ratio * x\n zscore_last = ((spread - spread.mean()) / spread.std())[-1]\n\n # calculate signals and add to events queue\n y_signal, x_signal = self.calculate_xy_signal(zscore_last)\n if y_signal is not None and x_signal is not None:\n self.events.put(y_signal)\n self.events.put(x_signal)",
"def calculate_signals(self):\n raise NotImplementedError(\"Should implement calculate_signals()\")",
"def calculate_signals(self):\n\t\traise NotImplementedError(\n\t\t\t\"Should implement calculate_signals()\\n\" + \\\n\t\t\t\"By calling this method to calculate 'Signal' Events\"\n\t\t)",
"def __call__(\n self, signal_values: Union[List[Array], Array], y: Optional[Array] = None\n ) -> Array:\n\n if y is None:\n return self.evaluate(signal_values)\n\n return self.evaluate_rhs(signal_values, y)",
"def signal_spectral(signal, FS):\n # check inputs\n if signal is None or signal == []:\n print(\"Signal is empty.\")\n\n # ensure numpy\n signal = np.array(signal)\n # f, spectrum = st.welch_spectrum(signal, sampling_rate=FS)\n spectrum = np.fft.fft(signal, FS)[:len(signal)//2]\n f = np.fft.fftfreq(len(signal))[:len(signal)//2]\n\n cum_ff = np.cumsum(spectrum)\n spect_diff = np.diff(spectrum)\n #energy, _ = st.signal_energy(spectrum, f)[:]\n\n args, names = [], []\n\n if dict['spectral_maxpeaks']['use'] == 'yes':\n # spectral_maxpeaks\n try:\n spectral_maxpeaks = np.sum([1 for nd in range(len(spect_diff[:-1])) if (spect_diff[nd+1]<0 and spect_diff[nd]>0)])\n except:\n spectral_maxpeaks = None\n args += [spectral_maxpeaks]\n names += ['spectral_maxpeaks']\n\n # if dict['spect_var']['use'] == 'yes':\n # # spect_variation\n # try:\n # spect_var = np.convolve(energy)\n # spect_var /= np.max(np.abs(spect_var))\n # except:\n # spect_var = None\n # args += [spect_var]\n # names += ['spect_var']\n\n if dict['curve_distance']['use'] == 'yes':\n # curve_distance\n try:\n curve_distance = np.sum(np.linspace(0, cum_ff[-1], len(cum_ff)) - cum_ff)\n except:\n curve_distance = None\n args += [curve_distance]\n names += ['curve_distance']\n\n if dict['spectral_roll_off']['use'] == 'yes':\n # spectral_roll_off\n try:\n spectral_roll_off = spectral_roll(f, spectrum, cum_ff, 0.95)[0]\n except:\n spectral_roll_off = None\n args += [spectral_roll_off]\n names += ['spectral_roll_off']\n\n if dict['spectral_roll_on']['use'] == 'yes':\n # spectral_roll_on\n try:\n spectral_roll_on = spectral_roll(f, spectrum, cum_ff, 0.05)[0]\n except:\n spectral_roll_on = None\n args += [spectral_roll_on]\n names += ['spectral_roll_on']\n\n if dict['spectral_dec']['use'] == 'yes':\n # spectral_decrease\n try:\n spectral_dec = (1/np.sum(spectrum)) * np.sum((spectrum[:] - spectrum[1])/np.linspace(1, len(spectrum), len(spectrum),1))\n except:\n spectral_dec = None\n args += [spectral_dec]\n names += ['spectral_dec']\n\n if dict['spectral_slope']['use'] == 'yes':\n # spectral_slope\n sum_f = np.sum(f)\n len_f = len(f)\n try:\n spectral_slope = (len_f * np.dot(f, spectrum) - sum_f * np.sum(spectrum)) / (len_f * np.dot(f, f) - sum_f ** 2)\n except:\n spectral_slope = None\n args += [spectral_slope]\n names += ['spectral_slope']\n\n sum_spectrum = np.sum(spectrum)\n norm_spectrum = spectrum / sum_spectrum\n # spectral_centroid\n try:\n spectral_centroid = np.dot(f, norm_spectrum)\n except:\n spectral_centroid = None\n\n # spectral_spread\n try:\n spectral_spread = np.dot(((f - spectral_centroid) ** 2), norm_spectrum)\n except:\n spectral_spread = None\n\n if dict['spectral_spread']['use'] == 'yes':\n args += [spectral_spread]\n names += ['spectral_spread']\n\n if dict['spectral_kurtosis']['use'] == 'yes':\n # spectral_kurtosis\n try:\n spectral_kurtosis = np.sum(((f - spectral_centroid) ** 4) * norm_spectrum) / (spectral_spread**2)\n except:\n spectral_kurtosis = None\n args += [spectral_kurtosis]\n names += ['spectral_kurtosis']\n\n if dict['spectral_skewness']['use'] == 'yes':\n # spectral_skewness\n try:\n spectral_skewness = np.sum(((f - spectral_centroid) ** 3) * norm_spectrum) / (spectral_spread ** (3 / 2))\n except:\n spectral_skewness = None\n args += [spectral_skewness]\n names += ['spectral_skewness']\n\n if dict['max_frequency']['use'] == 'yes':\n # max_frequency\n try:\n max_frequency = f[np.where(cum_ff > cum_ff[-1]*0.95)[0][0]]\n except:\n max_frequency = None\n args += [max_frequency]\n names += ['max_frequency']\n\n if 
dict['fundamental_frequency']['use'] == 'yes':\n # fundamental_frequency\n try:\n fundamental_frequency = f[np.where(cum_ff > cum_ff[-1]*0.5)[0][0]]\n except:\n fundamental_frequency = None\n args += [fundamental_frequency]\n names += ['fundamental_frequency']\n\n # if dict['max_power_spectrum']['use'] == 'yes':\n # # max_power_spectrum\n # try:\n # max_power_spectrum = np.max(spectrum)\n # except:\n # max_power_spectrum = None\n # args += max_power_spectrum\n # names += 'max_power_spectrum'\n\n # if dict['mean_power_spectrum']['use'] == 'yes':\n # # mean_power_spectrum\n # try:\n # mean_power_spectrum = np.mean(spectrum)\n # except:\n # mean_power_spectrum = None\n # args += mean_power_spectrum\n # names += 'mean_power_spectrum'\n #\n # if dict['spectral_skewness']['use'] == 'yes':\n # try:\n # spectral_skewness = np.mean(spectrum)\n # except:\n # spectral_skewness = None\n # args += spectral_skewness\n # names += 'spectral_skewness'\n #\n # if dict['spectral_kurtosis']['use'] == 'yes':\n # try:\n # spectral_kurtosis = np.mean(spectrum)\n # except:\n # spectral_kurtosis = None\n # args += spectral_kurtosis\n # names += 'spectral_kurtosis'\n\n # if dict['spectral_hist_']['use'] == 'yes':\n # # histogram\n # try:\n # _hist = list(np.histogram(spectrum, bins=int(np.sqrt(len(spectrum))), density=True)[0])\n # except:\n # if len(signal) > 1:\n # _hist = [None] * int(np.sqrt(len(signal)))\n # else:\n # _hist = [None]\n # args += [i for i in _hist]\n # names += ['spectral_hist_' + str(i) for i in range(len(_hist))]\n\n #return utils.ReturnTuple(tuple(args), tuple(names))\n return args, names",
"def spectral_model(self):\n spec_type = self.data['SpectrumType'].strip()\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux_Density']\n errs['amplitude'] = self.data['Unc_Flux_Density']\n pars['reference'] = self.data['Pivot_Energy']\n\n if spec_type == 'PowerLaw':\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n model = PowerLaw(**pars)\n elif spec_type == 'PLExpCutoff':\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['ecut'] = self.data['Cutoff']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['ecut'] = self.data['Unc_Cutoff']\n model = ExponentialCutoffPowerLaw3FGL(**pars)\n elif spec_type == 'LogParabola':\n pars['alpha'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['beta'] = self.data['beta'] * u.dimensionless_unscaled\n errs['alpha'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['beta'] = self.data['Unc_beta'] * u.dimensionless_unscaled\n model = LogParabola(**pars)\n elif spec_type == \"PLSuperExpCutoff\":\n # TODO: why convert to GeV here? Remove?\n pars['reference'] = pars['reference'].to('GeV')\n pars['index_1'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['index_2'] = self.data['Exp_Index'] * u.dimensionless_unscaled\n pars['ecut'] = self.data['Cutoff'].to('GeV')\n errs['index_1'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['index_2'] = self.data['Unc_Exp_Index'] * u.dimensionless_unscaled\n errs['ecut'] = self.data['Unc_Cutoff'].to('GeV')\n model = PLSuperExpCutoff3FGL(**pars)\n else:\n raise ValueError('Spectral model {} not available'.format(spec_type))\n\n model.parameters.set_parameter_errors(errs)\n return model",
"def __call__(self, dt, **kwargs):\n return self.signal_weights",
"def _func_sub_obj(self, X, q, coeffs_ext):\n n_features = self.n_features\n if self.fit_intercept:\n coeffs = coeffs_ext[:n_features + 1] - coeffs_ext[n_features + 1:]\n coeffs_0 = coeffs[0]\n coeffs = coeffs[1:]\n coeffs_ext = np.delete(coeffs_ext, [0, n_features + 1])\n else:\n coeffs_0 = 0\n coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]\n pen = self._func_pen(coeffs_ext)\n u = coeffs_0 + X.dot(coeffs)\n sub_obj = (q * u + self.logistic_loss(u)).mean()\n return sub_obj + pen",
"def caculate_signals(self):\n\t\traise NotImplementedError(\"Should implement calculate_signals()\")",
"def __call__(cls, *args, **kwargs):\n temp = super().__call__(*args, **kwargs)\n\n for attr in (\"x\", \"y\", \"x_unit\", \"y_unit\", \"name\"):\n if not hasattr(temp, attr):\n raise InvalidSpectrumError(f'Tried to instantiate spectrum object without suitable attribute {attr}!')\n\n return temp",
"def evaluate(self, signal_values: Array) -> Array:\n pass",
"def signal(self, orientation):\n #return np.array([src.emission(orientation) for src in self.virtualsources]).sum(axis=0)\n #signal = 0.0\n #for src in self.virtualsources:\n #signal += src.emission(orientation)\n #return signal\n #print(orientation)\n return sum((src.emission(orientation.copy()) for src in self.virtualsources))",
"def iot_obj_func(mol_fracs, x_mix, x_pure):\n \n x_mix = np.array(x_mix)\n x_pure = np.array(x_pure)\n calc_x_mix = np.dot(mol_fracs.reshape([1, len(mol_fracs)]), x_pure)\n return ((x_mix - calc_x_mix) ** 2).sum()",
"def analytic(self):\r\n data = self.input.data\r\n sampling_rate = self.input.sampling_rate\r\n\r\n a_signal =\\\r\n ts.TimeSeries(data=np.zeros(self.freqs.shape + data.shape,\r\n dtype='D'), sampling_rate=sampling_rate)\r\n if self.freqs.ndim == 0:\r\n w = self.wavelet(self.freqs, self.sd,\r\n sampling_rate=sampling_rate, ns=5,\r\n normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[...] = (np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n else:\r\n for i, (f, sd) in enumerate(zip(self.freqs, self.sd)):\r\n w = self.wavelet(f, sd, sampling_rate=sampling_rate,\r\n ns=5, normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[i, ...] = (\r\n np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n\r\n return a_signal",
"def get_signal(self, integration):\n values = getattr(integration.frames, self.field, None)\n if values is None:\n values = np.zeros(integration.size, dtype=float)\n log.warning(f\"No field named {self.field} in {integration} \"\n f\"for signal.\")\n\n values = np.asarray(values, dtype=float)\n signal = Signal(integration, mode=self, values=values,\n is_floating=self.is_floating)\n if self.derivative_order > 0:\n for _ in range(self.derivative_order):\n signal.differentiate()\n return signal",
"def __init__(\n self,\n namap1,\n namap2,\n mc_11,\n mc_12,\n mc_22,\n signal=None,\n noise=None,\n noise_smoothing_mode=\"savgol\",\n smoothing_window=11,\n smoothing_polyorder=3,\n cosmic_variance=True,\n different_signals=False,\n mc_21=None\n ):\n self.lmax = mc_12.bins.lmax\n self.mc_11 = mc_11\n self.mc_12 = mc_12\n self.mc_22 = mc_22\n\n # if mc_21 is not specified, then assume it's the same as mc_12\n if mc_21 is None:\n self.mc_21 = mc_12\n else:\n self.mc_21 = mc_21\n\n self.lb = mc_12.lb\n self.namap1 = namap1\n self.namap2 = namap2\n self.bins = mc_12.bins\n self.num_ell = len(mc_12.bins.get_effective_ells())\n self.cosmic_variance = cosmic_variance\n self.different_signals = different_signals\n\n self.Cl11 = power.compute_spectra(namap1, namap1, mc=self.mc_11)\n self.Cl12 = power.compute_spectra(namap1, namap2, mc=self.mc_12)\n self.Cl21 = power.compute_spectra(namap2, namap1, mc=self.mc_21)\n self.Cl22 = power.compute_spectra(namap2, namap2, mc=self.mc_22)\n\n if signal is None:\n self.signal = {}\n else:\n self.signal = signal\n # process keys in signal, cutting at lmax\n for k in self.signal:\n self.signal[k] = self.signal[k][: self.lmax + 1]\n\n if noise is None:\n self.noise = {}\n else:\n self.noise = noise\n\n spec_list = []\n if namap1.has_temp and namap2.has_temp:\n spec_list += [\"TT\"]\n if namap1.has_pol and namap2.has_pol:\n spec_list += [\"EE\", \"BB\"]\n if namap1.has_temp and namap2.has_pol:\n spec_list += [\"TE\"]\n if namap1.has_pol and namap2.has_temp:\n spec_list += [\"ET\"]\n\n l_theory = np.arange(self.lmax + 1)\n Cl_dict = {\"11\": self.Cl11, \"12\": self.Cl12, \"21\": self.Cl21, \"22\": self.Cl22}\n\n for XY in spec_list:\n X, Y = XY\n\n if self.different_signals: # store everything in signal\n for AB in (\"11\", \"12\", \"21\", \"22\"):\n signal_id = X + AB[0] + Y + AB[1]\n if signal_id not in self.signal:\n if noise_smoothing_mode == \"savgol\":\n self.signal[signal_id] = np.abs(\n self.smooth_and_interpolate(\n l_theory,\n np.interp(l_theory, self.lb, (Cl_dict[AB])[XY]),\n smoothing_window,\n smoothing_polyorder,\n )\n )\n elif noise_smoothing_mode == \"poly\":\n raise NotImplementedError(\n \"polynomial smoothing with differing signals\")\n\n \n else: # default behavior: estimate signal and noise\n if XY not in self.signal:\n # linear interpolation\n self.signal[XY] = self.smooth_and_interpolate(\n l_theory,\n np.interp(l_theory, self.lb, self.Cl12[XY]),\n smoothing_window,\n smoothing_polyorder,\n )\n\n if (X + \"1\" + Y + \"1\") not in self.noise:\n\n if noise_smoothing_mode == \"savgol\":\n self.noise[X + \"1\" + Y + \"1\"] = np.abs(\n self.smooth_and_interpolate(\n l_theory,\n np.interp(l_theory, self.lb, self.Cl11[XY]),\n smoothing_window,\n smoothing_polyorder,\n )\n - self.signal[XY]\n )\n elif noise_smoothing_mode == \"poly\":\n self.noise[X + \"1\" + Y + \"1\"] = self.get_smooth_noise(\n cb=self.Cl11[XY],\n signal=self.signal[XY],\n smoothing_polyorder=smoothing_polyorder,\n )\n\n if (X + \"2\" + Y + \"2\") not in self.noise:\n if noise_smoothing_mode == \"savgol\":\n self.noise[X + \"2\" + Y + \"2\"] = np.abs(\n self.smooth_and_interpolate(\n l_theory,\n np.interp(l_theory, self.lb, self.Cl22[XY]),\n smoothing_window,\n smoothing_polyorder,\n )\n - self.signal[XY]\n )\n elif noise_smoothing_mode == \"poly\":\n self.noise[X + \"2\" + Y + \"2\"] = self.get_smooth_noise(\n cb=self.Cl22[XY],\n signal=self.signal[XY],\n smoothing_polyorder=smoothing_polyorder,\n )\n\n # any signal or noise not specified is set to zero\n self.noise = 
defaultdict(lambda: np.zeros(self.lmax + 1), self.noise)\n self.signal = defaultdict(lambda: np.zeros(self.lmax + 1), self.signal)\n\n self.beam = {}\n\n # set temperature beams\n if namap1.has_temp:\n self.beam[\"T1\"] = (\n namap1.beam_temp[: self.lmax + 1] *\n namap1.pixwin_temp[: self.lmax + 1]\n )\n if namap2.has_temp:\n self.beam[\"T2\"] = (\n namap2.beam_temp[: self.lmax + 1] *\n namap2.pixwin_temp[: self.lmax + 1]\n )\n\n # set polarization beams\n if namap1.has_pol:\n self.beam[\"E1\"] = (\n namap1.beam_pol[: self.lmax + 1] *\n namap1.pixwin_pol[: self.lmax + 1]\n )\n self.beam[\"B1\"] = (\n namap1.beam_pol[: self.lmax + 1] *\n namap1.pixwin_pol[: self.lmax + 1]\n )\n if namap2.has_pol:\n self.beam[\"E2\"] = (\n namap2.beam_pol[: self.lmax + 1] *\n namap2.pixwin_pol[: self.lmax + 1]\n )\n self.beam[\"B2\"] = (\n namap2.beam_pol[: self.lmax + 1] *\n namap2.pixwin_pol[: self.lmax + 1]\n )\n\n # for iterating over the output.\n # doing this explicity because it's actually shorter than some loops\n self.ordering = {\n (0, 0): (\"TT\",),\n (0, 2): (\"TE\", \"TB\"),\n (2, 0): (\"ET\", \"BT\"),\n (2, 2): (\"EE\", \"EB\", \"BE\", \"BB\"),\n }\n\n # covmat storage dict\n self.covmat = {}",
"def fluxes(wavelength, s, line, lowlow= 14, lowhigh=6, highlow=6, highhigh = 14, lmin=0, lmax=0, fmin=0, fmax=0, \n broad=2.355, plot=True, verbose=True, plot_sus = False, fcal = True, fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1,\n # s must be an array, no a list\n try: \n index_maximo_del_rango = s.tolist().index(np.nanmax(s))\n #print \" is AN ARRAY\"\n except Exception:\n #print \" s is A LIST -> must be converted into an ARRAY\" \n s = np.array(s)\n \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n if np.isnan(np.nanmedian(f_spec)): \n # The data are NAN!! Nothing to do\n if verbose or warnings: print(\" There is no valid data in the wavelength range [{},{}] !!\".format(lmin,lmax))\n \n resultado = [0, line, 0, 0, 0, 0, 0, 0, 0, 0, 0, s ] \n\n return resultado\n \n else: \n \n ## 20 Sep 2020\n f_spec_m=signal.medfilt(f_spec,median_kernel) # median_kernel = 35 default\n \n \n # Remove nans\n median_value = np.nanmedian(f_spec)\n f_spec = [median_value if np.isnan(x) else x for x in f_spec] \n \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # We have to find some \"guess numbers\" for the Gaussian. Now guess_centre is line\n guess_centre = line\n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n \n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n #print line #f_cont\n # if line == 8465.0:\n # print w_cont\n # print f_cont_filtered\n # plt.plot(w_cont,f_cont_filtered)\n # plt.show()\n # plt.close()\n # warnings=True\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value b = \",bb,\": cont = 0 * w_spec + \", bb)\n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n \n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < 
guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = a + b*np.array(w_cont) \n \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n \n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line)\n mini = np.nanmin(min_w)\n # guess_peak = f_spec[min_w.tolist().index(mini)] # WE HAVE TO SUSTRACT CONTINUUM!!!\n guess_peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n \n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a \n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n low_limit = ws[sorted_by_flux[0]]\n except Exception:\n plot=True\n low_limit = 0\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n high_limit = ws[sorted_by_flux[0]] \n except Exception:\n plot=True\n high_limit = 0 \n \n # Guess centre will be the highest value in the range defined by [low_limit,high_limit]\n \n try: \n rango = np.where((high_limit >= wavelength ) & (low_limit <= wavelength)) \n index_maximo_del_rango = s.tolist().index(np.nanmax(s[rango]))\n guess_centre = wavelength[index_maximo_del_rango]\n except Exception:\n guess_centre = line #### It was 0 before\n \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre, guess_peak, broad/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(gauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n \n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n \n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n # if low_limit < fit[0] < high_limit:\n if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:\n # if verbose: print \" Fitted center wavelength\", fit[0],\"is NOT in the range [\",low_limit,\",\",high_limit,\"]\"\n if verbose: print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n # print \"Re-do fitting fixing center wavelength\"\n # p01 = [guess_peak, broad]\n # fit1, pcov1 = curve_fit(gauss_fix_x0, w_spec, f_spec-continuum, p0=p01, maxfev=100000) # If this fails, increase maxfev...\n # fit_error1 = np.sqrt(np.diag(pcov1))\n # fit[0]=guess_centre\n # fit_error[0] = 0.\n # fit[1] = fit1[0]\n # fit_error[1] = fit_error1[0]\n # fit[2] = fit1[1]\n # fit_error[2] = fit_error1[1] \n \n fit[0]=guess_centre\n fit_error[0] = 0.000001\n fit[1]=guess_peak\n fit_error[1] = 0.000001\n fit[2] = broad/2.355\n fit_error[2] = 0.000001 \n else:\n if verbose: print(\" Fitted center wavelength\", fit[0],\"IS in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n \n if verbose: print(\" Fit parameters = \", fit[0], fit[1], fit[2])\n if fit[2] == broad and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.\") \n gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])\n \n \n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations\n gaussian_flux = gauss_flux(fit[1],fit[2])\n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n index=0\n s_s=np.zeros_like(s)\n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n \n # Plotting \n ptitle = 'Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit)\n if plot :\n plt.figure(figsize=(10, 4))\n # Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.8)\n # Plot median input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec_m), \"orange\", lw=3, alpha = 0.5) # 2021: era \"g\"\n # Plot spectrum - gauss subtracted\n plt.plot(wavelength,s_s,\"g\",lw=3, alpha = 0.6)\n \n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$ ]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.3)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n plt.plot(w_spec, residuals, 'k')\n plt.title(ptitle)\n plt.show()\n \n # Printing results\n if verbose :\n print(\"\\n - Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( 
%.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # Plot independent figure with substraction if requested \n if plot_sus: plot_plot(wavelength,[s,s_s], xmin=lmin, xmax=lmax, ymin=fmin, ymax=fmax, fcal=fcal, frameon=True, ptitle=ptitle)\n \n # 0 1 2 3 4 5 6 7 8 9 10 11\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s ]\n return resultado \n except Exception:\n if verbose: \n print(\" - Gaussian fit failed!\")\n print(\" However, we can compute the integrated flux and the equivalent width:\")\n \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n \n if verbose:\n print(\" Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n \n resultado = [0, guess_centre, 0, 0, 0, 0, 0, flux, flux_error, ew, ew_error, s ] # guess_centre was identified at maximum value in the [low_limit,high_limit] range but Gaussian fit failed\n \n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n # plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n # plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n # plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n \n \n return resultado",
"def __call__(self, frame_num):\n quant_sys = self.quant_sys\n quant_sys.propagate(10)\n\n # propagate the wigner function\n self.img_clasical_rho.set_array(\n (quant_sys.D22 + quant_sys.D11).real\n #quant_sys.get_classical_rho()\n )\n\n self.img_Upsilon2.set_array(\n quant_sys.quantum_rho.real\n )\n\n return self.img_clasical_rho, self.img_Upsilon2",
"def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux",
"def _tosuperclass(self): \n self.ne_in = self.rsig['ne']['signal']\n self.ne = self.ne_in\n self.te_in = self.rsig['te']['signal']\n self.ti_in = self.rsig['ti']['signal']\n self.ni_in = np.zeros((self.nion, len(self.ne_in)),dtype=float)\n self.zeff_in = np.full(self.nrho, self.zeff)\n self.vt_in = np.zeros(len(self.ne_in),dtype=float)\n self.vt = np.zeros(len(self.ne_in),dtype=float)\n self._ion_densities()\n self.ni = self.ni_in\n self.te = self.te_in\n self.ti = self.ti_in\n \n # no need to smooth since they are already smoothed\n self._extrapolate()",
"def build_signal_dataset(self):\n return np.abs(self.bandpassed).mean(axis=-2)",
"def __init__(self, *args, **kwargs):\n if len(args) > 0 and isinstance(args[0], hs.signals.DielectricFunction):\n # Pretend it is a hs signal, copy axes and metadata\n sdict = args[0]._to_dictionary()\n hs.signals.DielectricFunction.__init__(self, **sdict)\n else:\n hs.signals.DielectricFunction.__init__(self, *args, **kwargs)",
"def calculate(self, raw=False):\n \n with energy_units(\"int\"):\n if self.system is not None:\n if isinstance(self.system,Molecule):\n #self._calculate_Molecule(rwa) \n spect = self._calculate_monomer(raw=raw)\n elif isinstance(self.system, Aggregate):\n spect = self._calculate_aggregate( \n relaxation_tensor=\n self._relaxation_tensor,\n rate_matrix=\n self._rate_matrix,\n relaxation_hamiltonian=\n self._relaxation_hamiltonian,\n raw=raw)\n else:\n raise Exception(\"System to calculate spectrum for not defined\")\n \n return spect",
"def fit(self, signal):\n self.signal = signal",
"def signal_rsi(self):\n pass",
"def __init__(self, time_series=None, ij=(0, 0), method=None, lb=0, ub=None,\r\n prefer_speed_over_memory=True, scale_by_freq=True):\r\n\r\n BaseAnalyzer.__init__(self, time_series)\r\n #Initialize variables from the time series\r\n self.ij = ij\r\n\r\n #Set the variables for spectral estimation (can also be entered by\r\n #user):\r\n if method is None:\r\n self.method = {'this_method': 'welch'}\r\n\r\n else:\r\n self.method = method\r\n\r\n if self.method['this_method'] != 'welch':\r\n e_s = \"For SparseCoherenceAnalyzer, \"\r\n e_s += \"spectral estimation method must be welch\"\r\n raise ValueError(e_s)\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n\r\n #Additional parameters for the coherency estimation:\r\n self.lb = lb\r\n self.ub = ub\r\n self.prefer_speed_over_memory = prefer_speed_over_memory\r\n self.scale_by_freq = scale_by_freq",
"def signal(self, nu=[148.], fwhm_arcmin=None, output_units=\"uK_RJ\", **kwargs):\n\n try:\n nnu = len(nu)\n except TypeError:\n nnu = 1\n nu = np.array([nu])\n\n try:\n output_map = self.output_map\n except AttributeError:\n if fwhm_arcmin is None:\n alm = self.alm\n else:\n alm = hp.smoothalm(\n self.alm, fwhm=np.radians(fwhm_arcmin / 60), pol=True, inplace=False\n )\n\n output_map = self.compute_output_map(alm)\n\n # use tile to output the same map for all frequencies\n out = np.tile(output_map, (nnu, 1, 1))\n if self.wcs is not None:\n out = enmap.enmap(out, self.wcs)\n out *= (\n (\n pysm.convert_units(\n self.input_units, \"uK_CMB\", self.input_reference_frequency_GHz\n )\n * pysm.convert_units(\"uK_CMB\", output_units, nu)\n )\n .reshape((nnu, 1, 1))\n .astype(float)\n )\n\n # the output of out is always 3D, (num_freqs, IQU, npix), if num_freqs is one\n # we return only a 2D array.\n if len(out) == 1:\n return out[0]\n else:\n return out",
"def integrate_axis_py(obj, **kwargs):\n\n # import the helper functions\n import hlr_utils\n\n # set up for working through data\n o_descr = hlr_utils.get_descr(obj)\n\n if o_descr == \"number\" or o_descr == \"list\":\n raise RuntimeError(\"Must provide a SOM of a SO to the function.\")\n # Go on\n else:\n pass\n\n # Check for starting bin\n try:\n start = kwargs[\"start\"]\n except KeyError:\n start = 0\n\n # Check for ending bin\n try: \n end = kwargs[\"end\"]\n if end != -1:\n end += 1\n else:\n pass\n except KeyError:\n end = -1\n\n # Check for axis keyword argument\n try:\n axis = kwargs[\"axis\"]\n except KeyError:\n axis = \"y\"\n \n # Check for axis_pos keyword argument\n try:\n axis_pos = kwargs[\"axis_pos\"]\n except KeyError:\n axis_pos = 0\n\n # Check for avg keyword argument\n try:\n avg = kwargs[\"avg\"]\n except KeyError:\n avg = False\n\n # Check for width keyword argument\n try:\n width = kwargs[\"width\"]\n except KeyError:\n width = False \n\n # Check for width_pos keyword argument\n try:\n width_pos = kwargs[\"width_pos\"]\n except KeyError:\n width_pos = 0 \n \n integration = float(0)\n integration_error2 = float(0)\n\n import itertools\n if width:\n import utils\n\n bad_values = [\"nan\", \"inf\", \"-inf\"]\n\n for i in xrange(hlr_utils.get_length(obj)): \n counter = 0 \n\n value = hlr_utils.get_value(obj, i, o_descr, axis, axis_pos)\n error = hlr_utils.get_err2(obj, i, o_descr, axis, axis_pos)\n\n if end == -1:\n value = value[start:]\n error = error[start:]\n else:\n value = value[start:end]\n error = error[start:end]\n \n if not width:\n for val, err2 in itertools.izip(value, error):\n if str(val) in bad_values or str(err2) in bad_values:\n continue\n else:\n integration += val\n integration_error2 += err2\n counter += 1\n else:\n if axis == \"y\":\n x_axis = hlr_utils.get_value(obj, i, o_descr, \"x\", width_pos)\n x_err2 = hlr_utils.get_err2(obj, i, o_descr, \"x\", width_pos)\n elif axis == \"x\":\n raise RuntimeError(\"Cannot use width flag with x-axis \"\\\n +\"integration\")\n\n bin_widths = utils.calc_bin_widths(x_axis, x_err2)\n\n for val, err2, delta in itertools.izip(value, error,\n bin_widths[0]):\n if str(val) in bad_values or str(err2) in bad_values:\n continue\n else:\n integration += (delta * val)\n integration_error2 += (delta * delta * err2)\n counter += 1\n \n if avg:\n return (integration / float(counter),\n integration_error2 / float(counter))\n else:\n return (integration, integration_error2)",
"def calculate_signal(self, components = None, environment = {}, line_profile = \"default\", convolution = None, **kwargs): \n \n if components is None:\n components = self.components\n \n if \"T\" not in environment:\n environment[\"T\"] = 296\n if \"p\" not in environment:\n environment[\"p\"] = 1\n if \"l\" not in environment:\n environment[\"l\"] = 1\n \n coeff_kwargs = {}\n abs_trans_kwargs = {}\n \n for k, v in kwargs.items():\n if k in [\"SourceTables\", \"partitionFunction\", \"OmegaRange\", \"OmegaStep\", \"OmegaWing\", \"IntensityThreshold\", \"OmegaWingHW\", \"GammaL\", \"LineShift\", \"Format\", \"OmegaGrid\", \"WavenumberRange\", \"WavenumberStep\", \"WavenumberWing\", \"WavenumberWingHW\", \"WavenumberGrid\", \"Diluent\", \"EnvDependences\"]:\n coeff_kwargs[k] = v\n \n if k == \"File_coeff\":\n coeff_kwargs[\"File\"] = v\n \n if k in [\"Format\", \"Wavenumber\"]:\n abs_trans_kwargs[k] = v\n \n if k == \"File_spectrum\":\n abs_trans_kwargs[\"File\"] = v\n \n if self.y_unit == \"cm2/molecule\":\n HITRAN_units = True\n else:\n HITRAN_units = False\n \n # print(coeff_kwargs)\n \n if line_profile in ['Voigt']:\n w, c = hapi.absorptionCoefficient_Voigt(Components = components, SourceTables = self.tablename, HITRAN_units = HITRAN_units, Environment = environment, **coeff_kwargs)\n elif line_profile in ['Lorentz']:\n w, c = hapi.absorptionCoefficient_Lorentz(Components = components, SourceTables = self.tablename, HITRAN_units = HITRAN_units, Environment = environment, **coeff_kwargs)\n elif line_profile in ['Doppler']:\n w, c = hapi.absorptionCoefficient_Doppler(Components = components, SourceTables = self.tablename, HITRAN_units = HITRAN_units, Environment = environment, **coeff_kwargs)\n elif line_profile in ['default', 'HT']:\n w, c = hapi.absorptionCoefficient_HT(SourceTables = self.tablename, HITRAN_units = HITRAN_units, Environment = environment, **coeff_kwargs) \n else:\n raise ValueError(\"'{:}' is not a valid line_profile\".format(line_profile))\n\n if self.y_unit == \"\":\n self.x, self.y = hapi.absorptionSpectrum(w, c, Environment = environment) \n self.y_unit = UC.absorption_labels[0]\n if self.y_unit in UC.transmission_1_labels:\n self.x, self.y = hapi.transmittanceSpectrum(w, c, Environment = environment)\n elif self.y_unit in UC.transmission_pct_labels:\n self.x, self.y = 100 * hapi.transmittanceSpectrum(w, c, Environment = environment) \n elif self.y_unit in UC.absorption_labels:\n self.x, self.y = hapi.absorptionSpectrum(w, c, Environment = environment) \n elif self.y_unit in [\"cm-1\", \"cm2/molecule\"]:\n self.x = w\n self.y = c\n else:\n raise ValueError(\"'{:}' is not a valid value for y_unit\".format(self.y_unit))\n\n \n if convolution is not None:\n convolution_profiles = [\"RECTANGULAR\", \"TRIANGULAR\", \"GAUSSIAN\", \"DIFFRACTION\", \"MICHELSON\", \"DISPERSION\"]\n convolution_functions = {\n \"RECTANGULAR\": hapi.SLIT_RECTANGULAR, \n \"TRIANGULAR\": hapi.SLIT_TRIANGULAR, \n \"GAUSSIAN\": hapi.SLIT_GAUSSIAN, \n \"DIFFRACTION\": hapi.SLIT_DIFFRACTION, \n \"MICHELSON\": hapi.SLIT_MICHELSON, \n \"DISPERSION\": hapi.SLIT_DISPERSION, \n }\n \n if self.y_unit in [\"cm-1\", \"cm2/molecule\"]:\n print(\"SpectraTools.Hitran.calculate_signal(): no convolution is calculated for absorption coefficients\")\n \n elif convolution not in convolution_profiles:\n raise ValueError(\"'{:}' is not a valid value for the convolution\".format(convolution))\n \n else:\n conv_kwargs = {\"SlitFunction\": convolution_functions[convolution]}\n for k, v in kwargs.items():\n if k in 
[\"Resolution\", \"AF_wing\"]:\n if v is not None:\n conv_kwargs[k] = v\n\n self.x, self.y, _i1, _i2, __slit = hapi.convolveSpectrum(Omega = self.x, CrossSection = self.y, **conv_kwargs)"
] | [
"0.582007",
"0.5640397",
"0.55906636",
"0.5515562",
"0.5370433",
"0.52759707",
"0.5209679",
"0.51941854",
"0.5187894",
"0.5175926",
"0.5142204",
"0.5102005",
"0.50907665",
"0.5066592",
"0.5065132",
"0.5058387",
"0.5002485",
"0.49882856",
"0.4976684",
"0.4972855",
"0.4956877",
"0.4952056",
"0.49498788",
"0.49364468",
"0.49235174",
"0.4917768",
"0.48808962",
"0.4862558",
"0.48607394",
"0.48461875"
] | 0.7872795 | 0 |
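
The emission-line fitting snippet in the row above turns the fitted Gaussian sigma into a FWHM (the 2.355 factor) and into an integrated flux. For reference, the standard Gaussian identities behind those constants are the following; these are general relations, not something extracted from the snippet itself:

\mathrm{FWHM} = 2\sqrt{2\ln 2}\,\sigma \approx 2.355\,\sigma,
\qquad
F = \int_{-\infty}^{\infty} A\,e^{-(x-x_0)^2 / 2\sigma^2}\,dx = A\,\sigma\sqrt{2\pi}.
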
A function that extracts chunks from datafiles | def extract_chunks(the_files, the_bands=None):
ds_config = {}
gdal_ptrs = []
datatypes = []
for the_file in the_files:
g = gdal.Open(the_file)
        gdal_ptrs.append(g)  # reuse the already-open handle instead of opening the file a second time
datatypes.append(GDAL2NUMPY[g.GetRasterBand(1).DataType])
block_size = g.GetRasterBand(1).GetBlockSize()
nx = g.RasterXSize
ny = g.RasterYSize
if the_bands is None:
the_bands = np.arange(g.RasterCount) + 1
proj = g.GetProjectionRef()
geoT = g.GetGeoTransform()
ds_config['nx'] = nx
ds_config['ny'] = ny
ds_config['nb'] = g.RasterCount
ds_config['geoT'] = geoT
ds_config['proj'] = proj
block_size = [block_size[0]*2, block_size[1]*2]
print("Blocksize is (%d,%d)" % (block_size[0], block_size[1]))
# block_size = [ 256, 256 ]
# store these numbers in variables that may change later
nx_valid = block_size[0]
ny_valid = block_size[1]
# find total x and y blocks to be read
    nx_blocks = (nx + block_size[0] - 1) // block_size[0]
    ny_blocks = (ny + block_size[1] - 1) // block_size[1]
buf_size = block_size[0] * block_size[1]
################################################################
# start looping through blocks of data
################################################################
# loop through X-lines
for X in range(nx_blocks):
# change the block size of the final piece
if X == nx_blocks - 1:
nx_valid = nx - X * block_size[0]
buf_size = nx_valid * ny_valid
# find X offset
this_X = X * block_size[0]
# reset buffer size for start of Y loop
ny_valid = block_size[1]
buf_size = nx_valid * ny_valid
# loop through Y lines
for Y in range(ny_blocks):
# change the block size of the final piece
if Y == ny_blocks - 1:
ny_valid = ny - Y * block_size[1]
buf_size = nx_valid * ny_valid
# find Y offset
this_Y = Y * block_size[1]
data_in = []
for ig, ptr in enumerate(gdal_ptrs):
buf = ptr.ReadRaster(this_X, this_Y, nx_valid, ny_valid,
buf_xsize=nx_valid, buf_ysize=ny_valid,
band_list=the_bands)
a = np.frombuffer(buf, dtype=datatypes[ig])
data_in.append(a.reshape((
len(the_bands), ny_valid, nx_valid)).squeeze())
yield (ds_config, this_X, this_Y, nx_valid, ny_valid,
data_in) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getChunks():",
"def test_chunks(year, day, part_number):\n chunks = []\n chunk_index = -1\n data_file_lines(part_number).each do |line|\n if line[0] == '#'\n chunk_index += 1\n chunks[chunk_index] = [line[1..-1].strip, []]\n elsif chunk_index >= 0\n chunks[chunk_index][1] << line\n end\n end\n chunks",
"def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:",
"def _chunks(filename, start):\n with open(filename, 'r') as f:\n buffer = []\n for line in f:\n if line.startswith(start):\n if buffer:\n yield buffer\n buffer = []\n else:\n buffer.append(line.strip())",
"def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size",
"def build_chunks(read_bytes, file_size, chunk_size):\n\n chunks = []\n\n index = 0\n start = 0\n\n while start < file_size:\n end = min(start + chunk_size, file_size)\n size = end - start\n\n chunk = FileChunk(index, size, partial(read_bytes, start, size))\n chunks.append(chunk)\n\n index += 1\n start += chunk_size\n\n return chunks",
"def chunks(data, overrides = {}):\n counter, filesize = 0, len(data)\n last = None\n while counter < filesize:\n try:\n magic, size = chunk.unpack_from(data, counter)\n except struct_error as e:\n print('failed loading chunk from', data[:counter])\n print('last chunk:', last)\n raise e\n\n counter += chunk.size\n contents = data[counter:counter+size]\n\n if magic[3] != 0x4D:\n raise Exception('bad magic', magic, 'last chunk:', last)\n\n if magic in overrides:\n size = overrides[magic]\n\n yield magic, size, contents\n counter += size\n\n last = (magic, size, contents)",
"def load_chunk(self, idx):\n for f in self.filenames[idx:]:\n ...",
"def get_chunks(self,file_size):\n chunk_start = 0\n chunk_size = 0xA00000 # 10485760 bytes, default max ssl buffer size\n while chunk_start + chunk_size <= file_size:\n yield(chunk_start, chunk_size)\n chunk_start += chunk_size\n final_chunk_size = file_size - chunk_start\n yield(chunk_start, final_chunk_size)",
"def chunk_generator(input_file, chunksize = 100000, dataset_name = \"\") :\n\n with h5py.File(input_file, 'r', libver = 'latest') as f :\n dataset = f[dataset_name]\n for x in range(0, dataset.size, chunksize) :\n yield dataset[x:x+chunksize]",
"def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]",
"def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r",
"def chunks(data, rows=10000):\n\n for i in range(0, len(data), rows):\n yield data[i:i+rows]",
"def get_file_chunks(file, model, workspace, header, user):\n uri = (\"https://api.anaplan.com/1/3/workspaces/{}/models/{}/\"\n \"files/{}/chunks/\").format(workspace, model, file)\n response = requests.get(uri, headers = header)\n return json.loads(response.text.encode(\"utf-8\"))",
"def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]",
"def chunks(data: list, n: int) -> list:\n for i in range(0, len(data), n):\n yield data[i:i + n]",
"def chunks(data: List[Any], num: int) -> Generator[List[Any], None, None]:\n for i in range(0, len(data), num):\n yield data[i : i + num]",
"def grab_data(split_pct, music_data):\n\n assert 0 <= split_pct <= 1.0\n assert isinstance(split_pct, float)\n assert isinstance(music_data, str)\n \n num_files = music_data.count('<start>')\n num_split = int(num_files*split_pct)\n splits = music_data.split('<start>', num_split+1)\n split_idx = len(music_data) - len(splits[-1]) - len('<start>')\n return list(music_data[:split_idx]), list(music_data[split_idx:])",
"def chunk_data(path, chunksize):\n reader = pandas.read_table(path, chunksize=chunksize, skiprows=0)\n\n start = 0\n for chunk in reader:\n stop = start + len(chunk) - 1\n dataframe_to_csv(chunk, file=get_chunk_file_name(path, (start, stop)))\n start = stop + 1\n\n return alphabetize_chunk_files(os.path.basename(path))",
"def chunks(parts, n):\n for i in range(0, len(parts), n):\n yield parts[i:i+n]",
"def read_in_chunks(file_object, chunk_size=1000000):\n while True:\n data = [line.strip() for line in itertools.islice(file_object, chunk_size)]\n if not data:\n break\n yield data",
"def __get_file_chunk(self, buf=1000):\n data = self.file.read(buf)\n return data, len(data)",
"def _read_in_chunks(self, file_object, blocksize=4096, chunks=-1,\n shard_index=None):\n i = 0\n while chunks:\n data = file_object.read(blocksize)\n if not data:\n break\n yield data\n i += 1\n\n chunks -= 1",
"def read_in_chunks(self):\n chunksize = 10 ** 3\n lines_number = sum(1 for line in open(self.filepath))\n self.progressMaximum.emit(lines_number // chunksize)\n dfList = []\n\n # self.df = traja.read_file(\n # str(filepath),\n # index_col=\"time_stamps_vec\",\n # parse_dates=[\"time_stamps_vec\"],\n # )\n\n TextFileReader = pd.read_csv(\n self.filepath,\n index_col=\"time_stamps_vec\",\n parse_dates=[\"time_stamps_vec\"],\n chunksize=chunksize,\n )\n for idx, df in enumerate(TextFileReader):\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S:%f\")\n dfList.append(df)\n self.intReady.emit(idx)\n self.completed.emit(dfList)\n self.finished.emit()",
"def read_data_small(filename):\n with bz2.BZ2File(filename) as f:\n data = []\n file_size = os.stat(filename).st_size\n chunk_size = 1024 * 1024 # 限制读取的数据\n print('Reading data...')\n for i in range(int(ceil(file_size // chunk_size) + 1)):\n bytes_to_read = min(chunk_size, file_size - (i * chunk_size))\n file_string = f.read(bytes_to_read).decode('utf-8')\n file_string = file_string.lower()\n file_string = nltk.word_tokenize(file_string) # nltk 提供的分词器\n data.extend(file_string)\n return data",
"def read_chunks(file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data",
"def getChunks(inp_list, chunk_size):\n return [inp_list[x:x + chunk_size] for x in range(0, len(inp_list), chunk_size)]",
"def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data",
"def chunkify(self, size=1024*1024*5):\n with open(self.file_name_raw, 'rb') as file:\n chunk_end = file.tell()\n while True:\n chunk_start = chunk_end\n file.seek(size, 1)\n file.readline()\n chunk_end = file.tell()\n\n if chunk_end > self.file_end:\n chunk_end = self.file_end\n yield chunk_start, chunk_end - chunk_start\n break\n else:\n yield chunk_start, chunk_end - chunk_start",
"def chunked(size, source):\n for i in range(0, len(source), size):\n yield source[i : i + size]"
] | [
"0.71130896",
"0.6830764",
"0.6782417",
"0.6715846",
"0.667817",
"0.6514174",
"0.64776266",
"0.63899785",
"0.6382798",
"0.6382149",
"0.630955",
"0.6306718",
"0.6290901",
"0.62592876",
"0.62214875",
"0.62158453",
"0.62141967",
"0.62119377",
"0.6189095",
"0.61417973",
"0.6140905",
"0.61347616",
"0.6128226",
"0.61005455",
"0.6095659",
"0.60936224",
"0.6090292",
"0.607977",
"0.60225034",
"0.60176396"
] | 0.71403193 | 0 |
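
A minimal usage sketch for the extract_chunks generator in the row above: it streams a single-band raster block by block and accumulates a global mean without holding the whole array in memory. The file name "ndvi.tif" is a placeholder, and the sketch assumes the same gdal / numpy / GDAL2NUMPY context the snippet itself relies on.

import numpy as np

running_sum = 0.0
valid_count = 0
for (ds_config, this_X, this_Y, nx_valid, ny_valid, data_in) in extract_chunks(
        ["ndvi.tif"], the_bands=[1]):
    block = data_in[0].astype(np.float64)   # one input file -> first array in the list
    finite = np.isfinite(block)             # ignore NaN / inf pixels
    running_sum += block[finite].sum()
    valid_count += finite.sum()

print("Approximate global mean: %.4f" % (running_sum / valid_count))
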
Create a batch of URLs for pulling timeseries objects from Halo. | def create_url_batch(cls, path, batch_size, params={}):
url_list = []
for page in range(1, batch_size + 1):
params["page"] = page
url = (path, dict(params))
url_list.append(url)
return url_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_all(conn, logger):\n # setup slices, 24 in total\n slices = [f'year{x}month{y}' for x in [2, 1] for y in range(12, 0, -1)]\n for slice in slices:\n download_intraday_extended(conn, logger, slice)",
"def UrlGenerator(head, end, start):\n urllist = []\n urlhead = head\n delta = timedelta(days=1)\n while(end >= start):\n tailurl = '{0}-{1}-{2}'.format(end.strftime('%Y'),end.strftime('%m'),end.strftime('%d'))\n urllist.append(urlhead + tailurl)\n end -= delta\n return urllist",
"def sitemap_urls():\n for batch in m.Batch.objects.all():\n yield batch.url, batch.released\n yield rdf_uri(batch), batch.released\n for issue in batch.issues.all():\n yield issue.url, batch.released\n yield rdf_uri(issue), batch.released\n for page in issue.pages.all():\n yield page.url, batch.released\n yield rdf_uri(page), batch.released\n\n paginator = Paginator(m.Title.objects.all(), 10000)\n for page_num in range(1, paginator.num_pages + 1):\n page = paginator.page(page_num)\n for title in page.object_list:\n yield title.url, title.created",
"def _create_slices(chunk_size, id, reference_name, start, end):\n urls = []\n chunks = int( (end - start) / chunk_size )\n slice_start = start\n slice_end = 0\n if chunks >= 1 and start != None and end != None:\n for i in range(chunks):\n slice_end = slice_start + chunk_size\n _create_slice(urls, id, reference_name, slice_start, slice_end)\n slice_start = slice_end\n _create_slice(urls, id, reference_name, slice_start, end)\n else: # One slice only\n url = f\"http://{request.host}/data?id={id}\"\n if( reference_name is not None ):\n url += f\"&reference_name={reference_name}\"\n urls.append({ \"url\": url })\n\n return urls",
"def uri(self):\n if not self.parallel:\n if len(self.WMO) <= 5: # todo: This max WMO number should be parameterized somewhere else\n # Retrieve all WMOs in a single request\n return [self.get_url()]\n else:\n # Retrieve one WMO by URL sequentially (same behaviour as localftp and argovis)\n urls = []\n for wmo in self.WMO:\n urls.append(\n Fetch_wmo(\n WMO=wmo, CYC=self.CYC, ds=self.dataset_id, parallel=False\n ).get_url()\n )\n return urls\n else:\n self.Chunker = Chunker(\n {\"wmo\": self.WMO}, chunks=self.chunks, chunksize=self.chunks_maxsize\n )\n wmo_grps = self.Chunker.fit_transform()\n # self.chunks = C.chunks\n urls = []\n for wmos in wmo_grps:\n urls.append(\n Fetch_wmo(\n WMO=wmos, CYC=self.CYC, ds=self.dataset_id, parallel=False\n ).get_url()\n )\n return urls",
"def _create_api_ulr_list(self) -> List[str]:\n api = Setup.openweather_api\n now = int(time.time())\n urls_list = []\n\n for lat, lon in self.locations:\n urls_list.append(\n f\"https://api.openweathermap.org/data/2.5/onecall?\"\n f\"lat={lat}&lon={lon}&exclude=hourly,minutely,\"\n f\"alerts&units=metric&appid={api}\"\n )\n\n for days in range(1, 6):\n date_time = now - 86400 * days\n urls_list.append(\n f\"http://api.openweathermap.org/data/2.5/onecall/\"\n f\"timemachine?lat={lat}&lon={lon}&dt={date_time}\"\n f\"&units=metric&appid={api}\"\n )\n\n return urls_list",
"def create_urls(years):\n urls = []\n for year in years:\n url = f\"http://billboardtop100of.com/{year}-2/\"\n urls.append(url)\n return urls",
"def fetch_report_urls(start, end, batch_size):\n db = db_connect()\n db_ensure_init(db)\n\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n\n cmd = db.execute(\"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\".format(start=start, end=end))\n\n for batch in iter(lambda: cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n # print(r)\n log_row = r\n\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url, filetype, filename))\n\n logwriter.writerow(log_row)\n\n db_insert(db, to_insert)",
"def data(urls):\r\n for url in urls:\r\n d = dict(url)\r\n d['url'] = url.hashed.url\r\n yield d",
"async def fetch_all(urls: List[str]) -> None:\n tasks = []\n async with ClientSession() as session:\n for url in urls:\n task = asyncio.ensure_future(fetch(url, session))\n tasks.append(task) # create list of tasks\n done = await asyncio.gather(*tasks)\n dp = pathlib.Path(\"data\")\n for url, res in done:\n fp = dp.joinpath(url[url.find(\"json\") + 5 :])\n with fp.open(\"w\") as out:\n out.write(res.decode(\"utf-8\"))",
"def gen_url(section):\n urls = []\n urls.append('https://ia800500.us.archive.org/22/items/stackexchange/' + section + '.stackexchange.com.7z')\n urls.append('https://ia800500.us.archive.org/22/items/stackexchange/' + section + '.7z')\n return urls",
"def getURLs():",
"def _get_url_for_timerange(self, timerange, **kwargs):\n days = timerange.get_dates()\n urls = []\n for day in days:\n urls.append(self._get_url_for_date(day, **kwargs))\n return urls",
"def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def build_urls(self, listings_per_page=20, pages_per_location=15):\r\n url_list = []\r\n for i in range(pages_per_location):\r\n offset = listings_per_page * i\r\n url_pagination = self.link + f'&items_offset={offset}'\r\n url_list.append(url_pagination)\r\n self.url_list = url_list",
"def get_urls(self, size=None, hashes=None, ids=None, start=0, limit=100):\n if not (size or hashes or ids):\n raise UserError(\"Please provide size/hashes/ids to filter\")\n\n with self.session as session:\n query = session.query(IndexRecordUrl)\n\n query = query.join(IndexRecordUrl.index_record)\n if size:\n query = query.filter(IndexRecord.size == size)\n if hashes:\n for h, v in hashes.items():\n # Select subset that matches given hash.\n sub = session.query(IndexRecordHash.did)\n sub = sub.filter(and_(\n IndexRecordHash.hash_type == h,\n IndexRecordHash.hash_value == v,\n ))\n\n # Filter anything that does not match.\n query = query.filter(IndexRecordUrl.did.in_(sub.subquery()))\n if ids:\n query = query.filter(IndexRecordUrl.did.in_(ids))\n # Remove duplicates.\n query = query.distinct()\n\n # Return only specified window.\n query = query.offset(start)\n query = query.limit(limit)\n\n return [\n {'url': r.url,\n 'metadata': {m.key: m.value for m in r.url_metadata}}\n for r in query\n ]",
"def construct_url(screen_name):\n number_of_tweets = \"200\"\n urls = []\n for x in xrange(1, 6):\n urls.append('https://api.twitter.com/1.1/statuses/user_timeline.json?'\n 'screen_name=' + screen_name + '&count=' +\n number_of_tweets + '&page=' + str(x))\n return urls",
"async def bulk_crawl_and_write(urls: asyncio.coroutine, **kwargs) -> None:\n timeout = aiohttp.ClientTimeout(total=60*60)\n async with aiohttp.ClientSession(timeout=timeout) as session:\n tasks = []\n for url in urls:\n tasks.append(\n parse(url=url, session=session)\n # write_one(file=file, url=url, session=session, **kwargs)\n )\n await asyncio.gather(*tasks)",
"def get_urls(self):\n # Use functools.reduce for speed\n # see https://stackoverflow.com/questions/10461531/merge-and-sum-of-two-dictionaries\n def reducer(accumulator, dictionary):\n for key, value in dictionary.items():\n accumulator[key] = accumulator.get(key, []) + value\n return accumulator\n\n list_of_dicts = []\n for (year, quarter, f) in self.quarterly_date_list:\n self.quarterly.year = year\n self.quarterly.quarter = quarter\n self.quarterly.entry_filter = lambda x: f(x) and self.entry_filter(x)\n list_of_dicts.append(self.quarterly.get_urls())\n\n for d in self.daily_date_list:\n self.daily.date = d\n try:\n list_of_dicts.append(self.daily.get_urls())\n except EDGARQueryError:\n pass\n\n complete_dictionary = reduce(reducer, list_of_dicts, {})\n return complete_dictionary",
"def create_urls_metadata(urls_metadata, record, session):\n urls = {u.url for u in record.urls}\n for url, url_metadata in iteritems(urls_metadata):\n if url not in urls:\n raise UserError(\n 'url {} in urls_metadata does not exist'.format(url))\n for k, v in iteritems(url_metadata):\n session.add(IndexRecordUrlMetadata(\n url=url, key=k, value=v, did=record.did))",
"def generate_urls(date: datetime.date) -> List[str]:\n date_string = date.strftime(\"%B-%-d-%Y\").lower()\n url_veggie = (\n f\"https://trouble.tools/506/wp-json/wp/v2/multiple-post-type\"\n f\"?slug={date_string}-veggie&type[]=page&type[]=topic&type[]=story&\"\n f\"type[]=product&type[]=collection&type[]=event&type[]=menu&\"\n f\"type[]=person&type[]=recipe\"\n )\n url_tday = (\n f\"https://trouble.tools/506/wp-json/wp/v2/multiple-post-type\"\n f\"?slug={date_string}-thoughtful-t-day&type[]=page&type[]=topic&type[]=story&\"\n f\"type[]=product&type[]=collection&type[]=event&type[]=menu&\"\n f\"type[]=person&type[]=recipe\"\n )\n url = (\n f\"https://trouble.tools/506/wp-json/wp/v2/multiple-post-type\"\n f\"?slug={date_string}&type[]=page&type[]=topic&type[]=story&\"\n f\"type[]=product&type[]=collection&type[]=event&type[]=menu&\"\n f\"type[]=person&type[]=recipe\"\n )\n return [url_veggie, url_tday, url]",
"def url_generator(cls, from_range: int):\n for i in range(from_range, from_range + cls.RANGE):\n for j in cls.COURTS:\n yield cls.URL.format(id=i, sid=j)",
"def urls(self):\r\n urls = []\r\n\r\n for url_name in sorted(self.resources.keys()):\r\n\r\n resource = self.resources[url_name]\r\n urls.append(resource.as_url(\r\n api=self,\r\n name_prefix='-'.join(\r\n (self.prefix, self.str_version)).strip('-'),\r\n url_prefix=self.str_version\r\n ))\r\n\r\n return patterns(self.prefix, *urls)",
"def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request side\")",
"def get_urls(self, **kwargs):\n pass # pragma: no cover",
"def init_datasets(self, dataset_names, columns):\n for dataset_name in dataset_names:\n hdf5_dataset_name = self.schema.get(dataset_name)\n if hdf5_dataset_name is None:\n warnings.warn(\"Skipping %s (not in schema)\" % dataset_name)\n else:\n self[dataset_name] = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=self.query_start,\n end=self.query_end_plusplus,\n timestep=self.timestep,\n num_columns=len(columns),\n column_names=columns,\n sort_hex=self.sort_hex)",
"async def fetch_all(self, urls):\n async with ClientSession() as session:\n tasks = []\n for url in urls:\n task = asyncio.create_task(self.fetch(session, url))\n tasks.append(task)\n results = await asyncio.gather(*tasks)\n return results",
"def batch_download_h8(_date_,\\\n path_himawari,\\\n path_himawari_hdf5,\\\n dat_list = dat_list,\\\n llcrnrlon = x_ll,\\\n llcrnrlat = y_ll,\\\n urcrnrlon = x_ur,\\\n urcrnrlat = y_ur,\\\n dat_segment = dat_segment,\\\n dat_list_reso = dat_list_reso,\\\n hrit_list = hrit_list ,\\\n dat_listnum = dat_listnum,\n hrit_listb = hrit_listb,\\\n dat_listnuma = dat_listnuma,\n dat_listnumb = dat_listnumb,\\\n hrit_spa = hrit_spa,\\\n hrit_spb = hrit_spb,\n create_fd_internal = True):\n \n \n date_obj = [datetime.datetime.strptime(i,\"%Y%m%d_%H%M\") for i in _date_]\n date_obj1 = N.array([datetime.datetime.strftime(i,\"/%Y%m/%d/%H/\") for i in date_obj])\n \n final_out_filename_list = []\n path_himawari_hdf5_out_list = []\n for i, j in enumerate(_date_):\n start_time = datetime.datetime.now()\n \n print \"\\n\"\n print \"=\"*80\n \n # if True, creates a subdirectory within default directory to store h8 data\n if create_fd_internal == True:\n path_himawari_hdf5_out = os.path.join(path_himawari_hdf5, j)\n elif create_fd_internal == False:\n path_himawari_hdf5_out = path_himawari_hdf5\n else:\n os.sys.exit(\"Create folder internal or external options not given by user.\")\n \n # if ouput directory does not exist\n if os.path.exists(path_himawari_hdf5_out) == False:\n create_path_directories(path_himawari_hdf5_out)#creates directory for output\n \n file_name_if_dl = \"HS_H08_\"+ j +\"_PH_R20_S030405.hdf5\"\n \n #print os.path.join(path_himawari_hdf5_out, file_name_if_dl)\n # if processed himawari data for ceratin date is already downloaded, skip timestamp\n if os.path.exists(os.path.join(path_himawari_hdf5_out, file_name_if_dl)) == True:\n print \"\\n\"\n print file_name_if_dl, 'exists'\n final_out_filename_list.append(file_name_if_dl)\n path_himawari_hdf5_out_list.append(path_himawari_hdf5_out)\n continue\n \n # downloads the needed data through wget\n dat_bz = download_h8(j, date_obj1[i], path_himawari,\\\n dat_list = dat_list,\\\n dat_segment = dat_segment,\\\n dat_list_reso = dat_list_reso)\n \n # Checks the downloaded H8 list\n # If datalist is empty, skip timestamp and append on list\n if dat_bz[0] == \"nan\":\n path_himawari_hdf5_out_list.append(str(N.nan))\n final_out_filename_list.append(str(N.nan))\n \n print \"\\n\"\n print \"Skipping \"+j\n print \"Removing temporary data on \"+path_himawari\n os.system(\"rm \"+os.path.join(path_himawari,\"*\"))\n continue\n \n datbz_fnames = open_list_datbz2(path_himawari)\n \n # double checks data if downloaded\n datbz_fnames_final = check_datbz_files(datbz_fnames, j,\\\n dat_list = dat_list,\\\n dat_segment = dat_segment,\\\n dat_list_reso = dat_list_reso)\n \n \n if datbz_fnames_final[0] == \"nan\":\n path_himawari_hdf5_out_list.append(str(N.nan))\n final_out_filename_list.append(str(N.nan))\n \n print \"\\nSkipping \"+j\n print \"Removing temporary data on \"+path_himawari\n os.system(\"rm \"+os.path.join(path_himawari,\"*\"))\n continue\n \n # downsample and HRIT conversion\n hrit_fnames = preparation_himawari(j, datbz_fnames_final, path_himawari)\n \n # necessary preprocessing methods\n final_out_filename = whole_preprocess_H8(hrit_fnames,\\\n path_himawari,\\\n path_himawari_hdf5_out,\\\n llcrnrlon = llcrnrlon,\\\n llcrnrlat = llcrnrlat,\\\n urcrnrlon = urcrnrlon,\\\n urcrnrlat = urcrnrlat,\\\n dat_segment = dat_segment,\\\n hrit_list = hrit_list ,\\\n dat_listnum = dat_listnum,\\\n hrit_listb = hrit_listb,\\\n dat_listnuma = dat_listnuma,\\\n dat_listnumb = dat_listnumb,\\\n hrit_spa = hrit_spa,\\\n hrit_spb = hrit_spb)\n \n os.system(\"rm 
\"+os.path.join(path_himawari,\"*\"))# deletes all files in path_himawari \n print \"\\nTemporary files are deleted in \"+ path_himawari\n \n # summary list\n final_out_filename_list.append(final_out_filename)\n path_himawari_hdf5_out_list.append(path_himawari_hdf5_out)\n \n time1 = datetime.datetime.now()\n print('\\n\\n\\tDuration of whole process: {}'.format(time1 - start_time))\n\n \n return path_himawari_hdf5_out_list, final_out_filename_list",
"def bulkInsert(self, url, values):\n pass"
] | [
"0.62782913",
"0.61778706",
"0.5907445",
"0.5905333",
"0.57796603",
"0.5722891",
"0.5697067",
"0.5626108",
"0.5601909",
"0.55823594",
"0.55752623",
"0.5555578",
"0.55554163",
"0.55433315",
"0.55048054",
"0.54210556",
"0.5396725",
"0.5391007",
"0.5382943",
"0.53774333",
"0.53432524",
"0.5330327",
"0.53258246",
"0.52892345",
"0.52786684",
"0.5250836",
"0.52349824",
"0.52183115",
"0.5217864",
"0.5215931"
] | 0.65828794 | 0 |
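
The batching pattern in the row above builds one (path, params) tuple per page, injecting the page number into a copy of the shared parameter dict. It can be exercised standalone as below; the helper name, path, and "since" parameter are illustrative assumptions only, not the actual client API.

def build_url_batch(path, batch_size, params=None):
    base = dict(params or {})
    batch = []
    for page in range(1, batch_size + 1):
        page_params = dict(base, page=page)   # fresh copy so pages do not share state
        batch.append((path, page_params))
    return batch

print(build_url_batch("/v1/events", 3, {"since": "2021-01-01T00:00:00Z"}))
# -> [('/v1/events', {'since': ..., 'page': 1}), ..., ('/v1/events', {'since': ..., 'page': 3})]
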
Gets the next batch of timeseries items from the Halo API | def get_next_batch(self):
url_list = self.create_url_batch(self.start_url, self.batch_size,
self.params)
pages = self.get_pages(url_list)
adjustment_factor = self.get_adjustment_factor(pages, self.page_size,
self.item_key)
self.adjust_batch_size(adjustment_factor)
items = self.sorted_items_from_pages(pages, self.item_key,
self.sort_key)
items = self.remove_duplicate_items(items, self.prior_batch_ids)
try:
last_item_timestamp = items[-1]['created_at']
except IndexError:
time.sleep(3)
return []
self.params["since"] = last_item_timestamp
self.prior_batch_ids = set([x["id"] for x in items])
return items | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _Next(self, count=None):\n if count is not None and (not isinstance(count, (int, long)) or count <= 0):\n raise datastore_errors.BadArgumentError(\n 'Argument to _Next must be an int greater than 0; received %s (a %s)' %\n (count, typename(count)))\n\n if self.__buffer:\n if count is None:\n entity_list = self.__buffer\n self.__buffer = []\n return entity_list\n elif count <= len(self.__buffer):\n entity_list = self.__buffer[:count]\n del self.__buffer[:count]\n return entity_list\n else:\n entity_list = self.__buffer\n self.__buffer = []\n count -= len(entity_list)\n else:\n entity_list = []\n\n\n if not self.__more_results:\n return entity_list\n\n req = datastore_pb.NextRequest()\n if count is not None:\n req.set_count(count)\n req.mutable_cursor().CopyFrom(self.__cursor)\n result = datastore_pb.QueryResult()\n try:\n apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Next', req, result)\n except apiproxy_errors.ApplicationError, err:\n raise _ToDatastoreError(err)\n\n return entity_list + self._ProcessQueryResult(result)",
"def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request side\")",
"def GetNextBatchOfResults(self) -> typing.List[Repository.Repository]:\n while True:\n try:\n logging.debug('Requesting page %d', self.next_page_num)\n page = list(self.query.get_page(self.next_page_num))\n logging.debug('Page %d contains %d results', self.next_page_num,\n len(page))\n self.next_page_num += 1\n return page\n except github.RateLimitExceededException:\n logging.debug('Pausing on GitHub rate limit')\n time.sleep(3)\n except github.GithubException:\n # One possible cause for this exception is when trying to request\n # a page beyond 1000 results, since GitHub only returns the first\n # 1000 results for a query.\n return []",
"def next_batch(self, batch_size):\n raise NotImplementedError",
"def get_events_batch() -> PayloadDictList:\n ...",
"def get_batch(self, idxs):\r\n return self.data[(self.start + idxs) % self.maxlen]",
"def next_batch(self, batch_size):\r\n raise NotImplementedError",
"def iterate_by_item(response):\n response = response\n while True:\n for r in response.get('data', []):\n yield r\n next_page = response.get('paging', {}).get('next', '')\n if not next_page:\n break\n response = json.load(urllib2.urlopen(next_page))",
"def get_next_batch_start(self):\n return None",
"def next_batch(self, batch_size=8):\n raise NotImplementedError()",
"def get_next_item(self, timeout=None):\n return self.export.get_next_item(timeout=timeout)",
"def __next__(self):\n if self.block_count + self.count - self.step == self.max:\n raise StopIteration\n if self.block_count == 0 or self.count == self.step:\n # Without list() this turns into LIMIT 1 OFFSET x queries\n self.data = list(self.obj.all()[self.block_count: \\\n (self.block_count + self.step)])\n self.block_count += self.step\n self.count = 0\n self.count += 1\n return self.data[self.count - 1]",
"def _next(self):\n batch_start, batch_end = self.batch_start, self.batch_start + self.batch_size\n if batch_end > self.X.shape[0]:\n self.shuffle()\n return self._next()\n else:\n batch_indices = self.indices[batch_start:batch_end]\n X_batch, y_batch = self.X[batch_indices], self.y[batch_indices]\n X_batch, y_batch = self.process_batch(X_batch, y_batch)\n self.batch_start = batch_end\n return X_batch, y_batch",
"def next(self, payload):\n\n session = ZuoraApiSession(self.org_uid)\n\n # There are 2 different endpoints for querying.\n # One which takes the initial query string and one which takes a cursor if there is another page of data.\n if self.sync_data.cursor:\n response = session.post(\n QUERY_MORE_URI,\n json={'queryLocator': self.sync_data.cursor}\n )\n else:\n response = session.post(\n QUERY_URI,\n json={'queryString': self._get_query()}\n )\n\n endpoint = ENDPOINTS[self.sync_data.endpoint_index]\n\n items = response.get('records', [])\n new_payload = {}\n has_more_items = False\n max_updated_at = None\n\n if items:\n # Parse datetime strings\n updated_dates = [parser.parse(item['UpdatedDate']) for item in items]\n\n # Retrieve the max UpdatedDate to use in the query for the next changeset\n max_updated_at = items[updated_dates.index(max(updated_dates))]['UpdatedDate']\n\n item_objects = []\n\n for item in items:\n item_objects.extend(\n sync_utils.create_items(\n self.org_uid,\n self.org.provider,\n self.org.changeset,\n endpoint,\n item['Id'],\n item\n )\n )\n\n sync_utils.save_items(item_objects)\n\n # If there are more pages to fetch, store the cursor\n if 'queryLocator' in response:\n logging.info(\"There is another page of {}\".format(endpoint))\n self.sync_data.cursor = response['queryLocator']\n new_payload['max_updated_at'] = max_updated_at\n\n else:\n logging.info(\"no more data expected for endpoint {}\".format(endpoint))\n marker = max_updated_at or payload.get('max_updated_at')\n\n if marker:\n logging.info(\"setting updated_at marker for {} to {}\".format(endpoint, marker))\n self.sync_data.markers[self.sync_data.endpoint_index] = marker\n\n self.sync_data.endpoint_index += 1\n self.sync_data.cursor = None\n\n complete = self.sync_data.endpoint_index == len(ENDPOINTS)\n\n if complete:\n self.sync_data.endpoint_index = 0\n\n self.sync_data.put()\n complete = complete and not has_more_items\n\n return complete, new_payload",
"async def fetch_next_block(self):\n\n results = []\n for _ in range(self._page_size):\n try:\n results.append(await self.__anext__())\n except StopAsyncIteration:\n # no more results\n break\n return results",
"def next_batch(self):\n if self.ptr + self.batch_size >= self.size:\n head = 0\n tail = self.batch_size\n self.ptr = self.batch_size\n else:\n head = self.ptr\n tail = self.ptr + self.batch_size\n self.ptr += self.batch_size\n return self.train_x[head:tail, 0:self.fig_w**2], self.train_y[head:tail, 0:10]",
"def _next(self):\n batch_start, batch_end = self.batch_start, self.batch_start + self.batch_size\n X_batch, y_batch = self.X[batch_start:batch_end], self.y[batch_start:batch_end]\n X_batch, y_batch = self.process_batch(X_batch, y_batch)\n if batch_end > self.X.shape[0]:\n self.batch_start = 0\n else:\n self.batch_start = batch_end\n return X_batch, y_batch",
"def next_batch(self, batch_size):\n batch_data = np.zeros([batch_size,] + list(self.example_shape))\n for i in range(batch_size):\n index = self.q.pop()\n batch_data[i,...] = self.data[index]\n if len(self.q)==0:\n self.__new_epoch()\n\n return batch_data",
"def get_next_item(self):\n pass",
"def _Get(self, count):\n if count > MAXIMUM_RESULTS:\n count = MAXIMUM_RESULTS\n entity_list = self._Next(count)\n while len(entity_list) < count and self.__more_results:\n next_results = self._Next(count - len(entity_list))\n if not next_results:\n break\n entity_list += next_results\n return entity_list;",
"def batch_query(url, headers=None, timeout=299):\n\n offset = 0\n count = 0\n\n proxies = {\n 'http': ARGS.proxy_string,\n 'https': ARGS.proxy_string\n }\n\n options = {\n \"headers\": headers,\n \"verify\": False,\n \"timeout\": timeout,\n \"proxies\": proxies,\n \"params\": {}\n }\n\n while True: # do - while offset < count\n options[\"params\"][\"offset\"] = offset\n req = requests.get(url, **options)\n\n if not req.status_code == 200:\n errmsg = \"status_code: {0.status_code}: {0.content}\"\n raise UnknownResult(errmsg.format(req))\n\n res = req.json()\n data = res[\"data\"]\n count = res.get(\"count\", 0)\n\n yield from data\n\n offset += len(data)\n\n if offset >= count:\n break",
"def iterResponsePages(service, payload, verbose, slow_down):\n token = 0\n next_page = True\n data = {'reports': []}\n\n\n while next_page:\n if verbose:\n print(f'Fetching rows starting at position: {token}')\n if slow_down > 0:\n time.sleep(slow_down)\n \n data_tmp = service.reports().batchGet(body=payload).execute()\n token = data_tmp.get('reports')[0].get('nextPageToken')\n\n if token != None:\n payload.get('reportRequests')[0].update({'pageToken': token})\n else:\n next_page = False\n payload.get('reportRequests')[0].update({'pageToken': '0'})\n\n for report in data_tmp.get('reports'):\n data.get('reports').append(report)\n\n return data",
"def get_taxii(self, args: Dict[str, Any]):\n taxii_data = []\n save_fetch_time = None\n count = 0\n try:\n for data in self.fetch(args.get('begin'), args.get('end'), args.get('collection')):\n response = self.parse_to_json(data)\n\n if response.get('indicators') or False:\n content = response.get('indicators')\n elif response.get('ttps') or False:\n content = response.get('ttps').get('ttps')\n else:\n raise ValueError(\"Last fetch time retrieval failed.\")\n\n for eachone in content:\n save_fetch_time = parser.parse(eachone['timestamp']).replace(tzinfo=pytz.UTC).strftime(\n DATETIME_FORMAT)\n\n taxii_data.append(response)\n\n count += 1\n if count == arg_to_number(args.get('limit', 1)):\n break\n except Exception as e:\n demisto.error(\"Failed to fetch feed details, exception:{}\".format(e))\n raise e\n\n return taxii_data, save_fetch_time",
"def request_large_data(path, params):\r\n start = 0\r\n result = []\r\n while True:\r\n process = __split_path(path)\r\n if process['status'] == 'success':\r\n conn = process['conn']\r\n info = MetaInfo()\r\n header = {'vendor_key': info.vendor_id}\r\n # check 2.x and 3.x differences in using urllib\r\n params['start'] = str(start)\r\n try:\r\n conn.request(\"GET\", process['req_path'] + \"?\" +\r\n urllib.urlencode(params), headers=header)\r\n except AttributeError:\r\n conn.request(\"GET\", process['req_path'] + \"?\" +\r\n urllib.parse.urlencode(params), headers=header)\r\n resp = conn.getresponse()\r\n\r\n if resp.status != 200:\r\n print(\"Code: \" + str(resp.status))\r\n resp_obj = HTTPConnect(resp.status, resp.read()).json()\r\n print(resp_obj)\r\n break\r\n resp_obj_read = resp.read()\r\n resp_obj = HTTPConnect(resp.status, resp_obj_read).json()\r\n if type(resp_obj) is str:\r\n try:\r\n resp_obj = json.loads(resp_obj)\r\n except:\r\n # return error msg\r\n print(\"parse json str failed\")\r\n # print(resp_obj)\r\n break\r\n elif type(resp_obj) is dict:\r\n try:\r\n resp_obj = json.loads(json.dumps(resp_obj))\r\n except:\r\n print(\"parse dict failed\")\r\n print(resp_obj)\r\n break\r\n else:\r\n print(\"result not str\")\r\n print(resp_obj)\r\n break\r\n\r\n total = int(resp_obj['total'])\r\n next_start = int(resp_obj['next_start'])\r\n\r\n result.extend(resp_obj['data'])\r\n print(\"Finish extracting: \" + str(start+1) + \" to \" + str(next_start-1) + \" , remaining: \"\r\n + str(total - next_start))\r\n start = next_start\r\n conn.close()\r\n if start == total:\r\n break\r\n return result",
"def next(self, batch_size):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n batch_data = (self.data[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_labels = (self.labels[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n self.batch_id = min(self.batch_id + batch_size, len(self.data))\n\n\n return batch_data, batch_labels, batch_seqlen",
"def next(self, batch_size):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n batch_data = (self.data[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_labels = (self.labels[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n self.batch_id = min(self.batch_id + batch_size, len(self.data))\n\n\n return batch_data, batch_labels, batch_seqlen",
"def get_next_sample(self):",
"def next_batch(self, batch_size):\r\n start = self._index_in_epoch\r\n self._index_in_epoch += batch_size\r\n\r\n if self._index_in_epoch > self._num_examples:\r\n # After each epoch we update this\r\n self._epochs_done += 1\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n #print(\"numexamples \",self._num_examples)\r\n assert batch_size <= self._num_examples\r\n end = self._index_in_epoch\r\n\r\n return self._images[start:end], self._labels[start:end], self._img_names[start:end], self._cls[start:end]",
"def batch(self):\n return self._client.batch()",
"def next_batch(self, batch_size):\n X_batch = self.X_data[self.batch_num*batch_size:(self.batch_num+1)*batch_size]\n Y_batch = self.Y_data[self.batch_num*batch_size:(self.batch_num+1)*batch_size]\n self.batch_num += 1\n return X_batch, Y_batch"
] | [
"0.6152826",
"0.6136084",
"0.6109296",
"0.6071321",
"0.6053608",
"0.58972186",
"0.58925384",
"0.58786076",
"0.58652157",
"0.58604544",
"0.58361346",
"0.58221257",
"0.58177155",
"0.58131826",
"0.5763761",
"0.5760355",
"0.5737006",
"0.5702543",
"0.5676999",
"0.56377476",
"0.56324476",
"0.56260896",
"0.56223774",
"0.561124",
"0.5611001",
"0.5611001",
"0.56012815",
"0.55846417",
"0.5557795",
"0.5550873"
] | 0.71298516 | 0 |
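
The get_next_batch method above combines three ideas: fan out a batch of page URLs, drop items already seen in the previous batch, and advance the "since" cursor to the created_at of the last item returned. The stripped-down, standalone sketch below illustrates that loop; fetch_page and the item field names are assumptions for illustration, not the actual Halo client API.

def iterate_items(fetch_page, since):
    # fetch_page(since) is assumed to return items ordered by created_at (oldest first)
    seen = set()
    while True:
        items = [i for i in fetch_page(since) if i["id"] not in seen]
        if not items:
            break                               # nothing new this round
        seen = {i["id"] for i in items}         # remember this batch for dedup next round
        since = items[-1]["created_at"]         # move the cursor forward
        for item in items:
            yield item
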
Determine number of empty pages from list of pages. | def get_number_of_empty_pages(cls, pages, item_key):
empty = [page for page in pages if page[item_key] == []]
return len(empty) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_number_of_full_pages(cls, pages, page_size, item_key):\n full = [page for page in pages if len(page[item_key]) == page_size]\n return len(full)",
"def num_pages(self):\n if self.count == 0 and not self.allow_empty_first_page:\n return 0\n hits = max(1, self.count + self.delta - self.orphans)\n return ceil(hits / self.per_page)",
"def get_num_of_pages(self):",
"def page_size(self):\n return 0 if self.hits is None else len(self.hits)",
"def get_pages_count(link_headers):\n last_header = link_headers.get('last')\n if last_header:\n return int(parse_qs(urlparse(last_header['url']).query)['page'][0])\n return 1",
"def num_of_pages(self) -> int:\n try:\n return int(round(self.__number_of_items / 48))\n except TypeError:\n raise TypeError(\"number_of_items must be of int type\")",
"def __get_total_pages(self):\n \"\"\n if self.__total_pages is None:\n if self.count == 0 and not self.allow_empty_first_page:\n self.__total_pages = 0\n else:\n hits = max(1, self.count)\n self.__total_pages = int(ceil(hits / float(self.per_page_limit)))\n return self.__total_pages",
"def pages(self):\n if self._PAGES < 0:\n self.page(1)\n return self._PAGES",
"def pages(self):\n return int(ceil(self.count / float(self.page_size)))",
"def pagecount(self):\r\n \r\n return len(self.results) // self.perpage + 1",
"def pages_count(self):\n return self._pages_count",
"def PagesCount(self, default=None):\n return self.data.get('metadata', {}).get('number_of_pages', default)",
"def _get_empty_page(self):\n return self._slice_objects(0, 0)",
"def page_counts(self):\n return 1 + (self.total_count - 1) / self.page_size",
"def Nb_pages(soup: str):\n nb_page_total = soup.find('div', attrs={'class': 'pagination-total'})\n if nb_page_total != None:\n nb_page_total = str(nb_page_total)\n nb_pages = int(nb_page_total[41:len(nb_page_total)-14])//10 + 1\n else:\n nb_pages = 1\n return(nb_pages)",
"def pages(self):\n if self.total > 0 and self.limit > 0:\n return int(ceil(self.total / float(self.limit)))\n return 1",
"def get_number_of_pages():\n first_page_link = URL.format(\"1\")\n cuisine_recipes = get_content_from_url(first_page_link)\n if not cuisine_recipes:\n print \"no content for:\", first_page_link\n return None\n soup_cuisine = BeautifulSoup(cuisine_recipes)\n # select characters from phrase and convert it into integer\n return int(soup_cuisine.find(\"span\", {\"class\": \"last\"}).a[\"href\"].split('=')[1])",
"def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count",
"def number_of_pages(self):\n return self._number_of_pages",
"def number_of_links(corpus, page):\n return len(corpus[page]) if corpus[page] else len(corpus)",
"def get_pages():\n try:\n self.handleEvent(EC.text_to_be_present_in_element((By.ID, self.RepositoriesObjects('pager')), \"Page\"))\n footer = self.handleEvent(EC.presence_of_element_located((By.ID, self.RepositoriesObjects('pager'))),\n action=\"GET_TEXT\")\n # filters text to get last number\n pages_num = int(re.search('\\d+$', footer).group())\n utility.execLog(\"Total pages is: {}\".format(pages_num))\n return pages_num\n except NoSuchElementException as e:\n pages_num = None\n utility.execLog(\"Pagination element not found, error message: {}\".format(e))\n return pages_num",
"def get_num_pages(self) -> Optional[int]:\n timeout: float = 5\n num_attempts = 0\n while num_attempts < 10:\n r = hit_api(self.key_manager, self.url, self.logger, timeout=timeout, method=\"HEAD\")\n\n if r:\n break\n\n timeout = timeout * 1.2\n else:\n raise RuntimeError(\"Unable to get the number of pages of data in 10 attempts\")\n\n if 'last' not in r.links.keys():\n return 1\n \n # get the last url from header\n last_page_url = r.links['last']['url']\n\n parsed_url = urlparse(last_page_url)\n try:\n num_pages = int(parse_qs(parsed_url.query)['page'][0])\n except (KeyError, ValueError):\n return None\n\n return num_pages",
"def get_total_pages() -> int:\n items_per_page = 4\n return math.ceil(len(database)/items_per_page)",
"def GetPageCount(self):\r\n\r\n return len(self._pages)",
"def pages(self):\n if not self.limit:\n return 0 # pragma: no cover\n else:\n return int(ceil(self.total / float(self.limit)))",
"def _count_pages_pdf(self, bin_pdf):\n pages = 0\n for match in re.compile(r\"/Count\\s+(\\d+)\").finditer(bin_pdf):\n pages = int(match.group(1))\n return pages",
"def check_if_empty_page(content: list) -> bool:\n return True if len(content) > 1 else False",
"def page_count(n: int, p: int) -> int:\n\n front = p // 2\n back = n // 2 - p // 2\n\n return min([front, back])",
"def number_of_pages(files: list[str], verbose: bool = False) -> list[int]:\n\n return_int = False\n\n if type(files) == str:\n files = [files]\n return_int = True\n\n if verbose:\n print(\"\\n---- reading number of pages per file ----\\n\")\n\n n_pages = []\n\n for f in files:\n cmd = f'gs -q -dNOSAFER -dNODISPLAY -c \"({f:s}) (r) file runpdfbegin pdfpagecount = quit\"'\n out = run(cmd, verbose)\n n_pages += [int(out)]\n\n if return_int:\n return n_pages[0]\n\n return n_pages",
"def get_number_of_elements(self):\n if self.page.paginator.count < int(self.page.number) * self.page_size:\n show = self.get_shows()\n\n return \"{} - {}\".format(show, self.page.paginator.count)\n else:\n show = self.get_shows()\n return \"{} - {}\".format(show, self.get_page_range())"
] | [
"0.6968454",
"0.6964381",
"0.69400054",
"0.65524155",
"0.64453053",
"0.63995045",
"0.6375952",
"0.6373464",
"0.6291651",
"0.6288349",
"0.6280372",
"0.62778735",
"0.626527",
"0.62141216",
"0.61845714",
"0.6140578",
"0.61326563",
"0.61308753",
"0.61158437",
"0.6084466",
"0.6050758",
"0.60225224",
"0.6021499",
"0.5997531",
"0.5995501",
"0.5959943",
"0.59541625",
"0.5938365",
"0.592778",
"0.5861389"
] | 0.8104307 | 0 |
Raise CloudPassageValidation if `start_url` is invalid. | def verify_start_url(cls, start_url):
if start_url not in cls.allowed_urls:
exc_msg = "This URL is unsupported for TimeSeries: %s" % start_url
raise CloudPassageValidation(exc_msg)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_url(self):\n pass",
"def check_url_and_raise_errors(url: str) -> None:\n if not url:\n raise_error(\"Url can not be empty\", 400)\n\n try:\n URL_REGEX.match(url).span()[1] - URL_REGEX.match(url).span()[0] == len(url)\n except AttributeError:\n raise_error(\"Url should be valid\", 400)",
"def urlValidator(url):\n if 'amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 1)')\n else:\n validURL = url\n if 'Amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 2)')\n else:\n validURL = url\n\n return validURL",
"def validate_url(url_in):\n if url_in == \"\":\n error = \"[ERROR] Input is empty\"\n return False\n elif not url_in.startswith(\"https://\"):\n error = \"[ERROR] Input doesn't start with https://\"\n return False\n elif not url_in.startswith(\"https://github.com/\"):\n error = \"[ERROR] Input is not a GitHub URL\"\n return False\n else:\n error = \"[INFO] Input is a valid URL\"\n return True",
"def _validate_base_url(url: str) -> None:\n parse_result = urlparse(url)\n if parse_result.scheme not in ('http', 'https'):\n raise ValueError(\n f'Only HTTP[S] URLs are permitted. Actual URL: {url!r}')\n if url.endswith('/'):\n raise ValueError('Base (DICOMweb service) URL cannot have a trailing '\n f'forward slash: {url!r}')",
"def url_validator_callback(url: str) -> str:\n if url is None:\n return url\n\n url = url.strip()\n try:\n result = urlparse(url)\n if result.scheme and result.netloc:\n return url\n except:\n pass\n raise typer.BadParameter(\"Please supply a valid url\")",
"def test_validate_url_non_google_doc():\n url_not_a_google_doc = 'https://not-a-google-doc.com'\n assert validate_url(url_not_a_google_doc) is False",
"def valid_url(prop,value,report):\r\n url = value.getStringValue()\r\n # local urls are allowed\r\n if local_urls.match(url):\r\n pass\r\n # custom urls are allowed, but need to be transformed into a real path\r\n elif custom_img_urls.match(url):\r\n name = custom_img_urls.match(url).group(1)\r\n # the label -> image number lookup is stored on the subreddit\r\n if c.site.images.has_key(name):\r\n num = c.site.images[name]\r\n value._setCssText(\"url(http:/%s%s_%d.png?v=%s)\"\r\n % (g.s3_thumb_bucket, c.site._fullname, num,\r\n randstr(36)))\r\n else:\r\n # unknown image label -> error\r\n report.append(ValidationError(msgs['broken_url']\r\n % dict(brokenurl = value.cssText),\r\n value))\r\n # allowed domains are ok\r\n elif domain(url) in g.allowed_css_linked_domains:\r\n pass\r\n else:\r\n report.append(ValidationError(msgs['broken_url']\r\n % dict(brokenurl = value.cssText),\r\n value))\r\n #elif sanitize_url(url) != url:\r\n # report.append(ValidationError(msgs['broken_url']\r\n # % dict(brokenurl = value.cssText),\r\n # value))\r",
"def validate_url(ctx, param, value):\n try:\n return URL(request.urlopen(value).read())\n except ValueError:\n raise click.BadParameter('url need to be a correct URL string')",
"def validate_url(url: str) -> None:\n if not is_valid_url(url):\n raise ValueError(f\"Validation Error. Provided url '{url}' is not valid.\")\n try:\n response = requests.get(url)\n except Exception as e:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")\n else:\n if response.status_code != status.HTTP_200_OK:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")",
"def _validate_url(url):\n if not url or url.count('/') != 1 or url[0] != '@':\n return False\n return True",
"def _validate_url(url):\n if urlparse.urlparse(url).scheme not in VALID_SCHEMES:\n _fail(url, \"Invalid URL\")",
"def validate_url(self, url):\n if not validators.url(url):\n self.link_field.helper_text = \"Please enter a valid url\"\n self.add_link_button.disabled = True\n\n else:\n self.link_field.helper_text = \"\"\n self.add_link_button.disabled = False",
"def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True",
"def _validate_cert_url(self):\n if not re.search(\n 'https\\:\\/\\/sns\\.(.*)\\.amazonaws\\.com(.*)\\.pem',\n self._signing_cert_url):\n self.error = 'Certificate is not hosted at AWS URL'\n raise ValueError('Certificate is not hosted at AWS URL')\n\n return True",
"def check_url(value):\n\n valid = validators.url(value)\n if valid != True:\n return False",
"def check_url(value):\n\n valid = validators.url(value)\n if valid is not True:\n return False",
"def test_validate_url_valid():\n url = 'https://docs.google.com/spreadsheets/d/AbCde1'\n\n assert validate_url(url) is True",
"def validate_url(url):\n\n # Minimal URL validation with urlparse. This is extremely lenient, we might\n # want to use something like https://github.com/kvesteri/validators instead.\n parsed_url = urlparse(url)\n\n if not parsed_url.scheme:\n parsed_url = urlparse(\"http://\" + url)\n\n if not re.match(\"https?\", parsed_url.scheme):\n raise ValueError('Links must have an \"http\" or \"https\" prefix')\n\n if not parsed_url.netloc:\n raise ValueError(\"Links must include a domain name\")\n\n return parsed_url.geturl()",
"def test_validate_invalid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n with self.assertRaises(jsonschema.ValidationError):\n resumeschema.validate(self.invalid_resume)",
"def clean_url(self):\n allowed_domains = (\"https://www.kurnik.pl\", \"https://www.playok.com\")\n url = self.cleaned_data[\"url\"]\n print(check_domain(url))\n if check_domain(url) in allowed_domains and url[-3:] == \"txt\":\n return url\n raise forms.ValidationError(\n \"Invalid url, only games from kurnik.pl\" \" or playok.com are allowed\"\n )",
"def url_validator(arg):\n arg = arg.lower()\n\n # If url looks like http[s]://vk.com/domain\n symbolic_id = constants.TXT_ID_REGEXP.match(arg)\n if symbolic_id:\n url = symbolic_id.groupdict()\n url[\"type\"] = \"domain\"\n return url\n\n # If url looks like http[s]://vk.com/id123456\n numeric_id = constants.NUM_ID_REGEXP.match(arg)\n if numeric_id:\n url = numeric_id.groupdict()\n return url\n\n raise argparse.ArgumentTypeError(\"{} - invalid url address\".format(arg))",
"def valid(url):\n return 0 < len(urlparse(url)[1])",
"def validate_url(url: str):\n try:\n return urlparse(url)\n except KeyboardInterrupt:\n return None",
"def validate_url(self, v):\n u = urlparse.urlparse(v)\n if u.scheme.lower() not in ('http', 'https'):\n raise ValueError('URL scheme must be either http:// or https://')\n if not u.netloc:\n raise ValueError('URL must specify a network location.')\n return u.scheme.lower() == 'https'",
"def assert_has_valid_url(self, url, expected_ending=''):\r\n assert isinstance(url, str)\r\n assert url.startswith('http')\r\n assert url.endswith(expected_ending)",
"def clean_url(self):\n url = self.cleaned_data['url']\n\n if url:\n pattern = re.compile(r'https?://(www\\.)?instagr(\\.am|am\\.com)/p/\\S+')\n if not pattern.match(url):\n raise forms.ValidationError('Please provide a valid instagram link.')\n\n return url",
"def validate_url(attribute_name, url):\n if not url:\n return\n\n try:\n result = urlparse(url=url)\n if [result.scheme, result.netloc, result.path]:\n return True\n except:\n raise ValueError('{attribute_name}: The given string {url} is not a '\n 'valid url.'\n .format(attribute_name=attribute_name, url=url))",
"def validate_url(url):\n if not isinstance(url, basestring):\n raise TypeError(\"url must be a string, not %r\"%type(url))\n url = url.lower()\n \n proto_addr = url.split('://')\n assert len(proto_addr) == 2, 'Invalid url: %r'%url\n proto, addr = proto_addr\n assert proto in ['tcp','pgm','epgm','ipc','inproc'], \"Invalid protocol: %r\"%proto\n \n # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391\n # author: Remi Sabourin\n pat = re.compile(r'^([\\w\\d]([\\w\\d\\-]{0,61}[\\w\\d])?\\.)*[\\w\\d]([\\w\\d\\-]{0,61}[\\w\\d])?$')\n \n if proto == 'tcp':\n lis = addr.split(':')\n assert len(lis) == 2, 'Invalid url: %r'%url\n addr,s_port = lis\n try:\n port = int(s_port)\n except ValueError:\n raise AssertionError(\"Invalid port %r in url: %r\"%(port, url))\n \n assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url\n \n else:\n # only validate tcp urls currently\n pass\n \n return True",
"def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True"
] | [
"0.6495124",
"0.6365665",
"0.5986806",
"0.5980897",
"0.5966514",
"0.5952704",
"0.5844769",
"0.582063",
"0.5806983",
"0.57794064",
"0.5755256",
"0.57282627",
"0.56958735",
"0.5690138",
"0.5598804",
"0.55730754",
"0.55513805",
"0.55021673",
"0.55015725",
"0.5486201",
"0.5482193",
"0.54767483",
"0.5466976",
"0.5440576",
"0.54215217",
"0.54132754",
"0.5381098",
"0.5363005",
"0.53607553",
"0.53554565"
] | 0.829017 | 0 |
Update the clock time. Only needs to be called when the clock runs in externally clocked mode, which is done by calling reset/start with the current clock time. If called when the clock runs in realtime mode, does nothing. | def update(self, time=None):
if self.realtime:
return
if time is None: # clock in externally-clocked mode, need valid time
return
self._time = time | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_system_clock(self):\n if self.platform in RASPBERRY_PI_PLATFORMS:\n LOG.info('Updating the system clock via NTP...')\n if self.is_paired:\n # Only display time sync message when paired because the prompt\n # to go to home.mycroft.ai will be displayed by the pairing\n # skill when pairing\n self.enclosure.mouth_text(dialog.get(\"message_synching.clock\"))\n self.bus.wait_for_response(\n Message('system.ntp.sync'),\n 'system.ntp.sync.complete',\n 15\n )",
"def update_clock(self, newtime=None):\n if newtime is None:\n self.logical_clock += 1\n else:\n if newtime < self.logical_clock:\n raise Exception(\"wee woo! time is bad!\")\n self.logical_clock = newtime",
"def test_clock_realtime():\n clock = Clock()\n assert clock.realtime\n old_time = clock.time\n time.sleep(0.1)\n assert clock.time - old_time >= 0.1\n assert clock.time - old_time < 0.2\n clock.update()\n assert clock.time - old_time >= 0.1\n assert clock.time - old_time < 0.2\n time.sleep(0.1)\n assert clock.time - old_time >= 0.2\n assert clock.time - old_time < 0.3\n clock.update(time=0.0)\n assert clock.time - old_time >= 0.2\n assert clock.time - old_time < 0.3\n clock.reset(time=0)\n assert not clock.realtime\n assert clock.time == 0.0",
"def update_time(self):\n pass # Do nothing",
"def test_clock_external():\n clock = Clock(time=0.0)\n assert not clock.realtime\n assert clock.time == 0.0\n clock.update()\n assert clock.time == 0.0\n clock.update(time=0.1)\n assert clock.time == 0.1\n clock.update()\n assert clock.time == 0.1\n clock.update(time=0.0)\n assert clock.time == 0.0\n clock.reset()\n assert clock.realtime\n assert clock.time > 0",
"def start_clock(self):\n pass",
"def clock( current_time ):\n global D\n number_of_seconds_since_start = int(current_time - D.start_time)\n if D.last_time_printed < number_of_seconds_since_start:\n print \"[Brains] [State:\", D.STATE, \"] time is\", \\\n number_of_seconds_since_start, \"seconds since starting...\"\n D.last_time_printed = number_of_seconds_since_start",
"def update_timeval(self):\n self.timeval = self.get_timeval()",
"def update_gameclock():\n global game_ticks\n ## GOTCHA: Both Ball.update() and Ball.predict() modify sprite\n ## position, so the update and display routines must each perform\n ## erasure. This results in redundant erasures whenever an update and\n ## frame are ready in the same pass. This happens almost every game tick\n ## at high frame rates, often enough that an avoidance optimization\n ## would gain a few FPS.\n sprite_group.clear(screen, eraser_image)\n sprite_group.update(USE_PREDICTION)\n handle_collisions()\n game_ticks += 1\n if game_ticks >= clock.ticks_per_second:\n set_caption()\n game_ticks = 0",
"def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))",
"def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))",
"def _start_clock(self):\n self._start = time.time()",
"def do_sync(self):\n # Synch up the station's clock if it's been more than clock_check\n # seconds since the last check:\n now_ts = time.time()\n if now_ts - self.last_synch_ts >= self.clock_check:\n self.last_synch_ts = now_ts\n try:\n console_time = self.engine.console.getTime()\n if console_time is None: return\n # getTime can take a long time to run, so we use the current\n # system time\n diff = console_time - time.time()\n syslog.syslog(syslog.LOG_INFO, \n \"engine: Clock error is %.2f seconds (positive is fast)\" % diff)\n if abs(diff) > self.max_drift:\n try:\n self.engine.console.setTime()\n except NotImplementedError:\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Station does not support setting the time\")\n except NotImplementedError:\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Station does not support reading the time\")",
"def tick(self):\n if Clock.__repr__(self) == 'Clock(23, 59, 59)':\n Clock.tick(self)\n Calendar.advance(self)\n else:\n Clock.tick(self)",
"def timer_update(self):\n if self.mineboard.gamestate is not None:\n return\n time_so_far = round(time.time()-self.start_time)\n if time_so_far == 1:\n self.now.set(f\"Time so far: {time_so_far} second\")\n else:\n self.now.set(f\"Time so far: {time_so_far} seconds\")\n self.after(1000, self.timer_update) # calls this function every second",
"def tick(self):\r\n if self.display_seconds:\r\n new_time = time.strftime('%H:%M:%S')\r\n else:\r\n new_time = time.strftime('%I:%M %p').lstrip('0')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.display_time = self.time\r\n self.config(text=self.display_time)\r\n self.after(200, self.tick)",
"def tick(self):\n if self.display_seconds:\n new_time = time.strftime('%I:%M:%S %p')\n else:\n new_time = time.strftime('%I:%M:%S %p').lstrip('0')\n if new_time != self.time:\n self.time = new_time\n self.display_time = self.time\n self.config(text=self.display_time)\n self.after(200, self.tick)",
"def _sync_clock(self, date_time_param, prompts, timeout, delay=1, time_format=\"%d %b %Y %H:%M:%S\"):\n prompt = self._wakeup(timeout=timeout, delay=delay)\n\n # lets clear out any past data so it doesnt confuse the command\n self._linebuf = ''\n self._promptbuf = ''\n\n log.debug(\"Set time format(%s) '%s''\", time_format, date_time_param)\n str_val = get_timestamp_delayed(time_format)\n log.debug(\"Set time value == '%s'\", str_val)\n self._set_params({date_time_param: str_val}, True)\n\n return True",
"def clock(self):\n\t\tt0 = time.time_ns()\n\t\ttry:\n\t\t\tyield None\n\t\tfinally:\n\t\t\tself._runtime.value += time.time_ns() - t0",
"def clock_callback(data):\n global current_second\n current_second = data.clock.secs",
"def tick(self):\r\n new_time = time.strftime('%H:%M:%S')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.config(text=self.time)\r\n self.after(200, self.tick)",
"def update(self, time):\n raise NotImplementedError",
"def update(self, time):\n raise NotImplementedError",
"def update(self):\n current_time = pygame.time.get_ticks()\n delta_time = (current_time - self.last_time)\n self.do_physics(delta_time)\n self.last_time = current_time",
"def update_time(self, update_time):\n\n self._update_time = update_time",
"def update(self, dt):\n self.gamestatemanager.peek().update(dt, self.ticks_per_second)",
"def clock(self):\r\n return self.__clock",
"def _update_time_cursor(self):\n for line in self.timeLines:\n line.setValue(self.playbackTime)",
"def tick():\n\n global time1\n # get the current local time from the PC\n time2 = time.strftime(\"%H:%M:%S\")\n # if time string has changed, update it\n if time2 != time1:\n time1 = time2\n timeLabel.config(text=time2)\n # calls itself every 200 milliseconds\n # to update the time display as needed\n # could use >200 ms, but display gets jerky\n timeLabel.after(200, tick)",
"def update(self, total_time):\n if self._start_time is None:\n self._start_time = total_time\n self.time_left = GameTimer.TOTAL_GAME_TIME\n else:\n self.time_left = GameTimer.TOTAL_GAME_TIME - \\\n (total_time - self._start_time) * GameTimer.TOTAL_TIME_FACTOR"
] | [
"0.7382141",
"0.7280795",
"0.715636",
"0.70415646",
"0.6851223",
"0.6786854",
"0.67185956",
"0.6634915",
"0.65945804",
"0.65783584",
"0.65637106",
"0.65293413",
"0.6465509",
"0.6462791",
"0.64522076",
"0.6451686",
"0.6383177",
"0.6361214",
"0.6351433",
"0.6348467",
"0.633714",
"0.6334833",
"0.6334833",
"0.63165516",
"0.62983197",
"0.6263299",
"0.6177006",
"0.6161489",
"0.6156485",
"0.6154146"
] | 0.78453046 | 0 |
Whether the clock is running in realtime mode. | def realtime(self):
return self._time is None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_real_time(self):\n return time.time() - self.timestamp < self._DEADLINE_SEC",
"def realtime():\n return timemodule.time()",
"def is_on(self):\n return self.device.override_time != 0",
"def test_clock_realtime():\n clock = Clock()\n assert clock.realtime\n old_time = clock.time\n time.sleep(0.1)\n assert clock.time - old_time >= 0.1\n assert clock.time - old_time < 0.2\n clock.update()\n assert clock.time - old_time >= 0.1\n assert clock.time - old_time < 0.2\n time.sleep(0.1)\n assert clock.time - old_time >= 0.2\n assert clock.time - old_time < 0.3\n clock.update(time=0.0)\n assert clock.time - old_time >= 0.2\n assert clock.time - old_time < 0.3\n clock.reset(time=0)\n assert not clock.realtime\n assert clock.time == 0.0",
"def realtime(self):\r\n return resource.RealTime(self)",
"def is_on(self):\n return self.sec_start is not None and self.sec_stop is None",
"def time_enabled(self):\n return self._time_enabled",
"def tick(self):\n return True",
"def is_timer_on(self):\n return self.status == 'ON'",
"def is_time_synchronization_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSynchronizationEnabled', self.handle))",
"def is_measuring(self):\n # 5.4.4 of the datasheet indicates the relevant bit\n return bool(self.read_register(STATUS_REGISTER) & 0b1000)",
"def must_run(self):\r\n self.current_time = datetime.now()\r\n return all([self._minute(), self._hour(), self._day_of_month(), self._month(), self._day_of_week()])",
"def realtime(self):",
"def enable_realtime(self):\n self.send_command(cmd=DEFS.CMD_REG_EVENT,\n data=bytearray([0xff, 0xff, 0x00, 0x00]))\n self.recv_reply()",
"def test_clock_external():\n clock = Clock(time=0.0)\n assert not clock.realtime\n assert clock.time == 0.0\n clock.update()\n assert clock.time == 0.0\n clock.update(time=0.1)\n assert clock.time == 0.1\n clock.update()\n assert clock.time == 0.1\n clock.update(time=0.0)\n assert clock.time == 0.0\n clock.reset()\n assert clock.realtime\n assert clock.time > 0",
"def _nightmode_active(self):\n nightmode = self.config[\"main\"][\"nighttime\"].get(\"enabled\")\n\n start_dt = self.config[\"main\"][\"nighttime\"][\"start_dt\"]\n end_dt = self.config[\"main\"][\"nighttime\"][\"end_dt\"]\n\n is_nighttime = start_dt <= datetime.now() <= end_dt\n return nightmode and is_nighttime",
"def realtime_tick(self):\n return self._read(MX_REALTIME_TICK)",
"def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))",
"def _is_running(self):\n # Public interface is given by get_status instead.\n self._update()\n return True if self.running_mode else False",
"def display_enabled(self):\n return self._display_mode == _LCD_DISPLAYON",
"def is_on(self) -> bool:\n return self._current_speed != SPEED_OFF",
"def everytime(self):\n return True",
"def always_running(self):\n return self._always_running",
"def is_time(self) -> bool:\n return self.times > 1",
"def _IsTimeReplot( self ):\n return True",
"def no_realtime_charge(self):\n return self._no_realtime_charge",
"def isScheduleRunning(self):\n if DPxIsDinSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running",
"def timestamping_enabled(self) -> bool:\n return self._forward_timestamp",
"def is_disp(self) -> bool:\n return self.disp_power > 0",
"def is_volatile(self):\n return True"
] | [
"0.7016662",
"0.66893965",
"0.66435194",
"0.6613927",
"0.6612546",
"0.65303993",
"0.65255374",
"0.6505397",
"0.65028715",
"0.6433543",
"0.6398667",
"0.63965267",
"0.63770014",
"0.6296478",
"0.62463707",
"0.6225969",
"0.6223736",
"0.6192421",
"0.6138805",
"0.6115815",
"0.611263",
"0.6108123",
"0.6107305",
"0.60922617",
"0.60738444",
"0.6064509",
"0.60513693",
"0.60407263",
"0.60361",
"0.6020379"
] | 0.7951804 | 0 |
Clock time when timeout will occur. | def timeout_time(self):
if self.start_time is None:
return None
return self.start_time + self.timeout | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_time(self, timeout=None):\n self._event.wait(timeout)\n return self._time",
"def get_timeout(self) -> int:",
"def get_time2(self, timeout=None):\n self._event.wait(timeout)\n return self._time2",
"def clock(self):\r\n return self.__clock",
"def _clock_time(self):\n return self._shifted_time % (24*3600)",
"def time(self) -> int:\n pass",
"def _make_time_pass(self, seconds, timeout, time_mock):\n time_mock.return_value = TIMEOUT_EPOCH\n timeout.start_connect()\n time_mock.return_value = TIMEOUT_EPOCH + seconds\n return timeout",
"def get_timeout(self):\n if self._timeout_time < 0:\n return TIMEOUT_NEVER\n elif self._timeout_time < TIMEOUT_ABSOLUTE_CUTOFF:\n return self._timeout_time\n else:\n timeout = self._timeout_time - time.time()\n if timeout < 0: timeout = 0.0 #expire immediately\n return timeout",
"def clock(self):\n return self._clock",
"def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time",
"def clock( current_time ):\n global D\n number_of_seconds_since_start = int(current_time - D.start_time)\n if D.last_time_printed < number_of_seconds_since_start:\n print \"[Brains] [State:\", D.STATE, \"] time is\", \\\n number_of_seconds_since_start, \"seconds since starting...\"\n D.last_time_printed = number_of_seconds_since_start",
"def _stop_clock(self):\n self._elapsed_time = time.time() - self._start",
"def time_thread(self):\n while self.time > 0:\n t.sleep(1)\n self.time -= 1\n self.end_round(\"Time is up\")",
"def time(self):\n return self._clock() - self._starttime",
"def get_timeout(self):\n return self.timeout",
"def checkTimeout(self):\n if TIMEOUT <= (datetime.now() - self.clockCheckStop).total_seconds():\n print('Didn\\'t received messages for 1 minute - Program ends')\n exit(0)",
"def get_clock(self):\n return self.clock",
"def LingerTime(self) -> int:",
"async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()",
"def _timeout(delay):\n loop = asyncio.get_running_loop()\n return _Timeout(loop.time() + delay if delay is not None else None)",
"def wait_time(self, current_time):\n return current_time - self.timestamp",
"def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)",
"def get_lock_time():\n pass",
"def test_execute_clock_sync(self):\n self.assert_enter_command_mode()\n\n self.assert_execute_resource(ProtocolEvent.CLOCK_SYNC)\n\n # get the time from the driver\n check_new_params = self.instrument_agent_client.get_resource([Parameter.CLOCK])\n # convert driver's time from formatted date/time string to seconds integer\n instrument_time = time.mktime(time.strptime(check_new_params.get(Parameter.CLOCK).lower(), \"%Y/%m/%d %H:%M:%S\"))\n\n # need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes\n # get time from local machine\n lt = time.strftime(\"%d %b %Y %H:%M:%S\", time.gmtime(time.mktime(time.localtime())))\n # convert local time from formatted date/time string to seconds integer to drop DST\n local_time = time.mktime(time.strptime(lt, \"%d %b %Y %H:%M:%S\"))\n\n # Now verify that the time matches to within 5 seconds\n self.assertLessEqual(abs(instrument_time - local_time), 5)",
"def actual_time():\n return _time.time()",
"def pytest_timeout_set_timer(item, settings):",
"def __updateElapsedTime(self):\n if self._keyCodeTime != 0.0 and \\\n (globalClock.getFrameTime() - self._keyCodeTime) >= self._timeout:\n self.notify.debug(\"Key code timed out. Resetting...\")\n self.reset()\n messenger.send(KeyCodes.CLEAR_CODE_EVENT)\n self._keyCodeTime = globalClock.getFrameTime()",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout"
] | [
"0.72348344",
"0.68059045",
"0.6645698",
"0.6645481",
"0.6644638",
"0.6630814",
"0.65420157",
"0.6535578",
"0.6516644",
"0.6508539",
"0.650651",
"0.64768165",
"0.64610153",
"0.6453034",
"0.6443049",
"0.6419919",
"0.64109814",
"0.6393732",
"0.6378195",
"0.635275",
"0.6332481",
"0.6331569",
"0.6328499",
"0.6313689",
"0.63009006",
"0.6282332",
"0.6259934",
"0.62458366",
"0.62458366",
"0.62458366"
] | 0.73910815 | 0 |
Amount of time remaining before timeout. | def remaining(self):
if not self.enabled:
return None
duration = self.timeout - self.elapsed
if self.timed_out: # check timed_out after duration for real-time correctness
return 0
return duration | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remaining(self):\n return self._timeout - (time.time() - self._start_time)",
"def Remaining(self):\n if self._timeout is None:\n return None\n\n # Get start time on first calculation\n if self._start_time is None:\n self._start_time = self._time_fn()\n\n # Calculate remaining time\n remaining_timeout = self._start_time + self._timeout - self._time_fn()\n\n if not self._allow_negative:\n # Ensure timeout is always >= 0\n return max(0.0, remaining_timeout)\n\n return remaining_timeout",
"def seconds_remaining(self):\n pass",
"def time_remaining(self):\n with self._lock:\n deadline = self._expiration_manager.deadline()\n return max(0.0, deadline - time.time())",
"def time_remaining(self) -> float:\n\n return self.event.time - time.time()",
"def time_left(self):\n return self.timeout - self.current_milli_time()",
"def get_timeout(self) -> int:",
"def remaining(self):\n return self.value - time.time()",
"def time_remaining(progress, elapsed):\n total = elapsed / progress\n return total - elapsed",
"def timeout_seconds(self):\n return self._timeout_seconds",
"def timeout(self):\n value = self._body.get('timeout', '0s')\n value = value[:-1]\n return float(value)",
"def secondsLeft(self)->int:\n x = self.expirePeriodInSeconds - self.secondsPassed\n return x if x >=0 else 0",
"def remaining_ms():",
"def secondsLeft(self)->int:\n return 0 if self.secondsPassed >= self.secondsTotal else self.secondsTotal - self.secondsPassed",
"def timeout(self) -> int:\n\n return self._timeout",
"def timesLeft(self)->int:\n return self.maxTimes - self.timesUsed",
"def next_timeout(self):\n if self.phase == Phase.Propose:\n timeout = max(self._t_ack_phase_start - self.actual_time(), 0)\n elif self.phase == Phase.Acknowledge:\n timeout = max(self._t_vote_phase_start - self.actual_time(), 0)\n else:\n timeout = max(self._t_round_end - self.actual_time(), 0)\n\n if MAX_TIMEOUT:\n timeout = min(timeout, MAX_TIMEOUT)\n return timeout",
"def timeout_in_minutes(self) -> int:\n return pulumi.get(self, \"timeout_in_minutes\")",
"def calculate_timeout(self):\n return self.total_estimated_words() / self.minimum_wpm * 60",
"def secondsLeft(self)->int:\n return 0 if self.secondsPassed >= self.duration else self.duration - self.secondsLeft",
"def get_seconds(self):\n return self.seconds_remaining",
"def duration(self):\n self.wait()\n return self._duration",
"def time_left(self):\r\n return 10 - (int(time.time()) - self.start_time)",
"def secondsPassed(self)->int:\n return self._lic.params['usedDurationInSeconds'].value",
"def duration(self) -> int:\n return 0",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def gettimeout(self):\r\n return self._timeout"
] | [
"0.8075747",
"0.79891056",
"0.7745984",
"0.7732044",
"0.75319827",
"0.7490149",
"0.740348",
"0.73530316",
"0.7337123",
"0.7259376",
"0.719823",
"0.7169936",
"0.7129035",
"0.7103739",
"0.7059992",
"0.7045254",
"0.70066863",
"0.700246",
"0.699799",
"0.69962406",
"0.69094265",
"0.68999666",
"0.6886002",
"0.6873293",
"0.68701947",
"0.68426573",
"0.68426573",
"0.68426573",
"0.68426573",
"0.6841469"
] | 0.8037652 | 1 |
filter Tasks by my List obj | def get_tasks(self, obj):
qs = Task.objects.filter(list=obj)
qs_serializer = TaskModelSerializer(qs, many=True).data
return qs_serializer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_tasks(tasks, task_list):\n qs = [tasks.filter(name=n) for n in task_list]\n return list(map(lambda o: o[0] if o else None, qs))",
"def get_tasks(**filters):\n return db.task.find(filters) if filters else db.task.find()",
"def get_tasks(taskid_list, module):\n tasks = module.client.api.get_tasks_by_status('Pending')\n task_list = list()\n for task in tasks:\n if task['workOrderId'] in taskid_list:\n task_list.append(task)\n return task_list",
"def _task_filter(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")",
"def filter_tasks(self, filters=None, include_root_task=False):\n result = []\n for task in self.tasks.values():\n if not include_root_task and task['key'] == self.ROOT_TASK_KEY:\n continue\n passes_filters = True\n for _filter in filters:\n if not _filter(task):\n passes_filters = False\n break\n if passes_filters:\n result.append(task)\n return result",
"def test_list_tasks_filter_name(self):\n rv = TEST_CLIENT.post(\"/tasks/list-tasks\", json={\"filters\": {\"name\": \"task\"}})\n result = rv.json()\n\n expected = util.MOCK_TASK_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def get_all():\n return list(tasks.find({}))",
"def filter_changes(tasks, entity, action):\n\n for task in tasks:\n _entity = get_entity(task[\"id\"])\n if _entity == entity and task.get(\"task\") == action:\n yield task",
"def get_by_status(status):\n return list(tasks.find({'status': status}))",
"def get_queryset(self):\n return models.Task.objects.filter(\n user__exact=self.request.user,\n date__exact=self.get_date()\n )",
"def findTaskInList(task, taskList):\n found = False\n for t in taskList:\n if t.tBegin == task.tBegin and t.tEnd == task.tEnd and t.batchSize == task.batchSize \\\n and t.order == task.order and t.machine == task.machine and t.processingUnit == task.processingUnit \\\n and t.operation == task.operation:\n found = True\n return found\n return found",
"async def list_tasks(fields: Set[str] = None):\n tasks = celery_app.describe_tasks()\n tasks = [TaskOut(**task).dict(include=fields) for task in tasks]\n return tasks",
"def tasks_in_project(request, project):\n return project.task_set.filter(user=request.user).exclude(folder='trash')",
"def _filter_tasks(self, task_selection):\n selected_task = []\n\n filter_list = self._process_filter(task_selection)\n for filter_ in filter_list:\n # by task name\n if filter_ in self.tasks:\n selected_task.append(filter_)\n continue\n\n # by target\n if filter_ in self.targets:\n selected_task.append(self.targets[filter_])\n continue\n\n # if can not find name check if it is a sub-task of a delayed\n basename = filter_.split(':', 1)[0]\n if basename in self.tasks:\n loader = self.tasks[basename].loader\n if not loader:\n raise InvalidCommand(not_found=filter_)\n loader.basename = basename\n self.tasks[filter_] = Task(filter_, None, loader=loader)\n selected_task.append(filter_)\n continue\n\n # check if target matches any regex\n delayed_matched = [] # list of Task\n for task in list(self.tasks.values()):\n if not task.loader:\n continue\n if task.name.startswith('_regex_target'):\n continue\n if task.loader.target_regex:\n if re.match(task.loader.target_regex, filter_):\n delayed_matched.append(task)\n elif self.auto_delayed_regex:\n delayed_matched.append(task)\n delayed_matched_names = [t.name for t in delayed_matched]\n regex_group = RegexGroup(filter_, set(delayed_matched_names))\n\n # create extra tasks to load delayed tasks matched by regex\n for task in delayed_matched:\n loader = task.loader\n loader.basename = task.name\n name = '{}_{}:{}'.format('_regex_target', filter_, task.name)\n loader.regex_groups[name] = regex_group\n self.tasks[name] = Task(name, None,\n loader=loader,\n file_dep=[filter_])\n selected_task.append(name)\n\n if not delayed_matched:\n # not found\n raise InvalidCommand(not_found=filter_)\n return selected_task",
"def filterby(self, filterval, valueoffilter):\n if valueoffilter == '':\n fatal([\n 'Invalid flag \"value\"',\n 'value is required to flag \"filter\"'\n ])\n\n ok = self.validate_filterval(filterval)\n\n if ok is False:\n fatal([\n 'Invalid flag \"filter\"',\n 'The available filter values are:',\n 'description (name)|fulldescription (description)|completed',\n 'Use instead:',\n '$ tasks-app show --filter=description|fulldescription|completed --value={}'.format(valueoffilter)\n ])\n\n if filterval == 'completed':\n if valueoffilter != 'True' and valueoffilter != 'False':\n fatal([\n 'Invalid flag \"value\"',\n 'the available values for completed filter flag are:',\n 'True|False',\n 'Use instead:',\n '$ tasks-app show --filter={filterval} --value=True|False',\n ])\n\n if filterval == 'completed':\n if valueoffilter == 'True':\n valueoffilter = 1\n elif valueoffilter == 'False':\n valueoffilter = 0\n\n if not filterval == 'completed':\n sql = 'SELECT * FROM Tasks WHERE {} LIKE \"{}%\"'.format(filterval, valueoffilter)\n else:\n sql = 'SELECT * FROM Tasks WHERE {} LIKE \"{}\"'.format(filterval, valueoffilter)\n\n conn = sqlite3.connect(DATABASE['file'])\n cur = conn.cursor()\n cur.execute(sql)\n\n if not len(list(cur)) == 0:\n print('Tasks found')\n\n cur.execute(sql)\n\n for description, fulldescription, completed in cur:\n if completed == 0:\n completed = 'Incompleted'\n else:\n completed = 'Completed'\n\n print(' > {} - {} ({})'.format(description, fulldescription, completed))\n\n cur.execute(sql)\n\n if len(list(cur)) == 0:\n print('No tasks found with search {}={}'.format(filterval, valueoffilter))\n\n conn.close()",
"def filter_visible_tasks_for_user(self, user):\n if user is not None and user.is_authenticated():\n user_group_ids = user.get_profile().get_group_ids()\n return self.filter(\n Q(\n task__objpermissions__group_id__in=user_group_ids,\n task__objpermissions__permission_type=VIEW,\n )\n | Q(task__author_id=user.id)\n | Q(task__hidden=False)\n ).distinct()\n else:\n return self.filter(task__hidden=False)",
"def _get_tasks_by_category(self, tasks, category):\n matching_tasks = []\n for task in tasks:\n if category in tasks[task][\"categories\"]:\n matching_tasks.append(task)\n return sorted(matching_tasks)",
"def get_tasks(user, title=None, category=None, deadline=None, priority=None,\n status=None, id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.tasks.filter(**filters)\n\n if deadline:\n selection = selection.filter(deadline__lte=deadline)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no tasks with selected filters.')\n return selection",
"def _execute(self):\n return self.warrior.filter_tasks(self.filter_obj)",
"def task_get_all(context, filters=None, marker=None, limit=None,\n sort_key='created_at', sort_dir='desc', admin_as_user=False):\n filters = filters or {}\n\n session = get_session()\n query = session.query(models.Task)\n\n if not (context.is_admin or admin_as_user) and context.owner is not None:\n query = query.filter(models.Task.owner == context.owner)\n\n _task_soft_delete(context, session=session)\n\n showing_deleted = False\n\n if 'deleted' in filters:\n deleted_filter = filters.pop('deleted')\n query = query.filter_by(deleted=deleted_filter)\n showing_deleted = deleted_filter\n\n for (k, v) in filters.items():\n if v is not None:\n key = k\n if hasattr(models.Task, key):\n query = query.filter(getattr(models.Task, key) == v)\n\n marker_task = None\n if marker is not None:\n marker_task = _task_get(context, marker,\n force_show_deleted=showing_deleted)\n\n sort_keys = ['created_at', 'id']\n if sort_key not in sort_keys:\n sort_keys.insert(0, sort_key)\n\n query = _paginate_query(query, models.Task, limit,\n sort_keys,\n marker=marker_task,\n sort_dir=sort_dir)\n\n task_refs = query.all()\n\n tasks = []\n for task_ref in task_refs:\n tasks.append(_task_format(task_ref, task_info_ref=None))\n\n return tasks",
"def get_task_list(self):\n\n collection = self._get_collection()\n\n now = self._get_now()\n\n tasks = collection.find({\"when\": {\"$lt\": now}})\n\n return _MongoDBCursorWrapper(tasks)",
"def view_tasks():\n task_list = []\n incomplete_task_list = Tasks.objects.filter(is_complete=False)\n for task in incomplete_task_list:\n tasks = [] #create data structure\n tasks.append(task.id) #add ID \n tasks.append(task.task_text) #add text\n task_list.append(tasks) #append data structure\n\n return task_list",
"def incomplete_tasks(user):\n return Task.incomplete.filter(assigned=user)",
"def tasks(self, tags=None, summary=True, tags_intersect=None):\n return list(self.all_tasks(summary=summary, tags=tags, tags_intersect=tags_intersect))",
"def get_queryset(self):\n user = self.request.user\n return Task.objects.filter(author=user)",
"def add_filtered_task(seq, f_name):\n filter_list.append(f_name)\n # only tasks specified by name can contain parameters\n if f_name in self.tasks:\n # parse task_selection\n the_task = self.tasks[f_name]\n\n # Initialize options for the task\n seq = the_task.init_options(seq)\n\n # if task takes positional parameters set all as pos_arg_val\n if the_task.pos_arg is not None:\n # cehck value is not set yet\n # it could be set directly with api.run_tasks()\n # -> NamespaceTaskLoader.load_tasks()\n if the_task.pos_arg_val is None:\n the_task.pos_arg_val = seq\n seq = []\n return seq",
"def list(self, _request):\n serializer = TaskSerializer(instance=TASKS.values(), many=True)\n return response.Response(serializer.data)",
"def _process_filter(self, task_selection):\n filter_list = []\n def add_filtered_task(seq, f_name):\n \"\"\"add task to list `filter_list` and set task.options from params\n @return list - str: of elements not yet\n \"\"\"\n filter_list.append(f_name)\n # only tasks specified by name can contain parameters\n if f_name in self.tasks:\n # parse task_selection\n the_task = self.tasks[f_name]\n\n # Initialize options for the task\n seq = the_task.init_options(seq)\n\n # if task takes positional parameters set all as pos_arg_val\n if the_task.pos_arg is not None:\n # cehck value is not set yet\n # it could be set directly with api.run_tasks()\n # -> NamespaceTaskLoader.load_tasks()\n if the_task.pos_arg_val is None:\n the_task.pos_arg_val = seq\n seq = []\n return seq\n\n # process...\n seq = task_selection[:]\n # process cmd_opts until nothing left\n while seq:\n f_name = seq.pop(0) # always start with a task/target name\n # select tasks by task-name pattern\n if '*' in f_name:\n for task_name in self._get_wild_tasks(f_name):\n add_filtered_task((), task_name)\n else:\n seq = add_filtered_task(seq, f_name)\n return filter_list",
"def tasks_with_tag(request, tag):\n return tag.task_set.filter(user=request.user).exclude(folder='trash')",
"def taskList(request):\n try:\n # if request.user.username == \"root\":\n # pass\n\n title = request.data.get(\"title\", None)\n desc = request.data.get(\"desc\", None)\n stat = request.data.get(\"status\", None)\n taskDueDate = request.data.get(\"taskDueDate\", None)\n sortby = request.data.get(\"sortby\", None)\n qs = Task.objects.filter(userID=request.user)\n if sortby:\n qs = qs.order_by(sortby)\n\n if title:\n qs = qs.filter(Q(title__exact=title))\n\n if desc:\n qs = qs.filter(Q(desc__exact=desc))\n\n if stat:\n qs = qs.filter(Q(status__exact=stat))\n\n if taskDueDate:\n qs = qs.filter(Q(taskDueDate__exact=taskDueDate))\n\n serializer = TaskSerializer(qs, many=True)\n if len(serializer.data) != 0:\n for i in range(len(serializer.data)):\n serializer.data[i]['userID'] = request.user.username\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n except Exception as e:\n return Response(e.args[0], status.HTTP_400_BAD_REQUEST)"
] | [
"0.7971365",
"0.67491955",
"0.6684011",
"0.6569972",
"0.6455445",
"0.63282394",
"0.6286252",
"0.6277132",
"0.6255373",
"0.6024815",
"0.5982457",
"0.59775424",
"0.5921261",
"0.5860859",
"0.58547467",
"0.58400625",
"0.5826508",
"0.58254695",
"0.58115506",
"0.57952225",
"0.57818574",
"0.5759827",
"0.5682982",
"0.5664615",
"0.5659295",
"0.56408197",
"0.56280464",
"0.56267375",
"0.5619145",
"0.56187403"
] | 0.6802642 | 1 |
Use weights to classify data points and check the accuracy | def check_accuracy (data, labels, weights):
count = 0
gs = []
rs = []
for x in range(0,len(data)):
results = dot(data[x], weights)
guess = unit_step(results)
gs.append(guess) # append prediction
rs.append(labels[x]) # append result
if guess - labels[x] == 0:
count += 1
percentage = ((float(count) / len(data)) * 100)
return percentage | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_weighted_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets, weights=self.targets >= 0)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)",
"def test_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)",
"def eval_performance(weights, test_y, test_x):\n y_predicted = predict_labels(weights, test_x)\n accuracy = len(y_predicted[y_predicted == test_y]) / len(y_predicted)\n return accuracy",
"def test_accuracy(y, tx, w):\n labels = predict_regression_labels(w, tx)\n \n return (labels==y).sum()/len(y)",
"def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy",
"def compute_metrics(self, target, data, weight):\n pred = self.predict(data, weight)\n assert len(pred) == len(target)\n # Calculate the mis-classification rate:\n N = len(pred)\n pred = np.reshape(pred, (N,))\n target = np.reshape(target, (N,))\n nb_misclass = np.count_nonzero(target - pred)\n return nb_misclass / N",
"def test_accelerated_weighted_category_accuracy(self):\n layer = tl.Accelerate(tl.WeightedCategoryAccuracy())\n weights = np.array([1., 1., 1., 0.])\n targets = np.array([0, 1, 2, 3])\n\n model_outputs = np.array([[.2, .1, .7, 0.],\n [.2, .1, .7, 0.],\n [.2, .1, .7, 0.],\n [.2, .1, .7, 0.]])\n accuracy = layer([model_outputs, targets, weights])\n self.assertEqual(np.mean(accuracy), 1 / 3)",
"def classify(self, data):\n\n \"*** YOUR CODE HERE ***\"\n # should compute (validationData[i] - trainingData[j])^2\n result = np.zeros(data.shape[0])\n for i in range(data.shape[0]):\n distances = np.linalg.norm(self.trainingData - data[i], axis=1)\n nearest = np.argsort(distances)[:self.num_neighbors]\n nearest_tags = [self.trainingLabels[j] for j in nearest]\n result[i] = max(nearest_tags, key=lambda x: nearest_tags.count(x))\n return result",
"def trainWeakClassifier(trainingSamples, weights, feature):\n #compute feature values\n featureValues = []\n positiveOrNegative = []\n for sample in trainingSamples:\n featureValues.append(feature.computeScore(sample[0], 0, 0))\n positiveOrNegative.append(sample[1])\n \n #zip with weights and sort by feature value\n featureValues = zip(featureValues, weights, positiveOrNegative)\n featureValues = sorted(featureValues, key=lambda tup: tup[0])\n \n #sum all weights of the positive and negative samples\n negativeWeightsTotal = 0\n positiveWeightsTotal = 0\n for value in featureValues:\n if value[2] == 1:\n positiveWeightsTotal += value[1]\n else:\n negativeWeightsTotal += value[1]\n \n #find the feature with the smallest error\n bestFeatureIndex = 0\n bestFeatureError = 1e10\n negativeWeightsSoFar = 0\n positiveWeightsSoFar = 0\n positiveOnTheLeft = 0\n positivesTotal = 0\n for i in range(0, len(featureValues)):\n error1 = positiveWeightsSoFar-negativeWeightsSoFar+negativeWeightsTotal\n error2 = negativeWeightsSoFar-positiveWeightsSoFar+positiveWeightsTotal\n error = min([error1, error2])\n \n if bestFeatureError > error:\n bestFeatureError = error\n bestFeatureIndex = i\n positiveOnTheLeft = positivesTotal\n \n if featureValues[i][2] == 1:\n positiveWeightsSoFar += featureValues[i][1]\n positivesTotal += 1\n else:\n negativeWeightsSoFar += featureValues[i][1]\n \n #count how much samples are there on the right\n positiveOnTheRight = positivesTotal - positiveOnTheLeft\n \n #determine the polarity and threshold\n polarity = -1\n threshold = featureValues[bestFeatureIndex][0]\n if positiveOnTheLeft > positiveOnTheRight:\n polarity = 1\n else:\n polarity = -1\n \n #build and return a weak classifier\n return WeakClassifier(feature, threshold, polarity)",
"def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")",
"def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)",
"def accuracy(self):",
"def calc_class_weights(self):\n y = self.train_eval_data[\"sentiment\"]\n self.class_weights = {}\n classes = np.unique(y)\n for cls in classes:\n self.class_weights[cls] = len(y) / (len(classes) * (y == cls).sum())",
"def classify(self, data):\n return np.argmax(np.dot(data, self.weights), axis=1)",
"def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r",
"def knn(train_data, train_labels, test_data, test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc",
"def calculate_weights(y_train: np.ndarray) -> np.ndarray:\n\n weight_class = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)\n return weight_class",
"def test_score(self, n_neighbors, weights):\n _check_knn_score(\n KNeighborsLabelRanker(n_neighbors, weights), self.X, self.Y)",
"def check_model(X, w, y, thr = 0.9):\n assert np.mean((y > 0) == (X @ w > 0)) > thr, \"model accuracy\"",
"def classify(XX, w):\n s = [sigmoid(XX[i], w) for i in range(XX.shape[0])]\n p = [int(np.round(i)) for i in s]\n return p",
"def learn(self, Xtrain, ytrain):",
"def classification_score(self, x, y):\t\n\t\tpass",
"def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred",
"def _calculateWeights(self, y_train):\n class_counts = []\n # loop through each class\n for i in range(self.num_classes):\n batch_count = 0\n # Sum up each class count in each batch image\n for b in range(y_train.shape[0]):\n batch_count += np.sum(y_train[b][:,:,i])\n class_counts.append(batch_count)\n\n # create Counts\n y = []\n for i in range(self.num_classes):\n # Adjusts for absense\n if class_counts[i] == 0:\n class_counts[i] = 1\n y.extend([i]*int(class_counts[i]))\n # Calcualte weights\n weights = compute_class_weight(\"balanced\", list(range(self.num_classes)), y)\n\n return weights",
"def classify_with_knn(train_data, train_labels, test_data, test_labels, k=3, metric='minkowski'):\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.metrics import f1_score, roc_auc_score\n\n neigh = KNeighborsClassifier(n_neighbors=k, metric=metric)\n neigh.fit(train_data, train_labels)\n accuracy = neigh.score(test_data, test_labels)\n pred_labels = neigh.predict(test_data)\n F1 = f1_score(test_labels, pred_labels)\n AUC = roc_auc_score(test_labels, pred_labels)\n\n return accuracy, F1, AUC",
"def binary_classifier(train_data, dim, wi):\n n = np.zeros(dim)\n p = np.zeros(dim)\n p_w = 0\n n_w = 0\n for i in range(len(train_data)):\n if train_data[i][dim] == 1:\n # Positive\n p_w += float(wi[i])\n p += (float(wi[i]) * train_data[i][0:dim])\n elif train_data[i][dim] == -1:\n # Negative\n n_w += float(wi[i])\n n += (float(wi[i]) * train_data[i][0:dim])\n\n p *= float(1) / float(p_w)\n n *= float(1) / float(n_w)\n w_vec = p - n\n t_vec = 0.5 * np.dot(np.transpose(p + n), (p - n))\n\n error = 0\n\n for i in range(len(train_data)):\n point = train_data[i]\n # Predicted positive\n if np.dot(point[0:dim], w_vec) > t_vec:\n if point[dim] == -1:\n # It is a false positive\n error += wi[i]\n # Predicted negative\n else:\n if point[dim] == 1:\n # It is a false positive\n error += wi[i]\n\n return t_vec, w_vec, error",
"def accuracy(x, y, w):\n p = prob(x, w)\n\n # prediction is class with highest probability\n y_pred = tf.cast(tf.argmax(p, axis=1), tf.float32)\n y = tf.squeeze(y)\n\n # compute accuracy = TP / (TP + FP)\n accuracy_ = tf.reduce_mean(tf.cast(tf.equal(y, y_pred), tf.float32))\n return accuracy_",
"def _score(self, ModifiedWeights):\r\n \r\n UnflattenedWeights = self._UnflattenWeights(WeightsStrucure = self.WeightsStrucure, ModifiedWeights = ModifiedWeights)\r\n self.KerasModels.set_weights(UnflattenedWeights)\r\n test_on_batch = self.KerasModels.test_on_batch(X_train, y_train, sample_weight=None) # return ['loss', 'acc']\r\n return test_on_batch[1]",
"def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs",
"def __call__(self, y, pred, sample_weight=None):"
] | [
"0.70390934",
"0.68235767",
"0.67985964",
"0.6642697",
"0.65643245",
"0.6527223",
"0.6506123",
"0.64952445",
"0.6493317",
"0.64669216",
"0.6463251",
"0.64184594",
"0.64130056",
"0.64007723",
"0.63961643",
"0.6394636",
"0.63843155",
"0.63546777",
"0.6347009",
"0.6340083",
"0.6298999",
"0.62907934",
"0.6285346",
"0.6277306",
"0.62680525",
"0.62627417",
"0.6261445",
"0.62594104",
"0.625725",
"0.6232494"
] | 0.70010304 | 1 |
make a mask considering only the list_of_body_parts | def select_body_parts(mask, list_of_body_parts):
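    # Build one boolean mask by OR-ing together the per-index masks of every requested
    # body part. Assumes a module-level `body_parts` dict mapping part name -> label
    # indices, and that NumPy is imported as `np` elsewhere in this module.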
    new_mask = np.zeros(mask.shape).astype(bool)  # builtin bool: the np.bool alias is removed in newer NumPy
for body_part in list_of_body_parts:
idxs = body_parts[body_part]
for idx in idxs:
m_ = (mask == idx)
new_mask = np.bitwise_or(new_mask, m_)
return new_mask.astype(np.uint8) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_part_mask(densepose_map):\r\n # Group of body parts. Each group contains IDs of body labels in DensePose.\r\n # The 9 groups here are: background, torso, hands, feet, upper legs, lower legs,\r\n # upper arms, lower arms, head.\r\n part_groups = [[0], [1, 2], [3, 4], [5, 6], [7, 9, 8, 10], [11, 13, 12, 14],\r\n [15, 17, 16, 18], [19, 21, 20, 22], [23, 24]]\r\n n_parts = len(part_groups)\r\n\r\n densepose_map = densepose_map.numpy()\r\n need_reshape = len(densepose_map.shape) == 4\r\n if need_reshape:\r\n bo, t, h, w = densepose_map.shape\r\n densepose_map = np.reshape(densepose_map, (-1, h, w))\r\n b, h, w = densepose_map.shape\r\n part_map = (densepose_map / 2 + 0.5) * 24\r\n assert np.all(part_map >= 0) and np.all(part_map < 25)\r\n\r\n mask = np.zeros((b, n_parts, h, w)).astype(\"bool\")\r\n for i in range(n_parts):\r\n for j in part_groups[i]:\r\n # Account for numerical errors.\r\n mask[:, i] = np.logical_or(mask[:, i],\r\n np.logical_and((part_map > j - 0.1), (part_map < j + 0.1)))\r\n if need_reshape:\r\n mask = np.reshape(mask, (bo, t, -1, h, w))\r\n mask = dg.to_variable(mask.astype(\"float32\"))\r\n return mask",
"def _mask(self, value):\n\n if isinstance(value, Headers):\n return self._mask_headers(value)\n elif isinstance(value, dict):\n return self._mask_dict(value)\n elif isinstance(value, LIST_TYPES):\n return self._mask_list(value)\n return value",
"def generate_mask(self):\n\n polymer_length = len(self.sequence)\n protein_length = len(self.particle_order) - polymer_length\n\n if self.filter_specification == 'type':\n mask = np.in1d(self.particle_order, self.monomer_id)\n elif self.filter_specification == 'id':\n if self.molecule == 'polymer':\n offset = protein_length\n else:\n offset = 0\n mask = np.array([False] * (polymer_length + protein_length))\n absolute_id = [x+offset for x in self.monomer_id]\n mask[absolute_id] = True\n else:\n raise NotImplementedError(\"Filter is unknown. Use 'type' or 'id'!\")\n\n # if molecule == 'full', nothing needs to be done\n if self.molecule == 'polymer':\n mask[:protein_length] = [False] * protein_length\n elif self.molecule == 'protein':\n mask[protein_length:] = [False] * polymer_length\n\n return mask",
"def _mask_list(self, items):\n\n result_type = type(items)\n results = []\n for item in items:\n results.append(self._mask(item))\n\n return result_type(results)",
"def restricted_mask_sent(self, x, l, min_len=100000):\n if min_len <= 0:\n min_len = 1\n max_len = 0\n positions, inputs, targets, outputs, = [], [], [], []\n\n mask_len = round(l[np.argsort(l)[0].item()].item() * self.params.word_mass)\n len2 = [mask_len for i in range(l.size(0))]\n\n unmasked_tokens = [0 for i in range(l.min().item() - mask_len - 1)]\n segs = self.get_segments(mask_len, min_len)\n\n for i in range(l.size(0)):\n words = np.array(x[:l[i], i].tolist()) # [LEN(i)]\n shuf_segs = self.shuffle_segments(segs, unmasked_tokens)\n pos_i = self.unfold_segments(shuf_segs)\n output_i = words[pos_i].copy() # [1,2,5,6]\n target_i = words[pos_i - 1].copy() # []\n words[pos_i] = self.mask_word(words[pos_i])\n\n inputs.append(words)\n targets.append(target_i)\n outputs.append(output_i)\n positions.append(pos_i - 1)\n\n x1 = torch.LongTensor(max(l), l.size(0)).fill_(self.params.pad_index)\n x2 = torch.LongTensor(mask_len, l.size(0)).fill_(self.params.pad_index)\n y = torch.LongTensor(mask_len, l.size(0)).fill_(self.params.pad_index)\n pos = torch.LongTensor(mask_len, l.size(0)).fill_(self.params.pad_index)\n l1 = l.clone()\n l2 = torch.LongTensor(len2)\n for i in range(l.size(0)):\n x1[:l1[i], i].copy_(torch.LongTensor(inputs[i]))\n x2[:l2[i], i].copy_(torch.LongTensor(targets[i]))\n y[:l2[i], i].copy_(torch.LongTensor(outputs[i]))\n pos[:l2[i], i].copy_(torch.LongTensor(positions[i]))\n\n pred_mask = y != self.params.pad_index\n y = y.masked_select(pred_mask)\n return x1, l1, x2, l2, y, pred_mask, pos",
"def bart_token_mask_sent(self, x, l, min_len=100000):\n if min_len <= 0:\n min_len = 1\n max_len = 0\n positions, inputs, targets, outputs, = [], [], [], []\n\n # update to position\n # position = torch.distributions.poisson.Poisson(rate=3)\n # m = Poisson(torch.tensor([4]))\n # m.sample()\n\n mask_len = np.random.poisson(lam=3) % (round(len(x[:, 0]) * 0.3))\n if mask_len == 0:\n mask_len = 1\n len1 = [l[i] - mask_len + 1 for i in range(l.size(0))] # masked tokens to [mask]\n len2 = [l[i] - 1 for i in range(l.size(0))]\n\n unmasked_tokens = [0 for i in range(l.min().item() - mask_len - 1)]\n\n # replace with position distribution for length\n\n segs = self.get_segments(mask_len, min_len)\n\n for i in range(l.size(0)):\n words = np.array(x[:l[i], i].tolist()) # [LEN(i)]\n shuf_segs = self.shuffle_segments(segs, unmasked_tokens)\n pos_i = self.unfold_segments(shuf_segs)\n # output_i = words[pos_i].copy() #[1,2,5,6]\n # target_i = words[pos_i - 1].copy()\n input_i = np.concatenate([words[:pos_i[0]], words[pos_i[-1]:]])\n target_i = words[:-1].copy()\n output_i = words[1:].copy()\n\n # words[pos_i] = self.mask_word(words[pos_i]) #decide whether mask these spans\n input_i[pos_i[0]] = self.params.mask_index\n\n inputs.append(input_i)\n targets.append(target_i)\n outputs.append(output_i)\n positions.append(np.arange(len(target_i)))\n\n x1 = torch.LongTensor(max(len1), l.size(0)).fill_(self.params.pad_index)\n x2 = torch.LongTensor(max(len2), l.size(0)).fill_(self.params.pad_index)\n y = torch.LongTensor(max(len2), l.size(0)).fill_(self.params.pad_index)\n pos = torch.LongTensor(max(len2), l.size(0)).fill_(self.params.pad_index)\n l1 = torch.LongTensor(len1)\n l2 = torch.LongTensor(len2)\n for i in range(l.size(0)):\n x1[:l1[i], i].copy_(torch.LongTensor(inputs[i]))\n x2[:l2[i], i].copy_(torch.LongTensor(targets[i]))\n y[:l2[i], i].copy_(torch.LongTensor(outputs[i]))\n pos[:l2[i], i].copy_(torch.LongTensor(positions[i]))\n\n pred_mask = y != self.params.pad_index\n y = y.masked_select(pred_mask)\n return x1, l1, x2, l2, y, pred_mask, pos",
"def build_mask(mask, unused_features_positions):\n\tfinal_mask = mask.tolist()\n\n\tfor i in range(len(unused_features_positions)):\n\t\tif not unused_features_positions[i]:\n\t\t\tfinal_mask.insert(i, False)\n\n\treturn final_mask",
"def whole_mask2mask(whole_mask, bbox):\n if len(whole_mask) != len(bbox):\n raise ValueError(\n 'The length of whole_mask and bbox should be the same')\n mask = list()\n for whole_m, bb in zip(whole_mask, bbox):\n bb = np.round(bb).astype(np.int32)\n mask.append(whole_m[bb[0]:bb[2], bb[1]:bb[3]])\n return mask",
"def mask(self):",
"def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, ) + mask.shape).astype(np.float32)",
"def bbox2mask(self, shape, margin, bbox_shape, times):\r\n bboxs = []\r\n for i in range(times):\r\n bbox = self.random_bbox(shape, margin, bbox_shape)\r\n bboxs.append(bbox)\r\n height = shape\r\n width = shape\r\n mask = np.zeros((height, width), np.float32)\r\n for bbox in bboxs:\r\n h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2 + 1))\r\n w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)\r\n mask[(bbox[0] + h) : (bbox[0] + bbox[2] - h), (bbox[1] + w) : (bbox[1] + bbox[3] - w)] = 1.\r\n return mask.reshape((1, ) + mask.shape).astype(np.float32)",
"def _prep_mask(dataset, trial_split):\n split_to_mask = lambda x: (dataset.trial_info.split == x) if isinstance(x, str) else x\n if isinstance(trial_split, list):\n trial_mask = np.any([split_to_mask(split) for split in trial_split], axis=0)\n else:\n trial_mask = split_to_mask(trial_split)\n return trial_mask",
"def get_negative_post_bodies():\n bodies = [create_body()]#body with id\n param_list = copy(RC.PARAM_LIST)\n param_list.remove(\"id\")\n for param_name in param_list:\n bodies.append(create_body(excluded_params=[\"id\", param_name]))\n for _ in range(5):\n bodies.append(create_body(excluded_params=[\"id\"], is_bad_format=True))\n return bodies",
"def mask(self):\n return list(self._mask_generator())",
"def _build_mask(\n self,\n position: int,\n grid_dim: int,\n rot_list: List[int],\n shape: List[int],\n ovlp: List[int],\n device: torch.device = torch.device(\"cpu\"),\n dtype: torch.dtype = torch.float,\n ) -> torch.Tensor:\n assert len(shape) == 4\n\n # Mask right / bottom side\n if position == 0:\n mask = self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[0], [2, 3]\n )\n # Mask left & right or top & bottom sides\n elif position > 0 and position < grid_dim - 1:\n mask = self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[0], [2, 3]\n )\n mask = mask * self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[1], [2, 3]\n )\n # Mask left / top side\n else:\n mask = self._create_mask_part(shape, ovlp, device, dtype).rot90(\n rot_list[1], [2, 3]\n )\n return mask",
"def _tf_mask(self, feats: th.Tensor) -> List[th.Tensor]:\n proj = self.dfsmn(feats, None)[0]\n # N x S*F x T\n masks = self.masks(proj)\n # [N x F x T, ...]\n return th.chunk(masks, self.num_branchs, 1)",
"def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask",
"def _create_mask_part(\n self,\n shape: List[int],\n overlap: List[int],\n device: torch.device,\n dtype: torch.dtype = torch.float,\n ) -> torch.Tensor:\n assert len(shape) == 4\n zeros_size, lin_size = overlap[0:2]\n ones_size = shape[3] - (zeros_size + lin_size)\n sizes = (zeros_size, lin_size, ones_size)\n mask_parts = [\n torch.zeros(sizes[0], device=device, dtype=dtype),\n torch.linspace(0, 1, sizes[1], device=device, dtype=dtype),\n torch.ones(sizes[2], device=device, dtype=dtype),\n ]\n return (\n torch.cat(mask_parts, 0)\n .repeat(shape[2], 1)\n .repeat(shape[1], 1, 1)\n .unsqueeze(0)\n )",
"def _preprocess(self, body):\n return body",
"def mask_tokens(self, sequence):\n n_tokens = len(sequence)\n n_masked_tokens = int(self.masking_proportion*n_tokens/100)\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n while len(set(indexes))!=n_masked_tokens:\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n sequence = np.array(sequence)\n sequence[indexes] = 4\n return list(sequence)",
"def make_attention_mask(source_block, target_block):\n mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)\n mask = mask.astype(np.int64)\n # (source_length, target_length)\n return mask",
"def get_bodies_modify(number=5, excluded_params=None):\n bodies = []\n for _ in range(number):\n first_body = create_body(excluded_params)\n second_body = first_body\n while second_body == first_body:\n second_body = create_body(excluded_params)\n second_body[\"username\"] = first_body[\"username\"]\n bodies.append((first_body, second_body))\n return bodies",
"def mask_emissionlines(self, element_emission_lines):\n\t\t#Dictionary of corrosponding elements to their emission lines\n\t\temission_dict = {'He-II' : (3202.15, 4685.74),\n\t\t\t\t\t\t 'Ne-V' : (3345.81, 3425.81),\n\t\t\t\t\t\t 'O-II' : (3726.03, 3728.73),\n\t\t\t\t\t\t 'Ne-III': (3868.69, 3967.40),\n\t\t\t\t\t\t 'H-ζ' : 3889.05,\n\t\t\t\t\t\t 'H-ε' : 3970.07,\n\t\t\t\t\t\t 'H-δ' : 4101.73,\n\t\t\t\t\t\t 'H-γ' : 4340.46,\n\t\t\t\t\t\t 'O-III' : (4363.15, 4958.83, 5006.77),\n\t\t\t\t\t\t 'Ar-IV' : (4711.30, 4740.10),\n\t\t\t\t\t\t 'H-β' : 4861.32,\n\t\t\t\t\t\t 'N-I' : (5197.90, 5200.39),\n\t\t\t\t\t\t 'He-I' : 5875.60,\n\t\t\t\t\t\t 'O-I' : (6300.20, 6363.67),\n\t\t\t\t\t\t 'N-II' : (6547.96, 6583.34),\n\t\t\t\t\t\t 'H-α' : 6562.80,\n\t\t\t\t\t\t 'S-II' : (6716.31, 6730.68),\n\t\t\t\t\t\t 'Ar-III': 7135.67}\n\n\t\t#Create an array full of booleans equal to False, same size as the restframe_wavelength\n\t\tself.lines_mask = np.zeros_like(self.restframe_wavelength,dtype=bool)\n\n\t\t#Loop through the input of the emission lines list\n\t\tfor i in range(len(element_emission_lines)):\n\n\t\t\t#Check if the value is in the dictionary\n\t\t\tif element_emission_lines[i] in emission_dict:\n\n\t\t\t\tele_line = element_emission_lines[i]\n\t\t\t\tline = emission_dict[ele_line]\n\n\t\t\t\t#Check if it contains a tuple (some elements have more then one emission line)\n\t\t\t\tif type(line) == tuple:\n\n\t\t\t\t\t#Find the number of emission lines for this value\n\t\t\t\t\tn_lines = len(line)\n\n\t\t\t\t\t#Loop through and mask them\n\t\t\t\t\tfor n in range(n_lines):\n\n\t\t\t\t\t\tn_line = line[n]\n\n\t\t\t\t\t\t#Creates the boolean array\n\t\t\t\t\t\ttemp_lines_mask = ((self.restframe_wavelength > n_line - self.N_angstrom_masked) & (self.restframe_wavelength < n_line + self.N_angstrom_masked))\n\t\t\t\t\t\t#Adds the boolean array to the exisiting one to save it\n\t\t\t\t\t\tself.lines_mask = (temp_lines_mask | self.lines_mask)\n\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\ttemp_lines_mask = ((self.restframe_wavelength > line - self.N_angstrom_masked) & (self.restframe_wavelength < line + self.N_angstrom_masked))\n\t\t\t\t\tself.lines_mask = (temp_lines_mask | self.lines_mask)\n\n\t\t\telse:\n\t\t\t\tprint(element_emission_lines[i])\n\t\t\t\traise KeyError",
"def generate_effective_mask(self, mask_size: tuple, polygons_ignore):\n mask = np.ones(mask_size, dtype=np.uint8)\n\n for poly in polygons_ignore:\n instance = poly.astype(np.int32).reshape(1, -1, 2)\n cv2.fillPoly(mask, instance, 0)\n\n return mask",
"def multibody(body):\n if len(body) > 1:\n return [\"begin\"] + body\n else:\n return body[0]",
"def get_fg_mask(densepose_map, has_fg):\r\n if type(densepose_map) == list:\r\n return [get_fg_mask(label, has_fg) for label in densepose_map]\r\n if not has_fg or densepose_map is None:\r\n return 1\r\n if len(densepose_map.shape) == 5:\r\n densepose_map = densepose_map[:, 0]\r\n # Get the body part map from DensePose.\r\n mask = densepose_map[:, 2:3]\r\n\r\n # Make the mask slightly larger.\r\n mask = L.pool2d(mask, pool_size=15, pool_type='max', pool_stride=1, pool_padding=7)\r\n # mask = dg.to_variable(((mask > -1).numpy().astype(\"float32\")))\r\n mask = P.cast((mask > -1), \"float32\")\r\n return mask",
"def mask(self, item_or_items: Union[str, list]) -> None:\n if isinstance(item_or_items, str):\n self._masked_items.add(item_or_items)\n elif isinstance(item_or_items, list):\n for item in item_or_items:\n assert isinstance(item, str)\n self._masked_items.add(item)",
"def _process_attention_mask_for_special_tokens(attention_mask: Tensor) ->Tensor:\n attention_mask[:, 0] = 0\n sep_token_position = (attention_mask - 0.1).cumsum(-1).argmax(-1)\n attention_mask[torch.arange(attention_mask.size(0)).long(), sep_token_position] = 0\n return attention_mask",
"def attention_mask(x):\n mask = torch.zeros(len(x), len(x[0]))\n for i in range(len(x)):\n try:\n index = np.where(x[i]==1)[0][0]\n mask[i][index:] = -np.inf\n except:\n pass\n return mask",
"def get_negative_put_bodies():\n body = create_body(excluded_params=[\"id\"])\n modify_body = deepcopy(body)\n modify_body.update({\"id\": random.randint(0, sys.maxsize)})\n bodies = [(body, modify_body)]\n\n param_list = copy(RC.PARAM_LIST)\n param_list.remove(\"id\")\n for param_name in param_list:\n body = create_body(excluded_params=[\"id\"])\n modify_body = create_body(excluded_params=[\"id\", param_name])\n bodies.append((body, modify_body))\n for _ in range(5):\n body = create_body(excluded_params=[\"id\"])\n modify_body = create_body(excluded_params=[\"id\"], is_bad_format=True)\n bodies.append((body, modify_body))\n return bodies"
] | [
"0.61990356",
"0.60066074",
"0.5917666",
"0.58902127",
"0.5703398",
"0.55054516",
"0.5479234",
"0.5406589",
"0.5364245",
"0.53532046",
"0.53532046",
"0.5335742",
"0.52789414",
"0.51581067",
"0.51506364",
"0.5140562",
"0.51351595",
"0.5124637",
"0.5110115",
"0.510529",
"0.5060047",
"0.5043899",
"0.50310516",
"0.50214165",
"0.50191265",
"0.5012668",
"0.5010378",
"0.50015885",
"0.49935922",
"0.49772376"
] | 0.7947181 | 0 |
"Function takes text file parameter, and counts instances of team face offs by iterating though each lines. Distinction of readable line made by presence of character "v" in line. | def count_matches(reading):
dictionary = {}
the_list = list()
with open(reading, "r") as text_file:
for lines in text_file:
sentence = lines.strip()
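            # skip blank lines and lines that do not contain "v" (i.e. not a fixture line)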
if not sentence or sentence.find("v") < 0: continue
else:
tup = tuple(sentence.split(" v "))
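                # frozenset makes the pairing order-independent, so "A v B" and "B v A" count as the same fixture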
teams = frozenset(tup)
the_list.append(teams)
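    # tally how many times each fixture appears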
for keys in the_list:
dictionary[keys] = dictionary.get(keys, 0) + 1
return dictionary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)",
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)",
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)",
"def countChaptersVerses(filename):\n # Modes that the usfm scanner is in (parsing mode)\n NORMAL = 0 # regular Bible text\n MARKER = 1 # USFM marker\n PREFIX = 2 # file header info\n GLOSSARY = 3 # within a \\w ... \\w* section\n mode = PREFIX\n newParagraph = False\n usfmCode = \"\"\n markerPattern = r'\\\\(\\S+)'\n markerPatternCompiled = regex.compile(markerPattern) # looking for a usfm \\marker\n # The following markers are ones we just \"delete\" from the text because they are\n # glossary or formatting markers. NOTE: The next line of code is critical. If there\n # is a marker that I have not seen before, I may lose words from the original USFM\n # and verses can appear to be truncated. Watch out for this in the future.\n markersToIgnore = ['li', 'q1', 'q2', 'qt', 'm', 'w', 'pi', 'pi2', 'b', 'nb', 'mi']\n # The current word list\n wordlist = []\n try:\n # If you do not have utf_8_sig, the byte-order-mark ef bb bf messes up\n # the initial \\id line so it does not match \\\\id below. This decoding\n # method dumps the BOM if it is present.\n file = open(filename, 'r', encoding='utf_8_sig')\n except IOError:\n # File does not exist...ignore...lets us pass wrong parameters like *.sfm *.usfm *.SFM and not worry\n return\n debug(f\"Processing file {filename}\")\n\n for lineno, line in enumerate(file):\n # Ignore blank lines\n if not line.strip():\n continue;\n\n debug(\"DEBUG1: \" + line)\n\n # Disregard line/verse boundaries so that repeats can cross lines/verses\n words = line.split()\n debug(\"DEBUG2: \" + \"::\".join(words))\n\n # Handle USFM codes (by noting them or dropping them)\n while words:\n word = words.pop(0)\n debug(f\"DEBUG3: Processing chunk ::{word}:: with length {len(word)}\")\n markerMatch = markerPatternCompiled.search(word)\n #print(\"DEBUG2: \" + \"Word=\" + word + \" \" + ' '.join(words))\n # Capture context of book chapter:verse\n if (word == \"\\\\id\"):\n debug(f\"DEBUG4: Processing id\")\n bookid = words.pop(0)\n debug(f\"DEBUG5: Found book id {bookid}\")\n # We don't process the glossary book\n if (bookid == \"XXA\" or bookid == \"XXB\" or bookid == \"FRT\" or bookid == \"GLO\" or \n bookid == \"XXC\" or bookid == \"XXD\" or bookid == \"INT\" or bookid == \"BAK\" or\n bookid == \"XXE\" or bookid == \"XXF\" or bookid == \"XXG\"):\n file.close()\n return\n book = bookid # instead of changing to any other naming system, keep it same\n debug(\"DEBUG6: Set Book = {book}\")\n elif (word == \"\\\\c\"):\n if not words:\n error(f\"Missing chapter number in {filename}:{lineno}\")\n chapter = words.pop(0)\n debug(f\"DEBUG7: Chapter {chapter}\")\n verse = 0 # restart verse numbering\n mode = NORMAL # move out of PREFIX mode\n elif (word == \"\\\\v\"):\n if not words:\n error(f\"Missing verse number in {filename}:{lineno}\")\n verse = words.pop(0)\n debug(f\"DEBUG8: Verse {verse}\")\n # Verse numbers should be monotonically increasing by one every time from the previous one\n try: chapter\n except NameError: \n error(f\"Missing chapter in {book} or verse number {verse} is not within a chapter??\")\n exit(1)\n prevVerse = int(verseDict.get((book, chapter), 0))\n if (\"-\" in verse):\n # Special case: we have a verse range, like 17-18\n verses = verse.split(\"-\")\n verse1 = int(verses[0])\n verse2 = int(verses[1])\n if (prevVerse+1 != verse1):\n error(f\"Verse number {verse1} in range {verse} is out of sequence in {book} {chapter}, last verse was {prevVerse}\")\n if ((verse2 - verse1) > 0):\n # We have a range of verses like \\v 1-4 or \\v 1-3 or \\v 5-6\n verse = verse2 # 
move to the end of the range\n prevVerse = verse2 - 1 # set up for below check; we know it is OK, but it doesn't!\n else:\n error(f\"Verse number {verse1} in range {verse} is greater than the end of the range in {book} {chapter}, last verse was {prevVerse}\")\n else:\n # Just a regular single verse, like \\v 4\n pass\n # Now carry on as if no verse range was found\n if (prevVerse+1 != int(verse)):\n error(f\"Verse number {verse} is out of sequence in {book} {chapter}, last verse was {prevVerse}\")\n verseDict[(book, chapter)] = verse\n\n file.close()",
"def strategy_guide(filename):\n score = 0\n # Iterate over the lines of the file\n with open(filename, \"rt\", encoding=\"utf-8\") as filetoread:\n for line in filetoread:\n # opponent, player = line.split()\n # print(line.rstrip())\n result = scoreit(line.rstrip())\n score = score + result\n return score",
"def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)",
"def extractVoterTableInfo(textFile):\n # Initialize Field Locations\n countyHeader = 1\n republicanHeader = 2\n democratHeader = 3\n\n # Initialize Filed Tracker\n field_tracker = 0\n\n # Initailize Florida Votes List\n florida_voters = []\n\n # Read all lines of file\n line = textFile.readline()\n while line != '':\n # Strip white space before and after line\n line = line.rstrip()\n line = line.lstrip()\n\n # Identify if line is a table entry\n if identifyTableEntry(line):\n # Strip extra txt before and after line\n line = line.lstrip(\"<td>\")\n line = line.rstrip(\"</td>\")\n\n # Identify if line is County Name\n if identifyCounty(line):\n field_tracker = 1\n county = line\n\n # If Not County Header\n if field_tracker != 0:\n # Set resetFalse field_tracker to True\n resetFalse = True\n\n if field_tracker > democratHeader:\n field_tracker = 0\n # Append Tuple of County Info to florida_voters\n florida_voters.append((county, republicanVotes, democratVotes))\n resetFalse = False\n elif field_tracker == republicanHeader:\n # Extract republicanVotes as int\n republicanVotes = int(''.join(line.split(\",\")))\n elif field_tracker == democratHeader:\n # Extract democratVotes as int\n democratVotes = int(''.join(line.split(\",\")))\n\n # Increment field_tracker if resetFalse==True\n if resetFalse:\n field_tracker += 1\n\n # Advance to next line\n line = textFile.readline()\n\n # Sort County Tuple by Democratic Votes (Least -> Greatest)\n sortedCounties = sortTuple(florida_voters, 3)\n return sortedCounties",
"def _analyzeFile(self, filename):\n date = os.path.basename(filename)[:10]\n if filename.endswith('gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n lines = f.read().splitlines()\n for line in lines:\n if re.search('joined the game', line):\n self._analyzeLine(line, date, self._start_times)\n elif re.search('left the game', line) or re.search('lost connection',\n line):\n self._analyzeLine(line, date, self._end_times)\n elif re.search('Stopping server', line):\n self._server_stop_times.append(ConvertTime(date, line))",
"def analyze_text(filename):\n lines = 0\n characters = 0\n with open(filename, \"r\") as f:\n for line in f:\n lines += 1\n characters += len(line)\n return lines, characters",
"def analyse_text(file):\n\t\n\tchar = 0\n\tlines = 0\n\twith open(file,'r') as f:\n\t\tfor line in f:\n\t\t\tlines += 1\n\t\t\tchar += len(line)\n\n\treturn lines, char",
"def count_meme_entries(motif_path):\n with open(motif_path, \"r\") as f:\n counter = 0\n for line in f:\n if line[:6] == \"letter\":\n counter += 1\n return counter",
"def pre_process_file(filename):\n\n num_lines = 0\n vm_ids = set()\n with open(filename) as trace:\n for item in csv.reader(trace, delimiter=','):\n num_lines += 1\n disk_id = int(item[2])\n vm_ids.add(disk_id) # can make it more efficient\n no_of_vms = len(vm_ids)\n return (num_lines, no_of_vms, vm_ids)",
"def analyze_text(filename):\n lines = 0\n chars = 0\n\n with open(filename, 'r') as f:\n for line in f:\n lines += 1\n chars += len(line)\n return (lines, chars)",
"def SentenceLength(f):\n\tcounter=0\n\twith open(filename) as f:\n\t\tread = csv.reader(f)\n\t\tfor row in read:\n\t\t\t#Original\n\t\t\tzin0=row[0].split()\n\t\t\t#Human Translation\n\t\t\tzin1=row[1].split()\n\t\t\t#Machine Translation\n\t\t\tzin2=row[2].split()\n\t\t\tcounter+=1\n\t\t\t#PRINT LENGTH DIFFERENCE\n\t\t\t#print(\"HT\",counter,(abs(len(zin0)- len(zin1))))\n\t\t\tprint(\"MT\",counter,(abs(len(zin0)- len(zin2))))",
"def _parse_tsv_vocab_file(self, vocab_file: str):\n with open(vocab_file, \"r\", encoding=\"utf-8\") as f:\n for (index, line) in enumerate(f):\n title, count = line.rstrip().split(\"\\t\")\n entity = Entity(title, None)\n self.vocab[entity] = index\n self.counter[entity] = int(count)\n self.inv_vocab[index] = [entity]",
"def __winner_in_line(self, line):\n\t\ttoken_sum = 0\n\t\tfor token in line:\n\t\t\ttoken_sum += token\n\t\t\tif token_sum == 4 * self.PLAYER1:\n\t\t\t\treturn self.PLAYER1\n\t\t\tif token_sum == 4 * self.PLAYER2:\n\t\t\t\treturn self.PLAYER2\n\t\t\tif token_sum < 0 < token or token_sum > 0 > token:\n\t\t\t\ttoken_sum = 0\n\t\treturn 0",
"def counterdict(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.append(s_i)\n for ele in enumerate(vas):\n print(ele)\n logging.debug(\"Starting with to\")",
"def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)",
"def line_count(file):\n with open(file, \"r\") as f:\n return sum(1 for line in f)",
"def _get_line_number(vcf):\n with open(vcf) as vcf_input_file:\n i = -1\n for line in vcf_input_file:\n i += 1\n return i",
"def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1",
"def count_words(filename):",
"def analyzeFile(filename): \n fileData = open(filename, encoding=\"utf-8\") # open the file\n \n counts = {}\n\n for line in fileData:\t\t # iterates over every line of the file\n words = line.split() # turns each line into a list\n for word in words: #iterates over the words in each line list\n word = word.lower().strip(string.whitespace+string.punctuation)\n if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary\n counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary\n #when it gets here for the first line it goes back up to the top and repeats for the 2nd line\n mostCommonWord = [word]\n leastCommonWord = [word]\n shortestWord = [word]\n longestWord = [word]\n \n for item in counts:\n if counts[mostCommonWord[0]] < counts[item]:\n mostCommonWord = [item]\n elif counts[mostCommonWord[0]] == counts[item]:\n mostCommonWord.append(item)\n if counts[leastCommonWord[0]] > counts[item]:\n leastCommonWord = [item]\n elif counts[leastCommonWord[0]] == counts[item]:\n leastCommonWord.append(item)\n if len(shortestWord[0]) > len(item):\n shortestWord = [item] \n elif len((shortestWord[0])) == len(item):\n shortestWord.append(item)\n if len(longestWord[0]) < len(item):\n longestWord = [item]\n elif len(longestWord[0]) == len(item):\n longestWord.append(item)\n \n return (mostCommonWord, leastCommonWord, shortestWord, longestWord)",
"def main():\n\tcount = 0\n\twith open(FILE, 'r') as f:\n\t\tfor line in f:\n\t\t\tcount += 1\n\tprint('There are ' + str(count) + ' lines in '+str(FILE))",
"def SentenceSplitsStops(f):\n\tcounter=0\n\twith open(filename) as f:\n\t\tread = csv.reader(f)\n\t\tfor row in read:\n\t\t\t#Original\n\t\t\tzin0=row[0]\n\t\t\t#Human Translation\n\t\t\tzin1=row[1]\n\t\t\t#Machine Translation\n\t\t\tzin2=row[2]\n\t\t\tcounter+=1\n\t\t\t#FULL STOPS\n\t\t\t#print(abs((zin0.count('.') - zin1.count('.'))))\n\t\t\tprint(abs((zin0.count('.') - zin2.count('.'))))",
"def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found",
"def count_kmers(file_name, k, verbose=False):\n if verbose:\n start = time.time()\n print('Counting kmers in {}'.format(file_name))\n total_kmers = 0\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n total_kmers += len(line) - k # eliminate new-line\n line_num += 1\n if verbose:\n end = time.time()\n print('{} kmers are counted in {:.2f} seconds'.format(\n total_kmers, end - start))\n return total_kmers",
"def test(self, filename):\n hit = 0\n total = 0\n n = self.n\n for sent in open(filename):\n samp = sent.rstrip('\\n')\n# samp = '~' + samp + '~' \n for i in range(len(samp) - n):\n total = total + 1\n prev = samp[i:i + n - 1]\n pred = self.pred(prev)\n if pred == samp[i + n - 1]:\n hit = hit + 1\n \n return hit/total",
"def linesCountingAux(file_name, nProcesses):\r\n\r\n linesPerProcessesList = []\r\n\r\n with open(file_name, \"r\") as file:\r\n lineCounting = 0\r\n\r\n for line in file:\r\n lineCounting += 1 #discover the lines in the text file\r\n\r\n linesPerProcesses = lineCounting // nProcesses\r\n\r\n for number in range(nProcesses):\r\n linesPerProcessesList.append(linesPerProcesses)\r\n if sum(linesPerProcessesList) < lineCounting:\r\n for number in range (lineCounting - sum(linesPerProcessesList)):\r\n linesPerProcessesList[number] += 1\r\n\r\n return linesPerProcessesList",
"def read_text_file(self, filepath: str):\n with open(filepath) as fh:\n for line in fh:\n for word in re.split('\\W+', line):\n word = word.lower()\n if len(word):\n l = self.hash_map.lookup(word)\n self.hash_map.insert(word, l + 1 if l > 0 else 1)"
] | [
"0.5773035",
"0.575675",
"0.575675",
"0.5693809",
"0.5687128",
"0.56646866",
"0.5577447",
"0.55631375",
"0.5443837",
"0.54132473",
"0.5392287",
"0.538056",
"0.5354688",
"0.5338032",
"0.5318795",
"0.53123796",
"0.53035545",
"0.5301292",
"0.5290238",
"0.5274351",
"0.5263779",
"0.52605194",
"0.52570087",
"0.5236466",
"0.5231085",
"0.52221745",
"0.52110916",
"0.5197864",
"0.51900995",
"0.51725495"
] | 0.6466942 | 0 |
returns the interests for the category. | def all(self, list_id, category_id, **kwargs):
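        # List every interest (group name) under the given interest category.
        # Extra kwargs are forwarded to the underlying client call (presumably as
        # query-string parameters such as fields/count/offset).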
return self._mc_client._get(
url=self._build_path(list_id, 'interest-categories', category_id, 'interests'),
**kwargs
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interests(self):\n if \"interests\" in self._prop_dict:\n return self._prop_dict[\"interests\"]\n else:\n return None",
"def get(self, list_id, category_id, interest_id):\n return self._mc_client._get(\n url=self._build_path(list_id, 'interest-categories', category_id, 'interests', interest_id))",
"def get_user_interests_with_categories(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n cursor.execute(\"SELECT tag, category FROM \" + ENV_DB +\r\n \".UserTags WHERE username='\" + self.user.username + \"'\")\r\n data = cursor.fetchall()\r\n database.close()\r\n return list((i[0], i[1]) for i in data)",
"def get_interests(self):\n cur = self.conn.cursor(pymysql.cursors.DictCursor)\n\n cur.execute('SELECT id, name FROM Interests ORDER BY sort_order;')\n\n return CursorIterator(cur)",
"def listNotificationInterests(self):\n return []",
"def get_margin_interests(self, margin_type: str, asset: Optional[str] = None, isolated_symbol: Optional[str] = None,\n start_time: Optional[int] = None, end_time: Optional[int] = None):\n conditions_list = []\n\n if margin_type == 'cross':\n table = tables.CROSS_MARGIN_INTEREST_TABLE\n elif margin_type == 'isolated':\n table = tables.ISOLATED_MARGIN_INTEREST_TABLE\n if isolated_symbol is not None:\n conditions_list.append((table.isolated_symbol,\n SQLConditionEnum.equal,\n isolated_symbol))\n else:\n raise ValueError(f\"margin type should be 'cross' or 'isolated' but {margin_type} was received\")\n\n if asset is not None:\n conditions_list.append((table.asset,\n SQLConditionEnum.equal,\n asset))\n if start_time is not None:\n conditions_list.append((table.interestTime,\n SQLConditionEnum.greater_equal,\n start_time))\n if end_time is not None:\n conditions_list.append((table.interestTime,\n SQLConditionEnum.lower,\n end_time))\n return self.get_conditions_rows(table, conditions_list=conditions_list)",
"def get_user_interests():\n data = request_verification(request.get_json())\n if isinstance(data, int):\n return codes[data], data\n else:\n try:\n user_data = r.table('interests').filter(r.row['user_id'] == data).run(g.rdb_conn)\n except Exception as e:\n return e, 500\n else:\n interests = user_data.items[0].get('interests')\n return json.dumps(interests), 200",
"def get_category_ratings(self):\n category_ratings = dict()\n for cat_rating in self.category_ratings.all():\n category_ratings[cat_rating.category.name] = cat_rating.rating\n return category_ratings",
"def create(self, list_id, category_id, data):\n return self._mc_client._post(\n url=self._build_path(list_id, 'interest-categories', category_id, 'interests'), data=data)",
"def get_crimes_by_category(self):\n\n result = {}\n for crime in self.crimes:\n cat_name = crime.category.category_name\n if cat_name in result:\n result[cat_name] += 1\n else:\n result[cat_name] = 1\n \n return result",
"def update(self, list_id, category_id, interest_id, data):\n return self._mc_client._patch(\n url=self._build_path(list_id, 'interest-categories', category_id, 'interests', interest_id),\n data=data)",
"def get_lending_interests(self, lending_type: Optional[str] = None, asset: Optional[str] = None,\n start_time: Optional[int] = None, end_time: Optional[int] = None):\n conditions_list = []\n table = tables.LENDING_INTEREST_TABLE\n if lending_type is not None:\n conditions_list.append((table.lendingType,\n SQLConditionEnum.equal,\n lending_type))\n if asset is not None:\n conditions_list.append((table.asset,\n SQLConditionEnum.equal,\n asset))\n if start_time is not None:\n conditions_list.append((table.interestTime,\n SQLConditionEnum.greater_equal,\n start_time))\n if end_time is not None:\n conditions_list.append((table.interestTime,\n SQLConditionEnum.lower,\n end_time))\n return self.get_conditions_rows(table, conditions_list=conditions_list)",
"def interests(self, create, extracted, **kwargs):\n if not create:\n return\n if extracted:\n for interest in extracted:\n self.interests.add(interest)",
"def total_interest(self):\n return sum(self.table[\"interest\"])",
"def get_citizens():\n response = table.scan()[\"Items\"]\n logger.info(\"All citizens returned\")\n return jsonify(response)",
"def InspireCategories(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('inspire_categories', default)\n return [HEP.InspireFieldObject(i) for i in tmp]",
"def get_interests(store: KeyValueStorage, client_id: int) -> List[str]:\n\n r = store.get(\"i:%s\" % client_id)\n return json.loads(r) if r else []",
"def get_citations(self):\n url = self._get_url() + 'citations'\n citations = self._request(url)\n return citations.json()",
"def category_rates(self, member_state, category, region=None):\n rates = self._get_rates(member_state)\n if region is not None:\n rates = rates.regions[region]\n return rates.categories.get(category, [])",
"def indicators(self):\n return self._indicators",
"def get_citations_trend(body):\n user_id = get_user_id(body)\n citations_per_year = body.find('div', attrs={'id': 'gsc_g'})\n years = citations_per_year.find('div', attrs={'id': 'gsc_g_x'})\n n_citations = citations_per_year.find('div', attrs={'id': 'gsc_g_bars'})\n citations_list = list()\n for year, n_citation in zip(years, n_citations):\n citations_dict = {'user_id': user_id,\n 'n_citation': n_citation.text,\n 'year': year.text}\n citations_list.append(citations_dict)\n return citations_list",
"def intensities(self):\n return self._intensities.copy()",
"def get_classification_indicators(classification_id, **kwargs):\n\n instance = Ceic._get_instance()\n\n kwargs[\"id\"] = classification_id\n get_dictionaries_method = instance._dictionary_facade.get_classification_indicators\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result",
"def get_group_interests(self):\r\n common_tags = set()\r\n for mem in self.members:\r\n if len(common_tags) == 0:\r\n common_tags = self.get_interests_each_member(mem)\r\n else:\r\n common_tags = common_tags.intersection(self.get_interests_each_member(mem))\r\n return list(common_tags)",
"def get_indicators(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_indicators\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result",
"def get_interest_rates():\n\n try:\n tenure = int(request.get_json()[\"tenure\"])\n except:\n return jsonify({\"message\": \"Input is 'tenure' in months\"}), 400\n\n if tenure <= 5:\n return jsonify({\"interest_rate\": 10}), 200\n elif tenure > 5 and tenure <= 24:\n return jsonify({\"interest_rate\": 12}), 200\n else:\n return jsonify({\"interest_rate\": 15}), 200",
"def categories(self):\r\n return self.q(css='span.rubric-category').text",
"def income_cat(housing):\n logging.info(\"Creating Income Category.....\")\n housing[\"income_cat\"] = pd.cut(\n housing[\"median_income\"],\n bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf],\n labels=[1, 2, 3, 4, 5],\n )\n return housing",
"def get_all_indicators(self):\n return self.display_table.get_all_indicators(root=self.display_table_root)",
"def compute_interest(self) -> float:\n interest = self._balance * SavingsAccount.RATE\n self.deposit(interest)\n return interest"
] | [
"0.7233376",
"0.6845424",
"0.6362905",
"0.59505403",
"0.5945664",
"0.57679194",
"0.5734994",
"0.5648778",
"0.5561602",
"0.5554436",
"0.54485667",
"0.5437664",
"0.5421697",
"0.5412767",
"0.53375494",
"0.53321195",
"0.5257061",
"0.5234549",
"0.5228752",
"0.5152548",
"0.51335615",
"0.5045426",
"0.5031194",
"0.50067747",
"0.49992195",
"0.4969678",
"0.49644265",
"0.49575335",
"0.49370483",
"0.49065793"
] | 0.7139124 | 1 |
returns information about a specific interest category, or Group Title. | def get(self, list_id, category_id, interest_id):
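        # Fetch a single interest by id via the .../interest-categories/{category_id}/interests/{interest_id}
        # path assembled by _build_path below.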
return self._mc_client._get(
url=self._build_path(list_id, 'interest-categories', category_id, 'interests', interest_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getCategory():",
"def category(self):\r\n return self._get('category', {})",
"def category(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])",
"def category(self) -> str:\n return pulumi.get(self, \"category\")",
"def category_title(self):\n categories = {c[0]:c[1] for c in self.CATEGORY_CHOICES}\n if self.category in categories:\n return categories[self.category]",
"def category(self) -> str:\n return self._search_in_properties(ATTR_CATEGORY)",
"def category(self) -> Optional[str]:\n return pulumi.get(self, \"category\")",
"def getCategory(self, *args):\n return _libsbml.SBMLExtension_getCategory(self, *args)",
"def category(self):\n return self._category",
"def category(self):\n return self._category",
"def category(self):\n return self._category",
"def category(self):\n return self._category",
"def get_category(self) -> str:\n return self.category",
"def get_category_info(category_id):\n uri = 'categories/' + category_id\n return self.make_request(uri)",
"def category(self):\n return self._manager.get_category(self.name)",
"def category(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"category\")",
"def get_category(self):\n\n\t\treturn self.__category",
"def get_achievement_category(self, region, namespace, id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/achievement-category/{0}', region, *[id], **filters)",
"def extract_crime_category(self,match):\n \n\n #\n # wouldn't be calling this function if we didn't already know there's a match\n assert(match!=None)\n\n #\n # extract crime category\n line=match.string\n start_index=match.start('crime')\n stop_index=match.end('crime')\n crime_key=line[start_index:stop_index]\n crime_key=crime_key.lower()\n\n my_logger.debug('match(%d,%d)=%s' % (start_index,stop_index,crime_key))\n \n return crime_key",
"def category(self) -> Optional[pulumi.Input['TestIssueCategory']]:\n return pulumi.get(self, \"category\")",
"def category_name(self):\n return self.category.name",
"def info(self, categories=()):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Category')\n data = self.manager.data[\"category\"]\n if any(categories):\n data = self._get_filtered_category_data(data, categories)\n pp.pprint(data)\n print('')",
"def category(self) -> str:\n return self._category",
"def category(self) -> str:\n return self._category",
"def get_category(url):\n response = requests.get(url)\n if(response.ok):\n soup = BeautifulSoup(response.text, 'lxml')\n category = soup.find('div', {'class': 'page-header'})\n category = category.find('h1')\n return category.text",
"def get_name(self):\n return self.category_name",
"def get_work(self, summary):\n r = self.s.request(method=\"get\", url=self.baseurl + summary.path)\n json = r.json()\n return create_citation(json['citation'])",
"def get_description(self):\n return self['contactgroup_name']",
"def get_user_interests_with_categories(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n cursor.execute(\"SELECT tag, category FROM \" + ENV_DB +\r\n \".UserTags WHERE username='\" + self.user.username + \"'\")\r\n data = cursor.fetchall()\r\n database.close()\r\n return list((i[0], i[1]) for i in data)",
"def get_achievement_category_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/achievement-category/index', region, **filters)"
] | [
"0.6102256",
"0.5793752",
"0.5774473",
"0.57672524",
"0.5699058",
"0.5508172",
"0.54925686",
"0.5485171",
"0.5441007",
"0.5441007",
"0.5441007",
"0.5441007",
"0.5364656",
"0.53638065",
"0.53515863",
"0.53429925",
"0.53126425",
"0.52534324",
"0.525046",
"0.5194649",
"0.5173863",
"0.5156559",
"0.51486814",
"0.51486814",
"0.51419675",
"0.513531",
"0.50974756",
"0.5093867",
"0.50924784",
"0.50869113"
] | 0.610781 | 0 |
Configure a pair of moving and fixed images and a pair of moving and fixed labels as model inputs and return the corresponding tf.keras.Input tensors. TODO do we absolutely need the batch_size in Input? | def build_inputs(
moving_image_size: tuple,
fixed_image_size: tuple,
index_size: int,
batch_size: int,
labeled: bool,
) -> [tf.keras.Input, tf.keras.Input, tf.keras.Input, tf.keras.Input, tf.keras.Input]:
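    # Every input is declared with an explicit batch dimension; the label inputs are
    # simply None when the dataset is unlabeled, so downstream code must handle that case.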
moving_image = tf.keras.Input(
shape=moving_image_size, batch_size=batch_size, name="moving_image"
) # (batch, m_dim1, m_dim2, m_dim3)
fixed_image = tf.keras.Input(
shape=fixed_image_size, batch_size=batch_size, name="fixed_image"
) # (batch, f_dim1, f_dim2, f_dim3)
moving_label = (
tf.keras.Input(
shape=moving_image_size, batch_size=batch_size, name="moving_label"
)
if labeled
else None
) # (batch, m_dim1, m_dim2, m_dim3)
fixed_label = (
tf.keras.Input(
shape=fixed_image_size, batch_size=batch_size, name="fixed_label"
)
if labeled
else None
) # (batch, m_dim1, m_dim2, m_dim3)
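    # indices carry per-sample bookkeeping values (length index_size) rather than image
    # data; presumably they let predictions be traced back to dataset entries.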
indices = tf.keras.Input(
shape=(index_size,), batch_size=batch_size, name="indices"
) # (batch, 2)
return moving_image, fixed_image, moving_label, fixed_label, indices | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_inputs(\n moving_image_size: tuple,\n fixed_image_size: tuple,\n index_size: int,\n batch_size: int,\n labeled: bool,\n) -> [tf.keras.Input, tf.keras.Input, tf.keras.Input, tf.keras.Input, tf.keras.Input]:\n moving_image = tf.keras.Input(\n shape=(*moving_image_size,), batch_size=batch_size, name=\"moving_image\"\n ) # (batch, m_dim1, m_dim2, m_dim3)\n fixed_image = tf.keras.Input(\n shape=(*fixed_image_size,), batch_size=batch_size, name=\"fixed_image\"\n ) # (batch, f_dim1, f_dim2, f_dim3)\n moving_label = (\n tf.keras.Input(\n shape=(*moving_image_size,), batch_size=batch_size, name=\"moving_label\"\n )\n if labeled\n else None\n ) # (batch, m_dim1, m_dim2, m_dim3)\n fixed_label = (\n tf.keras.Input(\n shape=(*fixed_image_size,), batch_size=batch_size, name=\"fixed_label\"\n )\n if labeled\n else None\n ) # (batch, m_dim1, m_dim2, m_dim3)\n indices = tf.keras.Input(\n shape=(index_size,), batch_size=batch_size, name=\"indices\"\n ) # (batch, 2)\n return moving_image, fixed_image, moving_label, fixed_label, indices",
"def build_conditional_model(\n moving_image_size: tuple,\n fixed_image_size: tuple,\n index_size: int,\n labeled: bool,\n batch_size: int,\n train_config: dict,\n registry: Registry,\n) -> tf.keras.Model:\n # inputs\n (moving_image, fixed_image, moving_label, fixed_label, indices) = build_inputs(\n moving_image_size=moving_image_size,\n fixed_image_size=fixed_image_size,\n index_size=index_size,\n batch_size=batch_size,\n labeled=labeled,\n )\n\n # backbone\n backbone = build_backbone(\n image_size=fixed_image_size,\n out_channels=1,\n config=train_config[\"backbone\"],\n method_name=train_config[\"method\"],\n registry=registry,\n )\n\n # prediction\n pred_fixed_label, grid_fixed = conditional_forward(\n backbone=backbone,\n moving_image=moving_image,\n fixed_image=fixed_image,\n moving_label=moving_label,\n moving_image_size=moving_image_size,\n fixed_image_size=fixed_image_size,\n ) # (batch, f_dim1, f_dim2, f_dim3)\n\n # build model\n inputs = {\n \"moving_image\": moving_image,\n \"fixed_image\": fixed_image,\n \"moving_label\": moving_label,\n \"fixed_label\": fixed_label,\n \"indices\": indices,\n }\n outputs = {\"pred_fixed_label\": pred_fixed_label}\n model = tf.keras.Model(\n inputs=inputs, outputs=outputs, name=\"ConditionalRegistrationModel\"\n )\n\n # loss and metric\n model = add_label_loss(\n model=model,\n grid_fixed=grid_fixed,\n fixed_label=fixed_label,\n pred_fixed_label=pred_fixed_label,\n loss_config=train_config[\"loss\"],\n registry=registry,\n )\n\n return model",
"def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config):\n\n model = get_detector(cfg, checkpoint_path, device=\"cpu\")\n one_img, one_meta = preprocess_example_input(input_config)\n tensor_data = [one_img]\n model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)\n\n return model, tensor_data",
"def build_inputs(self):\n # in prediction mode, we use a batch size of one\n batch_size = self.config.batch_size\n \n if self.mode == \"prediction\":\n batch_size = 1\n \n # In inference mode, images and inputs are fed via placeholders.\n image_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_feed\") # shape: scalar value\n\n #image_fn_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_fn_feed\")\n \n #image_filename_queue = tf.train.string_input_producer([image_fn_feed]) # list of files to read\n \n #reader = tf.WholeFileReader()\n #_, image_feed = reader.read(image_filename_queue)\n \n \n text_feed = tf.placeholder(dtype=tf.int64,\n shape=[None, self.config.sentence_length], # shape 2D tensor - variable size (first dimension sentence sequence, second dimension token sequence (actually fixed size))\n name=\"text_feed\")\n \n # arbitrary labels (not used)\n mi_label = tf.constant(-1, dtype=tf.int64) \n sc_label = tf.constant(-1.0, dtype=tf.float32) \n\n image = self.process_image(image_feed)\n\n # Process image and insert batch dimensions.\n images = tf.expand_dims(self.process_image(image_feed), 0)\n input_seqs = tf.expand_dims(text_feed, 0) \n mi_labels = tf.expand_dims(mi_label, 0)\n sc_labels = tf.expand_dims(sc_label, 0)\n input_mask = tf.expand_dims(tf.constant([1], dtype=tf.int32) , 0)\n \n else:\n # Prefetch serialized SequenceExample protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n is_training=self.is_training(),\n batch_size=batch_size,\n values_per_shard=self.config.values_per_input_shard,\n input_queue_capacity_factor=self.config.input_queue_capacity_factor,\n num_reader_threads=self.config.num_input_reader_threads,\n mode=self.mode)\n\n # Image processing and random distortion. Split across multiple threads\n # with each thread applying a slightly different distortion.\n assert self.config.num_preprocess_threads % 2 == 0\n images_and_texts = []\n for thread_id in range(self.config.num_preprocess_threads):\n serialized_sequence_example = input_queue.dequeue()\n encoded_image, text, mi, sc = input_ops.parse_sequence_example(\n serialized_sequence_example,\n image_feature=self.config.image_feature_name,\n sentences_feature=self.config.sentences_feature_name,\n sentence_length=self.config.sentence_length,\n mi_feature=self.config.mi_feature_name,\n sc_feature=self.config.sc_feature_name)\n image = self.process_image(encoded_image, thread_id=thread_id)\n images_and_texts.append([image, text, mi, sc])\n\n # Batch inputs.\n queue_capacity = (2 * self.config.num_preprocess_threads *\n batch_size)\n images, input_seqs, mi_labels, sc_labels, input_mask = (\n input_ops.batch_with_dynamic_pad(images_and_texts,\n batch_size=batch_size,\n queue_capacity=queue_capacity))\n \n #print('Shapes') \n #print('Shape images: ' + str(images.get_shape()))\n #print('Shape input_seqs: ' + str(input_seqs.get_shape())) \n #print('Shape input_mask: ' + str(input_mask.get_shape())) \n\n self.images = images\n self.input_seqs = input_seqs\n if self.mode == \"prediction\":\n self.mi_labels = None\n self.sc_labels = None\n else:\n self.mi_labels = mi_labels\n self.sc_labels = sc_labels\n self.input_mask = input_mask",
"def train_input(config, params):\n \"\"\"\n rawimages: Nb x hf x wf x 3, tf.float32, in [0,1]\n rawlabels: Nb x hf x wf, tf.int32, in [0,Nc-1]\n rawmetadata: Python dictionary with matadata (e.g. image shape, dtype)\n proimages: Nb x hf x wf x 3, tf.float32, in [0,1]\n prolabels: Nb x hf x wf, tf.int32, in [0,Nc-1]\n \"\"\"\n # runconfig = config.runconfig\n # # otherconfig includes: train_preprocess, mappings\n # otherconfig = config.otherconfig\n # hparams = params\n\n # reading, mapping labels to problem from otherconfig['lids2cids'],\n # batching, preprocessing with otherconfig['train_preprocess'], output\n\n # no obvious use of prodata metadata for now\n with tf.variable_scope('input_pipeline'):\n values = None\n for num_dataset in range(len(params.tfrecords_list)): # , params.camvid_tfrecords_path]:\n if values is None:\n values = train_input_per_data(config, params, num_dataset)\n values = list(values) + [num_dataset*tf.ones([params.Nb_list[num_dataset], ], dtype=tf.int32)]\n else:\n _values = list(train_input_per_data(config, params, num_dataset)) + \\\n [num_dataset*tf.ones([params.Nb_list[num_dataset], ], dtype=tf.int32)]\n values = [tf.concat((value1, value2), 0) for value1, value2 in zip(values, _values)]\n\n features = {'rawimages': values[1],\n 'proimages': values[3],\n 'rawimagespaths': values[0][0],\n 'rawlabelspaths': values[0][1]}\n labels = {'rawlabels': values[2],\n 'prolabels': values[4],\n 'domainlabels': values[5]}\n return features, labels",
"def build_inputs(self):\n if self.mode == \"inference\":\n # In inference mode, images and inputs are fed via placeholders.\n image_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_feed\")\n input_feed = tf.placeholder(\n dtype=tf.int64,\n shape=[None], # batch_size\n name=\"input_feed\")\n\n # Process image and insert batch dimensions.\n images = tf.expand_dims(self.load_image(image_feed), 0)\n input_seqs = tf.expand_dims(input_feed, 1)\n\n # No target sequences or input mask in inference mode.\n target_seqs = None\n input_mask = None\n else:\n def _load_example(serialized_example):\n encoded_image, caption = input_ops.parse_example(\n serialized_example,\n image_feature=self.config.image_feature_name,\n caption_feature=self.config.caption_feature_name)\n image = self.load_image(encoded_image)\n\n # strings.split expects a batch\n input_seqs, target_seqs, input_mask = input_ops.pad_caption_to_input(\n caption)\n return image, input_seqs, target_seqs, input_mask\n\n def _load_dataset(filename):\n return tf.data.TFRecordDataset(filename, buffer_size=16 * 1024 * 1024)\n\n df = tf.data.Dataset.list_files(\n self.config.input_file_pattern, shuffle=self.mode == \"train\")\n df = df.apply(\n tf.data.experimental.parallel_interleave(\n _load_dataset, cycle_length=64, sloppy=True))\n\n if self.mode == \"train\":\n df = df.repeat()\n df = df.shuffle(1024)\n\n df = df.apply(\n tf.data.experimental.map_and_batch(\n _load_example,\n self.config.batch_size,\n num_parallel_batches=8,\n drop_remainder=True))\n df = df.prefetch(8)\n images, input_seqs, target_seqs, input_mask = df.make_one_shot_iterator(\n ).get_next()\n\n self.images = images\n self.input_seqs = input_seqs\n self.target_seqs = target_seqs\n self.input_mask = input_mask",
"def model_setup(self):\n self.input_a = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_A\")\n self.input_b = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_B\")\n\n self.fake_pool_A = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_A\")\n self.fake_pool_B = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_B\")\n\n self.global_step = slim.get_or_create_global_step()\n\n self.num_fake_inputs = 0\n\n self.learning_rate = tf.placeholder(tf.float32, shape=[], name=\"lr\")\n\n inputs = {\n 'images_a': self.input_a,\n 'images_b': self.input_b,\n 'fake_pool_a': self.fake_pool_A,\n 'fake_pool_b': self.fake_pool_B,\n }\n\n outputs = model.get_outputs(\n inputs, network=self._network_version, skip=self._skip)\n\n self.prob_real_a_is_real = outputs['prob_real_a_is_real']\n self.prob_real_b_is_real = outputs['prob_real_b_is_real']\n self.fake_images_a = outputs['fake_images_a']\n self.fake_images_b = outputs['fake_images_b']\n self.prob_fake_a_is_real = outputs['prob_fake_a_is_real']\n self.prob_fake_b_is_real = outputs['prob_fake_b_is_real']\n\n self.cycle_images_a = outputs['cycle_images_a']\n self.cycle_images_b = outputs['cycle_images_b']\n\n self.prob_fake_pool_a_is_real = outputs['prob_fake_pool_a_is_real']\n self.prob_fake_pool_b_is_real = outputs['prob_fake_pool_b_is_real']",
"def model_inputs():\n # TODO: Implement Function\n inputs = tf.placeholder(tf.int32, shape=[None,None], name= \"input\")\n targets = tf.placeholder(tf.int32, shape=[None,None], name= \"targets\")\n lrate = tf.placeholder(tf.float32, name= \"learning_rate\")\n keep_prob = tf.placeholder(tf.float32, name= \"keep_prob\")\n target_seq_lenth = tf.placeholder(tf.int32, shape=[None], name= \"target_sequence_length\")\n max_target_len = tf.reduce_max(target_seq_lenth, name= 'max_target_len')\n source_seq_length = tf.placeholder(tf.int32, shape=[None], name= \"source_sequence_length\")\n return (inputs, targets, lrate, keep_prob, target_seq_lenth, max_target_len, source_seq_length)",
"def input_setup(config):\n print(config.is_train)\n # Load data path\n if config.is_train:\n data, label = prepare_data(dataset=\"Train/DoF_Images (2)\")\n else:\n data, label = prepare_data(dataset=\"Test\")\n\n sub_input_sequence = []\n sub_label_sequence = []\n padding = abs(config.image_size - config.label_size) / 2 # 6\n nx = ny = 0\n\n if config.is_train:\n for i in range(len(data)):\n input_, label_ = preprocess(data[i], label[i], config.scale)\n\n if len(input_.shape) == 3:\n h, w, c = input_.shape\n else:\n h, w = input_.shape\n\n for x in range(0, h-config.image_size+1, config.stride):\n if i == 0:\n nx += 1; ny = 0\n for y in range(0, w-config.image_size+1, config.stride):\n if i == 0:\n ny += 1\n\n sub_input = input_[x:x+config.image_size, y:y+config.image_size] # [33 x 33]\n sub_label = label_[x+int(padding):x+int(padding)+config.label_size\n , y+int(padding):y+int(padding)+config.label_size] # [21 x 21]\n # print(sub_input.shape)\n # print(sub_label.shape)\n # Make channel value\n sub_input = sub_input.reshape([config.image_size, config.image_size, 3])\n sub_label = sub_label.reshape([config.label_size, config.label_size, 3])\n\n sub_input_sequence.append(sub_input)\n sub_label_sequence.append(sub_label)\n\n else:\n nx_l = []\n ny_l = []\n for i in range(len(data)):\n input_, label_ = preprocess(data[i], label[i], config.scale)\n\n if len(input_.shape) == 3:\n h, w, c = input_.shape\n else:\n h, w = input_.shape\n\n if w >= 4000 or h > 4000:\n input_ = cv2.resize(input_, dsize=(int(w/2), int(h/2)), interpolation=cv2.INTER_AREA)\n label_ = cv2.resize(label_, dsize=(int(w/2), int(h/2)), interpolation=cv2.INTER_AREA)\n w = int(w/2)\n h = int(h/2)\n\n # Numbers of sub-images in height and width of image are needed to compute merge operation.\n\n nx = ny = 0\n for x in range(0, h-config.image_size+1, config.stride):\n nx += 1; ny = 0\n for y in range(0, w-config.image_size+1, config.stride):\n ny += 1\n sub_input = input_[x:x+config.image_size, y:y+config.image_size] # [33 x 33]\n sub_label = label_[x+int(padding):x+int(padding)+config.label_size, y+int(padding):y+int(padding)+config.label_size] # [21 x 21]\n\n sub_input = sub_input.reshape([config.image_size, config.image_size, 3])\n sub_label = sub_label.reshape([config.label_size, config.label_size, 3])\n\n sub_input_sequence.append(sub_input)\n sub_label_sequence.append(sub_label)\n #print(\"nx: %d ny: %d\" % (nx, ny))\n nx_l.append(nx)\n ny_l.append(ny)\n \"\"\"\n len(sub_input_sequence) : the number of sub_input (33 x 33 x ch) in one image\n (sub_input_sequence[0]).shape : (33, 33, 1)\n \"\"\"\n # Make list to numpy array. With this transform\n arrdata = np.asarray(sub_input_sequence) # [?, 33, 33, 3]\n arrlabel = np.asarray(sub_label_sequence) # [?, 21, 21, 3]\n\n make_data(config, arrdata, arrlabel)\n print(\"make_data success\")\n if config.is_train:\n return nx, ny\n else:\n return nx_l, ny_l, len(data)",
"def build_inputs(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n\n elif self.mode == \"test\":\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n \n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.input_queue_capacity,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.batch_size)\n s1, s2, label = input_ops.parse_example_batch(\n serialized)\n\n encode_ids1 = s1.ids\n encode_ids2 = s2.ids\n\n encode_mask1 = s1.mask\n encode_mask2 = s2.mask\n \n\n\n self.encode_ids1 = encode_ids1\n self.encode_ids2 = encode_ids2\n\n self.encode_mask1 = encode_mask1\n self.encode_mask2 = encode_mask2\n\n self.label = label",
"def FixInputsFC(model, inputs):\n first_input = K.constant(inputs[0])\n second_input = K.constant(inputs[1][:,:-1])\n\n Tensor_Input0 = Input(batch_shape = (model.input_shape[1][0], 1))\n\n n_input = keras.layers.Lambda(lambda x: K.concatenate([second_input,x],axis=-1))(Tensor_Input0)\n n2_input = keras.layers.Lambda(lambda x: [first_input, x])(n_input)\n Out1 = model(n2_input)\n# Out2 = keras.layers.Lambda(lambda x : x[:,:,0] - x[:,:,1])(Out1)\n Out2 = keras.layers.Lambda(lambda x : x)(Out1)\n M = keras.Model( Tensor_Input0, Out2 )\n return(M)",
"def model_inputs():\n inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')\n labels_ = tf.placeholder(tf.int32, [None, None], name='labels')\n keep_prob_ = tf.placeholder(tf.float32, name='keep_prob')\n \n return inputs_, labels_, keep_prob_",
"def prepare_train_input(insts, bos_idx, eos_idx, src_pad_idx, trg_pad_idx,\n n_head):\n src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data(\n [inst[0] + [eos_idx] for inst in insts],\n src_pad_idx,\n n_head,\n is_target=False)\n src_word = src_word.reshape(-1, src_max_len)\n src_pos = src_pos.reshape(-1, src_max_len)\n trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data(\n [[bos_idx] + inst[1] for inst in insts],\n trg_pad_idx,\n n_head,\n is_target=True)\n trg_word = trg_word.reshape(-1, trg_max_len)\n trg_pos = trg_pos.reshape(-1, trg_max_len)\n\n trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],\n [1, 1, trg_max_len, 1]).astype(\"float32\")\n\n lbl_word, lbl_weight, num_token = pad_batch_data(\n [inst[1] + [eos_idx] for inst in insts],\n trg_pad_idx,\n n_head,\n is_target=False,\n is_label=True,\n return_attn_bias=False,\n return_max_len=False,\n return_num_token=True)\n lbl_word = lbl_word.reshape(-1, 1)\n lbl_weight = lbl_weight.reshape(-1, 1)\n\n data_inputs = [\n src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,\n trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight\n ]\n\n return data_inputs",
"def conditional_forward(\n backbone: tf.keras.Model,\n moving_image: tf.Tensor,\n fixed_image: tf.Tensor,\n moving_label: (tf.Tensor, None),\n moving_image_size: tuple,\n fixed_image_size: tuple,\n) -> [tf.Tensor, tf.Tensor]:\n\n # expand dims\n # need to be squeezed later for warping\n moving_image = tf.expand_dims(\n moving_image, axis=4\n ) # (batch, m_dim1, m_dim2, m_dim3, 1)\n fixed_image = tf.expand_dims(\n fixed_image, axis=4\n ) # (batch, f_dim1, f_dim2, f_dim3, 1)\n moving_label = tf.expand_dims(\n moving_label, axis=4\n ) # (batch, m_dim1, m_dim2, m_dim3, 1)\n\n # adjust moving image\n if moving_image_size != fixed_image_size:\n moving_image = layer_util.resize3d(\n image=moving_image, size=fixed_image_size\n ) # (batch, f_dim1, f_dim2, f_dim3, 1)\n moving_label = layer_util.resize3d(\n image=moving_label, size=fixed_image_size\n ) # (batch, f_dim1, f_dim2, f_dim3, 1)\n\n # conditional\n inputs = tf.concat(\n [moving_image, fixed_image, moving_label], axis=4\n ) # (batch, f_dim1, f_dim2, f_dim3, 3)\n pred_fixed_label = backbone(inputs=inputs) # (batch, f_dim1, f_dim2, f_dim3, 1)\n pred_fixed_label = tf.squeeze(\n pred_fixed_label, axis=4\n ) # (batch, f_dim1, f_dim2, f_dim3)\n\n warping = layer.Warping(fixed_image_size=fixed_image_size)\n grid_fixed = tf.squeeze(warping.grid_ref, axis=0) # (f_dim1, f_dim2, f_dim3, 3)\n\n return pred_fixed_label, grid_fixed",
"def image_model_fn(self, features, labels, mode):\n col_count, row_count = self.metadata_.get_matrix_size(0)\n sequence_size = self.metadata_.get_sequence_size()\n output_dim = self.metadata_.get_output_size()\n\n # Input Layer\n input_layer = features[\"x\"]\n # Transpose X to 4-D tensor: [batch_size, row_count, col_count, sequence_size]\n # Normally the last axis should be channels instead of time axis, but they\n # are both equal to 1 for images\n hidden_layer = tf.transpose(input_layer, [0, 2, 3, 1])\n # At begining number of filters = 32\n num_filters = 32\n while True:\n hidden_layer = tf.layers.conv2d(\n inputs=hidden_layer,\n filters=num_filters,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n hidden_layer = tf.layers.max_pooling2d(inputs=hidden_layer, pool_size=[2, 2], strides=2)\n num_rows = hidden_layer.shape[1]\n num_columns = hidden_layer.shape[2]\n num_filters *= 2 # Double number of filters each time\n if num_rows == 1 or num_columns == 1:\n break\n hidden_layer = tf.layers.flatten(hidden_layer)\n hidden_layer = tf.layers.dense(inputs=hidden_layer, units=1024, activation=tf.nn.relu)\n hidden_layer = tf.layers.dropout(\n inputs=hidden_layer, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n logits = tf.layers.dense(inputs=hidden_layer, units=output_dim)\n sigmoid_tensor = tf.nn.sigmoid(logits, name=\"sigmoid_tensor\")\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `sigmoid_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": sigmoid_tensor\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n # For multi-label classification, a correct loss is sigmoid cross entropy\n loss = sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.AdamOptimizer()\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)",
"def model(inputs, target_images, is_training):\n # if isinstance(inputs, tuple):\n assert mask_augs >= 0. and mask_augs <= 1., \"mask_augs must be in [0, 1]\"\n if FLAGS.use_td_loss and isinstance(inputs, tuple):\n # print('#'*80)\n # print(inputs)\n assert metric is not None, \"Metric function is None\"\n inputs, augs = inputs\n B = inputs.get_shape().as_list()[0]\n A = augs.get_shape().as_list()[1]\n if mask_augs > 0:\n mask = tf.cast(tf.greater(tf.random.uniform(shape=[B, A], minval=0., maxval=1.), 0.5), augs.dtype) # noqa\n bias = mask * -1\n augs = (augs * mask) + bias # Randomly mask out augs for difficulty and code those dims as -1\n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training=is_training)\n print(\"Features: \")\n print(features)\n print(\"---\")\n # Global average pool of B 7 7 2048 -> B 2048\n if data_format == 'channels_last':\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n print(\"Outputs: \")\n print(outputs)\n print(\"---\")\n # B 2048\n\n h_w = features.get_shape().as_list()[1]\n # print(h_w)\n\n augs = tf.tile(augs[:,None,None,:], tf.constant([1,h_w,h_w,1]))\n print(\"Augs: \")\n print(augs)\n print(\"---\")\n features = tf.concat([features, augs], axis=-1)\n \n with tf.variable_scope('decoder'):\n recon_images = decoder(\n features,\n block_activities,\n is_training=is_training,\n skip=skip)\n print(\"Reconstructed images and target images: \")\n print(recon_images)\n print(target_images)\n print(\"---\")\n with tf.variable_scope('metric'):\n # Squash both recon and target images\n recon_images_squash = tf.tanh(recon_images)\n target_images = (target_images * 2) - 1\n Bt = target_images.get_shape().as_list()[0]\n Br = recon_images_squash.get_shape().as_list()[0]\n if Bt == Br:\n # Attractive + repulsive loss\n pass\n elif Bt * 2 == Br:\n # Attractive-only loss\n target_images = tf.concat([target_images, target_images], 0)\n\n # Differentiable perceptual metric. 
First reconstruction.\n # both_images = tf.concat([recon_images, target_images], -1) # B H W 6\n all_images = tf.concat([recon_images_squash, target_images], 0) # Stack these in batch dim\n metric_all_images = metric(all_images, is_training=is_training)\n # B = metric_all_images.get_shape().as_list()[0]\n metric_all_images = tf.reshape(metric_all_images, [B, -1])\n metric_hidden_r, metric_hidden_t = tf.split(metric_all_images, 2, 0) # Split these in batch dim\n\n # Prep recon_images for visualization\n # recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n # recon_images = (recon_images + 5) / 10\n\n recon_mean, recon_std = tf.nn.moments(recon_images, axes=[1, 2], keep_dims=True)\n recon_images = (recon_images - recon_mean) / recon_std\n recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n recon_images = (recon_images + 5) / 10\n # recon_images = recon_images_squash\n if greyscale_viz:\n recon_images = tf.image.rgb_to_grayscale(recon_images)\n recon_images = tf.concat([recon_images, recon_images, recon_images], -1)\n print(\"Embedding output: \")\n print(metric_hidden_t)\n print(\"---\")\n return outputs, recon_images, metric_hidden_r, metric_hidden_t\n\n else:\n # augs = None\n \n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training)\n \n if data_format == 'channels_last':\n print(\"Features:\")\n print(features)\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n \n # filter_trainable_variables(trainable_variables, after_block=5)\n # add_to_collection(trainable_variables, 'trainable_variables_inblock_')\n\n return outputs",
"def model(inputs, is_training):\n\n\n if data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n\n #localize network to generate the transformation parameters\n # raw_inputs = inputs\n\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 32, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer=tf.variance_scaling_initializer())\n\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 64, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer = tf.variance_scaling_initializer())\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n # inputs = tf.layers.flatten(inputs = inputs)\n\n # inputs = tf.layers.dense(inputs = inputs, units = 128)\n # print(inputs.shape)\n # trans_parameters = tf.layers.dense(inputs = inputs, units = 6)\n # print(trans_parameters.shape)\n # inputs = stn(input_fmap = raw_inputs, theta = trans_parameters, out_dims = [60, 60])\n\n\n\n #embedding network\n inputs = conv2d_fixed_padding(inputs = inputs, filters = 64, kernel_size = 7, strides = 2, data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_conv')\n\n inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'SAME', data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n inputs = block_layer(inputs = inputs, filters = 64, block_fn = block_fn, blocks = layers[0], strides = 1, \n is_training = is_training, name = 'blcok_layer1', data_format = data_format)\n print('height:', inputs.shape[1])\n\n #attention module\n #input_fmap = inputs\n # inputs = tf.reshape(inputs, (-1, 64))\n #inputs = tf.layers.dense(inputs = inputs, units = 32, activation = tf.tanh)\n\n #inputs = tf.reshape(inputs, [-1, 32])\n #inputs = tf.layers.dense(inputs = inputs, units = 1, activation = tf.sigmoid)\n\n #attention_para = tf.reshape(inputs, [-1, 21, 21, 1])\n\n \n #inputs = tf.multiply(input_fmap, attention_para)\n\n inputs = block_layer(inputs = inputs, filters = 128, block_fn = block_fn, blocks = layers[1], strides = 2,\n is_training = is_training, name = 'block_layer2', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 256, block_fn = block_fn, blocks = layers[2], strides = 2, \n is_training = is_training, name = 'block_layer3', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 512, block_fn = block_fn, blocks = layers[3], strides = 2, \n is_training = is_training, name = 'block_layer4', data_format = data_format)\n\n print('height:', inputs.shape)\n inputs = batch_norm_relu(inputs, is_training, data_format)\n \n inputs = tf.layers.average_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'VALID', data_format = data_format)\n\n inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n inputs = tf.identity(inputs, 'final_avg_pool')\n\n inputs = tf.layers.flatten(inputs = 
inputs)\n\n #TODO\n inputs = tf.layers.dense(inputs = inputs, units = num_classes)\n\n print(inputs.shape)\n outputs = tf.identity(inputs, 'final_dense')\n\n return outputs",
"def generate_model():\n model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(\n 32,\n (3, 3),\n padding=\"same\",\n activation=\"relu\",\n input_shape=(IMG_SIZE, IMG_SIZE, IMG_CHANNELS)\n ),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(32, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(128, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dropout(0.3),\n tf.keras.layers.Dense(LABEL_COUNT, activation=\"softmax\")\n ])\n model.compile(\n optimizer=\"adam\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n return model",
"def input_fn(params=None):\n del params\n filenames = gfile.Glob(os.path.join(flags.data_dir, pattern))\n if not filenames:\n raise RuntimeError('No data files found.')\n filename_queue = tf.train.string_input_producer(filenames, shuffle=True)\n reader = tf.TFRecordReader()\n\n _, val = reader.read(filename_queue)\n serialized_input = tf.reshape(val, shape=[1])\n\n image_seq = None\n\n for i in range(0, flags.sequence_length, flags.skip_num):\n image_name = 'image_' + str(i)\n\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n features = {\n pose_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n image_name:\n tf.FixedLenFeature([1], tf.string),\n action_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n joint_pos_name:\n tf.FixedLenFeature([flags.joint_pos_dim], tf.float32)\n }\n else:\n features = {\n image_name: tf.FixedLenFeature([1], tf.string),\n }\n\n parsed_input = tf.parse_example(serialized_input, features)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH), method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n [images, actions, poses, joint_pos] = tf.train.shuffle_batch(\n [image_seq, action_seq, pose_seq, joint_pos_seq],\n batch_size,\n num_threads=4,\n capacity=200 * batch_size,\n min_after_dequeue=batch_size * 10,\n )\n\n joint_poses = tf.concat([joint_pos, poses], 2)\n\n output_features = {\n IMAGE_FEATURE_NAME: images,\n JOINT_POSE_FEATURE_NAME: joint_poses,\n ACTION_FEATURE_NAME: actions\n }\n\n return output_features, None",
"def predictint(test_images):\n # Define the model (same as when creating the model file)\n x = tf.placeholder(tf.float32, [None, image_size])\n W = tf.Variable(tf.zeros([image_size, image_labels]))\n b = tf.Variable(tf.zeros([image_labels]))\n is_test = tf.placeholder(tf.bool)\n # Model Parameters\n W_conv1 = tf.get_variable(\"W_conv1\", shape=[5, 5, 1, 32], initializer=weight_xavier_init(5 * 5 * 1, 32))\n W_conv2 = tf.get_variable(\"W_conv2\", shape=[5, 5, 32, 64], initializer=weight_xavier_init(5 * 5 * 32, 64))\n W_fc1 = tf.get_variable(\"W_fc1\", shape=[64 * 7 * 7, 1024], initializer=weight_xavier_init(64 * 7 * 7, 1024))\n W_fc2 = tf.get_variable(\"W_fc2\", shape=[1024, image_labels], initializer=weight_xavier_init(1024, image_labels))\n\n b_conv1 = bias_variable([32])\n b_conv2 = bias_variable([64])\n b_fc1 = bias_variable([1024])\n b_fc2 = bias_variable([image_labels])\n\n x_image = tf.reshape(x, [-1, image_width, image_height, 1])\n conv1 = conv2d(x_image, W_conv1) + b_conv1\n conv1_bn = batchnorm(conv1, b_conv1, is_test, True)\n h_conv1 = tf.nn.relu(conv1_bn)\n h_pool1 = max_pool_2x2(h_conv1)\n\n conv2 = conv2d(h_pool1, W_conv2) + b_conv2\n conv2_bn = batchnorm(conv2, b_conv2, is_test, True)\n h_conv2 = tf.nn.relu(conv2_bn)\n h_pool2 = max_pool_2x2(h_conv2)\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, W_fc1.get_shape().as_list()[0]])\n fc1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1\n fc1_bn = batchnorm(fc1, b_fc1, is_test, False)\n h_fc1 = tf.nn.relu(fc1_bn)\n\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n init_op = tf.initialize_all_variables()\n saver = tf.train.Saver()\n\n \"\"\"\n Load the my-model file\n file is stored in the same directory as this python script is started\n Use the model to predict the integer. Integer is returend as list.\n\n Based on the documentatoin at\n https://www.tensorflow.org/versions/master/how_tos/variables/index.html\n \"\"\"\n predicted_lables = np.zeros(test_images.shape[0])\n with tf.Session() as sess:\n sess.run(init_op)\n saver.restore(sess, \"F:\\PycharmProject\\CNN_mnist_base\\model\\my-model\")\n # print (\"Model restored.\")\n predict = tf.argmax(y_conv, 1)\n for i in range(0, test_images.shape[0]):\n imagein = test_images[i]\n predicted_lables[i] = predict.eval(feed_dict={x: [imagein], keep_prob: 1.0, is_test: False}, session=sess)\n sess.close()\n return predicted_lables",
"def _build_model(self):\n\n # Placeholders for our input\n # Our input are FRAMES_STATE RGB frames of shape of the gridworld\n self.X_pl = tf.placeholder(shape=[None, self.x_size, self.y_size,\n self.frames_state]\n , dtype=tf.uint8, name=\"X\")\n # The TD target value\n self.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name=\"y\")\n # Integer id of which action was selected\n self.actions_pl = tf.placeholder(shape=[None], dtype=tf.int32, name=\"actions\")\n\n X = tf.to_float(self.X_pl) / 255.0\n batch_size = tf.shape(self.X_pl)[0]\n\n # NETWORK ARCHITECTURE\n # tf.contrib.layers.conv2d(input, num_outputs, kernel_size, stride)\n conv1 = tf.contrib.layers.conv2d(X, 64, 2, 1, activation_fn=tf.nn.relu)\n # try with padding = 'VALID'\n # pool1 = tf.contrib.layers.max_pool2d(conv1, 2)\n # conv2 = tf.contrib.layers.conv2d(pool1, 32, WX, 1, activation_fn=tf.nn.relu)\n\n # Fully connected layers\n flattened = tf.contrib.layers.flatten(conv1)\n fc1 = tf.contrib.layers.fully_connected(flattened, 64)\n self.predictions = tf.contrib.layers.fully_connected(fc1, self.actions_num)\n\n # Get the predictions for the chosen actions only\n gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n\n # Calcualte the loss\n self.losses = tf.squared_difference(self.y_pl, self.action_predictions)\n self.loss = tf.reduce_mean(self.losses)\n\n # Optimizer Parameters from original paper\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)\n self.train_op = self.optimizer.minimize(self.loss, global_step=tf.train.get_global_step())",
"def forward(self, inputs, hidden):\n # import pdb; pdb.set_trace()\n (seq_len, batch_size, dims) = inputs[0].size()\n\n img1s = []\n img2s = []\n for i in range(batch_size):\n img1 = inputs[0][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n img1s.append(img1.contiguous().view(seq_len, 1, -1))\n\n # img2 = inputs[1][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n # img2s.append(img2.contiguous().view(seq_len, 1, -1))\n\n seq1 = torch.cat(img1s, dim=1)\n both = seq1\n # seq2 = torch.cat(img2s, dim=1)\n # both = torch.cat([seq1, seq2], dim=2)\n # import pdb; pdb.set_trace()\n embeds = self.fc1(both.view(-1, 100352 // 2))\n embeds = self.relu1(embeds)\n # embeds = self.bn1(embeds)\n\n embeds = embeds.view(-1, batch_size, self.hidden_dim)\n lstm_out, hidden = self.lstm(embeds, hidden)\n out = self.fc2(lstm_out.view(-1, lstm_out.size(2)))\n out = out.view(-1, batch_size, self.output_dim)\n out = self.sigmoid(out)\n return out, hidden",
"def forward(self, inputs, hidden):\n # import pdb; pdb.set_trace()\n (seq_len, batch_size, dims) = inputs[0].size()\n\n img1s = []\n img2s = []\n for i in range(batch_size):\n img1 = inputs[0][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n img1s.append(img1.contiguous().view(seq_len, 1, -1))\n\n img2 = inputs[1][:, i, :].contiguous().view(seq_len, 1, 224, 224)\n img2s.append(img2.contiguous().view(seq_len, 1, -1))\n\n seq1 = torch.cat(img1s, dim=1)\n both = seq1\n # seq2 = torch.cat(img2s, dim=1)\n # both = torch.cat([seq1, seq2], dim=2)\n # import pdb; pdb.set_trace()\n embeds = self.fc1(both.view(-1, 100352))\n embeds = self.relu1(embeds)\n # embeds = self.bn1(embeds)\n\n embeds = embeds.view(-1, batch_size, self.hidden_dim)\n lstm_out, hidden = self.lstm(embeds, hidden)\n out = self.fc2(lstm_out.view(-1, lstm_out.size(2)))\n out = out.view(-1, batch_size, self.output_dim)\n out = self.sigmoid(out)\n return out, hidden",
"def create_single_fc_model(feats2d, shapes, model_settings, is_training):\n # Inputs\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Reshape into 1D\n print(feats2d.get_shape()) \n assert len(feats2d.get_shape()) == 3 # a tuple (batch_size, seq_length, n_mel) - static shape\n #shape = feats2d.get_shape().as_list() # a list: [batch_size, seq_length, n_mel] - static shape\n shape = tf.shape(feats2d) # a tensor - dynamic shape\n dim = shape[1]*shape[2] # dim = prod(9,2) = 18\n\n #dim = reduce(lambda x, y: x*y, shape[1:])\n\n #feats1d = tf.reshape(feats2d, [-1, dim]) \n #batch_size = tf.shape(feats2d)[0] # a strided slice\n print(shape)\n #print(dim)\n #print(batch_size)\n feats1d = tf.reshape(feats2d, [shape[0], -1]) \n\n # Get dimensions\n feat1d_size = tf.shape(feats1d)[1] # a strided slice\n #feat1d_size = feats1d.get_shape().as_list()[1]\n num_classes = model_settings['num_classes']\n\n print(feats1d)\n print(feat1d_size)\n print(num_classes)\n # Weights and biases\n # weights = tf.Variable(\n # tf.truncated_normal([feat1d_size, num_classes], stddev=0.001))\n weights = tf.get_variable(name=\"weights\",shape=[dim, num_classes], initializer=tf.truncated_normal_initializer(), validate_shape=False)\n bias = tf.Variable(tf.zeros([num_classes]))\n\n # Fully-connected layer\n logits = tf.matmul(feats1d, weights) + bias\n if is_training:\n return logits, dropout_prob\n else:\n return logits",
"def __call__(self, inputs, training):\n\n self.training = training\n input_shape = inputs.shape\n if self.data_format == 'channels_first':\n img_size = (input_shape[2], input_shape[3])\n else:\n img_size = (input_shape[1], input_shape[2])\n\n with self._model_variable_scope('ssd300_model'):\n if self.data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n net = super(Model, self).__call__(inputs, training)\n\n with self._model_variable_scope('ssd300_model'):\n\n net = self._atrous_convolution_2d(net, filters=1024,\n kernel_size=3,\n atrous_rate=6, name='fc6')\n\n net = self._conv2d(net, filters=1024, kernel_size=1,\n padding='same', name='fc7')\n\n net = self._conv2d(net, filters=256, kernel_size=1,\n padding='same', name='conv6_1')\n\n net = self._conv2d(net, filters=512, kernel_size=3,\n strides=2,\n padding='same', name='conv6_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv7_1')\n\n net = self._conv2d(fixed_padding(net, 3, self.data_format),\n filters=256, kernel_size=3,\n strides=2,\n padding='valid', name='conv7_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv8_1')\n\n net = self._conv2d(net, filters=256, kernel_size=3,\n strides=2,\n padding='same', name='conv8_2')\n\n if self.data_format == 'channels_first':\n net = tf.reduce_mean(net, [2, 3])\n else:\n net = tf.reduce_mean(net, [1, 2])\n self.layers['pool6'] = net\n\n # Prediction from conv4_3\n conv4_3_norm = self._normalize(net, 20, name='conv4_3_norm')\n num_priors = 3\n x = self._conv2d(conv4_3_norm, filters=num_priors * 4, kernel_size=3,\n padding='same', name='conv4_3_norm_mbox_loc')\n self.layers['conv4_3_norm_mbox_loc_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_loc_flat')\n\n x = self._conv2d(conv4_3_norm, filters=num_priors * self.num_classes,\n kernel_size=3, padding='same',\n name='conv4_3_norm_mbox_conf')\n self.layers['conv4_3_norm_mbox_conf_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_conf_flat')\n\n prior_box = PriorBox(img_size, min_size=30.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv4_3_norm_mbox_priorbox')\n net['conv4_3_norm_mbox_priorbox'] = prior_box(conv4_3_norm)\n\n return net",
"def forward_train(self, imgs, label, token_ids=None, segment_ids=None, input_mask=None, ans_ids=None, ans_mask=None, **kwargs):\n # (batch_size, num_clips*num_crops, channel, num_segments, h, w) -> (batch_size*num_clips*num_crops, channel, num_segments, h, w)\n imgs = imgs.reshape((-1, ) + imgs.shape[2:]) \n if self.from_scratch:\n imgs = imgs / 255.0\n B_text = token_ids.shape[0]\n # text reshape: (batch_size, num_candidates, seq_length) -> (batch_size * num_candidates, seq_length)\n token_ids = token_ids.reshape((-1, ) + token_ids.shape[2:])\n segment_ids = segment_ids.reshape((-1, ) + segment_ids.shape[2:])\n input_mask = input_mask.reshape((-1, ) + input_mask.shape[2:])\n losses = dict()\n visual_token = self.extract_visual_feat(imgs) # b, d, T, h, w\n B, D, T, H, W = visual_token.shape\n if B_text != B:\n visual_token = visual_token.view(B_text, -1, D, T, H, W)\n visual_token = visual_token.mean(dim=1)\n \n # text feature #\n text_out_with_mask = self.text_backbone(token_ids, input_mask)\n text_out_last_hidden_state = text_out_with_mask['last_hidden_state']\n\n # contrastive type finetuning retrieval #\n if self.task == 'retrieval':\n # text_only_out = self.text_backbone(token_ids, input_mask)\n visual_emb, text_emb = self.ssl_head(visual_token, text_out_last_hidden_state, input_mask, token_ids)\n nce_loss = self.loss_func(visual_emb, text_emb)\n losses['retrieval_nce_loss'] = nce_loss \n elif self.task == 'video_qa' or self.task == 'FIB':\n B, D, T, H, W = visual_token.shape\n visual_token = visual_token.view(B, D, T, -1).permute(0, 2, 3, 1)\n if hasattr(self.qa_head, 'num_labels'):\n num_choices = self.qa_head.num_labels\n visual_token_all = visual_token\n else:\n num_choices = int(token_ids.shape[0] / B)\n visual_token_all = visual_token.unsqueeze(1).expand(-1, num_choices, -1, -1, -1).flatten(0,1)\n\n output = self.multimodal_backbone(visual_token=visual_token_all, text_input_mask=input_mask, text_input_embeds=text_out_last_hidden_state)\n \n if self.answer_mask:\n mask_idx = torch.where(token_ids == 103)\n itm_output = output['t_last_hidden_state'][mask_idx]\n elif self.answer_cls:\n if 'cls_last_hidden_state' in output:\n itm_output = output['cls_last_hidden_state'].squeeze()\n else:\n itm_output = output['t_last_hidden_state'][:, 0]\n if self.itm_head is not None:\n itm_output = self.itm_head(itm_output)\n\n else:\n all_cls_emb = output['last_hidden_state'][:, 0]\n itm_output = self.itm_head(all_cls_emb)\n \n if self.qa_head is not None:\n final_output = self.qa_head(itm_output).view(-1, num_choices)\n final_label = label\n else:\n final_output = itm_output[:, 1]\n final_label = label\n\n\n qa_loss = self.loss_func(final_output, final_label.view(-1))\n losses['qa_loss'] = qa_loss\n\n\n\n return losses",
"def cnn_model_fn(features, labels, mode, num_classes=20):\n # Write this function\n # Input Layer\n input_layer = tf.reshape(features[\"x\"], [-1, 256, 256, 3])\n img_num = input_layer.get_shape().as_list()[0]\n input_image_layer = input_layer\n\n '''\n if img_num is not None:\n for img_idx in range(img_num):\n image = input_layer[img_idx,:]\n image = tf.random_crop(value = image, size = [224, 224, 3])\n image = tf.image.flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image=image,target_height = 224, target_width = 224)\n input_image_layer.append(image)\n\n input_image_layer = tf.convert_to_tensor(input_image_layer, dtype=tf.float32)\n else:\n input_image_layer = input_layer\n print('img_num shape {}: input_layer is {} '.format(img_num, np.shape(input_layer.get_shape().as_list())))\n print(\"img_num is None\")\n '''\n\n # Convolutional Layer #1\n conv1_1 = tf.layers.conv2d(\n inputs=input_image_layer,\n filters=64,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n # pad = 1\n conv1_2 = tf.layers.conv2d(\n inputs=conv1_1,\n filters=64,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool1 = tf.layers.max_pooling2d(inputs=conv1_2, pool_size=[2, 2], strides=2)\n\n\n conv2_1 = tf.layers.conv2d(\n inputs=pool1,\n filters=128,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n\n conv2_2 = tf.layers.conv2d(\n inputs= conv2_1,\n filters=128,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(inputs=conv2_2, pool_size=[2, 2], strides=2)\n\n conv3_1 = tf.layers.conv2d(\n inputs= pool2,\n filters=256,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv3_2 = tf.layers.conv2d(\n inputs= conv3_1,\n filters=256,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv3_3 = tf.layers.conv2d(\n inputs= conv3_2,\n filters=256,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool3 = tf.layers.max_pooling2d(inputs=conv3_3, pool_size=[2, 2], strides=2)\n\n\n conv4_1 = tf.layers.conv2d(\n inputs= pool3,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv4_2 = tf.layers.conv2d(\n inputs= conv4_1,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv4_3 = tf.layers.conv2d(\n inputs= conv4_2,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool4 = tf.layers.max_pooling2d(inputs=conv4_3, pool_size=[2, 2], strides=2)\n\n conv5_1 = tf.layers.conv2d(\n inputs= pool4,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv5_2 = tf.layers.conv2d(\n inputs= conv5_1,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv5_3 = tf.layers.conv2d(\n inputs= conv5_2,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n pool5 = tf.layers.max_pooling2d(inputs=conv5_3, pool_size=[2, 2], strides=2)\n\n # Dense Layer\n pool5_shape = pool5.get_shape()\n pool5_list = pool5_shape.as_list()\n pool5_product = np.int32(pool5_list[1]*pool5_list[2]*pool5_list[3])\n pool5_flat = tf.reshape(pool5, [-1, pool5_product])\n\n dense6 = tf.layers.dense(inputs=pool5_flat, units=4096,activation=tf.nn.relu, 
kernel_initializer=tf.random_normal_initializer(mean=0.0,stddev=0.01),bias_initializer=tf.zeros_initializer(),)\n dropout6 = tf.layers.dropout(\n inputs=dense6, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n\n dense7 = tf.layers.dense(inputs=dropout6, units= 4096, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0.0,stddev=0.01),\n bias_initializer=tf.zeros_initializer(),)\n dropout7 = tf.layers.dropout(\n inputs=dense7, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits Layer\n logits = tf.layers.dense(inputs=dropout7, units=20)\n\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.sigmoid(logits, name=\"sigmoid_tensor\")\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n\n loss = tf.identity(tf.losses.sigmoid_cross_entropy(multi_class_labels=labels,logits=logits))\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n grad_input = tf.gradients(loss,input_layer)\n grad_conv1_1 = tf.gradients(loss, conv1_1)\n grad_conv2_1 = tf.gradients(loss, conv2_1)\n grad_conv3_1 = tf.gradients(loss, conv3_1)\n grad_conv4_1 = tf.gradients(loss, conv4_1)\n grad_conv5_1 = tf.gradients(loss, conv5_1)\n grad_dense6 = tf.gradients(loss, dense6)\n grad_dense7 = tf.gradients(loss, dense7)\n\n starter_learning_rate = 0.001\n global_step = tf.train.get_global_step()\n learning_rate = tf.train.exponential_decay(learning_rate= starter_learning_rate, global_step = global_step,\n decay_steps = 100000, decay_rate= 0.5, staircase=True)\n optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n #tf.summary()\n # print(\"Training\")\n tf.summary.scalar(name= 'train_loss', tensor = loss )\n tf.summary.scalar(name= 'learning rate', tensor = learning_rate)\n tf.summary.histogram(name='grad_dense7', values=grad_input)\n tf.summary.histogram(name='grad_conv1_1', values= grad_conv1_1)\n tf.summary.histogram(name='grad_conv2_1', values=grad_conv2_1)\n tf.summary.histogram(name='grad_conv3_1', values=grad_conv3_1)\n tf.summary.histogram(name='grad_conv4_1', values=grad_conv4_1)\n tf.summary.histogram(name='grad_conv5_1', values=grad_conv5_1)\n tf.summary.histogram(name='grad_dense6', values=grad_dense6)\n tf.summary.histogram(name='grad_dense7', values=grad_dense7)\n\n tf.summary.image(name='image', tensor= input_layer)\n\n summary_hook = tf.train.SummarySaverHook(\n 10,\n output_dir='./models/03_VGG_Test0301',\n summary_op=tf.summary.merge_all())\n\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op, training_hooks = [summary_hook])\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)",
"def make_full_image_model(self, label_shape: Tuple[int]) -> Tuple[k.Model, Tuple[int]]:\n n_inner_y = self.inner_size_from_label_size(label_shape[0])\n n_inner_x = self.inner_size_from_label_size(label_shape[1])\n image_shape = (self.subimage_size_from_inner_size(n_inner_y),\n self.subimage_size_from_inner_size(n_inner_x),\n self.n_channels)\n model = k.Sequential()\n model.add(k.layers.InputLayer(input_shape=image_shape))\n for layer in self.layers:\n model.add(layer)\n # The output size of the model is given by the size of the innermost layer * 2^n_folds. This may\n # fail to match a specific target size. Center crop the result to make it match.\n model.add(k.layers.experimental.preprocessing.CenterCrop(label_shape[0], label_shape[1]))\n\n return model, image_shape",
"def inference_input():\n # Decode image into float range [0,1]\n jpegs = tf.placeholder(tf.string, shape=(1), name='input')\n image_buffer = tf.squeeze(jpegs, [0])\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.image.central_crop(image, central_fraction=0.875)\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [FLAGS.image_size, FLAGS.image_size], align_corners=False)\n image = tf.squeeze(image, [0])\n\n # Rescale the image to [-1,-1]\n image = tf.sub(image, 0.5)\n image = tf.mul(image, 2.0)\n images = tf.expand_dims(image, 0)\n\n return images, jpegs",
"def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. - 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl"
] | [
"0.74781317",
"0.7020652",
"0.68205154",
"0.6803613",
"0.6758614",
"0.6745477",
"0.6561226",
"0.6530336",
"0.6488972",
"0.6451771",
"0.6401803",
"0.62725765",
"0.62625706",
"0.6262408",
"0.6254835",
"0.616202",
"0.6155952",
"0.6150882",
"0.6128436",
"0.6106122",
"0.60909444",
"0.6081347",
"0.60772014",
"0.6074599",
"0.6045942",
"0.6030833",
"0.6026979",
"0.6018554",
"0.60175574",
"0.6010963"
] | 0.7470435 | 1 |
Add regularization loss of ddf into model. | def add_ddf_loss(
model: tf.keras.Model, ddf: tf.Tensor, loss_config: dict
) -> tf.keras.Model:
loss_reg = tf.reduce_mean(
deform_loss.local_displacement_energy(ddf, **loss_config["regularization"])
)
weighted_loss_reg = loss_reg * loss_config["regularization"]["weight"]
model.add_loss(weighted_loss_reg)
model.add_metric(loss_reg, name="loss/regularization", aggregation="mean")
model.add_metric(
weighted_loss_reg, name="loss/weighted_regularization", aggregation="mean"
)
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost",
"def apply_regularization(self, w, loss, gradient, regularization, lambda_, m):\n if regularization == 'l2':\n loss += lambda_ / (2 * m) * np.squeeze(w.T.dot(w))\n gradient += lambda_ / m * w\n elif regularization == 'l1':\n loss += lambda_ / (2 * m) * np.sum(np.abs(w))\n gradient += lambda_ / m * np.sum((w >= 0) * 1 + (w < 0) * -1)\n return loss, gradient",
"def update_regularizer(self, regularizer = regularizers.l1(0.1)):\n # for layer in self.layers:\n # layer.kernel_regularizer = regularizer\n self.list_cnn[-1].kernel_regularizer = regularizer",
"def reg_loss(model: nn.Module, regularizer: str, l1: float=0.01, l2: float=0.01):\n if regularizer == 'l1':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n return l1_reg\n if regularizer == 'l2':\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l2_reg\n if regularizer == 'l1_l2':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l1_reg + l2_reg",
"def _add_regularization_loss(self, selector_outputs):\n if self._entropy_reg is not None:\n # Add entropy regularization to each single-expert selector to encourage\n # sparsity.\n self.add_loss(self._entropy_reg(selector_outputs))\n\n if not self._power_of_2:\n # If the number of experts is not a power of 2, we add a regularization\n # term to prevent the \"non-reachable\" experts from getting all the nonzero\n # weights for any single-expert selector. The regularization term is equal\n # to 1/sum(weights of reachable experts) so that the reachable experts\n # cannot get zero weights.\n # In case of example conditioning, this regularizer is added per example.\n # NOTE: This regularization term has no effect once the sum of the weights\n # of the reachable experts reaches 1, which is the typical/expected case.\n self.add_loss(\n tf.math.reduce_sum(1 / tf.math.reduce_sum(selector_outputs, axis=-1)))",
"def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers",
"def __G_loss(self, D, fake):\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n\n return loss",
"def _learn_using_GD(self, y, tx, w, fn, gamma, lambda_, regularization):\n loss, grad = fn(y, tx, w, lambda_)\n loss, grad = self.apply_regularization(w, loss, grad, regularization, lambda_, tx.shape[0])\n w = w - gamma * grad\n return loss, w",
"def add_l2_reg(loss_f, grad_f, lambda_):\n\n def l2_loss(y, tx, w, *args, **kwargs):\n return loss_f(y, tx, w, *args, **kwargs) + lambda_ * np.linalg.norm(w)\n \n def l2_grad(y, tx, w, *args, **kwargs):\n return grad_f(y, tx, w, *args, **kwargs) + 2 * lambda_ * w\n \n return l2_loss, l2_grad",
"def add_regularization(self, regularizer):\n for layer in self.prenet_layers:\n for weights in layer.trainable_variables:\n if 'bias' not in weights.name:\n # print(\"Added regularizer to {}\".format(weights.name))\n if weights.dtype.base_dtype == tf.float16:\n tf.add_to_collection(\n 'REGULARIZATION_FUNCTIONS', (weights, regularizer)\n )\n else:\n tf.add_to_collection(\n ops.GraphKeys.REGULARIZATION_LOSSES,\n regularizer(weights),\n )",
"def update_lr(self, g_lr, d_lr):\n for param_group in self.g_optimizer.param_groups:\n param_group['lr'] = g_lr\n for param_group in self.d_optimizer.param_groups:\n param_group['lr'] = d_lr",
"def update_lr(self, g_lr, d_lr):\n for param_group in self.g_optimizer.param_groups:\n param_group['lr'] = g_lr\n for param_group in self.d_optimizer.param_groups:\n param_group['lr'] = d_lr",
"def RMFE_object(reg_weight=1e-10, criterion=torch.nn.BCEWithLogitsLoss(), is_packed=False):\n def regfunc(pred, truth, args):\n model = args['model']\n lossfunc = RegularizationLoss(criterion, model, reg_weight, is_packed)\n ce_loss = criterioning(pred, truth, criterion)\n inps = args['inputs']\n try:\n reg_loss = lossfunc(pred, [i.cuda() for i in inps])\n except RuntimeError:\n print(\"No reg loss for validation\")\n reg_loss = 0\n return ce_loss+reg_loss\n return regfunc",
"def objective_grad(self, wb, X, y):\n N, D = X.shape\n w = wb[:-1]\n b = wb[-1]\n loss_grad = np.zeros(D+1) \n # grad wrt regularization\n loss_grad[-1] = 2 * self.reg_param * (b - self.b0) # grad_b\n loss_grad[:-1] = 2 * self.reg_param * (w - self.w0) # grad_w\n\n for i in range(N):\n tmpvar = np.exp(-1 * y[i] * (np.dot(w, X[i]) + b)) \n loss_grad[-1] += tmpvar/(1 + tmpvar) * -1 * y[i] # grad_b \n loss_grad[:-1] += tmpvar/(1 + tmpvar) * -1 * y[i] * X[i] # grad_w\n\n return loss_grad",
"def D_loss_basic(self, netD, real, fake):\n # Real\n D_real = netD(real)\n D_real_loss = self.GANLoss(D_real, True, True)\n # fake\n D_fake = netD(fake)\n D_fake_loss = self.GANLoss(D_fake, False, True)\n # loss for discriminator\n D_loss = (D_real_loss + D_fake_loss) * 0.5\n # gradient penalty for wgan-gp\n if self.gan_mode == 'wgangp':\n gradient_penalty, gradients = base_function.cal_gradient_penalty(netD, real, fake)\n D_loss +=gradient_penalty\n\n D_loss = D_loss * self.loss_d_weight\n D_loss.backward()\n\n return D_loss",
"def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads",
"def __D_loss(self, D, real, fake):\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \\\n tf.reduce_mean(tf.square(D(fake))))\n\n return loss",
"def compute_loss(self):",
"def add_loss(self):\n \n raise RuntimeError(\"Must define function add_loss(self)\")",
"def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))",
"def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")",
"def EmbeddingL2RegularizationUpdate(embedding_variable, net_input, learn_rate, l2_reg_val):\n # TODO(student): Change this to something useful. Currently, this is a no-op.\n # net_input = net_input / tf.norm(net_input)\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n grad = l2_reg_val * tf.matmul(tf.transpose(net_input), tf.matmul(net_input, embedding_variable))\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l2_reg_val * tf.nn.l2_loss(tf.matmul(net_input, embedding_variable))\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l2 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)",
"def loss(self, X_batch, y_batch, reg):\n pass",
"def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################",
"def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel_old, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n # norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)",
"def compute_regularized_square_loss_gradient(X, y, theta, lambda_reg):\n return compute_square_loss_gradient(X,y,theta) + 2*lambda_reg*theta",
"def regularized_linear_regression(X, y, lambd):\n #####################################################\n # TODO 4: Fill in your code here #\n #####################################################\t\t\n w = None\n xtx = np.dot(X.T, X)\n xtx = np.add(xtx, np.identity(len(xtx)) * 0.1)\n w = np.dot(np.dot(np.linalg.inv(xtx), X.T), y)\n return w",
"def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss",
"def add_kl_loss(self, loss, d2=None):\n if d2 is None:\n self._kl_losses += [O.sum(loss, axis=None)]\n else:\n self._kl_losses += [O.sum(O.kl_divergence(loss, d2), axis=None)]",
"def add_loss(self, loss_fn):\n setattr(self, 'output_loss_fn', loss_fn)"
] | [
"0.63455456",
"0.61667484",
"0.60805756",
"0.60757357",
"0.60324085",
"0.59627956",
"0.58883804",
"0.58510906",
"0.58356464",
"0.5817414",
"0.58129907",
"0.58129907",
"0.5783318",
"0.5706253",
"0.5695184",
"0.5648697",
"0.5603422",
"0.55787474",
"0.5576006",
"0.5574673",
"0.5565806",
"0.55579036",
"0.5536907",
"0.55281746",
"0.5506362",
"0.5503617",
"0.54976237",
"0.5495489",
"0.5480566",
"0.54716146"
] | 0.7613506 | 1 |
Add image dissimilarity loss of ddf into model. | def add_image_loss(
model: tf.keras.Model,
fixed_image: tf.Tensor,
pred_fixed_image: tf.Tensor,
loss_config: dict,
) -> tf.keras.Model:
if loss_config["dissimilarity"]["image"]["weight"] > 0:
loss_image = tf.reduce_mean(
image_loss.dissimilarity_fn(
y_true=fixed_image,
y_pred=pred_fixed_image,
**loss_config["dissimilarity"]["image"],
)
)
weighted_loss_image = (
loss_image * loss_config["dissimilarity"]["image"]["weight"]
)
model.add_loss(weighted_loss_image)
model.add_metric(
loss_image, name="loss/image_dissimilarity", aggregation="mean"
)
model.add_metric(
weighted_loss_image,
name="loss/weighted_image_dissimilarity",
aggregation="mean",
)
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_D(self, images):\n\n # Sample noise z, generate output G(z)\n noise = self.compute_noise(images.shape[0], self.model.z_dim)\n G_output = self.model.G(noise)\n\n # Classify the generated and real batch images\n DX_score = self.model.D(images) # D(x)\n DG_score = self.model.D(G_output) # D(G(z))\n\n # Compute vanilla (original paper) D loss\n D_loss = torch.sum(-torch.mean(torch.log(DX_score + 1e-8)\n + torch.log(1 - DG_score + 1e-8)))\n\n return D_loss",
"def add_ddf_loss(\n model: tf.keras.Model, ddf: tf.Tensor, loss_config: dict\n) -> tf.keras.Model:\n loss_reg = tf.reduce_mean(\n deform_loss.local_displacement_energy(ddf, **loss_config[\"regularization\"])\n )\n weighted_loss_reg = loss_reg * loss_config[\"regularization\"][\"weight\"]\n model.add_loss(weighted_loss_reg)\n model.add_metric(loss_reg, name=\"loss/regularization\", aggregation=\"mean\")\n model.add_metric(\n weighted_loss_reg, name=\"loss/weighted_regularization\", aggregation=\"mean\"\n )\n return model",
"def add_ddf_loss(\n model: tf.keras.Model, ddf: tf.Tensor, loss_config: dict\n) -> tf.keras.Model:\n loss_reg = tf.reduce_mean(\n deform_loss.local_displacement_energy(ddf, **loss_config[\"regularization\"])\n )\n weighted_loss_reg = loss_reg * loss_config[\"regularization\"][\"weight\"]\n model.add_loss(weighted_loss_reg)\n model.add_metric(loss_reg, name=\"loss/regularization\", aggregation=\"mean\")\n model.add_metric(\n weighted_loss_reg, name=\"loss/weighted_regularization\", aggregation=\"mean\"\n )\n return model",
"def dloss(self, output, labels):\n return 2*(output - labels)/labels.shape[1]",
"def imposter_img_loss(z_image, z_text, y, report_id, similarity_function):\n loss = torch.zeros(1, device=z_image.device, requires_grad=True)\n batch_size = z_image.size(0)\n\n for i in range(batch_size):\n if similarity_function == 'dot':\n paired_similarity = torch.dot(z_image[i], z_text[i])\n if similarity_function == 'cosine':\n paired_similarity = \\\n torch.dot(z_image[i], z_text[i]) / (torch.norm(z_image[i]) * torch.norm(z_text[i]))\n if similarity_function == 'l2':\n paired_similarity = -1 * torch.norm(z_image[i] - z_text[i])\n\n # Select an imposter image index and \n # compute the maximum margin based on the image label difference\n j = i + 1 if i < batch_size - 1 else 0\n if report_id[i] == report_id[j]:\n # This means the imposter image comes from the same acquisition\n margin = 0\n elif y[i].item() == -1 or y[j].item() == -1: # '-1' means unlabeled\n margin = 0.5\n else:\n margin = max(0.5, (y[i] - y[j]).abs().item())\n\n if similarity_function == 'dot':\n imposter_similarity = torch.dot(z_image[j], z_text[i])\n if similarity_function == 'cosine':\n imposter_similarity = \\\n torch.dot(z_image[j], z_text[i]) / (torch.norm(z_image[j]) * torch.norm(z_text[i]))\n if similarity_function == 'l2':\n imposter_similarity = -1 * torch.norm(z_image[j] - z_text[i])\n\n diff_similarity = imposter_similarity - paired_similarity + margin\n if diff_similarity > 0:\n loss = loss + diff_similarity\n\n return loss / batch_size # 'mean' reduction",
"def compute_Daisy(self,image_tensor):\n image_idx = image_tensor.size()[0] #size (6, C, H, W)\n daisys = list()\n # iterate through each generated image\n for i in range(image_idx):\n image = image_tensor[i,:,:,:]\n image = image.numpy()\n # revert normalization\n x = np.zeros_like(image)\n x[0, :, :] = image[0, :, :] * STD[0] + MEAN[0]\n x[1, :, :] = image[1, :, :] * STD[1] + MEAN[1]\n x[2, :, :] = image[2, :, :] * STD[2] + MEAN[2]\n x = x*255\n cv_x = x.astype(np.uint8)\n # transpose dimensions since hog implementation requires image to be (M, N[, C]) ndarray\n cv_x = np.transpose(cv_x, (1,2,0))\n\n # convert RGB to grayscale\n grey = rgb2gray(cv_x)\n\n # 27*27*36\n daisy_ = daisy(grey,step=180, radius=58, rings=2, histograms=6,orientations=8, visualize=False)\n\n # flatten to vector\n P,Q,R = daisy_.shape\n daisy = daisy_.reshape((P*Q*R,))\n print(\"daisy shape is {}\".format(daisy_.shape))\n daisy = torch.from_numpy(daisy_)\n\n daisys.append(daisy_)\n daisys_tensor = torch.stack(daisys, dim=0)\n \n \n return daisys_tensor",
"def d_train_step(self, batch_size, real_images):\n\n masked_images = real_images*self.m1 - self.m2\n inpainted_images = self.generator(masked_images)\n\n # Reset accumulated discriminator gradient.\n self.d_optimizer.zero_grad()\n\n # Get logits.\n real_logits = self.discriminator(real_images)\n inpainted_logits = self.discriminator(inpainted_images)\n\n # Get target labels for real images.\n real_target = torch.ones(batch_size, device='cuda')\n real_target -= self.args.label_smoothing\n real_target = real_target.view_as(real_logits)\n\n # Calculate loss on real iamges.\n d_real_loss = self.criterion(real_logits, real_target)\n\n # Get target labels for inpainted images.\n inpainted_target = torch.zeros(batch_size, device='cuda')\n inpainted_target = inpainted_target.view_as(inpainted_logits)\n\n # Calculate loss on inpainted images.\n d_inpainted_loss = self.criterion(\n inpainted_logits,\n inpainted_target\n )\n\n # Calculate total loss.\n d_loss = d_real_loss + d_inpainted_loss\n self.d_loss = d_loss.item()\n\n # Backprop.\n d_loss.backward()\n self.d_optimizer.step()",
"def local_label_dissimilarity(img1, img2, window_length):\n data1 = img1.get_data()\n data2 = img2.get_data()\n ndim = img1.get_ndim()\n window_size = window_length ** ndim\n\n label_difference = np.abs(data1 - data2)\n label_difference[np.where(label_difference > 0)] = 1.\n\n dissimilarity = uniform_filter(\n label_difference, window_length) / window_size\n\n return dissimilarity",
"def discriminator_loss(discriminator, fake_images, real_images, fake_labels, real_labels, con_aug, stage):\n discriminator.train()\n criterion = nn.BCELoss()\n fake = fake_images.detach()\n condition = con_aug.detach()\n batch_size = real_images.size(0)\n \"\"\"\n ********************************************************************************\n The next two lines should be removed if we don't have a very powerful GPU.\n I cannot train the 256 x 256 image in stage 2 in my GPU(Tesla K80). So modified stage 2\n so that all processing are done for 64 x 64 and output is also 64 x 64 image.\n *********************************************************************************\n \"\"\"\n if (stage==2):\n real_images = F.interpolate(real_images, scale_factor = 4)\n real_dis_fea = discriminator(real_images)\n fake_dis_fea = discriminator(fake)\n\n \"\"\"\n Here we use three types of error and add them.\n real_error: error between real images and real labels.\n wrong_error: error between real images and wrong labels.\n fake_error: error between fake images and fake labels.\n \"\"\"\n real_logits = discriminator.conditioned_result(real_dis_fea, condition)\n real_error = criterion(real_logits, real_labels)\n\n wrong_logits = discriminator.conditioned_result(real_dis_fea[:(batch_size-1)], condition[1:])\n wrong_error = criterion(wrong_logits, fake_labels[1:])\n\n fake_logits = discriminator.conditioned_result(fake_dis_fea, condition)\n fake_error = criterion(fake_logits, fake_labels)\n\n if discriminator.unconditioned_result is not None:\n \"\"\"\n In case of stage 2 generator in addition to above errors we also\n use another error calculated from scores computed using the image features\n only without using the text features.\n \"\"\"\n real_logits1 = discriminator.unconditioned_result(real_dis_fea)\n uncond_real_error = criterion(real_logits1, real_labels)\n\n fake_logits1 = discriminator.unconditioned_result(fake_dis_fea)\n uncond_fake_error = criterion(fake_logits1, fake_labels)\n\n error = (real_error + uncond_real_error)/2.0 + (wrong_error+fake_error+uncond_fake_error)/3.0\n real_error = (real_error + uncond_real_error)/2.0\n fake_error = (fake_error + uncond_fake_error)/2.0\n\n else:\n error = real_error + (wrong_error * fake_error) * 0.5\n\n return error, real_error.item(), fake_error.item(), wrong_error.item()",
"def dnn_loss_calculation(self, labeled_examples, labels):\n predicted_labels, _ = self.DNN(labeled_examples)\n labeled_loss = self.labeled_loss_function(predicted_labels, labels, order=self.settings.labeled_loss_order)\n labeled_loss *= self.settings.labeled_loss_multiplier\n return labeled_loss",
"def dissimilarity_loss(latents, mask):\n a_i = 0, 0, 0, 1, 1, 2\n b_i = 1, 2, 3, 2, 3, 3\n a = latents[a_i, :, :, :]\n b = latents[b_i, :, :, :]\n count = (mask[:, a_i] * mask[:, b_i]).sum() + 1e-08\n sim = F.cosine_similarity(a.abs(), b.abs(), dim=-1)\n sim = sim.sum(dim=(0, 1)) / count\n return sim.mean()",
"def _compute_loss(self, inputs, logits, targets, idxes):\n clf_loss = F.cross_entropy(logits, targets)\n\n if self._task == 0:\n distil_loss = torch.zeros(1, device=self._device)\n else:\n if self._finetuning:\n # We only do distillation on current task during the distillation\n # phase:\n last_index = len(self._task_idxes)\n else:\n last_index = len(self._task_idxes) - 1\n\n distil_loss = 0.\n #with torch.no_grad():\n previous_logits = self._old_model(inputs)\n\n for i in range(last_index):\n task_idxes = self._task_idxes[i]\n\n distil_loss += F.binary_cross_entropy(\n F.softmax(logits[..., task_idxes] / self._temperature, dim=1),\n F.softmax(previous_logits[..., task_idxes] / self._temperature, dim=1)\n )\n\n return clf_loss, distil_loss",
"def setup_loss(self):\n self.loss = nn.CrossEntropyLoss(weight = self.to_device(self.datasetManager.class_weights))\n #self.loss = nn.CrossEntropyLoss()",
"def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))",
"def dissimilarity_loss(latents, mask):\n a_i = (0, 0, 0, 1, 1, 2)\n b_i = (1, 2, 3, 2, 3, 3)\n\n a = latents[a_i, :, :, :]\n b = latents[b_i, :, :, :]\n\n count = (mask[:, a_i] * mask[:, b_i]).sum() + 1e-8\n sim = F.cosine_similarity(a.abs(), b.abs(), dim=-1)\n sim = sim.sum(dim=(0, 1)) / count\n return sim.mean()",
"def model_loss(self,input_real,input_z,out_channel_dim):\t\r\n label_smooth = 0.9 \r\n \r\n #get output of generator\r\n gen_img, gen_logits = self.generator(input_z,out_channel_dim,True)\r\n\r\n\t#pass real image to dicriminator\r\n disc_model_real, disc_logits_real = self.discriminator(input_real)\r\n\t\r\n\t#pass generated image to dicriminator\r\n disc_model_fake, disc_logits_fake = self.discriminator(gen_img,reuse=True)\r\n \r\n\t \t\r\n disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_real,labels=label_smooth*tf.ones_like(disc_model_real))) \r\n disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=tf.zeros_like(disc_model_fake)))\r\n \r\n\r\n\t\"\"\"\r\n\tLoss for discriminator is sum of loss for real image and fake image \r\n\t\"\"\"\t\r\n disc_loss = disc_loss_real + disc_loss_fake\r\n \r\n\r\n \"\"\"\r\n\tTo find loss for generator, fake image is passed with label= real (0.9)\r\n\t\"\"\"\r\n gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=label_smooth*tf.ones_like(disc_model_fake)))\r\n \r\n return disc_loss,gen_loss,gen_img",
"def discriminator_loss_fn(y_data, y_generated, data_label=0, label_noise=0.0):\n assert data_label == 1 or data_label == 0\n # TODO:\n # Implement the discriminator loss.\n # See pytorch's BCEWithLogitsLoss for a numerically stable implementation.\n # ====== YOUR CODE: ======\n device = y_data.device\n loss_fn = nn.BCEWithLogitsLoss()\n data_noise = torch.rand(*y_data.shape) * label_noise - (label_noise / 2)\n generated_noise = torch.rand(*y_data.shape) * label_noise - (label_noise / 2)\n\n loss_data = loss_fn(y_data, (data_noise + data_label).to(device))\n loss_generated = loss_fn(y_generated, (generated_noise + (1 - data_label)).to(device))\n # ========================\n return loss_data + loss_generated",
"def compute_loss(self):",
"def _CalculateDissimilarities(self):\n print 'Calculating dissimilarities'\n num_bins = len(self._mix_bins)\n\n for left_cluster_id, left_bin_collection in (self\n ._left_bin_collection_by_cluster_id.iteritems()):\n for right_cluster_id, right_bin_collection in (self\n ._right_bin_collection_by_cluster_id.iteritems()):\n # This operation can be easily parallelized via multiprocessing.\n d = self._CalculateDissimilarityBetweenClusters(\n left_cluster_id, left_bin_collection, right_cluster_id,\n right_bin_collection)\n print 'Left cluster: %s, Right cluster: %s, dissimilarity: %s' % (\n d.left_cluster_id, d.right_cluster_id, d.dissimilarity_score)\n self._dissimilarities.append(d)\n\n print 'Dissimilarities are calculated'",
"def _auxiliary_loss(self, expert_mask, router_prob):\n # density_1's shape: (dp_group, self.expert_dim)\n density_1 = self.reduce_mean(expert_mask, 1)\n # density_1_proxy's shape: (dp_group, self.expert_dim)\n density_1_proxy = self.reduce_mean2(router_prob, 1)\n loss = self.mul(density_1, density_1_proxy)\n loss = self.reduce_mean3(loss)\n loss = self.mul3(self.mul2(loss, self.expert_dim), self.expert_dim)\n return loss",
"def compute_HOG_Daisy(self,image_tensor):\n image_idx = image_tensor.size()[0] #size (6, C, H, W)\n hogs_daisys = list()\n # iterate through each generated image\n for i in range(image_idx):\n image = image_tensor[i,:,:,:]\n image = image.numpy()\n # revert normalization\n x = np.zeros_like(image)\n x[0, :, :] = image[0, :, :] * STD[0] + MEAN[0]\n x[1, :, :] = image[1, :, :] * STD[1] + MEAN[1]\n x[2, :, :] = image[2, :, :] * STD[2] + MEAN[2]\n x = x*255\n cv_x = x.astype(np.uint8)\n # transpose dimensions since hog implementation requires image to be (M, N[, C]) ndarray\n cv_x = np.transpose(cv_x, (1,2,0))\n\n #### HOG ####\n hist = hog(cv_x, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(3, 3), visualize=False, feature_vector=True, multichannel=True)\n print(\"hog shape is {}\".format(hist.shape))\n #### Daisy ####\n # convert RGB to grayscale\n grey = rgb2gray(cv_x)\n\n # 27*27*36\n daisy_ = daisy(grey,step=180, radius=58, rings=2, histograms=6,orientations=8, visualize=False)\n\n # flatten to vector\n P,Q,R = daisy_.shape\n daisy_ = daisy_.reshape((P*Q*R,))\n print(\"daisy shape is {}\".format(daisy_.shape))\n\n #### concatenate hog and daisy ####\n hog_daisy = np.concatenate((hist, daisy_), axis=None)\n print(\"hog_daisy shape is {}\".format(hog_daisy.shape))\n\n hog_daisy = torch.from_numpy(hog_daisy)\n\n hogs_daisys.append(hog_daisy)\n hogs_daisys_tensor = torch.stack(hogs_daisys, dim=0)\n \n \n return hogs_daisys_tensor",
"def backward_D(self):\n self.loss_D_frame, self.loss_D_frame_real, self.loss_D_frame_fake = self.get_GAN_loss_D_sequential(\n discriminator=self.discriminator,\n real_images=self.real_target,\n fake_images=self.fake_target,\n conditioned_on=self.real_source,\n )\n self.loss_D += self.loss_D_frame\n super().backward_D()",
"def __D_loss(self, D, real, fake):\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \\\n tf.reduce_mean(tf.square(D(fake))))\n\n return loss",
"def hd_loss(seg_soft, gt, seg_dtm, gt_dtm):\n\n delta_s = (seg_soft[:,1,...] - gt.float()) ** 2\n s_dtm = seg_dtm[:,1,...] ** 2\n g_dtm = gt_dtm[:,1,...] ** 2\n dtm = s_dtm + g_dtm\n multipled = torch.einsum('bxyz, bxyz->bxyz', delta_s, dtm)\n hd_loss = multipled.mean()\n\n return hd_loss",
"def discriminator_loss(gen_images, real_images):\n real = real_images.new_full((real_images.shape[0], 1), real_label)\n gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)\n\n realloss = disc_loss_criterion(disc_net(real_images), real)\n genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)\n\n return (genloss + realloss) / 2",
"def discriminator_model_lungs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (400, 400, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L : 400 x 400 x 1 # G: 200 x 200 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 200 x 200 x 128 # G: 100 x 100 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # G: 100 x 100 x 256 # L: 50 x 50 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C512\n d3 = Conv2D(filters=512, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # G: 50 x 50 x 512 # L: 25 x 25 x 512 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n d3 = ZeroPadding2D()(d3) # G: 52 x 52 x 512 # L: 27 x 27 x 512\n\n # Patch output\n d4 = Conv2D(filters=1, kernel_size=(3, 3), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # G: 50 x 50 x 1 # L: 25 x 25 x 1 # RF: 38\n output_patch = Activation('sigmoid')(d4)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model",
"def forward(self, output1, output2, label):\n euclidean_distance = F.pairwise_distance(output1, output2)\n clamped = torch.clamp(self.margin - euclidean_distance, min=0.0)\n similar_loss = (1 - label) * 0.5 * torch.pow(euclidean_distance, 2)\n dissimilar_loss = label * 0.5 * torch.pow(clamped, 2)\n contrastive_loss = similar_loss + dissimilar_loss\n\n return torch.mean(contrastive_loss)",
"def loss_fn(outputs, labels):\n #print('this is outputs', outputs.shape) # 2,3,128,128\n #print('this is labels', labels.shape) # 2,3,128,128\n N, C, H, W = outputs.shape\n \n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n mse_loss = torch.sum((outputs - labels) ** 2) / N / C # each photo, each channel\n mse_loss *= 255 * 255\n mse_loss /= H * W \n # average loss on each pixel(0-255)\n return mse_loss",
"def D_loss(self, reals, fakes, labels=None):\n fake_label, real_label = [], []\n base_function._unfreeze(self.net_D_sce)\n for fake, real in zip(fakes, reals):\n if labels is not None and self.input_nc!=fake.size(1):\n scale_label = F.interpolate(labels, size=[fake.size(2), fake.size(3)])\n fake = torch.cat([fake, scale_label], dim=1)\n real = torch.cat([real, scale_label], dim=1)\n fake_label.append(fake.detach())\n real_label.append(real)\n self.optimizer_D.zero_grad()\n loss_d_sce = self.D_loss_basic(self.net_D_sce, real_label, fake_label)\n self.optimizer_D.step()\n\n return loss_d_sce",
"def forward(self, output1, output2, label):\n euclidean_distance = F.pairwise_distance(output1, output2)\n distance_from_margin = torch.clamp(torch.pow(euclidean_distance, 2) - self.margin, max=50.0)\n exp_distance_from_margin = torch.exp(distance_from_margin)\n distance_based_loss = (1.0 + math.exp(-self.margin)) / (1.0 + exp_distance_from_margin)\n similar_loss = -0.5 * (1 - label) * torch.log(distance_based_loss)\n dissimilar_loss = -0.5 * label * torch.log(1.0 - distance_based_loss)\n return torch.mean(similar_loss + dissimilar_loss)"
] | [
"0.6279302",
"0.6276216",
"0.6276216",
"0.56417763",
"0.5641206",
"0.56169605",
"0.54915816",
"0.5467536",
"0.54571056",
"0.5449515",
"0.536803",
"0.5303079",
"0.5301417",
"0.5299759",
"0.5276228",
"0.5274409",
"0.52681893",
"0.5266836",
"0.52594036",
"0.5251588",
"0.52275616",
"0.52254605",
"0.5219073",
"0.5169376",
"0.5165529",
"0.51627505",
"0.5162092",
"0.5160636",
"0.51602983",
"0.515842"
] | 0.6664222 | 1 |
Handles the ESC% sequence. | def escPercent(self) :
if self.minfile[self.pos : self.pos+7] == r"-12345X" :
#self.logdebug("Generic ESCAPE sequence at %08x" % self.pos)
self.pos += 7
buffer = []
quotes = 0
char = chr(self.readByte())
while ((char < ASCIILIMIT) or (quotes % 2)) and (char not in (FORMFEED, ESCAPE, NUL)) :
buffer.append(char)
if char == '"' :
quotes += 1
char = chr(self.readByte())
self.setPageDict("escaped", "".join(buffer))
#self.logdebug("ESCAPED : %s" % "".join(buffer))
self.pos -= 1 # Adjust position
else :
while 1 :
(value, end) = self.getInteger()
if end == 'B' :
self.enterHPGL2()
while self.minfile[self.pos] != ESCAPE :
self.pos += 1
self.pos -= 1
return
elif end == 'A' :
self.exitHPGL2()
return
elif end is None :
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cb_check_esc(data, remaining_calls):\n global esc_pressed, vi_buffer, cmd_text, catching_keys_data\n if last_signal_time == float(data):\n esc_pressed += 1\n set_mode(\"NORMAL\")\n # Cancel any current partial commands.\n vi_buffer = \"\"\n cmd_text = \"\"\n weechat.command(\"\", \"/bar hide vi_cmd\")\n catching_keys_data = {'amount': 0}\n weechat.bar_item_update(\"vi_buffer\")\n return weechat.WEECHAT_RC_OK",
"def keyPressEvent(self, evt):\n if evt.key() == Qt.Key_Escape:\n self.escapePressed.emit()\n else:\n evt.ignore()",
"def keyPressEvent(self, evt):\n if evt.key() == Qt.Key_Escape:\n self.escPressed.emit()\n else:\n super(QuickSearchLineEdit, self).keyPressEvent(evt) # pass it on",
"def escape(event: EventType, widget: WidgetType) -> bool:\n return event.key == _locals.K_ESCAPE",
"def __escCloseBook(self):\n if hasattr(localAvatar, \"newsPage\") and localAvatar.book.isOnPage(localAvatar.newsPage):\n localAvatar.newsButtonMgr.simulateEscapeKeyPress()\n else:\n base.localAvatar.stopSleepWatch()\n base.localAvatar.book.exit()\n base.localAvatar.b_setAnimState(\n 'CloseBook', 1, callback = self.handleBookClose)",
"def OnKeydown(self, vkey, shift):\n # ESCAPE\n if vkey == 27:\n self.Close()\n elif vkey == ord('H'):\n lineno = self.GetLineNo()\n if lineno is not None:\n line, fg, bg = self.GetLine(lineno)\n if line and line[0] != idaapi.SCOLOR_INV:\n s = idaapi.SCOLOR_INV + line + idaapi.SCOLOR_INV\n self.EditLine(lineno, s, fg, bg)\n self.Refresh()\n elif vkey == ord('C'):\n self.ClearLines()\n self.Refresh()\n elif vkey == ord('S'):\n print \"Selection (x1, y1, x2, y2) = \", self.GetSelection()\n elif vkey == ord('I'):\n print \"Position (line, x, y) = \", self.GetPos(mouse = 0)\n else:\n return False\n return True",
"def cb_key_pressed(data, signal, signal_data):\n global last_signal_time\n last_signal_time = time.time()\n if signal_data == \"\\x01[\":\n # In 50ms, check if any other keys were pressed. If not, it's Esc!\n weechat.hook_timer(50, 0, 1, \"cb_check_esc\",\n \"{:f}\".format(last_signal_time))\n return weechat.WEECHAT_RC_OK",
"def readline(self):\n\n\t\tself.history.insert(0, '')\n\t\tself.history_pos = 0\n\n\t\ttry:\n\t\t\tif self._gevent_handle_sigint:\n\t\t\t\timport gevent\n\t\t\t\tself._readline_greenlet = gevent.getcurrent()\n\n\t\t\twhile True:\n\n\t\t\t\tself.refresh()\n\n\t\t\t\t# read input\n\t\t\t\tc = self.read()\n\t\t\t\tif isinstance(c, unicode):\n\t\t\t\t\tc = c.encode(self.encoding or 'utf-8')\n\t\t\t\tif not c:\n\t\t\t\t\traise EOFError()\n\t\t\t\tif c in self.TERMINATORS:\n\t\t\t\t\tbreak\n\t\t\t\tself.esc_buf += c\n\n\t\t\t\t# on partial unicode characters, continue to buffer\n\t\t\t\tesc_buf = self.esc_buf\n\t\t\t\tif self.encoding or PY3:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tesc_buf = self.esc_buf.decode(self.encoding or 'utf-8')\n\t\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\t\tlogging.debug(\"Got partial unicode character {!r}, continuing\".format(self.esc_buf))\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t# check for full escape sequence\n\t\t\t\tif esc_buf in ESCAPE_HANDLERS:\n\t\t\t\t\tlogging.debug(\"Got esc handler {!r}\".format(esc_buf))\n\t\t\t\t\tself.head, self.tail = ESCAPE_HANDLERS[esc_buf](self.head, self.tail, self)\n\t\t\t\t\tself.esc_buf = b''\n\t\t\t\t\tcontinue\n\n\t\t\t\t# on partial escape sequences, continue to buffer\n\t\t\t\tif any(sequence.startswith(esc_buf) for sequence in ESCAPE_HANDLERS):\n\t\t\t\t\tlogging.debug(\"Buffer {!r} is prefix of at least one esc handler, continuing\".format(esc_buf))\n\t\t\t\t\tcontinue\n\n\t\t\t\tlogging.debug(\"Buffer {!r} not prefix of any esc handler, stripping and adding\".format(esc_buf))\n\n\t\t\t\tif self.suppress_nonprinting:\n\t\t\t\t\t# filter non-printing chars before we add to main buffer\n\t\t\t\t\t# (also allow >128 for non-ascii chars)\n\t\t\t\t\tesc_buf = type(esc_buf)().join([\n\t\t\t\t\t\tc for c in esc_buf\n\t\t\t\t\t\tif c in self.PRINTABLE or ord(c) > 128\n\t\t\t\t\t])\n\n\t\t\t\t# flush escape buffer\n\t\t\t\tself.head += esc_buf\n\t\t\t\tself.esc_buf = b''\n\n\t\texcept KeyboardInterrupt:\n\t\t\tself.head = ''\n\t\t\tself.tail = ''\n\t\t\t# fall through\n\t\texcept EOFError:\n\t\t\tif not (self.head or self.tail): raise\n\t\t\t# fall through\n\t\tfinally:\n\t\t\tif self._gevent_handle_sigint:\n\t\t\t\tself._readline_greenlet = None\n\n\t\tself.history[0] = self.head + self.tail\n\t\tif not self.history[0]: self.history.pop(0)\n\n\t\tret = self.head + self.tail\n\t\tself.head = ''\n\t\tself.tail = ''\n\n\t\tif self.encoding and not isinstance(ret, unicode):\n\t\t\t# Some edge cases (eg. ^C) can result in ret being bytes even when decoding should happen.\n\t\t\t# Our code doesn't care because the implict coercion is safe for empty strings and ascii characters,\n\t\t\t# but we want to avoid unexpected behaviour when returning to the caller.\n\t\t\t# If this raises a UnicodeDecodeError, it indicates that there is a logic bug, as non-ascii characters\n\t\t\t# shouldn't be present if ret isn't already a unicode object.\n\t\t\tret = ret.decode('ascii')\n\n\t\treturn ret",
"def on_press_escape(self, event):\n del event\n self.destroy()",
"def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdscr.keypad(False)\n self.stdscr.nodelay(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()",
"def OnKeydown(self, vkey, shift):\n if vkey == 27:\n # The ESC key was pressed so close the window and leave.\n self.Close()\n else:\n # An unknown key was pressed.\n return self.on_key_down(vkey, shift)\n\n return True",
"def signal_handler(signal, frame):\n print(chr(27) + \"[2J\")\n sys.exit(0)",
"def process_key(key):\n print(chr(key))",
"def playback_expr(e):\n t = 1\n while True:\n print(chr(int(\n e(t)\n ) % 256 ), end=\"\")\n t += 1",
"def escE(self) :\n #self.logdebug(\"RESET\")\n self.resets += 1",
"def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1",
"def get_char_echo(self) -> str:\n ...",
"def ev_KEYDOWN(self, event):",
"def handle_digit(c):\n @handle(c, filter=HasArg())\n @handle(Keys.Escape, c)\n def _(event):\n event.append_to_arg_count(c)",
"def on_key_press(self, key, _modifiers): \n if key == arcade.key.ESCAPE: # resume game\n self.window.show_view(self.instruction_view)",
"def bye(event=None):\r\n s_message.set(\"{quit}\")\r\n send()",
"def keyPressEvent(self, event):\n self.Serial.send_keystroke(event.text())",
"def ev_keydown(self, event: KeyDown) -> None:",
"def OnKeyDown(self,event):\n\n\t\traw_code = event.GetRawKeyCode()\n\t\tmodifiers = event.GetModifiers()\n\n\t\t#~ if raw_code == 75 and modifiers==3:\n\t\t\t#~ self.Close()\n\t\tif raw_code == 75 and modifiers==2:\n\t\t\tself.debug_out.SetValue(\"\")",
"def send_enter():\n sys.stdout.write('\\x0D') # send carriage return\n sys.stdout.flush()",
"def on_key_press(self, key: int, modifiers: int) -> None:\n if key == arcade.key.ESCAPE:\n self.window.show_view(self.game_view)",
"def get_keypress(self):\n key = sys.stdin.read(1)\n\n if key:\n if ord(key) == 27: # ESC!\n sys.stdin.read(1) # we expect a [ here (ANSI CSI sequence)\n ansicode = sys.stdin.read(1)\n\n if ansicode == \"A\":\n return self.ARROW_UP\n elif ansicode == \"B\":\n return self.ARROW_DOWN\n elif ansicode == \"C\":\n return self.ARROW_RIGHT\n elif ansicode == \"D\":\n return self.ARROW_LEFT\n else: # return ESC\n return key\n\n return key",
"def event_handler(self, event):\n if event.type == pygame.QUIT:\n # close window event\n self.exit()\n elif event.type == pygame.KEYDOWN:\n # keyboard event on press ESC\n if event.key == pygame.K_ESCAPE:\n self.exit()",
"def event_handler(self, event):\n if event.type == pygame.QUIT:\n # close window event\n self.exit()\n elif event.type == pygame.KEYDOWN:\n # keyboard event on press ESC\n if event.key == pygame.K_ESCAPE:\n self.exit()",
"def remove_ansi_escape_sequence(self, text):\n\n # By default no string returned\n output = \"\"\n\n # By default no escape sequence found\n esc_found = 0\n\n # Read char by char a string\n for i in text:\n\n # Display char\n # log.info(f\"{str(i).encode('ascii')}\")\n\n # No escape previously found?\n if esc_found == 0:\n\n # No escape sequence currently found\n\n # Escape?\n if i == \"\\x1b\":\n\n # Yes\n log.info(\"Esc!\")\n\n # Escape found\n esc_found = 1\n\n else:\n\n # No\n\n # Then the current char can be saved\n output += i\n\n # Escape previously found?\n elif esc_found == 1:\n\n # Yes\n\n # Then check if this is a CSI sequence\n if i == \"[\":\n\n # Beginning of CSI sequence\n log.info(\"CSI sequence\")\n\n # CSI sequence\n esc_found = 2\n\n else:\n\n # Another Escape sequence\n\n # Keep the escape sequence in the string\n output += \"\\x1b\" + i\n\n # No escape sequence next\n esc_found = 0\n\n else:\n\n # Char between 'a' and 'z' or 'A' and 'Z'?\n if (i >= \"a\" and i <= \"z\") or (i >= \"A\" and i <= \"Z\"):\n\n # Yes\n\n # Then it is the end of CSI escape sequence\n log.info(\"End of escape sequence\")\n\n # No escape sequence next\n esc_found = 0\n\n # Return a string without ANSI escape sequence\n return output"
] | [
"0.6812342",
"0.645406",
"0.6427925",
"0.61390394",
"0.6042245",
"0.597142",
"0.5786621",
"0.57699805",
"0.57557946",
"0.57549417",
"0.5721573",
"0.5646723",
"0.5550067",
"0.55322945",
"0.5531788",
"0.5458349",
"0.543286",
"0.5431437",
"0.54196364",
"0.54120016",
"0.54112536",
"0.54103154",
"0.54029334",
"0.5400865",
"0.53979355",
"0.539213",
"0.5388696",
"0.53742284",
"0.53742284",
"0.53628623"
] | 0.6592571 | 1 |
Handles Canon ImageRunner tags. | def handleImageRunner(self) :
tag = self.readByte()
if tag == ord(self.imagerunnermarker1[-1]) :
oldpos = self.pos-2
codop = self.minfile[self.pos:self.pos+2]
length = unpack(">H", self.minfile[self.pos+6:self.pos+8])[0]
self.pos += 18
if codop != self.imagerunnermarker2 :
self.pos += length
self.logdebug("IMAGERUNNERTAG SKIP %i AT %08x" % (self.pos-oldpos, self.pos))
else :
self.pos -= 1 # Adjust position | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_image(self):\n pass",
"def __call__(self, images, targets):\n pass",
"def process(self, image):",
"def on_image(self, image):",
"def on_process_image(self, img, prefix):\n\t\traise NotImplementedError(\"You need to implement this to tweet to timeline (or pass if you don't want to)!\")",
"def process(image):\n pass",
"def __call__(self, name, batch):\n\n def _check_img(tag_img):\n tag, img = tag_img\n\n assert img.ndim == 2 or img.ndim == 3, 'Only 2D (HW) and 3D (CHW) images are accepted for display'\n\n if img.ndim == 2:\n img = np.expand_dims(img, axis=0)\n else:\n C = img.shape[0]\n assert C == 1 or C == 3, 'Only (1, H, W) or (3, H, W) images are supported'\n\n return tag, img\n\n tagged_images = self.process_batch(name, batch)\n\n return list(map(_check_img, tagged_images))",
"def __call__(self, img, *args, **kwargs):\n raise NotImplementedError",
"def __call__(self, img, *args, **kwargs):\n raise NotImplementedError",
"def tag(self, **kwargs):\n return self.getField('image').tag(self, **kwargs)",
"def tag(self, **kwargs):\n return self.getField('image').tag(self, **kwargs)",
"def builder_did_create_target_image(self, builder, target, image_id, template, parameters):",
"def do_stage(self, images):\n\n for i, image in enumerate(images):\n pass\n # logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)",
"def on_object(self, image, objects):\n pass",
"def process_image(self):\n\n detect.main(self.nn_args)",
"def cmd_gallery_tag_image(client, args):\n gallery_tag_image = client.gallery_tag_image(args.tag, args.image_id)\n data = gallery_tag_image.__dict__\n generate_output({'gallery_tag_image': data})",
"def proc_image(self, tokens):\n\n print \"IMAGE:\", tokens, tokens.asList(), tokens.keys()\n\n raise NotImplementedError",
"def propagateImage(self, dryrun):\n pass",
"def builder_will_create_target_image(self, builder, target, image_id, template, parameters):",
"def image_callback(self, data):\n\n # Import raw image and convert to cv2 format\n frame = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n # Convert to grayscale (AprilTag detector prefers this)\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n detections = self.detector.detect(gray, return_image = False)\n\n num_detections = len(detections)\n rospy.loginfo('Detected {} tags'.format(num_detections))\n # Check to see if any detections\n if (not detections):\n\t\t self.pub.publish(None,None, None)\n\t\t return\n\n\t detection = detections[0]\n retval, rvec, tvec = cv2.solvePnP(self.opts, detection.corners,\n self.cmatrix, self.dists)\n # Update moving average with 60% old, 40% new\n self.tvec_avg = 0.6 * self.tvec_avg + 0.4 * tvec\n # publish the center of the tag.\n self.pub.publish(self.tvec_avg[0], self.tvec_avg[1], self.tvec_avg[2])",
"def run(self, name, image, entrypoint, command):\n return 0, ''",
"def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)",
"def RemoteBuild(self, image):\n raise NotImplementedError()",
"def image_tag(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"image_tag\")",
"def image_tag(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_tag\")",
"def images(self, **kwargs):\n\n raise NotImplementedError",
"def hook_image_tag(self, parser, space, name):\n link = name\n caption = name\n params = {}\n\n # Parse the inner syntax, e.g. [[Image:src|option=val|caption]]\n separator = name.find('|')\n items = []\n if separator != -1:\n items = link.split('|')\n link = items[0]\n # If the last item contains '=', it's not a caption\n if items[-1].find('=') == -1:\n caption = items[-1]\n items = items[1:-1]\n else:\n caption = link\n items = items[1:]\n\n # parse the relevant items\n params = self._buildImageParams(items)\n img_path = self._getImagePath(link)\n\n template = jingo.env.get_template('wikiparser/hook_image.html')\n r_kwargs = {'img_path': img_path, 'caption': caption, 'params': params}\n return template.render(**r_kwargs)",
"def run_frame(self, ti, img):\n pass",
"def builder_should_create_target_image(self, builder, target, image_id, template, parameters):",
"def load_image(self, **kwargs):\n ..."
] | [
"0.6645309",
"0.6427157",
"0.6351128",
"0.63281864",
"0.62446356",
"0.6240594",
"0.6175346",
"0.60579926",
"0.60579926",
"0.60159016",
"0.60159016",
"0.60156727",
"0.6013718",
"0.5928864",
"0.58572084",
"0.5823431",
"0.5798933",
"0.57932264",
"0.5709512",
"0.57068586",
"0.56861186",
"0.5647051",
"0.55989045",
"0.559885",
"0.5518608",
"0.54788935",
"0.5471587",
"0.54625696",
"0.54416466",
"0.54111004"
] | 0.70048636 | 0 |
Return PUBLISHED message based on the PUBLISH message received. | def process(self):
received_message = PublishMessage(*self.message.value)
allow, msg = customize.authorize_publication(received_message.topic, self.connection)
answer = None
if allow:
publication_id = create_global_id()
self.broadcast_messages, response = customize.get_publish_messages(received_message, publication_id, self.connection.id)
if received_message.options.get("acknowledge"):
if response is None:
answer = PublishedMessage(
request_id=received_message.request_id,
publication_id=publication_id
)
else:
answer = response
else:
answer = ErrorMessage(
request_id=received_message.request_id,
request_code=received_message.code,
uri="tornwamp.publish.unauthorized"
)
answer.error(msg)
self.answer_message = answer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_publish(client, userdata, mid):\n print(\"Message Published.\")",
"def publish(request):\n issue = request.issue\n if issue.edit_allowed:\n form_class = PublishForm\n else:\n form_class = MiniPublishForm\n draft_message = None\n if not request.POST.get('message_only', None):\n query = models.Message.query(\n models.Message.issue_key == issue.key,\n models.Message.sender == request.user.email(),\n models.Message.draft == True)\n draft_message = query.get()\n if request.method != 'POST':\n reviewers = issue.reviewers[:]\n cc = issue.cc[:]\n if (request.user != issue.owner and\n request.user.email() not in issue.reviewers and\n not issue.is_collaborator(request.user)):\n reviewers.append(request.user.email())\n if request.user.email() in cc:\n cc.remove(request.user.email())\n reviewers = [models.Account.get_nickname_for_email(reviewer,\n default=reviewer)\n for reviewer in reviewers]\n ccs = [models.Account.get_nickname_for_email(cc, default=cc) for cc in cc]\n tbd, comments = _get_draft_comments(request, issue, True)\n preview = _get_draft_details(request, comments)\n if draft_message is None:\n msg = ''\n else:\n msg = draft_message.text\n form = form_class(initial={'subject': issue.subject,\n 'reviewers': ', '.join(reviewers),\n 'cc': ', '.join(ccs),\n 'send_mail': True,\n 'message': msg,\n })\n return respond(request, 'publish.html', {'form': form,\n 'issue': issue,\n 'preview': preview,\n 'draft_message': draft_message,\n })\n\n # Supply subject so that if this is a bare request to /publish, it won't\n # fail out if we've selected PublishForm (which requires a subject).\n augmented_POST = request.POST.copy()\n if issue.subject:\n augmented_POST.setdefault('subject', issue.subject)\n form = form_class(augmented_POST)\n\n # If the user is blocked, intentionally redirects him to the form again to\n # confuse him.\n account = models.Account.get_account_for_user(request.user)\n if account.blocked or not form.is_valid():\n return respond(request, 'publish.html', {'form': form, 'issue': issue})\n if issue.edit_allowed:\n issue.subject = form.cleaned_data['subject']\n if form.is_valid() and not form.cleaned_data.get('message_only', False):\n reviewers = _get_emails(form, 'reviewers')\n else:\n reviewers = issue.reviewers\n if (request.user != issue.owner and\n request.user.email() not in reviewers and\n not issue.is_collaborator(request.user)):\n reviewers.append(db.Email(request.user.email()))\n if form.is_valid() and not form.cleaned_data.get('message_only', False):\n cc = _get_emails(form, 'cc')\n else:\n cc = issue.cc\n # The user is in the reviewer list, remove them from CC if they're there.\n if request.user.email() in cc:\n cc.remove(request.user.email())\n if not form.is_valid():\n return respond(request, 'publish.html', {'form': form, 'issue': issue})\n issue.reviewers = reviewers\n issue.cc = cc\n if not form.cleaned_data.get('message_only', False):\n tbd, comments = _get_draft_comments(request, issue)\n else:\n tbd = []\n comments = []\n issue.update_comment_count(len(comments))\n tbd.append(issue)\n\n if comments:\n logging.warn('Publishing %d comments', len(comments))\n msg = _make_message(request, issue,\n form.cleaned_data['message'],\n comments,\n form.cleaned_data['send_mail'],\n draft=draft_message,\n in_reply_to=form.cleaned_data.get('in_reply_to'))\n tbd.append(msg)\n\n for obj in tbd:\n obj.put()\n\n # There are now no comments here (modulo race conditions)\n models.Account.current_user_account.update_drafts(issue, 0)\n if form.cleaned_data.get('no_redirect', False):\n return HttpTextResponse('OK')\n return 
HttpResponseRedirect(reverse(show, args=[issue.key.id()]))",
"def get_publish_link(self):\n return self.get_link(PUBLISH_LINK_REL)",
"def draft_message(request):\n query = models.Message.query(\n models.Message.sender == request.user.email(),\n models.Message.draft == True,\n ancestor=request.issue.key)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return HttpTextResponse(draft_message.text if draft_message else '')\n return HttpTextResponse('An error occurred.', status=500)",
"def publish(self, message: str) -> None:",
"def _get_draft_message(draft):\n return HttpTextResponse(draft.text if draft else '')",
"def get_message():\n\tincoming_message = conn.recv(1024)\n\tincoming_message = incoming_message.decode()\n\treturn incoming_message",
"def on_pubmsg(self, raw_msg, source, msg, **kwargs):",
"def draft_message(request):\n query = models.Message.query(\n models.Message.issue_key == request.issue.key,\n models.Message.sender == request.user.email(),\n models.Message.draft == True)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return _get_draft_message(draft_message)\n elif request.method == 'POST':\n return _post_draft_message(request, draft_message)\n elif request.method == 'DELETE':\n return _delete_draft_message(draft_message)\n return HttpTextResponse('An error occurred.', status=500)",
"def find_publish_link(self):\n return self.find_url(PUBLISH_LINK_REL)",
"def pub(self, topic, msg, callback=None):\n return self._pub('pub', topic, msg, callback=callback)",
"def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"",
"def message(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'message')\r\n return http.Request('GET', url), parsers.parse_json",
"def publish_receive(message):\n topic, content = message # \"femag_log\" + text\n # topics: femag_log, progress, file_modified,\n # model_image, calc_image, field_image, babs_image, demag_image, color_scale\n if topic == 'femag_log' or topic == 'progress':\n logger.info(\"%s: %s\", topic, content.strip())\n else:\n logger.info('%s: len %d', topic, len(content.strip()))",
"def on_publish(self, mqtt_client, userdata, mid):\n logging.debug(\"DEBUG - publish ack received\")",
"def _get_published(self):\n return self.__published",
"def on_publish(client: mqtt.Client, userdata: Any, mid: int) -> None:\n logging.info(f\"Successfully published a message: mid={mid}\")",
"async def fetch_initial_message(self):\n data = await self._state.get_message(self.id, self.initial_message_id)\n message = self._state.create_message(data)\n return message",
"def on_message(client, userdata, message): \n print(\"Topic: \" + message.topic + \" Message: \" + message.payload.decode('utf-8'))",
"def on_publish(client, userdata, mid):\n print('on_publish')\n print(\" userdata:\" + str(userdata))\n print(\" mid:\" + str(mid))\n print()",
"def maybe_notify_lessee(request, response):\n if request.get('pubsub_topic'):\n pubsub.publish(\n pubsub.full_topic_name(\n request['pubsub_project'], request['pubsub_topic']),\n json.dumps(response),\n {},\n )\n metrics.pubsub_messages_sent.increment(fields={'target': 'lessee'})",
"def get_notification_message(self):\n return {'body': None}",
"def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = json.loads(data.decode()) \n print(\"DATA FROM BROKER : \", info)\n \n return info.get(\"topic\"), info.get(\"value\")\n pass",
"def process_incoming(self, msg, status):\n return msg[0]",
"def publish_view(self, request, object_id, revision_id, extra_context=None):\n \n obj = get_object_or_404(self.model, pk=object_id)\n version = get_object_or_404(Version,\n revision=revision_id,\n object_id=force_unicode(obj.pk),\n content_type=ContentType.objects.get_for_model(obj))\n \n if not version.revision.easypublishermetadata_set.filter(language=request.LANGUAGE_CODE):\n request.user.message_set.create(message=_(\"There is no draft available for language %s\") % request.LANGUAGE_CODE)\n return HttpResponseRedirect('../../current')\n \n # Generate the context.\n context = {\n \"title\": _(\"Publish %(name)s\") % {\"name\": self.model._meta.verbose_name},\n \"publish\":True,\n 'has_draft':True,\n 'link_current':True,\n 'extra':0,\n 'revision_id': revision_id,\n }\n context.update(extra_context or {})\n return self.render_revision_form(request, obj, version, context, revert=True)",
"def get_announcement(self, request):\n return StringMessage(\n data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or \"\")",
"def getAnnouncement(self, request):\n announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or \"\"\n return StringMessage(data=announcement)",
"def compute_publish_state(xblock):\r\n\r\n if getattr(xblock, 'is_draft', False):\r\n try:\r\n modulestore('direct').get_item(xblock.location)\r\n return PublishState.draft\r\n except ItemNotFoundError:\r\n return PublishState.private\r\n else:\r\n return PublishState.public",
"def on_message(client, userdata, message):\n print(f'{message.topic} {message.payload.decode(\"utf-8\")}') # Print message topic and payload",
"def handle_pubnub_message(self, message: dict) -> None:\n super().handle_pubnub_message(message)\n\n event = None\n\n if message.get(Attribute.CAMERA_THUMBNAIL_DATE):\n event = THUMBNAIL_READY\n elif message.get(Attribute.DING_DONG):\n event = DOORBELL_DING\n elif message.keys() == set([Attribute.ID, Attribute.TYPE]):\n event = VIDEO_READY\n elif message.get(Attribute.VISITOR_DETECTED) or message.keys() in [\n set([Attribute.ID, Attribute.ACTUAL_TYPE, Attribute.STATE]),\n set([Attribute.ID, Attribute.DETER_ON_DUTY, Attribute.TYPE]),\n ]:\n event = MOTION_DETECTED\n\n if event is not None:\n self.emit(event, {\"message\": message})\n\n _LOGGER.debug(\"Message received by %s: %s\", self.name, message)"
] | [
"0.6156396",
"0.5943395",
"0.5853581",
"0.5687853",
"0.56821215",
"0.55945885",
"0.558449",
"0.5567416",
"0.55304414",
"0.5491364",
"0.54436356",
"0.5389817",
"0.53799397",
"0.53675115",
"0.52919793",
"0.5285353",
"0.5283519",
"0.5240137",
"0.52175355",
"0.5157807",
"0.51494783",
"0.5149371",
"0.5146135",
"0.5132583",
"0.5112187",
"0.51094556",
"0.51008356",
"0.50924516",
"0.5086669",
"0.50844234"
] | 0.6062522 | 1 |
Calls registered shift action for the given grammar symbol. | def _call_shift_action(self, context):
debug = self.debug
token = context.token
sem_action = token.symbol.action
if self.build_tree:
# call action for building tree node if tree building is enabled
if debug:
h_print("Building terminal node",
"'{}'.".format(token.symbol.name), level=2)
# If both build_tree and call_actions_during_build are set to
# True, semantic actions will be call but their result will be
# discarded. For more info check following issue:
# https://github.com/igordejanovic/parglare/issues/44
if self.call_actions_during_tree_build and sem_action:
sem_action(context, token.value, *token.additional_data)
return NodeTerm(context, token)
if sem_action:
result = sem_action(context, token.value, *token.additional_data)
else:
if debug:
h_print("No action defined",
"for '{}'. "
"Result is matched string.".format(token.symbol.name),
level=1)
result = token.value
if debug:
h_print("Action result = ",
"type:{} value:{}"
.format(type(result), repr(result)), level=1)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _call_shift_action(self, context):\n debug = self.debug\n token = context.token\n sem_action = token.symbol.action\n\n if self.build_tree:\n # call action for building tree node if tree building is enabled\n if debug:\n h_print(\"Building terminal node\",\n \"'{}'.\".format(token.symbol.name), level=2)\n\n # If both build_tree and call_actions_during_build are set to\n # True, semantic actions will be call but their result will be\n # discarded. For more info check following issue:\n # https://github.com/igordejanovic/parglare/issues/44\n if self.call_actions_during_tree_build and sem_action:\n sem_action(context, token.value, *token.additional_data)\n\n return treebuild_shift_action(context)\n\n if sem_action:\n result = sem_action(context, token.value, *token.additional_data)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \"for '{}'. \"\n \"Result is matched string.\".format(token.symbol.name),\n level=1)\n result = token.value\n\n if debug:\n h_print(\"Action result = \",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n return result",
"def process (self, inputSymbol):\n \n self.inputSymbol = inputSymbol\n (self.action, self.nextState) = self.getTransition (self.inputSymbol, self.currentState)\n \n if self.action is not None:\n self.action (self)\n \n self.memoryState.append(self.currentState)\n self.currentState = self.nextState\n self.nextState = None",
"def act(symbol):\n if symbol == pyglet.window.key.SPACE:\n return 1 # jump up\n elif symbol == pyglet.window.key.W:\n return 2 # move up\n elif symbol == pyglet.window.key.D:\n return 3 # move right\n elif symbol == pyglet.window.key.A:\n return 4 # move left\n elif symbol == pyglet.window.key.S:\n return 5 # move down\n elif symbol == pyglet.window.key.E:\n return 11 # jump right\n elif symbol == pyglet.window.key.Q:\n return 12 # jump left\n else:\n return 0 # noop",
"def apply_shift(text, shift):\n ### TODO.",
"def applyShift(text, shift):\n \n m=applyCoder(text,buildCoder(shift) )\n return m",
"def input(self, symbol, *args, **kwargs):\n if self.__state is None:\n raise ValueError(\"FSM state is undefined\")\n try:\n transition = self.__get_state_attr(self._transition_prefix)\n except AttributeError:\n raise Exception(\"unable to find transition function or target\")\n if callable(transition):\n new_state = transition(symbol)\n elif isinstance(transition, dict):\n new_state = transition[symbol]\n else:\n new_state = transition\n return None if new_state is None else self.enter(new_state, *args, **kwargs)",
"def applyShift(text, shift):\n return applyCoder(text, buildCoder(shift))",
"def applyShift(text, shift):\n return applyCoder(text, buildCoder(shift))",
"def applyShift(text, shift):\n ### TODO.\n ### HINT: This is a wrapper function.\n \n return applyCoder(text, buildCoder(shift))",
"def testRegisterShift(self):\n reg = ShiftRegister(3)\n reg.register[0] = \"a\"\n reg.register[1] = \"b\"\n reg.register[2] = \"c\"\n reg.shift(\"d\")\n self.assertEqual(reg.register[0], \"b\")\n self.assertEqual(reg.register[1], \"c\")\n self.assertEqual(reg.register[2], \"d\")",
"def on_key_press(symbol, modifiers):\n if symbol == key.SPACE:\n world.next_step()",
"def convert_symbol_to_raw_actions(self, symbol, rules):\n assert not isinstance(symbol, list)\n assert isinstance(symbol, str) or isinstance(symbol, int)\n symbol = [symbol]\n finished = False\n while not finished:\n new_symbol = []\n for symbol_val in symbol:\n if symbol_val in rules.keys():\n new_symbol.append(rules[symbol_val][0])\n new_symbol.append(rules[symbol_val][1])\n else:\n new_symbol.append(symbol_val)\n if new_symbol == symbol: finished = True\n else: symbol = new_symbol\n new_symbol = tuple(new_symbol)\n return new_symbol",
"def append(self, state, symbol, action, destinationstate, production = None):\r\n if action not in (None, \"Accept\", \"Shift\", \"Reduce\"):\r\n raise TypeError\r\n if not state in self:\r\n self[state] = {}\r\n rule = {\"action\":action, \"dest\":destinationstate}\r\n if action == \"Reduce\":\r\n if rule is None:\r\n raise TypeError(\"Expected production parameter\")\r\n rule[\"rule\"] = production\r\n if isinstance(symbol, list) and len(symbol) == 1:\r\n symbol = symbol[0]\r\n if not isinstance(symbol, Symbol):\r\n raise TypeError(\"Expected symbol, got %s\" % symbol)\r\n self[state][symbol] = rule",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def Shift(self, String, infix):\r\n\r\n tmp1 = self.Check_code_operand(infix[0])\r\n tmp2 = self.Check_code_operand(infix[1])\r\n if (tmp1 is False) or (tmp2 is False):\r\n return False\r\n if ((tmp1[0] == 'reg') or ((tmp1[0] == 'add') and (tmp1[2] != 0))) and (((tmp2[0] == 'imm' and tmp2[2] == 1)) or ((tmp2[0] == 'reg') and (infix[1][0] == 'cl'))):\r\n if (String == 'shl') or (String == 'sal'):\r\n a = 0\r\n if tmp1[0] != 'add':\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n\r\n b = tmp2[1]\r\n\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n for i in range(0, b):\r\n a = a * 2\r\n if a >= pow(2, tmp1[2] * 8):\r\n a = a & (pow(2, tmp1[2] * 8) - 1)\r\n self.Flags[\"cf\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'shr':\r\n a = 0\r\n if tmp1[0] != 'add':\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n\r\n b = tmp2[1]\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n for i in range(0, b):\r\n if bool(a & 1):\r\n self.Flags[\"cf\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n a = int(a / 2)\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'sar':\r\n a = 0\r\n if tmp1[0] != 'add':\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = tmp2[1]\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n for i in range(0, b):\r\n if bool(a & 1):\r\n self.Flags[\"cf\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n a = int(a / 2)\r\n if self.Flags[\"sf\"] == 1:\r\n a = a | pow(2, (tmp1[2] * 8) - 1)\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], a)\r\n else:\r\n if not 
self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'rol':\r\n a = 0\r\n if tmp1[0] != 'add':\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = tmp2[1]\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n for i in range(0, b):\r\n a = a * 2\r\n\r\n if a >= pow(2, tmp1[2] * 8):\r\n a = a & (pow(2, tmp1[2] * 8) - 1)\r\n self.Flags[\"cf\"] = 1\r\n a = a | 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'ror':\r\n a = 0\r\n if tmp1[0] != 'add':\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n\r\n b = tmp2[1]\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n for i in range(0, b):\r\n if bool(a & 1):\r\n self.Flags[\"cf\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n a = int(a / 2)\r\n if self.Flags[\"cf\"] == 1:\r\n a = a | pow(2, (tmp1[2] * 8) - 1)\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'rcl':\r\n a = 0\r\n if tmp1[0] != 'add':\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = tmp2[1]\r\n\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n for i in range(0, b):\r\n a = a * 2\r\n\r\n if a >= pow(2, tmp1[2] * 8):\r\n a = a & (pow(2, tmp1[2] * 8) - 1)\r\n if self.Flags[\"cf\"] == 1:\r\n a = a | 1\r\n self.Flags[\"cf\"] = 1\r\n\r\n else:\r\n if self.Flags[\"cf\"] == 1:\r\n a = a | 1\r\n self.Flags[\"cf\"] = 0\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'rcr':\r\n a = 0\r\n if tmp1[0] != 'add':\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = tmp2[1]\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + 
b\r\n if b < 0:\r\n return False\r\n\r\n for i in range(0, b):\r\n f = self.Flags[\"cf\"]\r\n if bool(a & 1):\r\n self.Flags[\"cf\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n a = int(a / 2)\r\n if f == 1:\r\n a = a | pow(2, (tmp1[2] * 8) - 1)\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n else:\r\n return False\r\n\r\n return True",
"def apply_action(self, action_input_obj: Input):\n # implement frameskip(k) by sending the action (k+1) times every time we have an action.\n for _ in range(self.frames_per_action):\n self.rstate.get_state().apply_action(action_input_obj)",
"def __rlshift__(self, *args):\n return _libsbml.string___rlshift__(self, *args)",
"def step(self):\n self.emit_symbol()\n self.change_state()",
"def shift_char(char, shift, charset):\n index = (charset.index(char) + shift) % len(charset)\n return charset[index]",
"def input_shortcut(symbol, name=None):\n def input_method(self, *args, **kwargs):\n return self.input(symbol, *args, **kwargs)\n input_method.__name__ = str(name) if name is not None else (\"input_\" + str(symbol))\n input_method.__doc__ = \"Shortcut method to feed symbol '%s' into the FSM.\" % str(symbol)\n return input_method",
"def right_shift_all(expr, s=None, t=None, func_symbols=[]):\r\n\r\n expr = expr.expand()\r\n\r\n if isinstance(expr, sp.Matrix):\r\n def fnc(a):\r\n return right_shift_all(a, s, t, func_symbols)\r\n return expr.applyfunc(fnc)\r\n\r\n assert isinstance(expr, sp.Basic)\r\n\r\n if isinstance(expr, sp.Add):\r\n args = expr.args\r\n elif isinstance(expr, (sp.Mul, sp.Atom)):\r\n args = (expr,)\r\n elif isinstance(expr, sp.Pow):\r\n base, expo = expr.args\r\n assert int(expo) == expo\r\n assert expo < 0\r\n args = (expr,)\r\n\r\n else:\r\n raise ValueError, \"unexpected type: %s\" % type(expr)\r\n\r\n res = 0\r\n for a in args:\r\n assert isinstance(a, (sp.Mul, sp.Atom, sp.Pow))\r\n res += right_shift(a, s, t, func_symbols)\r\n\r\n return res",
"def shift(self, direction):\n direct, pos = tuple(direction)\n\n board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct]\n board[int(pos)].shift(direction=self.direct[direct])",
"def process(opcode):\n opcode.process()",
"def right_shift(self):\n register = (self.opcode & 0xFFF) >> 8\n bits = self.registers[register]\n \"\"\"if bits & 0b1 == 1:\n self.registers[0xF] = 1\n else:\n self.registers[0xF] = 0\n \"\"\"\n self.registers[0xF] = bits & 0b1\n self.registers[register] = self.registers[register] >> 1\n logger.info(\"Shifted register V{} 1 bit to the right got {}\".format(\n register,\n hex(self.registers[register])))",
"def apply_shift(text, shift):\n ### TODO.\n encoder = build_coder(shift)\n # print encoder\n encoded_text = apply_coder(text, encoder)\n # print \"'\" + encoded_text + \"'\"\n return encoded_text",
"def Spm():\n\n global Asm\n\n if dec.Asm.Parse_Pointer == 0 or not dec.Asm.Optional:\n # No operand, use defalt Z index\n target.CodeWord(dec.Asm.Instructions[dec.Asm.Mnemonic][3])\n else:\n # An operand is given, must be Z or Z+\n value = GetIndex()\n\n if value[1] != 'Z' or value[0] == 2 or value[2] != 0:\n # Illegal index register\n errors.DoError('badoper', False)\n index = 0 # Dummy mode\n else:\n # Legal index register\n index = value[0]\n\n target.CodeWord(dec.Asm.Instructions[dec.Asm.Mnemonic][3] +\n (index << 4))\n\n NoMore()",
"def shift(self, direction):\n try:\n if direction == Direction.UP:\n return self.shift_up()\n elif direction == Direction.DOWN:\n return self.shift_down()\n elif direction == Direction.RIGHT:\n return self.shift_right()\n elif direction == Direction.LEFT:\n return self.shift_left()\n else:\n raise IndexError(\"Invalid direction {}\".format(direction))\n except IndexError as e:\n raise IndexError(e)",
"def shift_players_pokemon(self, shift_action: ShiftActionModel) -> None:\n\n shift_action.shift(self._battle)\n self._battle_scene.shift_players_pokemon(shift_action)",
"def make_action(self, game, node, action, moves):\n pass",
"def rshift(self, attr):\n return self.set_child_and_return(shifter.rshift(self.statement, attr))"
] | [
"0.64362437",
"0.58083284",
"0.5472519",
"0.5405932",
"0.534024",
"0.5325374",
"0.5295643",
"0.5295643",
"0.5265304",
"0.5261245",
"0.5103639",
"0.5092218",
"0.49588338",
"0.49423075",
"0.49236375",
"0.48676485",
"0.48510936",
"0.4850286",
"0.48325142",
"0.47916",
"0.47611374",
"0.47599384",
"0.47544077",
"0.47441345",
"0.47415882",
"0.47293854",
"0.47184852",
"0.4709228",
"0.47057927",
"0.470004"
] | 0.60593754 | 1 |
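The record above exercises parglare's shift-time dispatch of symbol actions. Below is a minimal, hedged sketch of how such an action is registered from user code; it uses only the Grammar.from_string, Parser(..., actions=...) and parse calls that appear verbatim in the quoted negatives, while the grammar text, the str_action name and the input "a b" are illustrative assumptions, not part of the record.

from parglare import Grammar, Parser

# Illustrative grammar; @str_action binds the rule to the action registered
# below, mirroring the decorator style used in the quoted test snippets.
grammar = Grammar.from_string("""
S: A B;
A: "a";
@str_action
B: "b";
""")

seen = []

def str_action(context, value):
    # Called through the action dispatch shown above; `value` is the matched
    # string for a terminal, or the sub-results list if B is kept as a
    # one-production rule.
    seen.append(value)
    return value

parser = Parser(grammar, actions={"str_action": str_action})
result = parser.parse("a b")  # `seen` records what was matched for B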
Calls the registered reduce action for the given grammar symbol. | def _call_reduce_action(self, context, subresults):
debug = self.debug
result = None
bt_result = None
production = context.production
if self.build_tree:
# call action for building tree node if enabled.
if debug:
h_print("Building non-terminal node",
"'{}'.".format(production.symbol.name), level=2)
bt_result = NodeNonTerm(context, children=subresults,
production=production)
context.node = bt_result
if not self.call_actions_during_tree_build:
return bt_result
sem_action = production.symbol.action
if sem_action:
assignments = production.assignments
if assignments:
assgn_results = {}
for a in assignments.values():
if a.op == '=':
assgn_results[a.name] = subresults[a.index]
else:
assgn_results[a.name] = bool(subresults[a.index])
if type(sem_action) is list:
if assignments:
result = sem_action[production.prod_symbol_id](
context, subresults, **assgn_results)
else:
result = sem_action[production.prod_symbol_id](context,
subresults)
else:
if assignments:
result = sem_action(context, subresults, **assgn_results)
else:
result = sem_action(context, subresults)
else:
if debug:
h_print("No action defined",
" for '{}'.".format(production.symbol.name), level=1)
if len(subresults) == 1:
if debug:
h_print("Unpacking a single subresult.", level=1)
result = subresults[0]
else:
if debug:
h_print("Result is a list of subresults.", level=1)
result = subresults
if debug:
h_print("Action result =",
"type:{} value:{}"
.format(type(result), repr(result)), level=1)
# If build_tree is set to True, discard the result of the semantic
# action and return the built tree node (bt_result) instead.
return bt_result if bt_result is not None else result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _call_reduce_action(self, context, subresults):\n debug = self.debug\n result = None\n bt_result = None\n production = context.production\n\n if self.build_tree:\n # call action for building tree node if enabled.\n if debug:\n h_print(\"Building non-terminal node\",\n \"'{}'.\".format(production.symbol.name), level=2)\n\n bt_result = treebuild_reduce_action(context, nodes=subresults)\n if not self.call_actions_during_tree_build:\n return bt_result\n\n sem_action = production.symbol.action\n if sem_action:\n assignments = production.assignments\n if assignments:\n assgn_results = {}\n for a in assignments.values():\n if a.op == '=':\n assgn_results[a.name] = subresults[a.index]\n else:\n assgn_results[a.name] = bool(subresults[a.index])\n\n if type(sem_action) is list:\n if assignments:\n result = sem_action[production.prod_symbol_id](\n context, subresults, **assgn_results)\n else:\n result = sem_action[production.prod_symbol_id](context,\n subresults)\n else:\n if assignments:\n result = sem_action(context, subresults, **assgn_results)\n else:\n result = sem_action(context, subresults)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \" for '{}'.\".format(production.symbol.name), level=1)\n if len(subresults) == 1:\n if debug:\n h_print(\"Unpacking a single subresult.\", level=1)\n result = subresults[0]\n else:\n if debug:\n h_print(\"Result is a list of subresults.\", level=1)\n result = subresults\n\n if debug:\n h_print(\"Action result =\",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n # If build_tree is set to True, discard the result of the semantic\n # action, and return the result of treebuild_reduce_action.\n return bt_result if bt_result is not None else result",
"def process (self, inputSymbol):\n \n self.inputSymbol = inputSymbol\n (self.action, self.nextState) = self.getTransition (self.inputSymbol, self.currentState)\n \n if self.action is not None:\n self.action (self)\n \n self.memoryState.append(self.currentState)\n self.currentState = self.nextState\n self.nextState = None",
"def call_actions(self, node):\n def inner_call_actions(node):\n sem_action = node.symbol.action\n if node.is_term():\n if sem_action:\n try:\n result = sem_action(node.context, node.value,\n *node.additional_data)\n except TypeError as e:\n raise TypeError('{}: terminal={} action={} params={}'\n .format(\n str(e),\n node.symbol.name,\n repr(sem_action),\n (node.context, node.value,\n node.additional_data))) from e\n else:\n result = node.value\n else:\n subresults = []\n # Recursive right to left, bottom up. Simulate LR\n # reductions.\n for n in reversed(node):\n subresults.append(inner_call_actions(n))\n subresults.reverse()\n\n if sem_action:\n assignments = node.production.assignments\n if assignments:\n assgn_results = {}\n for a in assignments.values():\n if a.op == '=':\n assgn_results[a.name] = subresults[a.index]\n else:\n assgn_results[a.name] = \\\n bool(subresults[a.index])\n if type(sem_action) is list:\n if assignments:\n result = \\\n sem_action[\n node.production.prod_symbol_id](\n node, subresults, **assgn_results)\n else:\n result = \\\n sem_action[\n node.production.prod_symbol_id](\n node.context, subresults)\n else:\n if assignments:\n result = sem_action(node.context, subresults,\n **assgn_results)\n else:\n result = sem_action(node.context, subresults)\n else:\n if len(subresults) == 1:\n # Unpack if single subresult\n result = subresults[0]\n else:\n result = subresults\n\n return result\n\n return inner_call_actions(node)",
"def _reduce(self, action):\n assert len(self.stack) >= 2, \"ERROR: Cannot reduce with stack length less than 2\"\n \n # STUDENT\n # hint: use list.pop()\n # END STUDENT\n rightarc = self.stack.pop()\n leftarc = self.stack.pop()\n head = rightarc if action == Actions.REDUCE_L else leftarc\n mod = leftarc if action == Actions.REDUCE_L else rightarc\n self.stack.append( StackEntry(head.headword, head.headword_pos, self.combiner(head.embedding,mod.embedding)) )\n return DepGraphEdge((head.headword, head.headword_pos),(mod.headword, mod.headword_pos))",
"def test_parglare_builtin_action_override_repetition():\n # B+ will product B_1 rule with `collect` common action\n grammar = \"\"\"\n S: B+;\n B: \"b\";\n \"\"\"\n\n called = [False]\n\n def my_collect(_, __):\n called[0] = True\n return \"pass\"\n\n my_actions = {\n \"collect\": my_collect,\n }\n\n g = Grammar.from_string(grammar)\n p = Parser(g, actions=my_actions)\n assert p.parse(\"b b\") == 'pass'\n assert called[0]",
"def append(self, state, symbol, action, destinationstate, production = None):\r\n if action not in (None, \"Accept\", \"Shift\", \"Reduce\"):\r\n raise TypeError\r\n if not state in self:\r\n self[state] = {}\r\n rule = {\"action\":action, \"dest\":destinationstate}\r\n if action == \"Reduce\":\r\n if rule is None:\r\n raise TypeError(\"Expected production parameter\")\r\n rule[\"rule\"] = production\r\n if isinstance(symbol, list) and len(symbol) == 1:\r\n symbol = symbol[0]\r\n if not isinstance(symbol, Symbol):\r\n raise TypeError(\"Expected symbol, got %s\" % symbol)\r\n self[state][symbol] = rule",
"def call_actions(self, node, context=None):\n self.context = context = context if context else Context()\n context.parser = self\n\n def set_context(context, node):\n context.start_position = node.start_position\n context.end_position = node.end_position\n context.node = node\n context.production = None\n context.token = None\n context.layout_content = node.layout_content\n\n def inner_call_actions(node):\n sem_action = node.symbol.action\n if isinstance(node, NodeTerm):\n if sem_action:\n set_context(context, node)\n try:\n result = sem_action(context, node.value,\n *node.additional_data)\n except TypeError as e:\n raise TypeError('{}: terminal={} action={} params={}'\n .format(\n str(e),\n node.symbol.name,\n repr(sem_action),\n (context, node.value,\n node.additional_data))) from e\n else:\n result = node.value\n else:\n subresults = []\n # Recursive right to left, bottom up. Simulate LR\n # reductions.\n for n in reversed(node):\n subresults.append(inner_call_actions(n))\n subresults.reverse()\n\n if sem_action:\n set_context(context, node)\n context.production = node.production\n assignments = node.production.assignments\n if assignments:\n assgn_results = {}\n for a in assignments.values():\n if a.op == '=':\n assgn_results[a.name] = subresults[a.index]\n else:\n assgn_results[a.name] = \\\n bool(subresults[a.index])\n if type(sem_action) is list:\n if assignments:\n result = \\\n sem_action[\n node.production.prod_symbol_id](\n context, subresults, **assgn_results)\n else:\n result = \\\n sem_action[\n node.production.prod_symbol_id](context,\n subresults)\n else:\n if assignments:\n result = sem_action(context, subresults,\n **assgn_results)\n else:\n result = sem_action(context, subresults)\n else:\n if len(subresults) == 1:\n # Unpack if single subresult\n result = subresults[0]\n else:\n result = subresults\n\n return result\n\n return inner_call_actions(node)",
"def convert_symbol_to_raw_actions(self, symbol, rules):\n assert not isinstance(symbol, list)\n assert isinstance(symbol, str) or isinstance(symbol, int)\n symbol = [symbol]\n finished = False\n while not finished:\n new_symbol = []\n for symbol_val in symbol:\n if symbol_val in rules.keys():\n new_symbol.append(rules[symbol_val][0])\n new_symbol.append(rules[symbol_val][1])\n else:\n new_symbol.append(symbol_val)\n if new_symbol == symbol: finished = True\n else: symbol = new_symbol\n new_symbol = tuple(new_symbol)\n return new_symbol",
"def process(opcode):\n opcode.process()",
"def _call_shift_action(self, context):\n debug = self.debug\n token = context.token\n sem_action = token.symbol.action\n\n if self.build_tree:\n # call action for building tree node if tree building is enabled\n if debug:\n h_print(\"Building terminal node\",\n \"'{}'.\".format(token.symbol.name), level=2)\n\n # If both build_tree and call_actions_during_build are set to\n # True, semantic actions will be call but their result will be\n # discarded. For more info check following issue:\n # https://github.com/igordejanovic/parglare/issues/44\n if self.call_actions_during_tree_build and sem_action:\n sem_action(context, token.value, *token.additional_data)\n\n return NodeTerm(context, token)\n\n if sem_action:\n result = sem_action(context, token.value, *token.additional_data)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \"for '{}'. \"\n \"Result is matched string.\".format(token.symbol.name),\n level=1)\n result = token.value\n\n if debug:\n h_print(\"Action result = \",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n return result",
"def reducer(token_pair):\n\treturn (token_pair[0], sum(token_pair[1]))",
"def consume(self, word):\r\n visited = set()\r\n length = len(word)\r\n\r\n @lru_cache(maxsize=None)\r\n def parse(elem, idx):\r\n if elem.isNonterminal():\r\n if (elem, idx) in visited:\r\n raise LeftRecursive(\"Left recursive is prohibited: \" + elem.symbol)\r\n else:\r\n visited.add((elem, idx))\r\n return parse(elem.tree.arg.exp, idx)\r\n if elem is epsilon:\r\n if idx <= length:\r\n return 0\r\n else:\r\n return None\r\n if isinstance(elem, Terminal):\r\n if idx < length:\r\n return 1 if word[idx] == elem.symbol else None\r\n else:\r\n return None\r\n if elem.symbol == '>>':\r\n r1 = parse(elem.tree.left.exp, idx)\r\n if r1 is None:\r\n return None\r\n r2 = parse(elem.tree.right.exp, idx + r1)\r\n if r2 is None:\r\n return None\r\n return r1 + r2\r\n if elem.symbol == '|':\r\n r1 = parse(elem.tree.left.exp, idx)\r\n if r1 != None:\r\n return r1\r\n return parse(elem.tree.right.exp, idx)\r\n if elem.symbol == '~':\r\n r1 = parse(elem.tree.arg.exp, idx)\r\n return 0 if r1 is None else None\r\n if elem.symbol == '+':\r\n r1 = parse(elem.tree.arg.exp, idx)\r\n if r1 is None:\r\n return None\r\n r2 = parse(elem, idx + r1)\r\n return r1 if r2 is None else r1 + r2\r\n\r\n result = parse(self, 0)\r\n parse.cache_clear()\r\n return result",
"def _call(self, k_spec):\n if \"reducer_type\" not in k_spec.keys():\n raise ValueError(\"Did not specify the type of the global descriptor reducer.\")\n if k_spec[\"reducer_type\"] == \"average\":\n return Atomic_2_Global_Average(k_spec)\n if k_spec[\"reducer_type\"] == \"sum\":\n return Atomic_2_Global_Sum(k_spec)\n if k_spec[\"reducer_type\"] == \"moment_average\":\n return Atomic_2_Global_Moment_Average(k_spec)\n if k_spec[\"reducer_type\"] == \"moment_sum\":\n return Atomic_2_Global_Moment_Sum(k_spec)\n else:\n raise NotImplementedError",
"def test_user_grammar_actions():\n grammar = \"\"\"\n S: A B C;\n @nonterm_action\n C: A B;\n A: \"a\";\n @term_action\n B: \"b\";\n \"\"\"\n\n called = [False, False]\n\n def nonterm_action(_, __):\n called[0] = True\n\n def term_action(_, __):\n called[1] = True\n\n my_actions = {\n \"nonterm_action\": nonterm_action,\n \"term_action\": term_action,\n }\n\n g = Grammar.from_string(grammar)\n p = Parser(g, actions=my_actions)\n assert p.parse(\"a b a b\")\n assert all(called)",
"def _call_shift_action(self, context):\n debug = self.debug\n token = context.token\n sem_action = token.symbol.action\n\n if self.build_tree:\n # call action for building tree node if tree building is enabled\n if debug:\n h_print(\"Building terminal node\",\n \"'{}'.\".format(token.symbol.name), level=2)\n\n # If both build_tree and call_actions_during_build are set to\n # True, semantic actions will be call but their result will be\n # discarded. For more info check following issue:\n # https://github.com/igordejanovic/parglare/issues/44\n if self.call_actions_during_tree_build and sem_action:\n sem_action(context, token.value, *token.additional_data)\n\n return treebuild_shift_action(context)\n\n if sem_action:\n result = sem_action(context, token.value, *token.additional_data)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \"for '{}'. \"\n \"Result is matched string.\".format(token.symbol.name),\n level=1)\n result = token.value\n\n if debug:\n h_print(\"Action result = \",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n return result",
"def define_action(char):\n if char == \"checks\":\n return ActionType.CHECK\n elif char == \"folds\":\n return ActionType.FOLD\n elif char == \"bets\":\n return ActionType.BET\n elif char == \"raises\":\n return ActionType.RAISE\n elif char == \"calls\":\n return ActionType.CALL\n else:\n return ActionType.UNDEFINED",
"def _doReduce(self, func):\n name = \"Reducer\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n proc = [mp.Process(target=spawn_reducer(func), name=name) for _ in range(self.num_workers)]\n for p in proc:\n p.daemon = True\n p.start()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)",
"def take_action(self, action):\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )",
"def reduce(self, f, initial):\n self.append(Reducer(f, initial))\n return self",
"def decodeSRaction(tree):\n # Start decoding\n post_nodelist = postorder_DFT(tree, [])\n # print len(post_nodelist)\n actionlist = []\n for node in post_nodelist:\n if (node.lnode is None) and (node.rnode is None):\n actionlist.append(('Shift', None, None))\n elif (node.lnode is not None) and (node.rnode is not None):\n form = node.form\n if (form == 'NN') or (form == 'NS'):\n relation = extractrelation(node.rnode.relation)\n else:\n relation = extractrelation(node.lnode.relation)\n actionlist.append(('Reduce', form, relation))\n else:\n raise ValueError(\"Can not decode Shift-Reduce action\")\n return actionlist",
"def apply_action(self, action):\n return self.__environment.step(action)",
"def reduce_run():",
"def call_action(self, action):\n pass",
"def evaluation_reducer(self) -> Union[Reducer, Dict[str, Reducer]]:\n return Reducer.AVG",
"def perform ( self, action, action_event = None ):\r\n getattr( self.editor, action.action )()",
"def action(self):\n returns = []\n for command in self._commands:\n #try:\n returns.append(eval(command))\n #except: #TODO Shouldn't except without specifying a type or indicating what the error is\n # print \"Error: Could not execute rule action:\", command, str(self.device)\n \n self.calls += 1\n self.last_call_time = time.time()\n return returns",
"def rule_action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule_action\")",
"def parse(self, input_string):\n print(\"----------- Syntax analysis -----------\")\n table = self.generate_table()\n self.workingStack = ['0']\n self.inputStack = [char for char in input_string]\n self.output = []\n try:\n print(\"--------- Parsing ---------\")\n while len(self.workingStack) != 0:\n state = int(self.workingStack[-1]) # which dict from parsing table, index of state\n if len(self.inputStack) > 0:\n char = self.inputStack.pop(0)\n else:\n char = None\n if table[state]['action'] == 'shift':\n # Shift operation on the stack\n if char not in table[state]:\n raise (Exception(\"Syntax error! Expected \" + str(table[state]) +\n \"!\\nCannot parse shift. Character: \" + char))\n self.workingStack.append(char)\n self.workingStack.append(table[state][char])\n elif table[state]['action'] == 'acc':\n # Accept operation, sequence is accepted\n if len(self.inputStack) != 0:\n raise (Exception(\"Syntax error! Expected \" + str(table[state]) +\n \"!\\nCannot parse accept. Character: \" + char))\n self.workingStack.clear()\n else:\n # Reduce operation on the stack\n reduce_state = int(table[state]['action'].split(' ')[1])\n reduce_production = self.grammar.P[reduce_state]\n to_remove_from_working_stack = [symbol for symbol in reduce_production[1]]\n while len(to_remove_from_working_stack) > 0 and len(self.workingStack) > 0:\n if self.workingStack[-1] == to_remove_from_working_stack[-1]:\n to_remove_from_working_stack.pop()\n self.workingStack.pop()\n if len(to_remove_from_working_stack) != 0:\n raise (Exception('Syntax error!' +\n '!\\nCannot parse reduce. Character: ', char))\n self.inputStack.insert(0, char)\n self.inputStack.insert(0, reduce_production[0])\n self.output.insert(0, reduce_state)\n print('Syntax analysis successfully. Yay!')\n except Exception as ex:\n raise Exception(ex)\n print()\n return self.output",
"def act(symbol):\n if symbol == pyglet.window.key.SPACE:\n return 1 # jump up\n elif symbol == pyglet.window.key.W:\n return 2 # move up\n elif symbol == pyglet.window.key.D:\n return 3 # move right\n elif symbol == pyglet.window.key.A:\n return 4 # move left\n elif symbol == pyglet.window.key.S:\n return 5 # move down\n elif symbol == pyglet.window.key.E:\n return 11 # jump right\n elif symbol == pyglet.window.key.Q:\n return 12 # jump left\n else:\n return 0 # noop",
"def _register_builtin_reduce_func():\n for reduce_op in [\"max\", \"min\", \"sum\", \"mean\"]:\n builtin = _gen_reduce_builtin(reduce_op)\n setattr(sys.modules[__name__], reduce_op, builtin)\n __all__.append(reduce_op)"
] | [
"0.57662225",
"0.5551271",
"0.53377557",
"0.52707666",
"0.52382267",
"0.52345824",
"0.5143216",
"0.51314694",
"0.50739235",
"0.49302822",
"0.49294648",
"0.48488995",
"0.4841632",
"0.48236185",
"0.47548437",
"0.47397217",
"0.47113505",
"0.45758638",
"0.45712143",
"0.45620832",
"0.4547165",
"0.45303404",
"0.45271188",
"0.4499337",
"0.44906634",
"0.44870296",
"0.4483054",
"0.44589552",
"0.4438654",
"0.44149393"
] | 0.5647278 | 1 |
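The reduce-action dispatch above hands a production's sub-results (and any named assignments) to the registered action. A minimal sketch of a user-defined reduce action follows, under the same assumptions as the previous sketch; the Pair rule, the @pair action name and the expected output are illustrative.

from parglare import Grammar, Parser

# Illustrative grammar: Pair+ relies on the built-in `collect` action for the
# repetition, while @pair names the reduce action registered below.
grammar = Grammar.from_string("""
S: Pair+;
@pair
Pair: "a" "b";
""")

def pair(context, subresults):
    # Reduce action: receives the production's sub-results, as in the
    # dispatch code above; here subresults should be ["a", "b"].
    return tuple(subresults)

parser = Parser(grammar, actions={"pair": pair})
result = parser.parse("a b a b")  # expected roughly [("a", "b"), ("a", "b")]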
For the given list of matched tokens, apply the disambiguation strategy. | def _lexical_disambiguation(self, tokens):
if self.debug:
h_print("Lexical disambiguation.",
" Tokens: {}".format([x for x in tokens]), level=1)
if len(tokens) <= 1:
return tokens
# Longest-match strategy.
max_len = max((len(x.value) for x in tokens))
tokens = [x for x in tokens if len(x.value) == max_len]
if self.debug:
h_print("Disambiguation by longest-match strategy.",
"Tokens: {}".format([x for x in tokens]), level=1)
if len(tokens) == 1:
return tokens
# try to find preferred token.
pref_tokens = [x for x in tokens if x.symbol.prefer]
if pref_tokens:
if self.debug:
h_print("Preferring tokens {}.".format(pref_tokens),
level=1)
return pref_tokens
return tokens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disambiguateWords(self, word_list, tag_list):\n\t\t# print u\" \".join(word_list).encode('utf8');\n\t\t# print u\" \".join(tag_list).encode('utf8');\n\t\n\t\tif len(word_list)==0 or len(word_list)!=len(tag_list):\n\t\t\treturn word_list;\n\t\telse:\n\t\t\tnewwordlist=[];\n\t\t\twordtaglist=list(zip(word_list,tag_list));\n\t\t\t# print wordtaglist\n\t\t\tfor i in range(len(wordtaglist)):\n\t\t\t\tcurrentWord=wordtaglist[i][0]; \n\t\t\t\tif i+1<len(wordtaglist):\n\t\t\t\t\tnextTag=wordtaglist[i+1][1];\n\t\t\t\t\t# if the current exists in disambig table,\n\t\t\t\t\t# and the next is similar to the expected tag, return vocalized word form\n\t\t\t\t\tif self.isAmbiguous(currentWord):\n\t\t\t\t\t\t# test if expected tag is verb and \n\t\t\t\t\t\tif self.tagger.isVerbTag(nextTag) and self.isDisambiguatedByNextVerb(currentWord) :\n\t\t\t\t\t\t\tcurrentWord = self.getDisambiguatedByNextVerb(currentWord);\n\t\t\t\t\t\telif self.tagger.isNounTag(nextTag) and self.isDisambiguatedByNextNoun(currentWord) :\n\t\t\t\t\t\t\tcurrentWord = self.getDisambiguatedByNextNoun(currentWord);\n\t\t\t\tnewwordlist.append(currentWord);\n\t\t\treturn newwordlist;",
"def _lexical_disambiguation(self, context, tokens):\n\n if self.debug:\n h_print(\"Lexical disambiguation.\",\n \" Tokens: {}\".format([x for x in tokens]), level=1)\n\n if len(tokens) <= 1:\n return tokens\n\n # prefer STOP over EMPTY\n if STOP_token in tokens:\n tokens = [t for t in tokens if t != EMPTY_token]\n\n # Longest-match strategy.\n max_len = max((len(x.value) for x in tokens))\n tokens = [x for x in tokens if len(x.value) == max_len]\n if self.debug:\n h_print(\"Disambiguation by longest-match strategy.\",\n \"Tokens: {}\".format([x for x in tokens]), level=1)\n if len(tokens) == 1:\n return tokens\n\n # try to find preferred token.\n pref_tokens = [x for x in tokens if x.symbol.prefer]\n if pref_tokens:\n if self.debug:\n h_print(\"Preferring tokens {}.\".format(pref_tokens),\n level=1)\n return pref_tokens\n\n return tokens",
"def disambiguateWordsOld(self, word_list, tag_list):\n\t\t# print u\" \".join(word_list).encode('utf8');\n\t\t# print u\" \".join(tag_list).encode('utf8');\t\t\t\n\t\n\t\tif len(word_list)==0 or len(word_list)!=len(tag_list):\n\t\t\treturn word_list;\n\t\telse:\n\t\t\tnewwordlist=[];\n\t\t\twordtaglist=zip(word_list,tag_list);\n\t\t\t# print wordtaglist\n\t\t\tfor i in range(len(wordtaglist)):\n\t\t\t\tif i+1<=len(wordtaglist):\n\t\t\t\t\t# do tests with next word\n\t\t\t\t\t# إذا كانت الكلمة الحالية \"أن\" تكون \"أنْ\" حرف نصب إذا سبقت فعلا\n\t\t\t\t\t# وتكون أنّ، من أخوات إنّ إذا كان ما بعدها اسما\n\t\t\t\t\tif wordtaglist[i][0]==u'أن' and self.tagger.isVerbTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case1';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنْ','t');\n\t\t\t\t\telif wordtaglist[i][0]==u'أن' and self.tagger.isNounTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case 2';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنَّ','t');\n\t\t\t\tnewwordlist.append(wordtaglist[i][0]);\n\t\t\treturn newwordlist;",
"def test_disambiguate(self):\n self.assertEqual(self.RNA(\"\").disambiguate(), \"\")\n self.assertEqual(\n self.RNA(\"AGCUGAUGUA--CAGU\").disambiguate(), \"AGCUGAUGUA--CAGU\"\n )\n self.assertEqual(\n self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\").disambiguate(\"strip\"), \"AU--CG\"\n )\n s = self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\")\n t = s.disambiguate(\"random\")\n u = s.disambiguate(\"random\")\n for i, j in zip(str(s), str(t)):\n if i in s.moltype.degenerates:\n assert j in s.moltype.degenerates[i]\n else:\n assert i == j\n self.assertNotEqual(t, u)\n self.assertEqual(len(s), len(t))",
"def convert_ambigs(strings, alph):\n ms = alph.translator(False)\n for i in range(len(strings)):\n strings[i] = strings[i].translate(ms)\n return(strings)",
"def processwords(list_of_matches, lemmatag = False):\n list_of_matches = [w.lower() for w in list_of_matches]\n # remove nonwords, strip . to normalise \"dr.\"\n if translated_option != 'o' and translated_option != 'u':\n list_of_matches = [w.lstrip('.').rstrip('.') for w in list_of_matches if re.search(regex_nonword_filter, w)]\n \n list_of_matches.sort()\n \n # tokenise if multiword:\n if phrases and not n_gramming:\n from nltk import word_tokenize as word_tokenize\n list_of_matches = [word_tokenize(i) for i in list_of_matches]\n\n # this is just for plaintext ... should convert to unicode on file open\n if datatype == 'plaintext':\n try:\n list_of_matches = [unicode(w, errors = 'ignore') for w in list_of_matches]\n except TypeError:\n pass\n\n if not dependency and exclude and 'w' in exclude.keys():\n list_of_matches = [w for w in list_of_matches if not re.match(exclude['w'], w)]\n\n if lemmatise or 'l' in show:\n if not dependency:\n tag = gettag(query, lemmatag = lemmatag)\n lemmata = lemmatiser(list_of_matches, tag)\n tups = zip(list_of_matches, lemmata)\n res = []\n for w, l in tups:\n single_result = []\n if exclude and 'l' in exclude.keys():\n if re.match(exclude['l'], l):\n continue\n if 'w' in show:\n single_result.append(w)\n if 'l' in show:\n single_result.append(l)\n # bad fix:\n # this currently says, if pos in show, there must only be pos ...\n if 'p' in show:\n if lemmatise:\n single_result.append(l)\n else:\n single_result.append(w)\n\n single_result = '/'.join(single_result)\n res.append(single_result)\n list_of_matches = res\n\n if titlefilter and not dependency:\n list_of_matches = titlefilterer(list_of_matches)\n if spelling:\n list_of_matches = convert_spelling(list_of_matches, spelling = spelling)\n\n # use blacklist option in gui\n if 'blacklist' in kwargs.keys():\n stopwords = False\n if kwargs['blacklist'] is not False:\n if kwargs['blacklist'] is True:\n from dictionaries.stopwords import stopwords as my_stopwords\n stopwords = [i.lower() for i in my_stopwords]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n if type(kwargs['blacklist']) == list:\n stopwords = [i.lower() for i in kwargs['blacklist']]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n regexblacklist = re.compile(kwargs['blacklist'])\n list_of_matches = [w for w in list_of_matches if not re.search(regexblacklist, w)]\n\n #if not split_con:\n # list_of_matches = unsplitter(list_of_matches)\n \n # turn every result into a single string again if need be:\n if phrases:\n output = []\n for res in list_of_matches:\n joined = ' '.join(res)\n output.append(joined)\n return output\n else:\n return list_of_matches",
"def synonym_token_replace(tokens, ignored_tokens=stopwords.words('english'), excluded_token_regex=None,\n max_frequency=None, min_occurrences=None,\n # min_frequency=None, max_occurrences=None,\n # min_document_frequency=None, max_document_frequency=None,\n # min_document_occurrences=None, max_document_occurrences=None,\n num_candidates=25, replace_probability=0.5, tokens_to_replace=None):\n\n if not tokens_to_replace:\n # flatten tuple of tuples and get token dictionary\n token_dict, token_freq, n_tokens = vectorizers._vectorizers.construct_token_dictionary_and_frequency(\n vectorizers.utils.flatten(tokens))\n\n # prune token dictionary depending on parameters supplied by user\n # returns a dictionary of candidate tokens for replacement\n candidate_dict, candidate_freq = vectorizers._vectorizers.prune_token_dictionary(\n token_dict,\n token_freq,\n ignored_tokens=ignored_tokens,\n excluded_token_regex=excluded_token_regex,\n min_frequency=(min_occurrences / n_tokens),\n max_frequency=max_frequency,\n # min_occurrences=min_occurrences,\n # max_occurrences=max_occurrences,\n # min_document_frequency=min_document_frequency,\n # max_document_frequency=max_document_frequency,\n # min_document_occurrences=min_document_occurrences,\n # max_document_occurrences=max_document_occurrences,\n total_tokens=n_tokens,\n total_documents=len(tokens),\n )\n\n # take a random sample of tokens from the candidate dictionary\n tokens_to_replace = random.sample(list(candidate_dict.keys()), num_candidates)\n\n print(\"Tokens for replacement:\")\n print(tokens_to_replace)\n\n # normalize replacement_probability\n norm_prob = np.array([replace_probability, 1 - replace_probability]).reshape(1, -1)\n norm_prob = normalize(norm_prob, axis=1, norm='l1').flatten().tolist()\n\n new_doc_list = []\n for doc in tokens:\n new_doc = []\n for token in doc:\n if token not in tokens_to_replace:\n new_doc.append(token) # new_doc.append(f\"{token}_$$orig\")\n else:\n synonyms = []\n for idx, _ in enumerate(norm_prob):\n synonyms.append(f\"{token}_$${idx}\")\n synonym = np.random.choice(synonyms, p=norm_prob)\n # logging.info(\"replacing '{}' with '{}'\".format(token,synonym)) # print(synonym)\n new_doc.append(str(synonym))\n new_doc_list.append(new_doc)\n\n # change dataset back to tuple of tuples before returning\n new_doc_tuple = tuple(tuple(doc) for doc in new_doc_list)\n return tokens_to_replace, new_doc_tuple",
"def _disambiguate_merged_glosses(self, merged_glosses):\n\t\tprocessed_glosses = {}\n\t\ttotal_glosses = len(merged_glosses)\n\t\tcurrent_gloss = 0\n\t\tdisambiguated_glosses_count = 0\n\t\tskipped_glosses_count = 0\n\n\t\t# go over all glosses and disambiguate them if possible\n\t\tfor gloss_key in merged_glosses:\n\t\t\tcurrent_gloss += 1\n\t\t\tgloss = merged_glosses[gloss_key]\n\t\t\ttaggable_tokens = []\n\t\t\ttagged_tokens = []\n\n\t\t\tif len(gloss.tokens) == 0:\n\t\t\t\tself._log_message(\"WARNING: unmerged gloss {0}\".format(gloss_key))\n\n\t\t\t# for each token inside the gloss determine if the token is already disambiguated, needs to be disambiguated or isnt part of WordNet anyways\n\t\t\tfor token_index in gloss.tokens:\n\t\t\t\ttoken = gloss.tokens[token_index]\n\t\t\t\ttokens_wn_ss_types = list(map(int, re.findall(\"[0-9]+\", token.lemma)))\n\t\t\t\t# tokens that are tagged as \"man\" or \"auto\" are already disambiguated\n\t\t\t\tif token.tag in [\"man\", \"auto\"]:\n\t\t\t\t\ttagged_tokens.append(token)\n\t\t\t\t# if a token is tagged as \"un\" it is not yet annotated, but possibly should be; if the lemma contains entries of the type \"LEMMA%SS_TYPE\" and the POS if the word matches one of the lemma-ss_types it needs to be/can be disambiguated\n\t\t\t\telif token.tag == \"un\" and re.match(\"([a-z]+%[0-9]\\|?)+\", token.lemma) and not set(GLOSSTAG_POS_POSSIBLE_SS_TYPES[token.pos]).isdisjoint(set(tokens_wn_ss_types)) and type(token) != CollocationMember:\n\t\t\t\t\ttaggable_tokens.append(token)\n\n\t\t\t# if there are no remaining taggable tokens, then proceed with the next gloss\n\t\t\tif len(taggable_tokens) == 0:\n\t\t\t\tprocessed_glosses[gloss_key] = gloss\n\t\t\t\tskipped_glosses_count += 1\n\t\t\t\tcontinue\n\t\t\tdisambiguated_glosses_count += 1\n\n\t\t\t## DISAMBUGATION PROCEDURE ##\n\t\t\tprint(\"\\tat gloss {0} of {1}\".format(current_gloss, total_glosses))\n\t\t\tprint(\"\\t\\t...disambiguating {0} words in gloss {1}\".format(len(taggable_tokens), gloss_key))\n\t\t\t# disambiguated_gloss = self._disambiguate_gloss_with_path_similarity(gloss, taggable_tokens, tagged_tokens)\n\t\t\tdisambiguated_gloss = self._disambiguate_gloss_by_most_frequent_sense(gloss, taggable_tokens, tagged_tokens)\n\t\t\t# finished, append to output\n\t\t\tprocessed_glosses[gloss_key] = disambiguated_gloss\n\n\t\tprint(\"\\tdisambiguated {0} glosses, skipped {1}\".format(disambiguated_glosses_count, skipped_glosses_count))\n\n\t\treturn processed_glosses",
"def disambiguate(label, labels):\n label = label.replace(' ', '_')\n if label not in labels:\n return label\n suffix = 1\n while label + ('_%i' % suffix) in labels:\n suffix += 1\n return label + ('_%i' % suffix)",
"def _apply_rule_list(self, word, rules):\n for rule in rules:\n suffix, replacement, condition = rule\n if suffix == \"*d\" and self._ends_double_consonant(word):\n stem = word[:-2]\n if condition is None or condition(stem):\n return stem + replacement\n else:\n # Don't try any further rules\n return word\n if word.endswith(suffix):\n stem = self._replace_suffix(word, suffix, \"\")\n if condition is None or condition(stem):\n return stem + replacement\n else:\n # Don't try any further rules\n return word\n\n return word",
"def disambiguate_all_not_disambiguated(\n celery_batch_size, total_records, indexing_queue_limit, disambiguation_queue_limit\n):\n with current_celery_app.connection_or_acquire() as conn:\n indexer_queue = conn.default_channel.queue_declare(\n queue=\"indexer_task\", passive=True\n )\n disambiguation_queue = conn.default_channel.queue_declare(\n queue=\"disambiguation\", passive=True\n )\n if (\n disambiguation_queue.message_count > disambiguation_queue_limit\n or indexer_queue.message_count > indexing_queue_limit\n ):\n click.echo(\"MQ queues are full, can't run disambiguation\")\n return\n not_disambiguated_records_search = _get_all_not_disambiguated_records_search()\n documents = not_disambiguated_records_search.scan()\n if total_records:\n documents = islice(documents, total_records)\n uuids = (document.meta.id for document in documents)\n _send_celery_group_disambiguation_task(uuids, celery_batch_size)",
"def isAmbiguous(self, word):\n\t\treturn word in disambig_const.DISAMBIGUATATION_TABLE;",
"def disambiguate(self, sense_clusters:List[List[str]],\n definitions:List[str] = None) \\\n -> List[Union[float, Tuple[float, float]]]:\n\n def get_contextualized_embedding(ids):\n outputs = model(ids)[0]\n # out = torch.mean(torch.stack(hidden_states[-2:]), dim=0).squeeze()\n out = outputs.squeeze()\n return out\n\n if definitions is None:\n definitions = ['']*len(sense_clusters)\n load_model()\n hypers_scores = []\n defs_scores = []\n with torch.no_grad():\n cosine = torch.nn.CosineSimilarity(dim=0)\n\n tokens = self.tokenize(do_mask=False)\n target_start, target_end = self.target_indices(do_mask=False)\n cxt_ids = model_tok.encode(tokens)\n cxt_ids = torch.tensor(cxt_ids).unsqueeze(0).to(device)\n cxt_embedding = get_contextualized_embedding(cxt_ids)\n target_embeddings = cxt_embedding[target_start+1:target_end+1, :]\n target_embedding = torch.mean(target_embeddings, dim=0).squeeze()\n\n for sc in sense_clusters:\n if any(x.strip() for x in sc):\n sc_str = ', '.join(x.strip() for x in sc if x.strip())\n sc_tokens = model_tok.tokenize(sc_str)\n sc_inds = [ind+1 for ind in range(len(sc_tokens))\n if sc_tokens[ind] != ',']\n sc_ids = torch.tensor(model_tok.encode(sc_str)).unsqueeze(0).to(device)\n assert len(sc_ids[0]) == len(sc_tokens) + 2, (len(sc_ids[0]), len(sc_tokens))\n sc_embeddings = get_contextualized_embedding(sc_ids)\n sc_embedding = torch.mean(sc_embeddings[sc_inds, :], dim=0)\n sc_score = cosine(sc_embedding, target_embedding).item()\n assert isinstance(sc_score, float), sc_str\n hypers_scores.append(sc_score)\n else:\n hypers_scores.append(0)\n for def_ in definitions:\n if def_.strip():\n def_ids = torch.tensor(model_tok.encode(def_)).unsqueeze(0).to(device)\n def_embeddings = get_contextualized_embedding(def_ids)\n def_embedding = torch.mean(def_embeddings[1:-1, :], dim=0)\n def_score = cosine(def_embedding, target_embedding).item()\n assert isinstance(def_score, float), def_\n defs_scores.append(def_score)\n else:\n defs_scores.append(None)\n logger.debug(f'Sense clusters received: {sense_clusters}, '\n f'scores: {hypers_scores}')\n out = []\n for s_score, def_score in zip(hypers_scores, defs_scores):\n out.append((s_score, def_score) if def_score else s_score)\n return out",
"def _enum_stem_match(enum_hypothesis_list, enum_reference_list, stemmer=PorterStemmer()):\n stemmed_enum_list1 = [(word_pair[0], stemmer.stem(word_pair[1])) \\\n for word_pair in enum_hypothesis_list]\n\n stemmed_enum_list2 = [(word_pair[0], stemmer.stem(word_pair[1])) \\\n for word_pair in enum_reference_list]\n\n word_match, enum_unmat_hypo_list, enum_unmat_ref_list = \\\n _match_enums(stemmed_enum_list1, stemmed_enum_list2)\n\n enum_unmat_hypo_list = list(zip(*enum_unmat_hypo_list)) if len(enum_unmat_hypo_list) > 0 else []\n\n enum_unmat_ref_list = list(zip(*enum_unmat_ref_list)) if len(enum_unmat_ref_list) > 0 else []\n\n enum_hypothesis_list = list(filter(lambda x: x[0] not in enum_unmat_hypo_list,\n enum_hypothesis_list))\n\n enum_reference_list = list(filter(lambda x: x[0] not in enum_unmat_ref_list,\n enum_reference_list))\n\n return word_match, enum_hypothesis_list, enum_reference_list",
"def match_finder(word_list):\n dupe_check = []\n match_list = []\n for word in word_list:\n if word in match_list:\n continue\n elif word in dupe_check:\n match_list.append(word)\n else:\n dupe_check.append(word)\n return match_list",
"def __call__(self: TokenMatcher, doc: Doc) -> List[Tuple[str, int, int, None]]:\n mapped_patterns = defaultdict(list)\n matcher = Matcher(self.vocab)\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n mapped_patterns[label].extend(\n _spacyfy(\n self._searcher.match(doc, pattern, **self.defaults),\n pattern,\n )\n )\n for label in mapped_patterns.keys():\n matcher.add(label, mapped_patterns[label])\n matches = matcher(doc)\n if matches:\n extended_matches = [\n (self.vocab.strings[match_id], start, end, None)\n for match_id, start, end in matches\n ]\n extended_matches.sort(key=lambda x: (x[1], -x[2] - x[1]))\n for i, (label, _start, _end, _details) in enumerate(extended_matches):\n on_match = self._callbacks.get(label)\n if on_match:\n on_match(self, doc, i, extended_matches)\n return extended_matches\n else:\n return []",
"def _spacyfy(\n matches: List[List[Optional[Tuple[str, str]]]], pattern: List[Dict[str, Any]]\n) -> List[List[Dict[str, Any]]]:\n new_patterns = []\n if matches:\n for match in matches:\n new_pattern = deepcopy(pattern)\n for i, token in enumerate(match):\n if token:\n del new_pattern[i][token[0]]\n new_pattern[i][\"TEXT\"] = token[1]\n new_patterns.append(new_pattern)\n return new_patterns",
"def _disambiguate_gloss_by_most_frequent_sense(self, gloss, taggable_tokens, tagged_tokens):\n\t\tdisambiguated_gloss = gloss\n\n\t\tfor undisambiguated_token in taggable_tokens:\n\t\t\tpossible_senses = self._get_possible_wn_senses_for_token(undisambiguated_token)\n\n\t\t\tif len(possible_senses) != 0:\n\t\t\t\tmost_frequent_sense = max(possible_senses, key=lambda sense_key: self.reference_wordnet.sense_keys[sense_key][\"tag_cnt\"])\n\t\t\t\tsynset_offset = self.reference_wordnet.sense_keys[most_frequent_sense][\"synset_offset\"]\n\t\t\telse:\n\t\t\t\tmost_frequent_sense = \"no_wn_sense_existing\"\n\t\t\t\tsynset_offset = \"no_wn_sense_existing\"\n\t\t\t\tself._log_message(\"WARNING: no wn sense found for token {0}\".format(undisambiguated_token))\n\n\t\t\ttoken_index = undisambiguated_token.id\n\n\t\t\tif undisambiguated_token.wn_sense_key is None and undisambiguated_token.wn_synset_offset is None:\n\t\t\t\tdisambiguated_gloss.tokens[token_index].tag = \"mfs\"\n\t\t\t\tdisambiguated_gloss.tokens[token_index].wn_sense_key = most_frequent_sense\n\t\t\t\tdisambiguated_gloss.tokens[token_index].wn_synset_offset = synset_offset\n\t\t\telse:\n\t\t\t\tprint(\"WHAT\")\n\n\t\treturn disambiguated_gloss",
"def isDisambiguatedByNextNoun(self, word):\n\t\treturn 'noun' in disambig_const.DISAMBIGUATATION_TABLE.get(word, {});",
"def get_disambiguator(self):",
"def get_disambiguator(self):",
"def _match_short_names(self, token_set_one, token_set_two):\n copy_set_one = token_set_one.copy()\n copy_set_two = token_set_two.copy()\n matching_dict = {}\n\n\n for token in token_set_one:\n res = self.dotted_name_re.search(token)\n if res:\n initials = res.group('name')\n for other_token in token_set_two:\n if other_token.startswith(initials):\n copy_set_one.remove(token)\n try:\n copy_set_two.remove(other_token)\n except KeyError:\n continue\n matching_dict[token] = other_token\n break\n else:\n return False, None, None, None\n\n return True, copy_set_one, copy_set_two, matching_dict",
"def synonym_sentence_append(tokens, ignored_tokens=stopwords.words('english'), excluded_token_regex=None,\n max_frequency=None, min_occurrences=None,\n # min_frequency=None, max_occurrences=None,\n # min_document_frequency=None, max_document_frequency=None,\n # min_document_occurrences=None, max_document_occurrences=None,\n num_candidates=25, tokens_to_replace=None, replace_probability=0.3):\n\n # check if tokens to be replaced are supplied, if not, choose tokens depending on parameters from user\n if not tokens_to_replace:\n # flatten tuple of tuples and get token dictionary\n token_dict, token_freq, n_tokens = vectorizers._vectorizers.construct_token_dictionary_and_frequency(\n vectorizers.utils.flatten(tokens))\n\n # prune token dictionary depending on parameters supplied by user\n # returns a dictionary of candidate tokens for replacement\n candidate_dict, candidate_freq = vectorizers._vectorizers.prune_token_dictionary(\n token_dict,\n token_freq,\n ignored_tokens=ignored_tokens,\n excluded_token_regex=excluded_token_regex,\n min_frequency=(min_occurrences / n_tokens),\n max_frequency=max_frequency,\n # min_occurrences=min_occurrences,\n # max_occurrences=max_occurrences,\n # min_document_frequency=min_document_frequency,\n # max_document_frequency=max_document_frequency,\n # min_document_occurrences=min_document_occurrences,\n # max_document_occurrences=max_document_occurrences,\n total_tokens=n_tokens,\n total_documents=len(tokens),\n )\n\n # take a random sample of tokens from the candidate dictionary\n tokens_to_replace = random.sample(list(candidate_dict.keys()), num_candidates)\n\n print(\"Tokens for replacement:\")\n print(tokens_to_replace)\n\n new_tokens = []\n for sent in tokens:\n word_changed = False\n sent = list(sent)\n # check each token by index and create a deep copy with the changed word at that index and add new sentence\n # to new corpus\n for idx, token in enumerate(sent):\n if token in tokens_to_replace:\n new_sent = copy.deepcopy(sent)\n new_sent[idx] = f\"{token}_$$0\"\n new_tokens.append(new_sent)\n word_changed = True\n # depending on probability, add another copy of the new sentence with the second replacement synonym\n if random.random() <= replace_probability:\n added_sent = copy.deepcopy(sent)\n added_sent[idx] = f\"{token}_$$1\"\n new_tokens.append(added_sent)\n # if no words were changed, just add the original sentence to the new corpus\n if not word_changed:\n new_tokens.append(sent)\n\n # change dataset back to tuple of tuples before returning\n new_tokens_tuple = tuple(tuple(sent) for sent in new_tokens)\n\n return tokens_to_replace, new_tokens_tuple",
"def disambiguate(self, string: str) -> str:\n if string in self.names:\n return self.disambiguate(f'_{string}')\n return string",
"def condense_matches(matches: List[Tuple[int, ...]]) -> List[Tuple[int, ...]]:\n new_matches = []\n for match in matches:\n if match not in new_matches and tuple(reversed(match)) not in new_matches:\n new_matches.append(match)\n return new_matches",
"def disambiguate(names: list[str], mark: str = \"1\") -> list[str]:\n names_seen = set()\n new_names = []\n for name in names:\n new_name = name\n while new_name in names_seen:\n new_name += mark\n new_names.append(new_name)\n names_seen.add(new_name)\n\n return new_names",
"def get_ambiguous_words(self, sort_on=None):\n multis = [word for word in self.word_tag_dict.keys() if len(self.word_tag_dict[word]) > 1]\n if not sort_on:\n multis.sort()\n return multis",
"def tok_by_list(pattern, list_of_toks, concordancing=False, **kwargs):\n import re\n if isinstance(pattern, STRINGTYPE):\n pattern = [pattern]\n if not case_sensitive:\n pattern = [p.lower() for p in pattern]\n if not concordancing:\n if case_sensitive:\n matches = [m for m in list_of_toks if m in pattern]\n else:\n matches = [m for m in list_of_toks if m.lower() in pattern]\n else:\n matches = []\n for index, token in enumerate(list_of_toks):\n if token in pattern:\n if not split_contractions:\n match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]\n else:\n match = [' '.join(t for t in list_of_toks[:index])[-140:]]\n match.append(token)\n if not split_contractions:\n match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])\n else:\n match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])\n\n matches.append(match)\n if countmode:\n return len(matches)\n else:\n return matches",
"def compareMentionLists(self, dList, aList, mType, irStats, errorOut=None):\n # build lists of overlapping mentions for annotated and detected mentions in this sentence\n potentialMatches = {}\n for aMention in aList:\n potentialMatches[aMention] = []\n for dMention in dList:\n potentialMatches[dMention] = []\n for aMention in aList:\n if dMention.countOverlapTokens(aMention) > 0:\n potentialMatches[dMention].append(aMention)\n potentialMatches[aMention].append(dMention)\n \n # check matches for each detected template\n for dMention in dList:\n aMentionList = potentialMatches[dMention]\n if len(aMentionList) == 1 and dMention.matchAnnotated(aMentionList[0]):\n # there is only one annotated mention that matches this detected one\n # this is either a TP or a DUPLICATE\n annotatedMention = aMentionList[0]\n if len(potentialMatches[annotatedMention]) == 1:\n # this detected mention matches only ONE annotated one, count as TP\n # OTHERWISE, deal with it when we process annotated mentions\n dMention.matchedMention = annotatedMention\n annotatedMention.matchedMention = dMention\n# self.write(errorOut, '+TP: '+dMention.text+' == '+annotatedMention.text+' ('+mType+')\\n')\n self.write(errorOut, '+TP: %s == %s %s (%s)\\n'%(dMention.text, annotatedMention.text, annotatedMention, mType))\n\n irStats.incTP() \n else:\n # this detected mention overlaps multiple annotated mentions. \n # OR it does not match any annotated mention. either way, discard it.\n # count it as a FP\n self.write(errorOut, '-FP: '+dMention.text+' ('+mType+')\\n')\n irStats.incFP()\n for aMention in aMentionList:\n potentialMatches[aMention].remove(dMention)\n self.write(errorOut, 'DETECTED MENTION OVERLAPS '+aMention.text+'\\n')\n potentialMatches[dMention] = []\n\n # check matches for each annotated mention \n for annotatedMention in aList:\n dMatches = potentialMatches[annotatedMention]\n if len(dMatches) == 0:\n # annotated mention was unmatched, count as FN\n irStats.incFN()\n self.write(errorOut, '-FN: '+annotatedMention.text+' ('+mType+')\\n')\n elif len(dMatches) > 1:\n # annotated mention overlapped multiple detected ones\n # check each one to see if it counts as a match\n # If more than one does, count the best match as a TP\n # and the rest as duplicates.\n bestMatches = []\n for dMention in dMatches:\n if dMention.matchAnnotated(annotatedMention):\n overlap = dMention.countOverlapTokens(annotatedMention)\n bestMatches.append([overlap, dMention])\n dMention.matchedMention = annotatedMention\n else:\n # detected mention did not sufficiently match, count as FP\n self.write(errorOut, '-FP: '+dMention.text+' ('+mType+')\\n')\n irStats.incFP()\n\n if len(bestMatches) > 0:\n # count best match\n bestMatches.sort()\n dMention = bestMatches[-1][1]\n dMention.matchedMention = annotatedMention\n annotatedMention.matchedMention = dMention \n self.write(errorOut, '+TP: '+dMention.text+' == '+annotatedMention.text+' ('+mType+')\\n') \n irStats.incTP() \n # count duplicates\n for i in range(0, len(bestMatches)-1):\n irStats.incDuplicates()\n dMention = bestMatches[i][1] \n self.write(errorOut, 'ANNOTATED MENTION ALSO MATCHES ')\n self.write(errorOut, dMention.text+'\\n')\n dMention.matchedMention = annotatedMention\n else:\n # there are no valid matches\n irStats.incFN()\n self.write(errorOut, '-FN: '+annotatedMention.text+' ('+mType+')\\n')",
"def perform_disqualify(responder, options):\n match = options['<match-id>']\n tla = options['<tla>']\n scores.disqualify(match, tla)\n responder('Disqualified {0} in match {1}'.format(tla, match))"
] | [
"0.6592729",
"0.64651644",
"0.6100285",
"0.57694244",
"0.57575583",
"0.5470414",
"0.54121065",
"0.5405158",
"0.5329084",
"0.53212726",
"0.52661914",
"0.5216783",
"0.52162904",
"0.5212118",
"0.5187984",
"0.515725",
"0.5138664",
"0.5132483",
"0.510651",
"0.508598",
"0.508598",
"0.5041221",
"0.50291854",
"0.49698213",
"0.49430662",
"0.49388358",
"0.49378893",
"0.49354962",
"0.4908125",
"0.48811275"
] | 0.6760126 | 0 |
The default recovery strategy is to search from the current location for expected terminals. Returns True if successful, False otherwise. | def default_error_recovery(self, head):
while head.position < len(head.input_str):
head.position += 1
token = self._next_token(head)
if token:
head.token_ahead = token
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _terminal_test(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n self.timeout_depths.append(depth)\n raise SearchTimeout()\n\n beyond_search_depth = depth >= self.search_depth\n no_legal_moves = len(game.get_legal_moves()) == 0\n\n return beyond_search_depth or no_legal_moves",
"def _terminal_test(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n self.timeout_depths.append(depth)\n raise SearchTimeout()\n\n beyond_search_depth = depth >= self.search_depth\n no_legal_moves = len(game.get_legal_moves()) == 0\n\n return beyond_search_depth or no_legal_moves",
"def __is_terminal(self, reward):\n\n # Initialize the terminal signals to false\n done = 0\n exit_cond = 0\n\n # Find readings that are below the set minimum. If there are multiple readings below the threshold, a crash\n # likely occurred and the episode should end\n # print(self.lidar_ranges)\n # indices = np.where(self.lidar_ranges <= self.min_dist)\n # # print(indices)\n # if len(indices[0]) >= self.crash_threshold:\n # exit_cond = 1\n # If the speed is less than 0.3, then the vehicle is pressed against a wall and not moving. Thus, it has crashed.\n # print('Speed: ' + str(self.pos[3]))\n if self.pos[3] < 0.2:\n dist_to_start = np.sqrt((self.pos[0] -self.ref_track[1088, 0])**2 + (self.pos[1] -self.ref_track[1088, 1])**2)\n # print(dist_to_start)\n if dist_to_start > 0.4:\n exit_cond = 1\n\n if reward <= -1.0:\n exit_cond = 1\n\n return done, exit_cond",
"def isTerminal(self) -> bool:\n ...",
"def is_terminal(self):",
"def maze_solver_rec(maze, start, end):\r\n def find_path(maze, pos, end):\r\n mark(maze, pos)\r\n if pos == end:\r\n print(pos, end=' ')\r\n return True\r\n for i in range(4):\r\n nextp = pos[0]+dirs[i][0], pos[1]+dirs[i][1]\r\n if passable(maze, nextp):\r\n if find_path(maze, nextp, end):\r\n print(pos, end=' ')\r\n return True\r\n return False\r\n\r\n print(\"If find, print the path from end to start:\")\r\n if find_path(maze, start, end):\r\n print(\"\\n\")\r\n else:\r\n print(\"No path exists.\")",
"def is_terminal(self) -> bool:\n pass",
"def isTerminalRunning(self):\n return self.f4 is 'R'",
"def terminal_check(self, state, depth):\r\n early_terminated = self.depth_check(depth)\r\n ended, winner = self.terminal_test(state)\r\n\r\n if early_terminated or ended:\r\n return True\r\n\r\n return False",
"def continue_search( self ):\n return True;",
"def check_no_silent_crash(self, override=False):\n if self.results:\n score = self.results.linter.stats.get('global_note', False)\n if score is False:\n messages = self.results.linter.stats.get('by_msg', {})\n if messages.get('syntax-error', False) and not override:\n self.logging.warning('\\n------------------------------------------------------------------')\n self.logging.warning('PYLINT FAILED BECAUSE SYNTAX ERROR.')\n self.logging.warning('------------------------------------------------------------------')\n self.logging.warning('\\n')\n self.failed_files.append(self.fname)\n return False\n self.logging.info('\\n------------------------------------------------------------------')\n self.logging.info('FILE WAS IGNORED.')\n self.logging.info('------------------------------------------------------------------')\n return True\n return False",
"def _is_terminal(self):\n return self.spacecraft.crashed or self.steps >= self.config[\"duration\"]",
"def done(self):\n # Only recheck if we found new paths\n if len(self.found) > self._last:\n self._last = len(self.found)\n x = self._check_path(self.found[-1])\n if x is not None:\n self.leaks.append(x)\n\n # Stop if we have enough paths\n if len(self.leaks) >= self.num_leaks:\n return True\n\n # Delegate the decision to the superclass's method\n return super(SExplorer, self).done",
"def is_using_terminal(self):\n return self.using_terminal",
"def is_terminal(self) -> bool:\n return is_dead_state(self.as_2d_array(), self.storage_locations) or self.is_solved()",
"def found(raise_error: bool = False) -> bool:\n raise NotImplementedError",
"def _is_terminal(self):\n raise NotImplementedError",
"def recover(self):\n eh = SimpleErrorHandler()\n\n self._client.execute('recover', eh=eh)\n\n return bool(eh)",
"def __verify_exit_path(self, position=None):\n\n # Start from entrance if no position provided.\n if not position:\n position = self.__entrance\n\n # Create search queue, traversed list and return condition.\n search_queue = deque()\n search_queue.append(position)\n traversed = [[False for _ in range(self.__col_count)]\n for _ in range(self.__row_count)]\n can_exit = False\n\n # Add first position to traversed\n row, col = position\n traversed[row][col] = True\n\n count = 0\n # Loop through the queue, adding new positions.\n while search_queue:\n # Grab first position.\n count += 1\n row, col = search_queue.popleft()\n # Check if room is exit.\n if [row, col] == self.__exit:\n can_exit = True\n break\n # Add positions to queue if valid.\n if (self.__grid[row][col].up and\n self.__can_enter([row - 1, col], traversed)):\n search_queue.append([row - 1, col])\n traversed[row - 1][col] = True\n if (self.__grid[row][col].right and\n self.__can_enter([row, col + 1], traversed)):\n search_queue.append([row, col + 1])\n traversed[row][col + 1] = True\n if (self.__grid[row][col].down and\n self.__can_enter([row + 1, col], traversed)):\n search_queue.append([row + 1, col])\n traversed[row + 1][col] = True\n if (self.__grid[row][col].left and\n self.__can_enter([row, col - 1], traversed)):\n search_queue.append([row, col - 1])\n traversed[row][col - 1] = True\n return can_exit",
"def _check(self) -> bool:\n path, base_path = self.list_path[-1]\n if \"override\" in path:\n return True\n command = \"cd {} && docker-compose config\".format(\n os.path.dirname(get_path(path, base_path))\n )\n ret = console.run(command, get_stdout=False, silent=True)\n if not ret:\n console.run(command)\n return ret",
"def performBacktrackSearch(self, rootNode, node):\r\n \r\n print (\"-- proc --\", node.state.assignment)\r\n \r\n #check if we have reached goal state\r\n if node.state.checkGoalState():\r\n print (\"reached goal state\")\r\n return True\r\n \r\n else:\r\n \r\n #check if there is a case of early failure\r\n #if node.state.forwardCheck(): \r\n if node.state.arcConsistency():\r\n \r\n #find an unassigned variable \r\n variable = node.state.selectUnassignedVariable()\r\n \r\n #for all values in the domain\r\n for value in node.state.orderDomainValues():\r\n \r\n #check if constraints are satisfied\r\n if CSP.checkConstraints(node.state.assignment,\r\n variable, value):\r\n \r\n #create child node\r\n childNode = Node(State(node.state.assignment, \r\n node.state.possibleValues, variable, value))\r\n \r\n node.addChild(childNode)\r\n \r\n #show the search tree explored so far\r\n treeplot = TreePlot()\r\n treeplot.generateDiagram(rootNode, childNode)\r\n \r\n result = self.performBacktrackSearch(rootNode, childNode)\r\n if result == True:\r\n return True\r\n return False",
"def terminal_configured():\n return lnp.userconfig.get('terminal_type') is not None",
"def fat_isinitialized(location):\n\n with utils.cd(location):\n with open(os.devnull, 'w') as devnull:\n try:\n cmd = '/usr/bin/git config --local --get filter.fat.smudge'\n subprocess.check_call(cmd, stdout=devnull, shell=True)\n return True\n except subprocess.CalledProcessError as e:\n if e.returncode == 1:\n return False\n raise e",
"def is_terminal(node):\n if is_checkmate(node.state):\n return True\n elif is_draw(node.state, node.state.history):\n return True\n else:\n return False",
"def check_running(self, fail_on_error=True):\n self._update_mount_state()\n if self._mount_state[\"unmounted\"] or self._mount_state[\"nodirectory\"]:\n self.log.error(\n \"dfuse not running on %s\",\n str(self._mount_state[\"unmounted\"].union(self._mount_state[\"nodirectory\"])))\n if fail_on_error:\n raise CommandFailure(\"dfuse not running\")\n return False\n if self._mount_state[\"rogue\"]:\n self.log.error(\"rogue dfuse processes on %s\", str(self._mount_state[\"rogue\"]))\n if fail_on_error:\n raise CommandFailure(\"rogue dfuse processes detected\")\n return False\n return True",
"def rrt_search(self):\n self.tree.AddVertex(self.start_config)\n self.tree.AddEdge(self.start_config, self.start_config)\n\n while True:\n x_new, x_nearest = self.new_and_near()\n if x_new is None:\n # print(\"it's None\")\n continue\n # connect shortest valid edge\n # print(\"new point\", x_new)\n self.connect_to_point(x_nearest, x_new)\n\n # probabilistically check if solution found\n if self.goal_config in self.tree.vertices:\n print(\"find it\")\n path = self.planning_env.reconstruct_path(self.tree.edges, self.start_config, self.goal_config)\n if path is not None:\n return path\n\n if self.name=='rrtstar' and self.tree.samples_taken > 10:\n return []\n # # check if can connect to goal after generating max_samples\n if self.tree.samples_taken >= self.tree.max_samples:\n return []",
"def is_terminal(self):\n return self.terminal",
"def started_path(self):\n if self.ros_node.get_data('/diff_drive/path_achieved') is None:\n return False\n return not self.ros_node.get_data('/diff_drive/path_achieved')",
"def check_known_issues_and_attempt_fix(self, output):\n self.composite_logger.log_debug(\"Output from package manager containing error: \\n|\\t\" + \"\\n|\\t\".join(output.splitlines()))\n self.composite_logger.log_debug(\"\\nChecking if this is a known error...\")\n for error in self.known_errors_and_fixes:\n if error in output:\n self.composite_logger.log_debug(\"\\nFound a match within known errors list, attempting a fix...\")\n self.known_errors_and_fixes[error]()\n return True\n\n self.composite_logger.log_debug(\"\\nThis is not a known error for the extension and will require manual intervention\")\n return False",
"def isTerminalFinished(self):\n return self.f4 is 'F'"
] | [
"0.542259",
"0.542259",
"0.5314984",
"0.53074527",
"0.52782655",
"0.5156155",
"0.5131019",
"0.51145136",
"0.5065869",
"0.50623226",
"0.49963906",
"0.49698153",
"0.48765576",
"0.4871227",
"0.48605087",
"0.48404363",
"0.4815467",
"0.47997767",
"0.46984255",
"0.46935907",
"0.4687863",
"0.46854582",
"0.46810916",
"0.46723545",
"0.46646997",
"0.46314862",
"0.46292838",
"0.46267527",
"0.4623506",
"0.4605798"
] | 0.5593158 | 0 |
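As a hedged aside on the error-recovery row above: the sketch below shows the same panic-mode idea (advance the position until an expected terminal matches) in a self-contained form. The ToyHead and ToyRecoverer classes and the regex of expected terminals are hypothetical stand-ins for the parser's real head object and _next_token scanner, which are not defined in this excerpt.

import re


# Hypothetical stand-ins: ToyHead mimics a parse head (input string plus position),
# ToyRecoverer mimics the parser object that owns _next_token.
class ToyHead:
    def __init__(self, input_str):
        self.input_str = input_str
        self.position = 0
        self.token_ahead = None


class ToyRecoverer:
    # Hypothetical set of expected terminals: integers and a few operators.
    TOKEN_RE = re.compile(r"\d+|[+*()]")

    def _next_token(self, head):
        # Return the terminal starting exactly at head.position, if any.
        m = self.TOKEN_RE.match(head.input_str, head.position)
        return m.group(0) if m else None

    def default_error_recovery(self, head):
        # Panic mode: skip characters until some expected terminal matches.
        while head.position < len(head.input_str):
            head.position += 1
            token = self._next_token(head)
            if token:
                head.token_ahead = token
                return True
        return False


if __name__ == "__main__":
    head = ToyHead("@@@12+3")
    ok = ToyRecoverer().default_error_recovery(head)
    print(ok, head.position, head.token_ahead)  # True 3 12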
Finds all the minimal functional dependencies X -> rhs with X a subset of lhs. Usually lhs = U \ rhs, where U is the set of attributes in the relation. The idea of this function is to eliminate unnecessary computation using the fact that, if the fd X -> E does not hold, then Y -> E does not hold for any subset Y of X. db_partition does not play an important role in this function; it is simply passed to the test function (test_fds, or test_fds_test in test mode) that determines whether a given fd is satisfied by the data in db_partition. | def find_fds_rhs(lhs, rhs, db_partition, test_mode=False):
x = {tuple(lhs)}
e0 = set() # set with the non-satisfied fds
e1 = set() # set with the satisfied fds
    set_len = len(lhs)
    while len(x) != 0 and set_len > 0:
level = set() # each level tries the proper subsets of X with length len(X)-1
for subx in x:
if test_mode:
test = test_fds_test(list(subx), rhs, db_partition)
else:
test = test_fds(list(subx), rhs, db_partition)
if not test:
e0 = e0.union([subx])
else:
e1 = set(remove_super_sets(list(subx), e1)) # removes redundancy in e1
e1 = e1.union([subx])
level = level.union([subx])
level = set(subsets(list(level), set_len - 1)) # obtain the next level
e0 = set(subsets(list(e0), set_len - 1))
x = prune(level, e0) # removes the cases that are not satisfiable by means of e0
set_len -= 1
return [list(x) for x in list(e1)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_LHSs(rhs, attrs, df, partitions, accuracy, masks):\n lhs_attrs = attrs.difference(set([rhs]))\n seeds = nodes_from_seeds(sorted(list(lhs_attrs)))\n min_deps = LHSs(lhs_attrs)\n max_non_deps = LHSs(lhs_attrs)\n trace = []\n while seeds != []:\n node = seeds[0] # should this actually be random?\n while node is not None:\n\n if node.visited:\n if node.is_candidate():\n if node.is_dependency():\n if node.is_minimal():\n min_deps.add_dep(node.attrs)\n else:\n if node.is_maximal():\n max_non_deps.add_dep(node.attrs)\n node.update_dependency_type(min_deps, max_non_deps)\n\n else:\n node.infer_type()\n if node.category == 0:\n if compute_partitions(df, rhs, node.attrs, partitions, accuracy, masks):\n if node.is_minimal():\n min_deps.add_dep(node.attrs)\n node.category = 2\n else:\n node.category = 3\n else:\n if node.is_maximal():\n max_non_deps.add_dep(node.attrs)\n node.category = -2\n else:\n node.category = -3\n node.visited = True\n\n node = pick_next_node(node, trace, min_deps, max_non_deps, df.columns)\n\n seeds = nodes_from_seeds(sorted(generate_next_seeds(max_non_deps, min_deps, lhs_attrs)))\n return min_deps",
"def compute_partitions(df, rhs, lhs_set, partitions, accuracy, masks):\n # for approximate dependencies see TANE section 2.3s\n if accuracy < 1:\n return approximate_dependencies(list(lhs_set), rhs, df, accuracy, masks)\n part_rhs = partition(lhs_set.union(set([rhs])), df, partitions)\n # if part_rhs > df.shape[0] * rep_percent:\n # return False\n return part_rhs == partition(lhs_set, df, partitions)",
"def dfd(df, accuracy, index=None):\n partitions = {}\n masks = Masks(df.columns)\n non_uniq = set(df.columns)\n unique_attrs = set()\n dependencies = DfdDependencies(df.columns)\n for i in non_uniq.copy():\n if df[i].is_unique or i == index:\n unique_attrs.add(i)\n non_uniq.remove(i)\n dependencies.add_unique_lhs(i)\n for i in tqdm(non_uniq):\n lhss = find_LHSs(i, non_uniq, df, partitions, accuracy, masks)\n dependencies.add_LHSs(i, lhss)\n return dependencies",
"def controller(df, func):\n # Initialization: Generate computational graph for each attribute which will be on RHS\n schema = df.columns\n computational_graph = dict()\n FDs = []\n for RHS in schema:\n computational_graph[RHS] = generate_computational_graph(RHS, schema)\n\n for level in range(3):\n # Get current level candidates\n current_level_candidates = dict()\n for RHS in computational_graph.keys():\n current_level_candidates[RHS] = get_candidates(level, computational_graph[RHS])\n\n # print('candidates:',current_level_candidates)\n # Use current_level candidates as an input to FD-functions for each level, func will return discovered (soft/delta)functional dependencies\n tFDs = func(level, df, current_level_candidates)\n # print('FDs:',tFDs)\n # print(tFDs)\n FDs.extend(tFDs)\n # Transform res into a dictionary where key: RHS value: a list of LHS where candidates are in the form of sets\n current_level_result = transform_res(tFDs)\n # print(current_level_result)\n\n # Prune graphs according to feedback of FD-functions\n # print(f\"level:{level}, computatioanl_graph_key:{computational_graph.keys()},current_level_result_key:{current_level_result.keys()}\")\n for RHS in computational_graph.keys():\n if RHS in current_level_result.keys():\n computational_graph[RHS] = prune_graph(level, current_level_result[RHS], computational_graph[RHS])\n\n return FDs",
"def approximate_dependencies(lhs_set, rhs, df, accuracy, masks):\n df_lhs_rhs = df.drop_duplicates(lhs_set + [rhs])\n df_lhs = df_lhs_rhs.drop_duplicates(lhs_set)\n # if df_lhs.shape[0] > df.shape[0] * rep_percent:\n # return False\n\n limit = df.shape[0] * (1 - accuracy)\n if df_lhs_rhs.shape[0] - df_lhs.shape[0] > limit:\n return False\n\n merged = df_lhs.merge(df_lhs_rhs, indicator=True, how='outer') # create new df that is the merge of df_one and df_two\n indicator = merged[merged['_merge'] == 'right_only'] # filter out the rows that were only on the right side (the rows that are preventing the two dataframes from being equal)\n indicator = indicator.drop_duplicates(lhs_set) # find unique combinations of columns in LHS_set that characterize the disrepencies (have 2+ different values in rhs column)\n acc = 0\n\n for index, row in indicator.iterrows():\n\n mask = None\n for attr in lhs_set:\n\n m = masks.get_mask(attr, row[attr])\n if m is None:\n if df[attr].dtypes.name == 'datetime64[ns]':\n m = df[attr] == row[attr]\n else:\n m = df[attr].values == row[attr]\n masks.add_mask(attr, row[attr], m)\n if mask is None:\n mask = m\n else:\n mask = mask & m\n options = df[mask]\n _, unique_counts = numpy.unique(options[rhs].to_numpy(), return_counts=True)\n acc += unique_counts.sum() - unique_counts.max()\n if acc > limit:\n return False\n # idea: try using numpy arrays and taking intersections of sets for each column????\n return True",
"def decompose_bcnf_tree(root: Node, fds: List[FunctionalDependency]) -> Node:\n def _decompose_bcnf_tree(node, fds):\n # Check if we are violating BCNF for any functional dependency in fds\n relation = node.value\n for x, y in fds:\n _closure = closure(x, fds)\n _is_superkey = _closure == relation.attributes\n\n if not _is_superkey: # Do we violate BCNF?\n # Partitions of relation\n r1 = _closure\n r2 = (relation.attributes - _closure).union(x)\n\n # Compute functional dependencies of partitioned dependencies\n fd1 = list(fd_projection(r1, fds))\n fd2 = list(fd_projection(r2, fds))\n\n node.left = Node(Relation(relation.name + '1', r1),\n node.depth + 1)\n node.right = Node(Relation(relation.name + '2', r2),\n node.depth + 1)\n\n _decompose_bcnf_tree(node.left, fd1)\n _decompose_bcnf_tree(node.right, fd2)\n\n _decompose_bcnf_tree(root, fds)\n return root",
"def run_checks(df):\n # Find flowpath column\n fpcol = [x for x in df.columns if x.startswith(\"fp\")][0]\n gordcol = [x for x in df.columns if x.startswith(\"gord\")][0]\n fplencol = [x for x in df.columns if x.startswith(\"fpLen\")][0]\n gdf = df.groupby(fpcol).agg([np.min, np.max])\n # collapse multiindex\n gdf.columns = list(map(\"\".join, gdf.columns.values))\n # Check that grid order starts at 1 and goes to at least 5\n df2 = gdf[(gdf[f\"{gordcol}amin\"] > 1) | (gdf[f\"{gordcol}amax\"] < 5)]\n cull = []\n if not df2.empty:\n for fpath, row in df2.iterrows():\n print(\n \"GORDER_CHECK FAIL %s %s min:%s max:%s, culling\"\n % (\n gordcol,\n fpath,\n row[f\"{gordcol}amin\"],\n row[f\"{gordcol}amax\"],\n )\n )\n cull.append(fpath)\n # Check that fpLen is monotonic\n for fpath, gdf in df.groupby(fpcol):\n res = gdf[fplencol].values[1:] - gdf[fplencol].values[:-1]\n if not all(res > 0):\n print(\n \"FPLEN %s for %s not monotonic, culling %s\"\n % (fplencol, fpath, min(res))\n )\n cull.append(fpath)\n\n if cull:\n print(\"culling %s\" % (cull,))\n df = df[~df[fpcol].isin(cull)]\n return df",
"def fd_projection(attributes: Set[A],\n fds: List[FunctionalDependency]) -> \\\n Iterator[FunctionalDependency]:\n for x in powerset(attributes):\n for b in attributes.intersection(closure(x, fds) - x):\n yield FunctionalDependency(x, {b})",
"def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"",
"def conditional_fptree(candidate, fptree, head_pointers, min_support, partial, items, initial_counts):",
"def evaluate_and_prune(dfs_codes, mapper, projection, max_length, feature_selection_model):\n\tglobal __pattern_set\n\tglobal __dataset\n\tglobal __min_threshold\n\tglobal __min_index\n\n\t# subgraph pattern and vector representation\n\tg = projection_to_graph(dfs_codes, mapper)\n\tvector = projection_to_vector(projection)\n\t# constraints checking\n\tml = check_ml_constraints(vector)\n\tcl = check_cl_constraints(vector)\n\tif not ml:\n\t\t# Must-Link stops search\n\t\treturn True\n\tif not cl:\n\t\t# Cannot-Link only skips current pattern\n\t\treturn False\n\n\tadd_eval = feature_selection_model.score(vector)\n\tprune_eval = feature_selection_model.upper_bound(vector)\n\tlogging.log(logging.DEBUG, \"Score of %s: %s\" %(g, add_eval))\n\tlogging.log(logging.DEBUG, \"Upperbound of %s: %s\" %(g, prune_eval))\n\n\t# evaluate current pattern set\n\tif len(__dataset) < max_length or add_eval > __min_threshold:\n\t\t__dataset.append(vector)\n\t\t__pattern_set.append(g)\n\t\tif add_eval < __min_threshold:\n\t\t\t__min_threshold = add_eval\n\t\t\t__min_index = len(__pattern_set) - 1\n\tif len(__dataset) > max_length:\n\t\t__dataset.pop(__min_index)\n\t\t__pattern_set.pop(__min_index)\n\t\t__min_index, __min_threshold = get_min(feature_selection_model.score)\n\tif prune_eval < __min_threshold:\n\t\treturn True\n\treturn False",
"def DRBFS(self, flow, edge_mark):\n\t\t# Distance flag for each node\n\t\td = {v:float('inf') for v in self.topo.nodes}\n\t\t# Parent node for each node\n\t\tpa = {v:-1 for v in self.topo.nodes}\n\t\t# Request info\n\t\ts = flow[0]\n\t\tt = flow[1]\n\n\t\t# BFS to find a min-hop path\n\t\tqueue = [s]; hdr = 0; d[s] = 0\n\t\twhile hdr < len(queue):\n\t\t\tu = queue[hdr]\n\t\t\thdr += 1\n\n\t\t\tfor v in self.topo.topo.neighbors(u):\n\t\t\t\tif edge_mark[(u, v)] or d[v] <= d[u] + 1:\n\t\t\t\t\tcontinue\n\t\t\t\tqueue.append(v)\n\t\t\t\td[v] = d[u] + 1\n\t\t\t\tpa[v] = u\n\t\t\t\tif v == t:\n\t\t\t\t\t# This is because when BFS on edges, the first time reaching t meaning the smallest hop it can be reached\n\t\t\t\t\thdr = len(queue)\n\t\t\t\t\tbreak\n\n\t\tif d[t] == float('inf'):\n\t\t\treturn False\n\n\t\tp = [t]; v = t\n\t\twhile v != s and v != -1:\n\t\t\tv = pa[v]\n\t\t\tp.append(v)\n\t\tp.reverse()\n\n\t\treturn p",
"def test_dfs():\r\n assert DFS(valid_graph, sorted(list(valid_graph.get_graph().nodes))[0]) == \\\r\n list(nx.dfs_preorder_nodes(valid_graph.get_graph(), sorted(list(valid_graph.get_graph().nodes))[0]))",
"def Min(Fun, p, ubRes, conj):\n d = Fun.degree()\n AffFun = Fun.dehomogenize(1)\n R = AffFun.coordinate_ring()\n if R.is_field():\n #want the polynomial ring not the fraction field\n R = R.ring()\n F = R(AffFun[0].numerator())\n G = R(AffFun[0].denominator())\n dG = G.degree()\n if dG > (d+1)/2:\n lowerBound = (-2*(G[dG]).valuation(p)/(2*dG - d + 1) + 1).floor()\n else:\n lowerBound = (-2*(F[d]).valuation(p)/(d-1) + 1).floor()\n upperBound = 2*(ubRes.valuation(p))\n\n if upperBound < lowerBound:\n #There are no possible transformations to reduce the resultant.\n return Fun,conj\n else:\n #Looping over each possible k, we search for transformations to reduce the\n #resultant of F/G\n k = lowerBound\n Qb = PolynomialRing(QQ,'b')\n b = Qb.gen(0)\n Q = PolynomialRing(Qb,'z')\n z = Q.gen(0)\n while k <= upperBound:\n A = (p**k)*z + b\n Ft = Q(F(A) - b*G(A))\n Gt = Q((p**k)*G(A))\n Fcoeffs = Ft.coefficients(sparse=False)\n Gcoeffs = Gt.coefficients(sparse=False)\n coeffs = Fcoeffs + Gcoeffs\n RHS = (d + 1)*k/2\n #If there is some b such that Res(phi^A) < Res(phi), we must have ord_p(c) >\n #RHS for each c in coeffs.\n #Make sure constant coefficients in coeffs satisfy the inequality.\n if all( QQ(c).valuation(p) > RHS for c in coeffs if c.degree() ==0 ):\n #Constant coefficients in coeffs have large enough valuation, so check\n #the rest. We start by checking if simply picking b=0 works\n if all(c(0).valuation(p) > RHS for c in coeffs):\n #A = z*p^k satisfies the inequalities, and F/G is not minimal\n #\"Conjugating by\", p,\"^\", k, \"*z +\", 0\n newconj = matrix(QQ,2,2,[p**k,0,0,1])\n minFun = Fun.conjugate(newconj)\n conj = conj*newconj\n minFun.normalize_coordinates()\n return minFun, conj\n\n #Otherwise we search if any value of b will work. We start by finding a\n #minimum bound on the valuation of b that is necessary. See Theorem 3.3.5\n #in [Molnar, M.Sc. thesis].\n bval = max([bCheck(coeff,RHS,p,b) for coeff in coeffs if coeff.degree() > 0])\n\n #We scale the coefficients in coeffs, so that we may assume ord_p(b) is\n #at least 0\n scaledCoeffs = [coeff(b*(p**bval)) for coeff in coeffs]\n\n #We now scale the inequalities, ord_p(coeff) > RHS, so that coeff is in\n #ZZ[b]\n scale = QQ(max([coeff.denominator() for coeff in scaledCoeffs]))\n normalizedCoeffs = [coeff*scale for coeff in scaledCoeffs]\n scaleRHS = RHS + scale.valuation(p)\n\n #We now search for integers that satisfy the inequality ord_p(coeff) >\n #RHS. See Lemma 3.3.6 in [Molnar, M.Sc. thesis].\n bound = (scaleRHS+1).floor()\n bool,sol = blift(normalizedCoeffs,bound,p)\n\n #If bool is true after lifting, we have a solution b, and F/G is not\n #minimal.\n if bool:\n #Rescale, conjugate and return new map\n bsol = QQ(sol*(p**bval))\n #\"Conjugating by \", p,\"^\", k, \"*z +\", bsol\n newconj = matrix(QQ,2,2,[p**k,bsol,0,1])\n minFun = Fun.conjugate(newconj)\n conj = conj*newconj\n\n minFun.normalize_coordinates()\n return minFun, conj\n k = k + 1\n return Fun, conj",
"def dp_partition(edges, to_add=[], to_remove=[]):\n if not edges:\n return to_add, [edge_id for edge_id in to_remove if edge_id is not None]\n\n \"\"\" Take the minimum of two results:\n - merge the first two edges, and consider all remaining edges\n - do not merge the first edge, and consider all remaining edges. \"\"\"\n\n \"\"\" Possibility 1: Do not merge the first two edges. \n Result: Partition on all of the remaining edges. Add the current edge to to_add, \n and the current edge to to_remove. \"\"\"\n skip_edge = dp_partition(edges[1:], to_add + [edges[0]], to_remove + [edges[0][2]])\n\n \"\"\" Possibility 2: Merge the first two edges. \n Result: Partition the newly merged edge with all of the remaining edges, we add \n nothing to to_add because the merged edge may be merged again, \n and we remove the two edges which were merged. \"\"\"\n try:\n merge_edge = dp_partition([merge(edges[0], edges[1])] + edges[2:], to_add,\n to_remove + [edges[0][2]] + [edges[1][2]])\n except (AssertionError, IndexError) as exception:\n \"\"\" Either the first two edges in the pool cannot be merged, or there is only one edge remaining\n in the pool. In both cases, partition without merging. \"\"\"\n merge_edge = skip_edge\n\n \"\"\" Return the result which adds the fewest edges. \"\"\"\n return min(merge_edge, skip_edge, key=lambda pair: len(pair[0]))",
"def check_fc(constraint, variable):\n values = []\n\n # Get the current scope\n variables = constraint.get_scope()\n\n # Track the unassigned value and create a list of values for the constraint we're checking\n for var in variables:\n values.append(var.get_assigned_value())\n # None is the index of the uninitialized value\n variable_index = values.index(None)\n\n # Pruned values\n pruned_variables = []\n\n # Check each domain, prune those that do not satisfy it\n for domain in variable.cur_domain():\n values[variable_index] = domain\n if not constraint.check(values):\n variable.prune_value(domain)\n pruned_variables.append((variable, domain))\n\n # Check for domain wipeout\n if variable.cur_domain_size() != 0:\n return (False, pruned_variables)\n elif variable.cur_domain_size() == 0:\n return (True, pruned_variables)",
"def generate_next_seeds(max_non_deps, min_deps, lhs_attrs):\n seeds = set()\n if max_non_deps.all_sets() == set():\n seeds = lhs_attrs.difference(min_deps.all_sets().pop())\n else:\n for nfd in max_non_deps.all_sets():\n nfd_compliment = lhs_attrs.difference(nfd)\n if len(seeds) == 0:\n seeds = nfd_compliment\n else:\n seeds = seeds.intersection(nfd_compliment)\n for x in min_deps.all_sets():\n seeds = seeds.difference(x)\n return list(seeds)",
"def make_dependence_cmp():\r\n\r\n depends = make_depends()\r\n\r\n def dependence(a, b):\r\n \"\"\" A cmp function for nodes in a graph - does a depend on b?\r\n\r\n Returns positive number if a depends on b\r\n Returns negative number if b depends on a\r\n Returns 0 otherwise\r\n \"\"\"\r\n if depends((a, b)):\r\n return 1\r\n if depends((b, a)):\r\n return -1\r\n return 0\r\n\r\n return dependence",
"def get_candidate_sets(F, F_1, option=1):\r\n # option 1: F_{k-1} x F_1\r\n C = []\r\n for singleton in F_1:\r\n for subset in F:\r\n if max(singleton) > max(subset):\r\n C.append(subset.union(singleton))\r\n return C",
"def sqf_part(f):\n return f.per(dmp_sqf_part(f.rep, f.lev, f.dom))",
"def test_components_dff_sorting():\n \n ic = IC(\"Mixed\", {\"in\": 1}, {\"out\": 1})\n nand1 = Nand()\n dff1 = DFF()\n nand2 = Nand()\n ic.wire(Connection(root, \"in\", 0), Connection(nand1, \"a\", 0))\n ic.wire(Connection(root, \"in\", 0), Connection(nand1, \"b\", 0))\n ic.wire(Connection(nand1, \"out\", 0), Connection(dff1, \"in_\", 0))\n ic.wire(Connection(dff1, \"out\", 0), Connection(nand2, \"a\", 0))\n ic.wire(Connection(dff1, \"out\", 0), Connection(nand2, \"b\", 0))\n ic.wire(Connection(nand2, \"out\", 0), Connection(root, \"out\", 0))\n\n # Note: relative order of the Nands doesn't really matter here\n assert ic.flatten().sorted_components() == [nand2, nand1, dff1]",
"def partition(functions: Sequence[FilterFN],\n values: chex.ArrayTree,\n strict: bool = False):\n\n vals, struct = jax.tree_util.tree_flatten(values)\n\n def get_name(k, v):\n del v\n return k\n\n keys = jax.tree_util.tree_leaves(map_named(get_name, \"\", values))\n keys = [str(i) for i, v in enumerate(vals)]\n if not strict:\n functions = list(functions) + [lambda k, v: True]\n\n partitions = [[] for _ in functions]\n names = [[] for _ in functions]\n\n for k, v in zip(keys, vals):\n has_got = False\n for fi, f in enumerate(functions):\n if f(k, v):\n partitions[fi].append(v)\n names[fi].append(k)\n has_got = True\n break\n assert has_got, f\"No matching found for: {k}\"\n data_to_restore = (tuple(keys), tuple(names), struct)\n return partitions, PartitionUnflatten(data_to_restore)",
"def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)",
"def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)",
"def better_partition_parallel(graph, part1, part2, independent_set_extraction_strategy):\n\n best = part2\n for i in range(len(part1)):\n if better_partition(graph, part1[i], best, independent_set_extraction_strategy):\n best = part1[i]\n\n return best",
"def subset_pass_constraints(df):\n # All the constraints strings to test against. Must follow regex.\n # Keys: user-firendly constraint name, used for pass/fail bool column\n # Values: strings to test against\n accept_constraints = {\n \"pass_del_a_mu\": r\"Muon magn\\. mom\\. more than 2 sigma away\",\n \"pass_relic\": r\"Relic density too small \\(Planck\\)\",\n \"pass_bctaunu\": r\"b \\-> c tau nu more than 2 sigma away \\(as SM\\)\",\n \"pass_chi2zz\": r\"chi2\\(H\\->ZZ\\) > 6\\.18\",\n \"pass_chi2bb\": r\"chi2\\(H\\->bb\\) > 6\\.18\",\n \"pass_chi2gg\": r\"chi2\\(H\\->gg\\) > 6\\.18\",\n \"pass_cms4mu\": r\"Excluded H_125\\->AA\\->4mu \\(CMS\\)\"\n }\n for k, v in accept_constraints.iteritems():\n df[k] = ~df.constraints.str.contains(v)\n\n # We want a bitmask, so for each entry we simply want a True or False\n # First make a copy of the constraints Series\n con_series = df.constraints.copy(deep=True)\n # Now for each entry we remove the constraints we don't mind failing\n for c in accept_constraints.values():\n con_series = con_series.str.replace(c, \"\")\n # con_series = con_series.str.replace(r\"^\\|+$\", \"\") # Any leftover separators\n con_series = con_series.apply(lambda x: x.strip('|'))\n con_series = con_series.str.replace(r\"\\|\\|+\", r\"|\") # Any multiple separators\n # Now figure out which ones are empty\n mask = con_series.str.match(\"^$\")\n # Return those entries, allowing for a +ve muon mag moment contribution\n return df[mask & (df.Del_a_mu > 0)]",
"def check_requirements_feasibility(self, requirements):\n\n subgraphs = []\n\n # FIXME need to fix this if there are multiple sources\n feasibilities = []\n for requirement in requirements:\n visited_cells = {}\n feasible, cs_root = self._check_requirement_feasibility(requirement.root, 0,\n self.sources[0], visited_cells, 0)\n feasibilities.append(feasible)\n\n if feasible == True:\n subgraph = ConveyanceSubgraph(cs_root[0][1])\n subgraphs.append(subgraph)\n\n print(\"Requirement '%s' feasibility: %r\" % (requirement.name,\n feasible))\n print()\n\n return feasibilities, subgraphs",
"def FuzzyBallGraph(partition, q):\n from sage.graphs.generators.basic import CompleteGraph\n if len(partition)<1:\n raise ValueError(\"partition must be a nonempty list of positive integers\")\n n=q+sum(partition)\n g=CompleteGraph(n)\n curr_vertex=0\n for e,p in enumerate(partition):\n g.add_edges([(curr_vertex+i, 'a{0}'.format(e+1)) for i in range(p)])\n curr_vertex+=p\n return g",
"def quickbb(graph, fast=True):\n\n \"\"\"Given a permutation of the nodes (called an elimination ordering),\n for each node, remove the node and make its neighbors into a clique.\n The maximum degree of the nodes at the time of their elimination is\n the width of the tree decomposition corresponding to that ordering.\n The treewidth of the graph is the minimum over all possible\n permutations.\n \"\"\"\n\n best = Solution() # this gets around the lack of nonlocal in Python 2\n best.count = 0\n\n def bb(graph, order, f, g):\n best.count += 1\n if len(graph) < 2:\n if f < best.ub:\n assert f == g\n best.ub = f\n best.order = list(order) + list(graph)\n\n else:\n vs = []\n for v in graph:\n # very important pruning rule\n if simplicial(graph, v) or almost_simplicial(graph, v) and len(graph[v]) <= lb:\n vs = [v]\n break\n else:\n vs.append(v)\n\n for v in vs:\n graph1 = copy_graph(graph)\n eliminate_node(graph1, v)\n order1 = order + [v]\n # treewidth for current order so far\n g1 = max(g, len(graph[v]))\n # lower bound given where we are\n f1 = max(g, lower_bound(graph1))\n if f1 < best.ub:\n bb(graph1, order1, f1, g1)\n return\n\n graph = {u: set(graph[u]) for u in graph}\n\n order = []\n best.ub, best.order = upper_bound(graph)\n lb = lower_bound(graph)\n\n # This turns on the branch and bound algorithm that\n # gets better treewidth results, but takes a lot\n # longer to process\n if not fast:\n if lb < best.ub:\n bb(graph, order, lb, 0)\n\n # Build the tree decomposition\n tree = defaultdict(set)\n\n def build(order):\n if len(order) < 2:\n bag = frozenset(order)\n tree[bag] = set()\n return\n v = order[0]\n clique = graph[v]\n eliminate_node(graph, v)\n build(order[1:])\n for tv in tree:\n if clique.issubset(tv):\n break\n bag = frozenset(clique | {v})\n tree[bag].add(tv)\n tree[tv].add(bag)\n\n build(best.order)\n return tree",
"def rules_dnf(self):\n base = self.only_one_root() + self.one_in_each()\n group_by = 4\n while len(base) > 1:\n base_help = []\n for i in range(0, len(base), group_by):\n form = And(*[f for f in base[i: i+group_by] ])\n base_help.append(form.to_dnf())\n base = base_help\n\n return base[0]"
] | [
"0.62974924",
"0.5865606",
"0.5735515",
"0.5554457",
"0.53682923",
"0.5271781",
"0.52280307",
"0.5218041",
"0.52048236",
"0.50691766",
"0.48889202",
"0.48751596",
"0.48490342",
"0.4838506",
"0.4830531",
"0.48198992",
"0.4782947",
"0.4776574",
"0.4747381",
"0.47241384",
"0.47232172",
"0.47067875",
"0.46733993",
"0.46733993",
"0.4664917",
"0.46412873",
"0.46406806",
"0.4625306",
"0.46202844",
"0.46079373"
] | 0.66442513 | 0 |
Finds the subsets of cardinality k for each element (set) of the list x | def subsets(x, k):
sub_set = set()
for i in x:
sub_set = sub_set.union(set(combinations(i, k)))
return list(sub_set) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def k_subsets(set_, k):\n ensure_countable(set_)\n\n if not isinstance(k, Integral):\n raise TypeError(\"subset cardinality must be a number\")\n if not (k >= 0):\n raise ValueError(\"subset cardinality must be positive\")\n if not (k <= len(set_)):\n raise ValueError(\"subset cardinality must not exceed set cardinality\")\n\n result = combinations(set_, k)\n return _harmonize_subset_types(set_, result)",
"def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs",
"def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)",
"def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res",
"def k_random_subsets(x, y, k):\n if k > len(y):\n raise Exception(\n \"Cannot split a dataset into more folds than it has rows.\")\n if k < 2:\n raise Exception(\"Cannot split a dataset into fewer than 2 fold.\")\n # Randomly shuffle dataset\n y = [[i] for i in y]\n z = np.append(x, y, axis=1)\n np.random.seed(0)\n np.random.shuffle(z)\n x = z[:, :-1]\n y = z[:, -1]\n # Create k equally sized subsets from the randomly sorted dataset\n subset_size = int(len(y) / k)\n remainder = len(y) - (subset_size * k)\n folds_x = list()\n folds_y = list()\n start = 0\n end = subset_size\n for i in range(k):\n fold_x = list(x[start:end])\n fold_y = list(y[start:end])\n folds_x.append(fold_x)\n folds_y.append(fold_y)\n start += subset_size\n end += subset_size\n\n for i in range(remainder):\n folds_x[i].append(x[-i])\n folds_y[i].append(y[-i])\n\n folds_x = np.array(folds_x).astype(np.int)\n folds_y = np.array(folds_y)\n return folds_x, folds_y",
"def all_subsets_of_size(L, size):\r\n pass # Left as an exercise for the reader\r",
"def subsets(n):\n binary = lambda x: x>0 and binary(x>>1) + [x&1] or []\n pad = lambda l: [0]*(n-len(l)) + l #Always returns a list of length 'n'\n return [pad(binary(i)) for i in range(1, 2**n)]",
"def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n - k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1",
"def get_subset(h, k, elements):\n n = len(elements)\n maxM = binom(n, k) - 1\n ans = [0] * k\n a = n\n b = k\n # x is the \"dual\" of h.\n x = maxM - h\n for i in range(0, k):\n ans[i] = largestV(a, b, x)\n x -= binom(ans[i], b)\n a = ans[i]\n b = b - 1\n ans = [elements[(n - 1) - ans[i]] for i in range(0, k)]\n return ans",
"def sets(elements, set_size):\n return combinations(elements, set_size)",
"def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])",
"def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])",
"def S(k):\n\n from scipy.special import binom as binomial\n\n TrueHset = [0]\n if k > 1:\n for j in np.arange(k, 0, -1, dtype=int):\n TrueHset = list(set(TrueHset) | set([binomial(j, 2) + x for x in S(k - j)]))\n return TrueHset",
"def get_frequent_subsets(recipes, min_sup=15, min_score=3.5, max_size=3):\r\n # C_k denotes candidate subsets size k\r\n # F_k denotes frequent subsets size k\r\n F_1 = [{t} for t in range(len(recipes.columns)) if np.sum(recipes.iloc[:,t]) > min_sup]\r\n freq_subsets = []\r\n subs_scores = []\r\n print(\"|F_1| = %d\" % (len(F_1)))\r\n F_k = F_1\r\n k = 1\r\n while len(F_k) > 0:\r\n k += 1\r\n C_k = get_candidate_sets(F_k, F_1)\r\n scores = get_log_scores(recipes, C_k)\r\n freq_i = [i for i in range(len(C_k)) if scores[i] >= np.log(min_score)]\r\n F_k = [C_k[i] for i in freq_i]\r\n freq_subsets += F_k\r\n subs_scores += [scores[i] for i in freq_i]\r\n print(\"|F_%d| = %d\" % (k, len(F_k)))\r\n if k == max_size: break ###\r\n return freq_subsets, subs_scores",
"def gen_k_ary_ind_from_cliques(k: int, E: Iterable[Edge]) -> FrozenSet[Edge]:\n result = set()\n for i in E:\n result.update(map(Edge, itertools.permutations(i, k)))\n return frozenset(result)",
"def power_set_efective(seq,k_min,k_max):\n \n seq = list(seq)\n \n #Empty set or one element sets\n if len(seq) <= 1:\n yield seq\n yield []\n\n else:\n for item in power_set(seq[1:]):\n if (len([seq[0]]+item) <= k_max and len([seq[0]]+item) >= k_min):\n yield [seq[0]]+item\n if (len(item) <= k_max and len(item) >= k_min): \n yield item",
"def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))",
"def find_subarrays(nums, k):\n res = pre_sum = 0\n dic = {0: 1}\n for i in nums:\n pre_sum += i\n res += dic.get(pre_sum - k, 0)\n dic[pre_sum] = dic.get(pre_sum, 0) + 1\n return res",
"def subsets(self):\n return set(self.subset_map.values())",
"def subset_gen(itemSet):\n subsets = []\n for i in range(1, len(itemSet)):\n c = combinations(itemSet, r=i)\n for cc in c:\n subsets.append(set(cc))\n return subsets",
"def movie_subset(k):\n return np.array(movies)[np.random.permutation(len(movies))[:k]].tolist()",
"def subsets(lst):\n\tsubSet = [[]]\n\tfor element in lst:\n\t\tfor s in subSet[:]:\n\t\t\tsubSet.append(s.copy())\n\t\t\ts.append(element)\n\treturn subSet",
"def combo(N,K):\n assert type(N)==list\n assert type(K)==int\n for k in N:\n assert type(k)==int\n assert K>0 and K<=len(N)\n \n main_combo = []\n #Finds the power list of the inputted list and loops through the power list for lists with length 'K'.\n for l in power_list(N):\n if len(l)==K:\n main_combo.append(l)\n return main_combo #Returns a list of list combinations with length 'K'.",
"def generate_candidates(L_k, k):\n candidates = []\n\n # Iterate over every possible pair of transactions and \n # append their union to candidates if the union is \n # one element larger than an itemset in L_k \n # (emulate self joining L_k)\n candidates = set()\n for item in itertools.combinations(L_k, 2):\n union_ = frozenset(item[0].union(item[1]))\n if len(union_) == k+1:\n candidates.add(union_)\n \n # Convert candidates into a list with each candidate converted to custom set\n candidates = [CandidateItem(candidate) for candidate in candidates]\n\n # Prune\n candidates_to_remove = []\n for candidate in candidates:\n # if there's any itemset of size k in each candidate that is not in L_k, add it to the\n # list of candidates to be removed\n if any([c for c in itertools.combinations(candidate, k) if not any([L for L in L_k if len(set(c) & set(L)) == k])]):\n candidates_to_remove.append(candidate)\n \n for i in candidates_to_remove:\n candidates.remove(i)\n \n return candidates",
"def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])",
"def kmer_set(s, k):\n kmer = set([])\n n = len(s)\n #n-k+1 is the available range of values or probablities.\n for x in range(0, n - k + 1):\n kmer.add(s[x:x + k])\n return kmer",
"def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:\n if not nums or len(nums) < k:\n return False\n if sum(nums) % k != 0:\n return False\n nums.sort(reverse=True) # 倒排更快\n set_sum = [0] * k\n average_sum = sum(nums) // k\n\n def dfs(index):\n if index == len(nums):\n return True\n for i in range(k):\n set_sum[i] += nums[index]\n if set_sum[i] <= average_sum and dfs(index + 1):\n return True\n set_sum[i] -= nums[index]\n if set_sum[i] == 0: # 如果这个数不符合条件就没必要尝试别的空篮子,速度提高很多\n break\n return False\n\n return dfs(0)",
"def get_subsets(arr, n, value):\n result = []\n # return immediately if there is no possible subset in arr whose sum is equal to value\n if dp[n][value] == False:\n return\n \n queue = deque()\n queue.append(Pair(n, value, set()))\n\n while len(queue) > 0:\n pair = queue.popleft()\n if pair.i == 0 or pair.j == 0:\n result.append([arr[i] for i in pair.path_set])\n else:\n exclude = dp[pair.i - 1][pair.j]\n if exclude:\n queue.append(Pair(pair.i-1, pair.j, pair.path_set))\n\n if pair.j >= arr[pair.i-1]:\n include = dp[pair.i - 1][pair.j - arr[pair.i -1]]\n if include:\n b = pair.path_set.copy()\n b.add(pair.i - 1)\n queue.append(Pair(pair.i - 1, pair.j-arr[pair.i-1], b))\n \n return result",
"def get_candidate_sets(F, F_1, option=1):\r\n # option 1: F_{k-1} x F_1\r\n C = []\r\n for singleton in F_1:\r\n for subset in F:\r\n if max(singleton) > max(subset):\r\n C.append(subset.union(singleton))\r\n return C",
"def big_selections(lst: List[int], n: int) -> List[List[int]]:\n if not lst:\n return [[]]\n else:\n holder = [lst.copy()]\n for i in range(len(lst)):\n l2 = lst.copy()\n l2.pop(i)\n for item in selections(l2):\n if item not in holder and sum(item) >= n:\n holder.append(item)\n return holder"
] | [
"0.7720204",
"0.72854334",
"0.7243107",
"0.7168222",
"0.69899297",
"0.69096446",
"0.68885064",
"0.68423504",
"0.6712827",
"0.6528964",
"0.64861095",
"0.64861095",
"0.6458022",
"0.64492786",
"0.6430704",
"0.6393705",
"0.6287829",
"0.62864715",
"0.62657607",
"0.62625057",
"0.6261158",
"0.62096155",
"0.62047625",
"0.61909837",
"0.61887234",
"0.61867213",
"0.61737514",
"0.61621284",
"0.6093883",
"0.60744417"
] | 0.839665 | 0 |
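As a hedged aside on the subsets row above: a minimal, runnable sketch of the same helper, assuming only itertools.combinations; the candidate attribute sets in the demo are hypothetical.

from itertools import combinations


# Copy of the subsets helper from the row above, kept here so the demo is self-contained.
def subsets(x, k):
    # For each element of x (itself an iterable of attributes), collect all
    # k-element combinations, de-duplicated across elements.
    sub_set = set()
    for i in x:
        sub_set = sub_set.union(set(combinations(i, k)))
    return list(sub_set)


if __name__ == "__main__":
    # Hypothetical candidate left-hand sides of size 3.
    candidates = [("A", "B", "C"), ("B", "C", "D")]
    print(sorted(subsets(candidates, 2)))
    # [('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'D')]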
Tests whether the fd lhs -> rhs is satisfied by fds. This function is only for testing purposes. | def test_fds_test(lhs, rhs, fds):
closure = fds.attribute_closure(lhs)
return rhs[0] in closure | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __gt__(self, vs) -> bool:\n return vs <= self",
"def fp_gt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x > y",
"def _cmp_fstruct(self, s1, s2, frac_tol, mask):\n if len(s2) > len(s1):\n raise ValueError(\"s1 must be larger than s2\")\n if mask.shape != (len(s2), len(s1)):\n raise ValueError(\"mask has incorrect shape\")\n\n return is_coord_subset_pbc(s2, s1, frac_tol, mask)",
"def __ge__(self,f2):\n return self > f2 or self == f2",
"def gte(cls, lhs, rhs):\n return lhs >= rhs",
"def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2",
"def is_gt(lhs, rhs, assumptions=None):\n return fuzzy_not(is_le(lhs, rhs, assumptions))",
"def __le__(self,f2):\n return not self > f2 or self == f2",
"def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True",
"def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True",
"def __gt__(self, rhs: Union[float, Simpy]) -> list[bool]:\n mask: list[bool] = []\n if isinstance(rhs, float):\n for item in self.values:\n mask.append(item > rhs)\n else:\n assert len(self.values) == len(rhs.values)\n for i in range(len(self.values)):\n mask.append(self.values[i] > rhs.values[i])\n return mask",
"def finf(lhs, rhs, epsilon=0.00001):\n return rhs-lhs > epsilon",
"def check_flow_conditions(triple, fwd, rev, overlap):\n\n p1, p2, p3 = [self.paths[triple[x]] for x in [0,1,2]]\n p2_end_index = len(p2) - rev + overlap\n #print(\"p2_end_index = {}\".format(p2_end_index))\n p1_start_index = fwd + 1\n #print(\"p1_start_index = {}\".format(p1_start_index))\n #print(\"p1 subset: {}\".format(p1[p1_start_index - 1:]))\n #print(\"p2 subset = {}\".format(p2[:p2_end_index]))\n p_prime = p2[:p2_end_index] + p1[p1_start_index - 1:]\n #print(\"p_prime = {}\".format(p_prime))\n\n # try to rebalance\n if flow_condition(p1, p2, triple):\n print(\"Rebalance opportunity found. Now rebalancing.\")\n self.rebalances += 1\n return(True)\n\n # try to splice and merge\n if flow_condition(p_prime, p3, triple):\n print(\"Splice+merge opportunity found. Now splicing.\")\n self.splices += 1\n return(True)\n\n return(False)",
"def __gt__(self,f2):\n return self.__num * f2.den > self.__den * f2.num",
"def _check_flow_consistencity (sg_map, fr_sg):\n if isinstance(fr_sg, Flowrule):\n flowclass = NFFGToolBox._extract_flowclass(fr_sg.match.split(\";\"))\n else:\n flowclass = fr_sg.flowclass\n consistent = True\n if sg_map[fr_sg.id][2] != flowclass:\n consistent = False\n if (sg_map[fr_sg.id][3] is None or sg_map[fr_sg.id][3] == float(\"inf\")) != \\\n (fr_sg.bandwidth is None or fr_sg.bandwidth == float(\"inf\")):\n # If not both of them are None\n consistent = False\n elif (sg_map[fr_sg.id][3] is not None) and (fr_sg.bandwidth is not None):\n if consistent and math.fabs(sg_map[fr_sg.id][3] - fr_sg.bandwidth) > 1e-8:\n consistent = False\n if (sg_map[fr_sg.id][4] is None or sg_map[fr_sg.id][4] == 0.000000000) != \\\n (fr_sg.delay is None or fr_sg.delay == 0.0000000000):\n # If not both of them are None\n consistent = False\n elif (sg_map[fr_sg.id][4] is not None) and (fr_sg.delay is not None):\n if math.fabs(sg_map[fr_sg.id][4] - fr_sg.delay) > 1e-8:\n consistent = False\n if not consistent:\n raise RuntimeError(\"Not all data of a Flowrule equal to the other \"\n \"Flowrules of the sequence for the SGHop %s! Or the\"\n \" SGHop to be added differs in data from the existing\"\n \" SGHop!\" % fr_sg.id)",
"def find_fds_rhs(lhs, rhs, db_partition, test_mode=False):\n x = {tuple(lhs)}\n e0 = set() # set with the non-satisfied fds\n e1 = set() # set with the satisfied fds\n set_len = lhs.__len__()\n while x.__len__() != 0 and set_len > 0:\n level = set() # each level tries the proper subsets of X with length len(X)-1\n for subx in x:\n if test_mode:\n test = test_fds_test(list(subx), rhs, db_partition)\n else:\n test = test_fds(list(subx), rhs, db_partition)\n if not test:\n e0 = e0.union([subx])\n else:\n e1 = set(remove_super_sets(list(subx), e1)) # removes redundancy in e1\n e1 = e1.union([subx])\n level = level.union([subx])\n\n level = set(subsets(list(level), set_len - 1)) # obtain the next level\n e0 = set(subsets(list(e0), set_len - 1))\n x = prune(level, e0) # removes the cases that are not satisfiable by means of e0\n set_len -= 1\n return [list(x) for x in list(e1)]",
"def compare(self, t2) -> bool:\n return True if self.get_edge(t2) >= 0 else False",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def __gt__(self, other):\n return self.head_vertex > other.head_vertex and self.tail_vertex > other.tail_vertex",
"def __gt__(self, other):\n return self.__f > other.get_f()",
"def __gt__(self, other: Event) -> bool:\n return not self.__le__(other)",
"def all_gt(self, other):\n return self.x > other.x and self.y > other.y",
"def __gt__(self, other):\n return self.weight() > other.weight()",
"def fp_lt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x < y",
"def __gt__(self,other):\r\n\t\tsorted_self = sorted(self.vector, reverse=True) #sort both lists in descending order\r\n\t\tsorted_other = sorted(other, reverse=True) \r\n\t\tcmpflag = False\r\n\t\tfor li1, li2 in zip(sorted_self, sorted_other):\r\n\t\t\tif(li1 > li2):\r\n\t\t\t\tcmpflag = True\r\n\t\treturn cmpflag",
"def __gt__(self, other):\n return self.abs2phy.__gt__(other)",
"def __gt__(self, other):\n return self.abs2phy.__gt__(other)",
"def is_le(lhs, rhs, assumptions=None):\n return is_ge(rhs, lhs, assumptions)",
"def overUseRes(res1, res2):\n \n for i in range(len(res1)):\n if res1[i] > res2[i]:\n return True\n return False",
"def __ge__(self,b):\n\n if (MODE_RELAXED_WITH_ERROR_CHECKING):\n if (isinstance(b,int) | isinstance(b,float)):\n return(self.val() >= b)\n return (self.val() >= b.val())"
] | [
"0.58796936",
"0.57489437",
"0.5731534",
"0.5666526",
"0.5645582",
"0.5642558",
"0.5614026",
"0.5610124",
"0.5603615",
"0.5603615",
"0.55949885",
"0.55658036",
"0.554568",
"0.55397534",
"0.5535892",
"0.5522948",
"0.55229074",
"0.54996073",
"0.5495589",
"0.54946375",
"0.5437182",
"0.5433299",
"0.54229623",
"0.54184246",
"0.54170537",
"0.54131854",
"0.54131854",
"0.54097015",
"0.54089713",
"0.5395267"
] | 0.7160488 | 0 |
Removes the elements in set_of_sets that are supersets of sub_set | def remove_super_sets(sub_set, set_of_sets):
return [x for x in set_of_sets if not set(x).issuperset(set(sub_set))] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)",
"def remove_subset(set_, subset):\n ensure_set(set_)\n ensure_iterable(subset)\n\n for elem in subset:\n set_.remove(elem)",
"def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)",
"def __sub__(self, other):\n if not isinstance(other, (list, Set)):\n raise TypeError(\"only sets can be removed from sets\")\n\n new_set = self._clone()\n\n for element in other:\n new_set.delete(element)\n\n return new_set",
"def subsets(self):\n return set(self.subset_map.values())",
"def prune_sequence(sequence_set, extended_set):\n tmp_set = set()\n for seq in sequence_set:\n # se una sotto-sequenza e' trovata viene ignorata, altrimenti e' aggiunta al set temporaneo\n found = False\n for ext in extended_set:\n if seq1_in_seq2(seq, ext, 0): # eps e' 0 perche' le sequenze sono identiche\n found = True\n break\n if not found:\n tmp_set.add(seq)\n # alla fine aggiungi tutto il set esteso, si puo' includere nel ciclo precedente\n for ext in extended_set:\n tmp_set.add(ext)\n return tmp_set",
"def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs",
"def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s",
"def merge_sets(sets):\n idxs_skipped = []\n n = len(sets)\n for i in range(n-1):\n if i not in idxs_skipped:\n set_i = sets[i]\n for j in range(i+1,n):\n set_j = sets[j]\n if set_i.intersection( set_j ) > set([]):\n sets[i].update( set_j )\n idxs_skipped.append( j )\n sets_u = [ sets[k] for k in np.setdiff1d(range(n), idxs_skipped).astype(np.int) ]\n return sets_u",
"def complement(self, aset):\n return self.all_n.difference(aset)",
"def subsets(lst):\n\tsubSet = [[]]\n\tfor element in lst:\n\t\tfor s in subSet[:]:\n\t\t\tsubSet.append(s.copy())\n\t\t\ts.append(element)\n\treturn subSet",
"def find_proper_subsets(powerset, cardinality_difference = 1, debug = False):\n subset_idx = [] # Which will be set A of the powerset\n superset_idx = [] # Which will be set B of the powerset\n\n for A, a_idx in zip(powerset, list(range(0, len(powerset)))):\n # A_is_proper_subset_of_B = True\n for B, b_idx in zip(powerset[a_idx:], list(range(a_idx, len(powerset)))):\n if len(A) is not len(B)-cardinality_difference:\n continue\n else: # Check every element\n for a in A:\n A_is_proper_subset_of_B = True\n found_a_in_b = False\n for b in B:\n if a is b:\n found_a_in_b = True\n break\n if found_a_in_b is False:\n A_is_proper_subset_of_B = False\n if A_is_proper_subset_of_B:\n if debug:\n print(\"A:\", A, \" is proper subset of B:\", B)\n subset_idx.append(a_idx)\n superset_idx.append(b_idx)\n # return the indecees for the corresponding sets\n #print(\"subset_idx: \", subset_idx)\n #print(\"superset_idx: \", superset_idx)\n return subset_idx, superset_idx",
"def union_sets(S):\n res = set()\n for s in S:\n res |= s\n return res",
"def is_proper_superset(self, other):\n if isinstance(other, Set):\n return self != other and self.is_superset(other)\n else:\n raise ValueError(\"Unknown argument '%s'\" % other)",
"def apply_to_sets(cls, sets):\n for sq_set in sets:\n sqs_with_val = {}\n sqs_by_bitmask = {}\n for sq in iter(sq_set):\n for sq2 in iter(sq_set):\n if sq2.known_value:\n sq.eliminate(sq2)\n\n pvals = sq.possible_values()\n\n if sq.bitmask not in sqs_by_bitmask:\n sqs_by_bitmask[sq.bitmask] = []\n sqs_by_bitmask[sq.bitmask].append(sq)\n\n for val in pvals:\n if val not in sqs_with_val:\n sqs_with_val[val] = []\n sqs_with_val[val].append(sq)\n\n for val, sqs in sqs_with_val.iteritems():\n if len(sqs) == 1:\n sqs[0].set_value(val)\n\n for bm, sqs in sqs_by_bitmask.iteritems():\n if len(sqs) > 1:\n pvals = list(SudokuSquare.bitmask_to_possible_values(bm))\n if len(sqs) == len(pvals):\n for sq in iter(sq_set):\n if sq not in sqs:\n sq.eliminate(sqs[0])",
"def subsets(x, k):\n sub_set = set()\n for i in x:\n sub_set = sub_set.union(set(combinations(i, k)))\n return list(sub_set)",
"def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x",
"def union_of_non_none_sets(sets):\r\n return functools.reduce(lambda x, y: x.union(y), filter(lambda z: z is not\\\r\n None, sets), set())",
"def powerset(a):\n if len(a) == 0:\n return set([frozenset()])\n accumulator = set()\n a = set(a)\n element = a.pop()\n for subset in powerset(a):\n accumulator.add(subset)\n accumulator.add(frozenset(set([element]) | subset))\n return accumulator",
"def merge(lists):\n newsets, sets = [set(lst) for lst in lists if lst], []\n while len(sets) != len(newsets):\n sets, newsets = newsets, []\n for aset in sets:\n for eachset in newsets:\n if not aset.isdisjoint(eachset):\n eachset.update(aset)\n break\n else:\n newsets.append(aset)\n return newsets",
"def is_superset(self, other):\n if isinstance(other, Set):\n return other.is_subset(self)\n else:\n raise ValueError(\"Unknown argument '%s'\" % other)",
"def properSubset(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return lhs < rhs",
"def __sub__(self, vs):\n return [v for v in self.__elements if tuple(v) not in map(tuple, vs)]",
"def sets(elements, set_size):\n return combinations(elements, set_size)",
"def extend_all(element, powerset):\n\n new_elements = set()\n\n for subset in powerset:\n extended_element = subset | frozenset([element])\n set.add(new_elements, extended_element)\n \n return new_elements",
"def subsets(self):\n \n # note subsets have an unusual encoding\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT DISTINCT ?s WHERE {{\n GRAPH <{g}> {{\n ?c oboInOwl:inSubset ?s \n }}\n }}\n \"\"\".format(g=self.graph_name)\n bindings = run_sparql(query)\n return [r['s']['value'] for r in bindings]",
"def is_superset(self, other):\n \n for element in other:\n if element not in self:\n return False\n\n return True",
"def subsets_with_dup(s):\n r = [[]]\n for e in s:\n print 'r: %-55r e: %r' % (e,r)\n for x in r:\n a = sorted(x + [e])\n if not(a in r): r.append(a) \n return r",
"def SetFunction():\r\n s2 = []\r\n s3 = []\r\n s4 = []\r\n s2 = { i for i in range(21) if i%2 == 0}\r\n s3 = { i for i in range(21) if i%3 == 0}\r\n s4 = { i for i in range(21) if i%4 == 0}\r\n s2 = set(s2)\r\n s3 = set(s3)\r\n s4 = set(s4)\r\n print s3.issubset(s2)\r\n print s4.issubset(s2)",
"def test_set(self):\n a = set()\n a.add('b')\n a.add('c')\n a.add('a')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'b', 'c'])\n a.remove('b')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'c'])\n\n a.discard('d')\n\n b = set(['r', 's'])\n d = a.union(b)\n b = list(d)\n b.sort()\n self.assertEqual(b, ['a', 'c', 'r', 's'])"
] | [
"0.7332789",
"0.7161458",
"0.70739305",
"0.6937012",
"0.69058263",
"0.6827412",
"0.66181725",
"0.65181637",
"0.6417106",
"0.63042456",
"0.62337095",
"0.61942023",
"0.6188511",
"0.6163847",
"0.6162156",
"0.6138481",
"0.61357856",
"0.60712695",
"0.6062938",
"0.60583323",
"0.60110617",
"0.59897643",
"0.59749377",
"0.5953733",
"0.5946515",
"0.59441364",
"0.58793664",
"0.58686787",
"0.58534354",
"0.5851701"
] | 0.9116026 | 0 |
Get or set the triangsamples object. The parameter `triangsamples` has to be an instance of the class `spharapy.trimesh.TriMesh`. Setting the triangsamples object will simultaneously check if it is in the correct format. | def triangsamples(self):
return self._triangsamples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sampleTau(self):\n # perform Cholesky factorization Rbar = Lbar.T dot Lbar\n LbarField = self._choleskyRMFields(self.Rbar, self.nCell_cfd)\n # Generate nSample identity random matrix fields, (LFields)\n GFields = self.randomMatrix.sample(self.nSample, self.pOrder)\n for isamp in np.arange(self.nSample):\n GFlatSample_i = self.intf.rm2tau(GFields[isamp, :, :, :], self.nCell_kl)\n self.GSample[isamp, :, :] = GFlatSample_i\n np.savetxt(self.resultDir+'G_samples/G_s'+str(isamp), GFlatSample_i)\n #pdb.set_trace()\n #TODO, mapping mesh (from KL to CFD needed to be considered)\n # For loop to construct Reynolds stress samples (1 : nSamples)\n for isamp in np.arange(self.nSample):\n for icell in np.arange(self.nCell_cfd):\n #pdb.set_trace()\n L_temp = LbarField[icell, :, :]\n G_temp = GFields[isamp, icell, :, :]\n LTG_temp = np.dot(L_temp.T, G_temp)\n LTGL_temp = np.dot(LTG_temp, L_temp)\n self.RFields[isamp, icell, :, :] = LTGL_temp\n # convert rm fields to tau fields (OpenFOAM format)\n for isamp in np.arange(self.nSample):\n # convert R to Tau fields\n TauSample_i = self.intf.rm2tau(self.RFields[isamp, :, :, :], self.nCell_cfd)\n self.TauSample[isamp, :, :] = TauSample_i\n self.deltaTauSample[isamp, :, :] = TauSample_i - self.Taubar\n ## output data\n np.savetxt(self.resultDir+'R_samples/R_s'+str(isamp), TauSample_i)",
"def set_samples(samples):\n if samples is not None and not isinstance(samples, int):\n raise TypeError('samples must be an int or None')\n elif isinstance(samples, int) and samples < 1:\n raise ValueError('samples must be positive')\n else:\n __SETTINGS__._SAMPLES = samples",
"def samples_set(self):\n self.get_samples_set(self.samples_db)\n self.choose_samples(self.chosen_samples_db, self.chosen_hashes)",
"def read_samples(samples_file):\n sample_tuples = []\n try:\n with open(samples_file, 'r') as f:\n for line in f:\n try:\n chip, input=line.strip().split('\\t')\n if len(chip)>0 and len(input)>0:\n sample_tuples.append((chip, input))\n except ValueError: #if there are not two samples on a line\n pass\n return set(sample_tuples)\n except IOError:\n sys.exit(\"I/O Error: Could not find samples file: \"+str(samples_file))",
"def fixture_lims_samples(lims_family: dict) -> List[dict]:\n return lims_family[\"samples\"]",
"def sample(self, temperature=1.0, nsample=100000):\n sample_z, energy_z = self.sample_z(temperature=temperature, nsample=nsample, return_energy=True)\n sample_x, Jzx = self.transform_zxJ(sample_z)\n energy_x = self.energy_model.energy(sample_x) / temperature\n logw = -energy_x + energy_z + Jzx\n\n return sample_z, sample_x, energy_z, energy_x, logw",
"def uniform_samples(self):\n if self._uniform_samples is None:\n if self.is_from_directory:\n uniform_sample_path = f'{self._directory_root}/uniform_points.sdf'\n uniform_samples = gaps_util.read_pts_file(uniform_sample_path)\n # log.info(f'The uniform points have shape {uniform_samples.shape}')\n else:\n uniform_samples = self._archive['uniform_samples']\n self._uniform_samples = np.reshape(uniform_samples,\n [100000, 4]).astype(np.float32)\n return self._uniform_samples",
"def as_trimesh(self, **kwargs):\n kws0 = self.as_array_dict()\n kws = {'vertices': kws0.get('vertices', None),\n 'vertex_colors': kws0.get('vertex_colors', None),\n 'faces': kws0.get('faces', None)}\n if (kws['vertices'] is not None) and (kws['vertices'].shape[1] == 4):\n weights = kws['vertices'][:, 3] * 256 - 1.0\n weights[np.isnan(weights)] = 255\n kws['vertices'] = kws['vertices'][:, :3]\n if kws['vertex_colors'] is not None:\n kws['vertex_colors'] = np.hstack(\n [kws['vertex_colors'], weights[..., None]])\n kws.update(kwargs, process=False)\n return trimesh.base.Trimesh(**kws)",
"def set_Samples(self, value):\n super(GetPathElevationInputSet, self)._set_input('Samples', value)",
"def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]",
"def trial_samples(self):\n if self._trial_samples is None:\n samp = self.steps[0].active[self.replica]\n samples = [samp]\n for step in self.steps:\n rep_trials = [s for s in step.change.trials \n if s.replica==self.replica]\n if len(rep_trials) > 0:\n samples.append(rep_trials[-1])\n\n self._trial_samples = samples\n\n return self._trial_samples",
"def get_samples():\n r = req('GET', SUB_API + 'samples', params=handle_filters())\n samples = []\n for k in demisto.get(r.json(), 'data.items'):\n samples.append(sample_to_readable(k))\n md = tableToMarkdown('ThreatGrid - List of Samples', samples, [\n 'ID', 'Filename', 'State', 'Status', 'MD5', 'SHA1', 'SHA256', 'OS', 'SubmittedAt', 'StartedAt', 'CompletedAt'\n ])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': samples},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })",
"def samples(self, samples):\n\n self._samples = samples",
"def get_samples(self, min_samples):\n raise NotImplementedError",
"def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()",
"def train_samples_for_cls(self, cls):\n train = [t for t in self.get_train_files() if t[2] == cls]\n all_samples = ({'cls': t[2], **_read_mat_data(t[3])} for t in train)\n return (sample for sample in all_samples if sample['signals'] is not None), len(train)",
"def __init__(self, samples):\n self.samples = samples",
"def get_samples_list(self):\n return self.samples_list",
"def sample_trajectories(env_teacher, env_student, policy_teacher, policy_student, min_timesteps_per_batch, max_path_length, render=False, render_mode=('rgb_array')):\n\n # TODO: GETTHIS from HW1\n timesteps_this_batch = 0\n paths_t, paths_s = [], []\n while timesteps_this_batch < min_timesteps_per_batch:\n path_teacher, path_student = sample_trajectory(env_teacher, env_student, policy_teacher, policy_student, max_path_length, render, render_mode)\n paths_t.append(path_teacher)\n paths_s.append(path_student)\n timesteps_this_batch += get_pathlength(path_teacher) + get_pathlength(path_student)\n\n #print(\"OBS\")\n #print([p[\"observation\"].shape for p in paths_s])\n return paths_t, paths_s, timesteps_this_batch",
"def samples(self, gp, Y_metadata=None, samples=1):\n raise NotImplementedError(\"\"\"May be possible to use MCMC with user-tuning, see\n MCMC_pdf_samples in likelihood.py and write samples function\n using this, beware this is a simple implementation\n of Metropolis and will not work well for all likelihoods\"\"\")",
"def sample(self, number_samples: int = 1) -> List[Any]:\n # if prompt is provided, use it\n if self.prompt:\n item = self.model(batch_size=number_samples, prompt=self.prompt)\n else:\n item = self.model(batch_size=number_samples)\n\n # To support old diffusers versions (<0.6.0)\n if DIFFUSERS_VERSION_LT_0_6_0 or self.model_type in [\"geodiff\"]:\n item = item[\"sample\"]\n else:\n item = item.images\n\n return item",
"def samples(self):\n return self._samples",
"def get_tensor_examples_from_custom_input(self, samples):\n tensorizer = Tensorizer(self.config, self.tokenizer)\n tensor_samples = [tensorizer.tensorize_example(sample, False) for sample in samples]\n tensor_samples = [(doc_key, self.convert_to_torch_tensor(*tensor)) for doc_key, tensor in tensor_samples]\n return tensor_samples, tensorizer.stored_info",
"def test_synthesis_mesh(mesh_slepian_wavelets, mesh_field_region) -> None:\n coefficients = sleplet.slepian_methods.slepian_mesh_forward(\n mesh_slepian_wavelets.mesh_slepian,\n u_i=mesh_field_region.coefficients,\n )\n wav_coeffs = sleplet.wavelet_methods.slepian_wavelet_forward(\n coefficients,\n mesh_slepian_wavelets.wavelets,\n mesh_slepian_wavelets.mesh_slepian.N,\n )\n f_p = sleplet.wavelet_methods.slepian_wavelet_inverse(\n wav_coeffs,\n mesh_slepian_wavelets.wavelets,\n mesh_slepian_wavelets.mesh_slepian.N,\n )\n np.testing.assert_allclose(\n np.abs(f_p - coefficients)[: mesh_slepian_wavelets.mesh_slepian.N].mean(),\n 0,\n atol=1e-16,\n )",
"def MeshPyTri(points,facets,*args,**kwargs):\n info = triangle.MeshInfo()\n info.set_points(points)\n info.set_facets(facets)\n\n return triangle.build(info,*args,**kwargs)",
"def save_and_upload_batch_sample_sets(batch_samples, batch_tumors, batch_normals, tsca_id, namespace, workspace):\n # Save to file\n os.system('mkdir -p %s'%tsca_id)\n batch_samples_filename = './%s/fc_upload_sample_set_tsca_%s.txt' % (tsca_id, tsca_id)\n batch_tumors_filename = './%s/fc_upload_sample_set_tsca_%s_tumors.txt' % (tsca_id, tsca_id)\n batch_normals_filename = './%s/fc_upload_sample_set_tsca_%s_normals.txt' % (tsca_id, tsca_id)\n \n batch_samples.to_csv(batch_samples_filename , sep=\"\\t\", index=False )\n batch_tumors.to_csv(batch_tumors_filename , sep=\"\\t\", index=False )\n batch_normals.to_csv(batch_normals_filename , sep=\"\\t\", index=False )\n\n r1 = upload_entities_from_tsv(namespace, workspace, batch_samples_filename)\n r2 = upload_entities_from_tsv(namespace, workspace, batch_tumors_filename)\n r3 = upload_entities_from_tsv(namespace, workspace, batch_normals_filename)\n return (r1, r2, r3)",
"def initmesh(dvmin, dvmax, tlegmin, tlegmax, state0_chaser, n_s0):\n dv = sphere_sampling.dv_sampling(dvmin, dvmax, n_s0) # [n_s0 x 3]\n t = rng.uniform(tlegmin, tlegmax, n_s0) # [n_s0]\n legs = []\n for x, y in zip(dv, t): # create Leg class from the samples\n leg = Leg(x, y, state0_chaser)\n while leg.integration_status != 0: # in case the leg is not valid for the mission, sample another one\n new_dv = sphere_sampling.dv_sampling(dvmin, dvmax, 2)[0] # take first sample, the 2nd one is dv=[0,0,0]\n new_t = rng.uniform(tlegmin, tlegmax, 1)[0] # [0] because we just want the value, not the array\n leg = Leg(new_dv, new_t, state0_chaser)\n legs.append(leg) # array containing the legs created by init.mesh\n return legs",
"def fixture_samples(sample_single) -> Iterator[dict]:\n _samples = []\n sample_id = sample_single[\"sample_id\"]\n for number in range(3):\n sample = copy.deepcopy(sample_single)\n sample[\"sample_id\"] = \"_\".join([sample_id, str(number)])\n _samples.append(sample)\n return _samples",
"def from_trimesh(cls, in_mesh):\n kws = dict(vertices=in_mesh.vertices,\n vertex_colors=in_mesh.visual.vertex_colors,\n faces=in_mesh.faces.astype('int32'))\n weights = (kws['vertex_colors'][:, 3].astype('float32') + 1.0) / 256\n weights[weights == 1.0] = np.NaN\n kws['vertex_colors'] = kws['vertex_colors'][:, :3]\n kws['vertices'] = np.hstack([kws['vertices'], weights[..., None]])\n return cls.from_array_dict(kws)",
"def upload_samples(self, file):\n result = self._upload_sample(file)\n if \"samples\" in result:\n return result[\"samples\"]\n else:\n return [result]"
] | [
"0.55859154",
"0.53676873",
"0.49243876",
"0.4758033",
"0.47214693",
"0.4715205",
"0.46934715",
"0.46811655",
"0.46724606",
"0.45840403",
"0.45816875",
"0.45716864",
"0.4570878",
"0.45601994",
"0.45363176",
"0.4531265",
"0.4520595",
"0.45133793",
"0.44980806",
"0.447963",
"0.4476501",
"0.44590712",
"0.44563618",
"0.4449134",
"0.4448199",
"0.43881238",
"0.43866202",
"0.43791664",
"0.43778893",
"0.43719876"
] | 0.63555366 | 0 |
Return the SPHARA basis for the triangulated sample points. The method determines a SPHARA basis for spatially distributed sampling points described by a triangular mesh. A discrete Laplace-Beltrami operator in matrix form is determined for the given triangular grid. The discretization method for determining the Laplace-Beltrami operator is specified in the | def basis(self):
# lazy evaluation, compute the basis at the first request and store
# it until the triangular mesh or the discretization method is changed
if self._basis is None or self._frequencies is None:
if self.mode == 'fem':
self._massmatrix = (self.triangsamples
.massmatrix(mode='normal'))
stiffmatrix = self.triangsamples.stiffnessmatrix()
self._frequencies, self._basis = linalg.eigh(-stiffmatrix,
self._massmatrix)
# self._basis =
else: # 'unit' and 'inv_euclidean' discretization
laplacianmatrix = (self.triangsamples
.laplacianmatrix(mode=self.mode))
self._frequencies, self._basis = linalg.eigh(laplacianmatrix)
# make a row vector of natural frequencies
# print(self._frequencies)
# self._frequencies = self._frequencies.transpose
# print(self._frequencies.shape)
# return the SPHARA basis
return self._basis, self._frequencies | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_hyperbolic_tiles(self):\n import itertools\n\n s = space(curvature=-1)\n\n # turning constants in radians\n t1_ref = 6.28318530717958647692528676655867\n t2_ref = t1_ref / 2\n t4_ref = t1_ref / 4\n\n def make_triangle(f, v):\n f = t1_ref / f\n v = t1_ref / v / 2\n a = (common_math.cos(f) + 1)/common_math.sin(v)**2 - 1\n a = common_math.sqrt(a**2 - 1)\n b = a / common_math.sin(f) * common_math.sin(v)\n a = common_math.asinh(a)\n b = common_math.asinh(b)\n return a, v, b, f, b, v\n\n for p, q in itertools.product(*[range(3,11)]*2):\n # skip ones that don't result in hyperbolic tilings\n if p * q < 20:continue\n\n a, C, b, A, c, B = make_triangle(p, q)\n\n # try all vertex permutations\n for (a, A), (b, B), (c, C) in itertools.permutations([(a, A), (b, B), (c, C)], 3):\n self.assertTrue(isclose(\n s.cosine_law_side(a, b, C),\n c\n ))\n self.assertTrue(isclose(\n s.cosine_law_angle(a, b, c),\n C\n ))\n self.assertTrue(isclose(\n s.dual_cosine_law_angle(A, B, c),\n C\n ))\n self.assertTrue(isclose(\n s.dual_cosine_law_side(A, B, C),\n c\n ))\n self.assertTrue(isclose(\n s.sine_law_side(a, A, B),\n b\n ))\n self.assertTrue(isclose(\n s.sine_law_angle(a, A, b),\n B,\n rel_tol = 1e-5 # have to go easier on it since asin is really sensitive around 1\n ) or B > t4_ref and isclose( # SSA triangle solving strangeness\n s.sine_law_angle(a, A, b),\n t2_ref - B\n ))\n self.assertTrue(isclose(\n s.triangle_area_from_sides(a, b, c),\n s.triangle_area_from_angles(A, B, C)\n ))",
"def square_bravais_lattice(self,R,lattice_multiplier=1):\n a = lattice_multiplier*self.a\n b = lattice_multiplier*self.b\n c = lattice_multiplier*self.c\n\n #Calculate the number of lattice points needed in each direction to cover a length of R\n #I use the ceiling function so that when I shift the origin by a one unit cell vector,\n #I still cover all lattive points within a distance of R\n Na = int(np.ceil(R/np.linalg.norm(a)))\n Nb = int(np.ceil(R/np.linalg.norm(b)))\n Nc = int(np.ceil(R/np.linalg.norm(c)))\n\n #calculate the number of vertices in a grid that covers the sphere\n #A sphere of radius R fits within a grid of size 2R x 2R x 2R\n #Adding one to account for origin\n number_vertices = (2*Na+1)*(2*Nb+1)*(2*Nc+1)\n vertices = np.empty((number_vertices,3))\n vertex_labels = np.empty(number_vertices ,dtype=int)\n \n # populate the vertices list with the positions of a lattice with single spacing\n n = 0\n for i in np.arange(-Na,Na+1):\n for j in np.arange(-Nb,Nb+1):\n for k in np.arange(-Nc,Nc+1):\n vertices[n]=np.dot([[i,j,k]],[[a[0],a[1],a[2]],[b[0],b[1],b[2]],[c[0],c[1],c[2]]])\n vertex_labels[n] = self.position_map_inverse[(i*lattice_multiplier)%2,(j*lattice_multiplier)%2,(k*lattice_multiplier)%2]\n n += 1\n return vertices, vertex_labels",
"def _triangulate_periodic(self,x):\n\n #1. Tile cell positions 9-fold to perform the periodic triangulation\n # Calculates y from x. y is (9nc x 2) matrix, where the first (nc x 2) are the \"true\" cell positions,\n # and the rest are translations\n y = make_y(x,self.L*self.grid_xy)\n\n\n #2. Perform the triangulation on y\n # The **triangle** package (tr) returns a dictionary, containing the triangulation.\n # This triangulation is extracted and saved as tri\n t = tr.triangulate({\"vertices\": y})\n tri = t[\"triangles\"]\n\n # Del = Delaunay(y)\n # tri = Del.simplices\n n_c = x.shape[0]\n\n #3. Find triangles with **at least one** cell within the \"true\" frame (i.e. with **at least one** \"normal cell\")\n # (Ignore entries with -1, a quirk of the **triangle** package, which denotes boundary triangles\n # Generate a mask -- one_in -- that considers such triangles\n # Save the new triangulation by applying the mask -- new_tri\n tri = tri[(tri != -1).all(axis=1)]\n one_in = (tri<n_c).any(axis=1)\n new_tri = tri[one_in]\n\n #4. Remove repeats in new_tri\n # new_tri contains repeats of the same cells, i.e. in cases where triangles straddle a boundary\n # Use remove_repeats function to remove these. Repeats are flagged up as entries with the same trio of\n # cell ids, which are transformed by the mod function to account for periodicity. See function for more details\n n_tri = self.remove_repeats(new_tri,n_c)\n\n # tri_same = (self.tris == n_tri).all()\n\n #6. Store outputs\n self.n_v = n_tri.shape[0]\n self.tris = n_tri\n self.Cents = x[self.tris]\n self.vs = self.get_vertex_periodic()\n\n #7. Manually calculate the neighbours. See doc_string for conventions.\n n_neigh = get_neighbours(n_tri)\n self.v_neighbours = n_neigh\n self.neighbours = self.vs[n_neigh]",
"def test_special_triangles_euclidean(self):\n import itertools\n\n s = space(curvature=0)\n\n # turning constants in radians\n t1_ref = 6.28318530717958647692528676655867\n t2_ref = t1_ref / 2\n t3_ref = t1_ref / 3\n t4_ref = t1_ref / 4\n t6_ref = t1_ref / 6\n t8_ref = t1_ref / 8\n t12_ref = t1_ref / 12\n # sqrt constants\n sqrt2_ref = 1.41421356237309504880168872420977\n sqrt3_ref = 1.73205080756887729352744634150584\n\n # test with each known triangle\n for a, C, b, A, c, B, m in (\n (1, t6_ref, 1, t6_ref, 1, t6_ref, sqrt3_ref/4), # 1 1 1 (equilateral)\n (1, t4_ref, 1, t8_ref, sqrt2_ref, t8_ref, 1/2), # 1 1 sqrt2 (right isoceles)\n (1, t4_ref, sqrt3_ref, t12_ref, 2, t6_ref, sqrt3_ref/2), # 1 sqrt3 2 (right)\n (1, t3_ref, 1, t12_ref, sqrt3_ref, t12_ref, sqrt3_ref/4), # 1 1 sqrt3 (obtuse isoceles)\n (sqrt2_ref, t8_ref + t6_ref, 2, t12_ref, 1 + sqrt3_ref, t8_ref, (1 + sqrt3_ref)/2) # sqrt2 2 1+sqrt3 (obtuse scalene)\n ):\n # try scaling them up and down too\n for scale in (1, 2, 1/3):\n a *= scale\n b *= scale\n c *= scale\n m *= scale**2\n # go through all vertex permutations\n for (a, A), (b, B), (c, C) in itertools.permutations([(a, A), (b, B), (c, C)], 3):\n self.assertTrue(isclose(\n s.cosine_law_side(a, b, C),\n c\n ))\n self.assertTrue(isclose(\n s.cosine_law_angle(a, b, c),\n C\n ))\n self.assertTrue(isclose(\n s.dual_cosine_law_angle(A, B, c),\n C\n ))\n # skip dual_cosine_law_side because it is not defined in K = 0\n self.assertTrue(isclose(\n s.sine_law_side(a, A, B),\n b\n ))\n self.assertTrue(isclose(\n s.sine_law_angle(a, A, b),\n B,\n rel_tol = 1e-5 # have to go easier on it since asin is really sensitive around 1\n ) or B > t4_ref and isclose( # SSA triangle solving strangeness\n s.sine_law_angle(a, A, b),\n t2_ref - B\n ))\n self.assertTrue(isclose(\n s.triangle_area_from_sides(a, b, c),\n m\n ))",
"def volterra_BM_path_chol(grid_points, M, H, T,rho):\n\n assert 0<H<1.0\n\n ## Step1: create partition\n\n X=np.linspace(0, T, num=grid_points)\n\n # get rid of starting point\n X=X[1:grid_points]\n\n ## Step 2: compute covariance matrix\n size=2*(grid_points-1)\n Sigma=np.zeros([size,size])\n #Sigma(1,1)\n for j in range(grid_points-1):\n for i in range(grid_points-1):\n if i==j:\n Sigma[i,j]=np.power(X[i],2*H)/2/H\n else:\n s=np.minimum(X[i],X[j])\n t=np.maximum(X[i],X[j])\n Sigma[i,j]=np.power(t-s,H-0.5)/(H+0.5)*np.power(s,0.5+H)*special.hyp2f1(0.5-H, 0.5+H, 1.5+H, -s/(t-s))\n #Sigma(1,2) and Sigma (2,1)\n for j in range(grid_points-1):\n for i in range(grid_points-1):\n Sigma[i,j+((grid_points-1))]=rho/(H+0.5)*(np.power(X[i],H+0.5)-np.power(X[i]-np.minimum(X[i],X[j]),H+0.5))\n Sigma[i+(grid_points-1),j]=rho/(H+0.5)*(np.power(X[j],H+0.5)-np.power(X[j]-np.minimum(X[i],X[j]),H+0.5))\n #Sigma(2,2)\n for j in range(grid_points-1):\n for i in range(grid_points-1):\n Sigma[i+(grid_points-1),j+(grid_points-1)]=np.minimum(X[i],X[j])\n\n ## Step 3: compute Cholesky decomposition\n P=np.linalg.cholesky(Sigma)\n\n ## Step 4: draw Gaussian rv\n\n Z=np.random.normal(loc=0.0, scale=1.0, size=[M,2*(grid_points-1)])\n\n ## Step 5: get (V,W) and add 0's in the beginning\n\n V=np.zeros((M,grid_points))\n W=np.zeros((M,grid_points))\n for i in range(M):\n aux=np.dot(P,Z[i,:])\n V[i,1:grid_points]=aux[0:(grid_points-1)]\n W[i,1:grid_points]=aux[(grid_points-1):2*(grid_points-1)]\n\n return V, W",
"def spherical_bravais_lattice(self,R,iNumber,iLetter,jNumber,jLetter,lattice_multiplier=1):\n\n vertices, vertex_labels = self.square_bravais_lattice(R,lattice_multiplier)\n #Shift vertices to be the lattice generated from the jth position\n vertices = vertices + self.position[str(jNumber) + jLetter]\n #Calculate distances from the ith atom to each other atom\n distance = np.sqrt(np.sum(np.power(vertices - self.position[str(iNumber) + iLetter],2),axis=1))\n #only keep the locations of which are within a distance R from ion i\n #I take the intersection with non-zero distances to avoid counting origin when ith and jth ions are equal\n vertices = vertices[(distance < R) & (distance != 0.0)]\n vertex_labels = vertex_labels[(distance < R) & (distance != 0.0)]\n #If this is a lattice of the B ions, then change the vertex labels accordingly\n if jLetter == 'B':\n vertex_labels += 8\n \n return vertices, vertex_labels",
"def test_bilocal(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]], [1, [1.0, 1.0, 1.0] ]])\n atom2rcut=np.array([5.0, 4.0])\n grids = dft.gen_grid.Grids(sv)\n grids.level = 2 # precision as implemented in pyscf\n grids.radi_method=leggauss_ab\n grids.build(atom2rcut=atom2rcut)\n self.assertEqual(len(grids.weights), 20648)",
"def test_elliptic_special_triangles(self):\n import itertools\n\n s = space(curvature=1)\n\n # turning constants in radians\n t1_ref = 6.28318530717958647692528676655867\n t2_ref = t1_ref / 2\n t3_ref = t1_ref / 3\n t4_ref = t1_ref / 4\n t5_ref = t1_ref / 5\n t6_ref = t1_ref / 6\n # random number\n magic = 7.77733337337373737373\n tm_ref = t1_ref / magic\n nagic = magic - 4 # strangely named other magic constant\n tn_ref = t1_ref / nagic\n # tetrahedron edge central angle\n p4_ref = 1.91063323624901855632771420503144 # = acos(-1/3)\n # icosahedron edge central angle\n p20_ref = 1.10714871779409050301706546017856 # = atan(2)\n # area constant\n sm = space(0).sphere_s2(1)\n\n # test with each known triangle\n for a, C, b, A, c, B, m in (\n (t3_ref, t2_ref, t3_ref, t2_ref, t3_ref, t2_ref, sm / 2), # literally a hemisphere, which takes up the entire space\n (t2_ref, t4_ref, t4_ref, t2_ref, t4_ref, t4_ref, sm / 4), # diangle which is 1/4 of the sphere\n (t2_ref, tm_ref, t3_ref, t2_ref, t6_ref, tm_ref, sm / magic), # a different diangle\n (t2_ref, tn_ref, t3_ref, t2_ref, t6_ref, tn_ref, sm / nagic), # a different diangle, obtuse angle this time\n (t4_ref, t4_ref, t4_ref, t4_ref, t4_ref, t4_ref, sm / 8), # triangle with 3 right angles\n (t4_ref, tm_ref, t4_ref, t4_ref, tm_ref, t4_ref, sm / magic / 2), # different slice of the previous one, has 2 right angles\n (t4_ref, tn_ref, t4_ref, t4_ref, tn_ref, t4_ref, sm / nagic / 2), # another one but with an obtuse angle\n (p4_ref, t3_ref) * 3 + (sm / 4,), # regular tetrahedron face, projected onto the sphere\n (p20_ref, t5_ref) * 3 + (sm / 20,) # regular icosahedron face, projected onto the sphere\n ):\n # go through all vertex permutations\n for (a, A), (b, B), (c, C) in itertools.permutations([(a, A), (b, B), (c, C)], 3):\n self.assertTrue(isclose(\n s.cosine_law_side(a, b, C),\n c\n ))\n self.assertTrue(t2_ref in (A, B) or isclose(\n s.cosine_law_angle(a, b, c),\n C,\n rel_tol = 1e-5\n ))\n self.assertTrue(isclose(\n s.dual_cosine_law_angle(A, B, c),\n C,\n rel_tol = 1e-5\n ))\n self.assertTrue(t2_ref in (A, B) or isclose(\n s.dual_cosine_law_side(A, B, C),\n c\n ))\n self.assertTrue(A == t2_ref or isclose(\n s.sine_law_side(a, A, B),\n b,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ) or isclose(\n s.sine_law_side(a, A, B),\n t2_ref - b,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ))\n self.assertTrue(A == t2_ref or isclose(\n s.sine_law_angle(a, A, b),\n B,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ) or B > t4_ref and isclose( # SSA triangle solving strangeness\n s.sine_law_angle(a, A, b),\n t2_ref - B,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ))\n self.assertTrue((A, B, C).count(t2_ref) == 1 or isclose(\n s.triangle_area_from_sides(a, b, c),\n m,\n rel_tol = 1e-5\n ))\n self.assertTrue(isclose(\n s.triangle_area_from_angles(A, B, C),\n m\n ))",
"def new_basis(abc, lattice):\n return np.dot(abc.T, lattice.inv_matrix.T)",
"def triangle_grid(ny, nx, width, amp=1, sig=0, slope_y=None):\n f = nx / width\n t = np.linspace(0, 1, nx)\n y = amp * sawtooth(2 * np.pi * f * t, width=0.5)\n triangle = np.tile(y, (ny, 1))\n triangle += sig * np.random.randn(ny, nx)\n triangle += np.abs(np.min(triangle))\n\n if slope_y:\n y = np.linspace(0, ny, num=ny).reshape(ny, 1)\n X = np.tile(y, (1, nx))\n tilt = slope_y * X\n triangle *= tilt\n\n out_obj = Elevation(nx = nx, ny = ny, dx = 1.0)\n out_obj._griddata = triangle\n return out_obj",
"def SO4_circuit(a_alpha, a_theta, a_beta, b_alpha, b_theta, b_beta):\n # return np.kron(S1_inv, I2) @ np.kron(I2, S1_inv) @ np.kron(I2, R1_inv) @ CNOT2 \\\n # @ np.kron(I2, R_z(b_beta)) @ np.kron(I2, R_y(b_theta)) @ np.kron(I2, R_z(b_alpha)) \\\n # @ np.kron(R_z(a_beta), I2) @ np.kron(R_y(a_theta), I2) @ np.kron(R_z(a_alpha), I2) \\\n # @ CNOT2 @ np.kron(I2, R1) @ np.kron(I2, S1) @ np.kron(S1, I2)\n\n return np.linalg.inv(magic_gate) \\\n @ np.kron(I2, R_z(b_beta)) @ np.kron(I2, R_y(b_theta)) @ np.kron(I2, R_z(b_alpha)) \\\n @ np.kron(R_z(a_beta), I2) @ np.kron(R_y(a_theta), I2) @ np.kron(R_z(a_alpha), I2) \\\n @ magic_gate",
"def snc0_barycentric_function_space(coarse_space):\n from .space import SpaceBuilder\n from scipy.sparse import coo_matrix\n\n number_of_support_elements = coarse_space.number_of_support_elements\n bary_grid_number_of_elements = 6 * coarse_space.grid.number_of_elements\n\n bary_support_elements = 6 * _np.repeat(coarse_space.support_elements, 6) + _np.tile(\n _np.arange(6), number_of_support_elements\n )\n\n bary_support_size = len(bary_support_elements)\n\n support = _np.zeros(6 * coarse_space.grid.number_of_elements, dtype=_np.bool_)\n support[bary_support_elements] = True\n\n normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)\n\n local_coords = _np.array(\n [[0, 0], [0.5, 0], [1, 0], [0.5, 0.5], [0, 1], [0, 0.5], [1.0 / 3, 1.0 / 3]]\n ).T\n\n coeffs = (\n _np.array(\n [\n [1, -1.0 / 3, 0],\n [-1.0 / 3, 1, 0],\n [0, 1.0 / 3, -1.0 / 6],\n [0, 0, 1.0 / 6],\n [0, 0, 1.0 / 6],\n [1.0 / 3, 0, -1.0 / 6],\n ]\n ),\n _np.array(\n [\n [0, 1.0 / 3, -1.0 / 6],\n [0, 0, 1.0 / 6],\n [0, 0, 1.0 / 6],\n [1.0 / 3, 0, -1.0 / 6],\n [1, -1.0 / 3, 0],\n [-1.0 / 3, 1, 0],\n ]\n ),\n _np.array(\n [\n [0, 0, 1.0 / 6],\n [1.0 / 3, 0, -1.0 / 6],\n [1, -1.0 / 3, 0],\n [-1.0 / 3, 1, 0],\n [0, 1.0 / 3, -1.0 / 6],\n [0, 0, 1.0 / 6],\n ]\n ),\n )\n\n coarse_dofs, bary_dofs, values = generate_rwg0_map(\n coarse_space.grid.data(), coarse_space.support_elements, local_coords, coeffs\n )\n\n local2global = _np.zeros((bary_grid_number_of_elements, 3), dtype=\"uint32\")\n local_multipliers = _np.zeros((bary_grid_number_of_elements, 3), dtype=\"uint32\")\n\n local2global[support] = _np.arange(3 * bary_support_size).reshape(\n bary_support_size, 3\n )\n\n local_multipliers[support] = 1\n\n transform = coo_matrix(\n (values, (bary_dofs, coarse_dofs)),\n shape=(3 * bary_support_size, 3 * number_of_support_elements),\n dtype=_np.float64,\n ).tocsr()\n\n dof_transformation = transform @ coarse_space.map_to_localised_space\n\n return (\n SpaceBuilder(coarse_space.grid.barycentric_refinement)\n .set_codomain_dimension(3)\n .set_support(support)\n .set_normal_multipliers(normal_multipliers)\n .set_order(0)\n .set_is_localised(True)\n .set_is_barycentric(True)\n .set_shapeset(\"rwg0\")\n .set_identifier(\"snc0\")\n .set_local2global(local2global)\n .set_local_multipliers(local_multipliers)\n .set_dof_transformation(dof_transformation)\n .set_numba_evaluator(_numba_snc0_evaluate)\n .build()\n )",
"def bcL(self, rng=None):\n if rng is None:\n rng = random.PRNGKey(1)\n n = self.n\n x = onp.sin(self.bcmesh * np.pi)\n n_y = (np.floor((n + 1) / 2) - 1).astype(int)\n if rng is not None:\n coeffs = random.multivariate_normal(rng, np.zeros(16),\n np.diag(np.ones(16)))\n else:\n key = random.randint(random.PRNGKey(1), (1,), 1, 1000)\n coeffs = random.multivariate_normal(\n random.PRNGKey(key[0]), np.zeros(16), np.diag(np.ones(16)))\n left = coeffs[0] * x**3 + coeffs[1] * x**2 + coeffs[2] * x #+ coeffs[3]\n right = coeffs[4] * x**3 + coeffs[5] * x**2 + coeffs[6] * x #+ coeffs[7]\n lower = coeffs[8] * x**3 + coeffs[9] * x**2 + coeffs[10] * x #+ coeffs[11]\n upper = coeffs[12] * x**3 + coeffs[13] * x**2 + coeffs[14] * x #+ coeffs[15]\n shape = 2 * x.shape\n source = onp.zeros(shape)\n source[0, :] = upper\n source[n_y - 1, n_y - 1:] = lower[:n - n_y + 1]\n source[n_y - 1:, n_y - 1] = right[:n - n_y + 1]\n source[:, 0] = left\n source[-1, :n_y - 1] = right[n:n - n_y:-1]\n source[:n_y - 1, -1] = lower[n:n - n_y:-1]\n # because this makes the correct order of boundary conditions\n return source * (n + 1)**2",
"def hexapodZernikeLinearModel():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n \n M22realTrefoil2 = b[:,37] # for x decenter\n M22imagTrefoil1 = b[:,54] \n M22TrefoilXshift = 0.5*(M22realTrefoil2+M22imagTrefoil1)\n\n M22realTrefoil1 = b[:,34] # for y decenter\n M22imagTrefoil2 = b[:,57] \n M22TrefoilYshift = 0.5*(M22realTrefoil1 - M22imagTrefoil2)\n\n M20defocus = b[:,12] # for defocus\n\n M22realComa2 = b[:,36] # for x-tilt\n M22imagComa1 = b[:,55]\n M22ComaXtilt = 0.5*(M22realComa2+M22imagComa1)\n\n M22realComa1 = b[:,35] # for y-tilt\n M22imagComa2 = b[:,56]\n M22ComaYtilt = 0.5*(M22realComa1 - M22imagComa2)\n \n pl.figure(figsize=(21,12))\n pl.subplot(2,3,1)\n t=bp.bin_scatter(M22TrefoilXshift,x,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilXshift,x)\n pl.plot(M22TrefoilXshift,M22TrefoilXshift*res[1]+res[0],'r,')\n pl.ylabel('x-decenter')\n pl.xlabel('(M22realTrefoil2+M22imagTrefoil1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,2)\n t=bp.bin_scatter(M22TrefoilYshift,y,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilYshift,y)\n pl.plot(M22TrefoilYshift,M22TrefoilYshift*res[1]+res[0],'r,')\n pl.ylabel('y-decenter')\n pl.xlabel('(M22realTrefoil1 - M22imagTrefoil2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,3)\n t=bp.bin_scatter(M20defocus,z,nbins=20,fmt='bo',scatter=True)\n res = linefit(M20defocus,z)\n pl.plot(M20defocus,M20defocus*res[1]+res[0],'r,')\n pl.ylabel('z-defocus')\n pl.xlabel('M20defocus')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,4)\n t=bp.bin_scatter(M22ComaXtilt,thetax,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaXtilt,thetax)\n pl.plot(M22ComaXtilt,M22ComaXtilt*res[1]+res[0],'r,')\n pl.ylabel('x-tilt')\n pl.xlabel('(M22realComa2+M22imagComa1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,5)\n t=bp.bin_scatter(M22ComaYtilt,thetay,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaYtilt,thetay)\n pl.plot(M22ComaYtilt,M22ComaYtilt*res[1]+res[0],'r,')\n pl.ylabel('y-tilt')\n pl.xlabel('(M22realComa1 - M22imagComa2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n\n pl.close()",
"def generate_bnd(cli_file, geo_file, slf_file, bnd_file, varnames, varunits):\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ cli+slf new mesh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(cli_file):\n raise TelemacException(\\\n '... the provided cli_file does not seem to exist:'\n ' {}\\n\\n'.format(cli_file))\n if not path.exists(geo_file):\n raise TelemacException(\\\n '... the provided geo_file does not seem to exist: '\n '{}\\n\\n'.format(geo_file))\n\n if len(varnames) != len(varunits):\n raise TelemacException(\\\n 'Not the same number of variables and units\\nvarnames: {}\\nvarunits: {}'\n '{}\\n\\n'.format(varnames, varunits))\n\n\n # Read the new CLI file to get boundary node numbers\n print(' +> getting hold of the Conlim file and of its liquid boundaries')\n cli = Conlim(cli_file)\n # Keeping only open boundary nodes\n bor = np.extract(cli.bor['lih'] != 2, cli.bor['n'])\n\n # Find corresponding (x,y) in corresponding new mesh\n print(' +> getting hold of the GEO file and of its bathymetry')\n geo = Selafin(geo_file)\n xys = np.vstack((geo.meshx[bor-1], geo.meshy[bor-1])).T\n _ = geo.get_variables_at(0,\\\n subset_variables_slf(\"BOTTOM: \", geo.varnames)[0])[0]\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ slf existing res ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(slf_file):\n raise TelemacException(\\\n '... the provided slf_file does not seem to exist: '\n '{}\\n\\n'.format(slf_file))\n slf = Selafin(slf_file)\n slf.set_kd_tree()\n slf.set_mpl_tri()\n\n print(' +> support extraction')\n # Extract triangles and weigths in 2D\n support2d = []\n ibar = 0\n pbar = ProgressBar(maxval=len(xys)).start()\n for xyi in xys:\n support2d.append(xys_locate_mesh(xyi, slf.ikle2, slf.meshx, slf.meshy,\n slf.tree, slf.neighbours))\n ibar += 1\n pbar.update(ibar)\n pbar.finish()\n # Extract support in 3D\n support3d = list(zip(support2d, len(xys)*[range(slf.nplan)]))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n bnd = Selafin('')\n bnd.fole = {}\n bnd.fole.update({'hook':open(bnd_file, 'wb')})\n bnd.fole.update({'name':bnd_file})\n bnd.fole.update({'endian':\">\"}) # big endian\n bnd.fole.update({'float':('f', 4)}) # single precision\n\n # Meta data and variable names\n bnd.title = ''\n bnd.nbv1 = len(varnames)\n # /!\\ ELEVATION has to be the first variable\n # (for possible vertical re-interpolation within TELEMAC)\n\n bnd.varnames = []\n bnd.varunits = []\n for var, unit in zip(varnames, varunits):\n new_var = var + (16-len(var))*\" \"\n new_unit = unit + (16-len(unit))*\" \"\n bnd.varnames.append(new_var)\n bnd.varunits.append(new_unit)\n\n bnd.nvar = bnd.nbv1\n bnd.varindex = range(bnd.nvar)\n\n # Sizes and mesh connectivity\n bnd.nplan = slf.nplan\n # Number of nodes per boundary element (ndp2 in 2D and ndp3 in 3D)\n bnd.ndp2 = 2\n bnd.ndp3 = 4\n bnd.npoin2 = len(bor)\n bnd.npoin3 = bnd.npoin2*slf.nplan\n bnd.iparam = [0, 0, 0, 0, 0, 0, bnd.nplan, 0, 0, 1]\n bnd.ipob2 = bor # /!\\ note that ipobo keeps the original numbering\n print(' +> masking and setting connectivity')\n # Set the array that only includes elements of geo.ikle2\n # with at least two nodes in bor\n array_1d = np.in1d(geo.ikle2, np.sort(bor-1))\n mask = geo.ikle2[np.where(np.sum(array_1d.reshape(geo.nelem2, geo.ndp2),\n axis=1) == 2)]\n # this ikle2 keeps the original numbering\n ikle2 = 
np.ravel(mask)[np.in1d(mask, np.sort(bor-1))].reshape(len(mask), 2)\n # ~~> re-numbering ikle2 as a local connectivity matrix\n knolg, _ = np.unique(np.ravel(ikle2), return_index=True)\n knogl = dict(zip(knolg, range(len(knolg))))\n bnd.ikle2 = - np.ones_like(ikle2, dtype=np.int)\n for k in range(len(ikle2)):\n # /!\\ bnd.ikle2 has a local numbering, fit to the boundary elements\n bnd.ikle2[k] = [knogl[ikle2[k][0]], knogl[ikle2[k][1]]]\n # Last few numbers\n bnd.nelem2 = len(bnd.ikle2)\n if slf.nplan > 1:\n bnd.nelem3 = bnd.nelem2*(slf.nplan-1)\n else:\n bnd.nelem3 = bnd.nelem2\n bnd.ndp3 = bnd.ndp2\n # 3D structures\n if slf.nplan > 1:\n bnd.ipob3 = np.ravel(np.add(np.repeat(bnd.ipob2, slf.nplan)\\\n .reshape((bnd.npoin2, slf.nplan)),\n bnd.npoin2*np.arange(slf.nplan)).T)\n bnd.ikle3 = \\\n np.repeat(bnd.npoin2*np.arange(slf.nplan-1),\n bnd.nelem2*bnd.ndp3)\\\n .reshape((bnd.nelem2*(slf.nplan-1), bnd.ndp3)) + \\\n np.tile(np.add(np.tile(bnd.ikle2, 2),\n np.repeat(bnd.npoin2*np.arange(2), bnd.ndp2)),\n (slf.nplan-1, 1))\n else:\n bnd.ipob3 = bnd.ipob2\n bnd.ikle3 = bnd.ikle2\n # Mesh coordinates\n bnd.meshx = geo.meshx[bor-1]\n bnd.meshy = geo.meshy[bor-1]\n\n print(' +> writing header')\n # Write header\n bnd.append_header_slf()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n print(' +> setting variables')\n # TIME and DATE extraction\n bnd.datetime = slf.datetime\n bnd.tags['times'] = slf.tags['times']\n # VARIABLE extraction\n list_var = varnames[0]+\": \"\n for var in varnames[1:]:\n list_var += \";\"+var+\": \"\n\n vrs = subset_variables_slf(list_var, slf.varnames)\n\n # Read / Write data, one time step at a time to support large files\n print(' +> reading / writing variables')\n pbar = ProgressBar(maxval=len(slf.tags['times'])).start()\n zeros = np.zeros((bnd.npoin3, 1), dtype=np.float)\n for itime in range(len(slf.tags['times'])):\n data = get_value_history_slf(slf.file, slf.tags, [itime], support3d,\n slf.nvar, slf.npoin3, slf.nplan, vrs)\n data = np.reshape(np.transpose(np.reshape(np.ravel(data),\n (bnd.nvar, bnd.npoin2,\n bnd.nplan)),\n (0, 2, 1)),\n (bnd.nvar, bnd.npoin3))\n bnd.append_core_time_slf(itime)\n bnd.append_core_vars_slf(data)\n pbar.update(itime)\n pbar.finish()\n\n # Close bnd_file\n bnd.fole['hook'].close()",
"def get_spline(points):\n import numpy\n import scipy.linalg\n\n # sort points by x value\n points = sorted(points, key=lambda point: point[\"x\"])\n\n n = len(points) - 1\n\n # Set up a system of equations of form Ax=b\n A = numpy.zeros(shape=(4*n, 4*n))\n b = numpy.zeros(shape=(4*n, 1))\n\n for i in range(0, n):\n # 2n equations from condtions (S2)\n A[i][4*i+0] = points[i][\"x\"]**3\n A[i][4*i+1] = points[i][\"x\"]**2\n A[i][4*i+2] = points[i][\"x\"]\n A[i][4*i+3] = 1\n b[i] = points[i][\"y\"]\n\n A[n+i][4*i+0] = points[i+1][\"x\"]**3\n A[n+i][4*i+1] = points[i+1][\"x\"]**2\n A[n+i][4*i+2] = points[i+1][\"x\"]\n A[n+i][4*i+3] = 1\n b[n+i] = points[i+1][\"y\"]\n\n # 2n-2 equations for (S3):\n if i == 0:\n continue\n # point i is an inner point\n A[2*n+(i-1)][4*(i-1)+0] = 3*points[i][\"x\"]**2\n A[2*n+(i-1)][4*(i-1)+1] = 2*points[i][\"x\"]\n A[2*n+(i-1)][4*(i-1)+2] = 1\n A[2*n+(i-1)][4*(i-1)+0+4] = -3*points[i][\"x\"]**2\n A[2*n+(i-1)][4*(i-1)+1+4] = -2*points[i][\"x\"]\n A[2*n+(i-1)][4*(i-1)+2+4] = -1\n b[2*n+(i-1)] = 0\n\n A[3*n+(i-1)][4*(i-1)+0] = 6*points[i][\"x\"]\n A[3*n+(i-1)][4*(i-1)+1] = 2\n A[3*n+(i-1)][4*(i-1)+0+4] = -6*points[i][\"x\"]\n A[3*n+(i-1)][4*(i-1)+1+4] = -2\n b[3*n+(i-1)] = 0\n # Natural spline:\n A[3*n-1+0][0+0] += 6*points[0][\"x\"]\n A[3*n-1+0][0+1] += 2\n b[3*n-1+0] += 0\n\n A[3*n+n-1][4*(n-1)+0] += 6*points[n][\"x\"]\n A[3*n+n-1][4*(n-1)+1] += 2\n b[3*n+n-1] += 0\n\n x = scipy.linalg.solve(A, b)\n spline = []\n for i in range(0, n):\n spline.append({\"u\": points[i][\"x\"], \"v\": points[i+1][\"x\"],\n \"a\": float(x[4*i+0]),\n \"b\": float(x[4*i+1]),\n \"c\": float(x[4*i+2]),\n \"d\": float(x[4*i+3])})\n return spline",
"def phosphorene_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [-s/2, -ay/2, h], 0),\n ('B', [ s/2, -ay/2, 0], 0),\n ('C', [-s/2 + ax/2, 0, 0], 0),\n ('D', [ s/2 + ax/2, 0, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5')\n )\n\n return lat",
"def crystallography(tmat, A, B, ccellA=np.eye(3), ccellB=np.eye(3), planehkl=[1,0,0], diruvw=[1,0,0], fileA=\"input 1\", fileB=\"input 2\", ftf=True):\n \n print(\"----------CRYSTALLOGRAPHY----------\")\n print()\n eigval, U, P, Q = strainDirs(tmat, ftf=ftf)\n\n print(\"Strain Directions in %s (%s) coordinates:\"%(B.name, fileB))\n printMatAndDir(P, ccellB)\n print()\n print(\"Strain Directions in %s (%s) coordinates:\"%(A.name, fileA))\n printMatAndDir(Q, ccellA)\n print()\n print(\"Strains + 1 (eigenvalues)\")\n print(\" e1 e2 e3 \")\n print(' '.join([\"% 5.3f\"%(val) for val in eigval])) \n print()\n\n planeHab, ratio = findHabit(U, P, eigval)\n \n print(\"Uniformly strained planes:\")\n print()\n print(\"Exact plane hkl in %s (%s) coordinates:\"%(B.name, fileB))\n print(\"(+): (% 6.4f, % 6.4f, % 6.4f)\"%(*ccellB.T.dot(planeHab[:,0]),))\n print(\"(-): (% 6.4f, % 6.4f, % 6.4f)\"%(*ccellB.T.dot(planeHab[:,1]),))\n print()\n print(\"Closest hkl:\")\n print(\"(+): (% d, % d, % d)\"%(*find_uvw(planeHab[:,0:1], la.inv(ccellB.T)),))\n print(\"(-): (% d, % d, % d)\"%(*find_uvw(planeHab[:,1:2], la.inv(ccellB.T)),))\n print()\n\n R = findR(U, P=P, planeHab=planeHab, ratio=ratio)\n \n print(\"Orientation Relationship with habit plane:\")\n print()\n orMat = np.zeros((2,3,3))\n resPlanehkl = np.zeros((2,3))\n resDiruvw = np.zeros((2,3))\n for i in range(2):\n orMat[i,:,:] = Q.dot(P.T).dot(R[i,:,:].T)\n resPlanehkl[i,:] = ccellB.T.dot(orMat[i,:,:].dot(la.inv(ccellA.T).dot(planehkl)))\n resDiruvw[i,:] = la.inv(ccellB).dot(orMat[i,:,:].dot(ccellA.dot(diruvw)))\n print(\"%s (%s) // %s (%s)\"%(B.name, fileB, A.name, fileA))\n print(\"(+): (% 2d, % 2d, % 2d) [% 2d, % 2d, % 2d] // (% 6.4f, % 6.4f, % 6.4f) [% 6.4f, % 6.4f, % 6.4f]\"%(*planehkl, *diruvw, *resPlanehkl[0,:], *resDiruvw[0,:]))\n print(\"(-): (% 2d, % 2d, % 2d) [% 2d, % 2d, % 2d] // (% 6.4f, % 6.4f, % 6.4f) [% 6.4f, % 6.4f, % 6.4f]\"%(*planehkl, *diruvw, *resPlanehkl[1,:], *resDiruvw[1,:]))\n print()\n print(\"Approximate low index OR\")\n resPlanehklClose = np.zeros((2,3))\n resDiruvwClose = np.zeros((2,3))\n for i in range(2):\n resPlanehklClose[i,:] = find_uvw(la.inv(ccellB.T).dot(resPlanehkl[i,:].reshape((3,1))), la.inv(ccellB.T)).T[0]\n resDiruvwClose[i,:] = find_uvw(ccellB.dot(resDiruvw[i,:].reshape((3,1))), ccellB).T[0]\n print(\"(+): (% 2d, % 2d, % 2d) [% 2d, % 2d, % 2d] // (% d, % d, % d) [% d, % d, % d]\"%(*planehkl, *diruvw, *resPlanehklClose[0,:], *resDiruvwClose[0,:]))\n print(\"(-): (% 2d, % 2d, % 2d) [% 2d, % 2d, % 2d] // (% d, % d, % d) [% d, % d, % d]\"%(*planehkl, *diruvw, *resPlanehklClose[1,:], *resDiruvwClose[1,:]))\n print()\n \n print(\"Orientation Relationship in thin films:\")\n print()\n orMat = Q.dot(P.T)\n resPlanehkl = ccellB.T.dot(orMat.dot(la.inv(ccellA.T).dot(planehkl)))\n resDiruvw = la.inv(ccellB).dot(orMat.dot(ccellA.dot(diruvw)))\n print(\"%s (%s) // %s (%s)\"%(B.name, fileB, A.name, fileA))\n print(\"(% 2d, % 2d, % 2d) [% 2d, % 2d, % 2d] // (% 6.4f, % 6.4f, % 6.4f) [% 6.4f, % 6.4f, % 6.4f]\"%(*planehkl, *diruvw, *resPlanehkl, *resDiruvw))\n print()\n print(\"Approximate low index OR\")\n resPlanehklClose = find_uvw(la.inv(ccellB.T).dot(resPlanehkl.reshape((3,1))), la.inv(ccellB.T)).T[0]\n resDiruvwClose = find_uvw(ccellB.dot(resDiruvw.reshape((3,1))), ccellB).T[0]\n print(\"(% 2d, % 2d, % 2d) [% 2d, % 2d, % 2d] // (% d, % d, % d) [% d, % d, % d]\"%(*planehkl, *diruvw, *resPlanehklClose, *resDiruvwClose))\n print()\n\n return eigval, U, P, Q, planeHab",
"def diagonal_hessian_guess(self, geom, Z, connectivity, guess_type=\"SIMPLE\"):\n\n logger = logging.getLogger(__name__)\n\n if guess_type == \"SIMPLE\":\n return 0.1\n\n elif guess_type == \"SCHLEGEL\":\n R_BC = v3d.dist(geom[self.B], geom[self.C])\n Rcov = qcel.covalentradii.get(Z[self.B], missing=4.0) + qcel.covalentradii.get(Z[self.C], missing=4.0)\n a = 0.0023\n b = 0.07\n if R_BC > (Rcov + a / b):\n b = 0.0\n return a - (b * (R_BC - Rcov))\n\n elif guess_type == \"FISCHER\":\n R = v3d.dist(geom[self.B], geom[self.C])\n Rcov = qcel.covalentradii.get(Z[self.B], missing=4.0) + qcel.covalentradii.get(Z[self.C], missing=4.0)\n a = 0.0015\n b = 14.0\n c = 2.85\n d = 0.57\n e = 4.00\n\n # Determine connectivity factor L\n Brow = connectivity[self.B]\n Crow = connectivity[self.C]\n Bbonds = 0\n Cbonds = 0\n for i in range(len(Crow)):\n Bbonds = Bbonds + Brow[i]\n Cbonds = Cbonds + Crow[i]\n L = Bbonds + Cbonds - 2\n logger.info(\"Connectivity of central 2 torsional atoms - 2 = L = %d\\n\" % L)\n return a + b * (np.power(L, d)) / (np.power(R * Rcov, e)) * (np.exp(-c * (R - Rcov)))\n\n elif guess_type == \"LINDH_SIMPLE\":\n\n R_AB = v3d.dist(geom[self.A], geom[self.B])\n R_BC = v3d.dist(geom[self.B], geom[self.C])\n R_CD = v3d.dist(geom[self.C], geom[self.D])\n k_tau = 0.005\n\n Lindh_Rho_AB = hguess_lindh_rho(Z[self.A], Z[self.B], R_AB)\n Lindh_Rho_BC = hguess_lindh_rho(Z[self.B], Z[self.C], R_BC)\n Lindh_Rho_CD = hguess_lindh_rho(Z[self.C], Z[self.D], R_CD)\n return k_tau * Lindh_Rho_AB * Lindh_Rho_BC * Lindh_Rho_CD\n\n else:\n logger.warning(\n \"\"\"Hessian guess encountered unknown coordinate type.\\n \n As default, identity matrix is used\"\"\"\n )\n return 1.0",
"def gramschmidt(A):\r\n _, k = A.shape\r\n\r\n # first basis vector\r\n Q = A[:, [0]] / np.linalg.norm(A[:, 0])\r\n for j in range(1, k):\r\n # orthogonal projection, loop-free implementation\r\n q = A[:, j] - np.dot(Q, np.dot(Q.T, A[:, j]))\r\n\r\n # check premature termination\r\n nq = np.linalg.norm(q)\r\n if nq < 1e-9 * np.linalg.norm(A[:, j]):\r\n break\r\n # add new basis vector as another column of Q\r\n Q = np.column_stack([Q, q / nq])\r\n return Q",
"def compute_mesh_laplacian(verts, tris, weight_type='cotangent',\n return_vertex_area=True, area_type='mixed',\n add_diagonal=True):\n if area_type not in ['mixed', 'lumped_mass']:\n raise ValueError('unknown area type: %s' % area_type)\n if weight_type not in ['cotangent', 'mean_value', 'uniform']:\n raise ValueError('unknown weight type: %s' % weight_type)\n\n n = len(verts)\n # we consider the triangle P, Q, R\n iP = tris[:, 0]\n iQ = tris[:, 1]\n iR = tris[:, 2]\n # edges forming the triangle\n PQ = verts[iP] - verts[iQ] # P--Q\n QR = verts[iQ] - verts[iR] # Q--R\n RP = verts[iR] - verts[iP] # R--P\n if weight_type == 'cotangent' or (return_vertex_area and area_type == 'mixed'):\n # compute cotangent at all 3 points in triangle PQR\n double_area = V.veclen(np.cross(PQ, RP))\n cotP = -1 * (PQ * RP).sum(axis=1) / double_area # angle at vertex P\n cotQ = -1 * (QR * PQ).sum(axis=1) / double_area # angle at vertex Q\n cotR = -1 * (RP * QR).sum(axis=1) / double_area # angle at vertex R\n\n # compute weights and indices\n if weight_type == 'cotangent':\n I = np.concatenate(( iP, iR, iP, iQ, iQ, iR))\n J = np.concatenate(( iR, iP, iQ, iP, iR, iQ))\n W = 0.5 * np.concatenate((cotQ, cotQ, cotR, cotR, cotP, cotP))\n\n elif weight_type == 'mean_value':\n # TODO: I didn't check this code yet\n PQlen = 1 / V.veclen(PQ)\n QRlen = 1 / V.veclen(QR)\n RPlen = 1 / V.veclen(RP)\n PQn = PQ * PQlen[:,np.newaxis] # normalized\n QRn = QR * QRlen[:,np.newaxis]\n RPn = RP * RPlen[:,np.newaxis]\n # TODO pretty sure there is a simpler solution to those 3 formulas\n tP = np.tan(0.5 * np.arccos((PQn * -RPn).sum(axis=1)))\n tQ = np.tan(0.5 * np.arccos((-PQn * QRn).sum(axis=1)))\n tR = np.tan(0.5 * np.arccos((RPn * -QRn).sum(axis=1)))\n I = np.concatenate(( iP, iP, iQ, iQ, iR, iR))\n J = np.concatenate(( iQ, iR, iP, iR, iP, iQ))\n W = np.concatenate((tP*PQlen, tP*RPlen, tQ*PQlen, tQ*QRlen, tR*RPlen, tR*QRlen))\n\n elif weight_type == 'uniform':\n # this might add an edge twice to the matrix\n # but prevents the problem of boundary edges going only in one direction\n # we fix this problem after the matrix L is constructed\n I = np.concatenate((iP, iQ, iQ, iR, iR, iP))\n J = np.concatenate((iQ, iP, iR, iQ, iP, iR))\n W = np.ones(len(tris) * 6)\n\n # construct sparse matrix\n # notice that this will also sum duplicate entries of (i,j), \n # which is explicitely assumed by the code above\n L = sparse.csr_matrix((W, (I, J)), shape=(n, n))\n if weight_type == 'uniform':\n # because we probably add weights in both directions of an edge earlier, \n # and the csr_matrix constructor sums them, some values in L might be 2 instead of 1\n # so reset them\n L.data[:] = 1\n # add diagonal entries as the sum across rows\n if add_diagonal:\n L = L - sparse.spdiags(L * np.ones(n), 0, n, n)\n\n if return_vertex_area:\n if area_type == 'mixed':\n # compute voronoi cell areas\n aP = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotQ * (RP**2).sum(axis=1)) # area at point P\n aQ = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotP * (QR**2).sum(axis=1)) # area at point Q\n aR = 1/8. 
* (cotQ * (RP**2).sum(axis=1) + cotP * (QR**2).sum(axis=1)) # area at point R\n # replace by barycentric areas for obtuse triangles\n # TODO area computed previously in cotangent formula, reuse it here?\n triangle_area = .5 * V.veclen(np.cross(PQ, RP))\n for i, c in enumerate([cotP, cotQ, cotR]):\n is_x_obtuse = c < 0 # obtuse at point?\n # TODO: the paper by Desbrun says that we should divide by 1/2 or 1/4,\n # but according to other code I found we should divide by 1 or 1/2\n # check which scheme is correct!\n aP[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 0 else 1/2.)\n aQ[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 1 else 1/2.)\n aR[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 2 else 1/2.)\n area = np.bincount(iP, aP, minlength=n) + \\\n np.bincount(iQ, aQ, minlength=n) + np.bincount(iR, aR, minlength=n)\n\n elif area_type == 'lumped_mass':\n lump_area = V.veclen(np.cross(PQ, RP)) / 6.\n area = sum(np.bincount(tris[:,i], lump_area, minlength=n) for i in range(3))\n\n return L, area\n else:\n return L",
"def surface(self):\n # return sum(np.outer(basis_function, control_point) for basis_function, control_point in zip(self.basis_1, self.basis)).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) + np.outer(basis_function_2, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n\n # x = np.zeros_like(self.xi_1_mesh)\n # y = np.zeros_like(self.xi_1_mesh)\n # z = np.zeros_like(self.xi_1_mesh)\n xyz = np.zeros((*self.xi_1_mesh.shape, 3))\n for (i, basis_function_i), (j, basis_function_j) in itertools.product(enumerate(self.basis_1), enumerate(self.basis_2)):\n print(i, basis_function_i)\n print(j, basis_function_j)\n print(self.control_net[i, j])\n # b1, b2 = np.meshgrid(basis_function_i, basis_function_j, indexing = 'ij')\n control_x, control_y, control_z = self.control_net[i, j]\n # print(b1.shape, b2.shape, np.array(self.control_net[i, j]).shape)\n # print((b1 * b2).shape)\n # z += np.outer(b1 * b2, self.control_net[i, j])\n # print(np.shape(z))\n print(np.outer(basis_function_i, basis_function_j))\n # x += np.outer(basis_function_i, basis_function_j) * control_x\n # y += np.outer(basis_function_i, basis_function_j) * control_y\n # z += np.outer(basis_function_i, basis_function_j) * control_z\n print(np.outer(basis_function_i, basis_function_j).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), self.control_net[i, j]).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), np.array(self.control_net[i, j])).shape)\n r = np.einsum('i,j,k->ijk', basis_function_i, basis_function_j, np.array(self.control_net[i, j]))\n print(r.shape)\n xyz += r\n\n # print(x, y, z)\n\n # return x, y, z\n return xyz",
"def system(L_x, W, L_sc_top, L_sc_bot, w, z_x, z_y, a, periodic, leads=False, transverse_SOI=True):\n # If the system is periodic shorten the length by one lattice constant\n if periodic:\n L_x = L_x - a\n\n template_strings = get_template_strings(transverse_SOI)\n templates = {k: kwant.continuum.discretize(v, coords=('x', 'y'), grid=a)\n for k, v in template_strings.items()}\n shapes = get_zigzag_shape(L_x, W, L_sc_top, L_sc_bot, w, z_x, z_y, a)\n\n if periodic:\n syst = kwant.Builder(kwant.TranslationalSymmetry([L_x + a, 0]))\n else:\n syst = kwant.Builder()\n\n if w == 0:\n normal_sites = syst.fill(templates['normal'], *shapes['normal'])\n\n else:\n norm_top_sites = syst.fill(templates['normal'], *shapes['normal_top'])\n norm_bot_sites = syst.fill(templates['normal'], *shapes['normal_bot'])\n sc_mid_sites = syst.fill(templates['sc_mid'], *shapes['sc_mid'])\n\n if L_sc_top > 0:\n sc_top_sites = syst.fill(templates['sc_top'], *shapes['sc_top'])\n\n if L_sc_bot > 0:\n sc_bot_sites = syst.fill(templates['sc_bot'], *shapes['sc_bot'])\n\n if periodic:\n syst = kwant.wraparound.wraparound(syst)\n\n if leads:\n if z_x != 0 and L_x % z_x != 0:\n raise NotImplementedError(\n 'Horizontal leads for L_x not and integer multiple of z_x are not implemented.', z_x, L_x)\n\n ph = np.kron(sigma_y, sigma_y)\n c_law = np.kron(sigma_0, sigma_z)\n\n lead_left = kwant.Builder(kwant.TranslationalSymmetry(\n [-a, 0]), conservation_law=c_law, particle_hole=ph)\n lead_right = kwant.Builder(kwant.TranslationalSymmetry(\n [a, 0]), conservation_law=c_law, particle_hole=ph)\n\n # Can't use lead.reversed() because the system might not be reflection\n # invariant if it has a zigzag shape\n for lead in [lead_left, lead_right]:\n lead_idx = 0 if lead == lead_left else -1\n x_lead = 0 if lead == lead_left else L_x\n\n lead_shape = shapes['normal_bot'][0] + shapes['normal_top'][0] + \\\n shapes['sc_mid'][0] if w != 0 else shapes['normal'][0]\n lead_shape = (lead_shape[lead_idx:, ::], (x_lead, 0))\n lead.fill(templates['normal'], *lead_shape)\n syst.attach_lead(lead)\n\n return syst.finalized()",
"def rwg0_barycentric_function_space(coarse_space):\n from .space import SpaceBuilder\n from scipy.sparse import coo_matrix\n\n number_of_support_elements = coarse_space.number_of_support_elements\n bary_grid_number_of_elements = 6 * coarse_space.grid.number_of_elements\n\n bary_support_elements = 6 * _np.repeat(coarse_space.support_elements, 6) + _np.tile(\n _np.arange(6), number_of_support_elements\n )\n\n bary_support_size = len(bary_support_elements)\n\n support = _np.zeros(6 * coarse_space.grid.number_of_elements, dtype=_np.bool_)\n support[bary_support_elements] = True\n\n normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)\n\n local_coords = _np.array(\n [[0, 0], [0.5, 0], [1, 0], [0.5, 0.5], [0, 1], [0, 0.5], [1.0 / 3, 1.0 / 3]]\n ).T\n\n coeffs = (\n _np.array(\n [\n [1, -1.0 / 3, 0],\n [-1.0 / 3, 1, 0],\n [0, 1.0 / 3, -1.0 / 6],\n [0, 0, 1.0 / 6],\n [0, 0, 1.0 / 6],\n [1.0 / 3, 0, -1.0 / 6],\n ]\n ),\n _np.array(\n [\n [0, 1.0 / 3, -1.0 / 6],\n [0, 0, 1.0 / 6],\n [0, 0, 1.0 / 6],\n [1.0 / 3, 0, -1.0 / 6],\n [1, -1.0 / 3, 0],\n [-1.0 / 3, 1, 0],\n ]\n ),\n _np.array(\n [\n [0, 0, 1.0 / 6],\n [1.0 / 3, 0, -1.0 / 6],\n [1, -1.0 / 3, 0],\n [-1.0 / 3, 1, 0],\n [0, 1.0 / 3, -1.0 / 6],\n [0, 0, 1.0 / 6],\n ]\n ),\n )\n\n coarse_dofs, bary_dofs, values = generate_rwg0_map(\n coarse_space.grid.data(), coarse_space.support_elements, local_coords, coeffs\n )\n\n local2global = _np.zeros((bary_grid_number_of_elements, 3), dtype=\"uint32\")\n local_multipliers = _np.zeros((bary_grid_number_of_elements, 3), dtype=\"uint32\")\n\n local2global[support] = _np.arange(3 * bary_support_size).reshape(\n bary_support_size, 3\n )\n\n local_multipliers[support] = 1\n\n transform = coo_matrix(\n (values, (bary_dofs, coarse_dofs)),\n shape=(3 * bary_support_size, 3 * number_of_support_elements),\n dtype=_np.float64,\n ).tocsr()\n\n dof_transformation = transform @ coarse_space.map_to_localised_space\n\n return (\n SpaceBuilder(coarse_space.grid.barycentric_refinement)\n .set_codomain_dimension(3)\n .set_support(support)\n .set_normal_multipliers(normal_multipliers)\n .set_order(0)\n .set_is_localised(True)\n .set_is_barycentric(True)\n .set_shapeset(\"rwg0\")\n .set_identifier(\"rwg0\")\n .set_local2global(local2global)\n .set_local_multipliers(local_multipliers)\n .set_dof_transformation(dof_transformation)\n .set_numba_evaluator(_numba_rwg0_evaluate)\n .build()\n )",
"def _triangulate(self,x):\n\n t = tr.triangulate({\"vertices\": x},\"-n\")\n tri = t[\"triangles\"]\n neighbours = t[\"neighbors\"]\n\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n\n three_b_cell_mask = b_cells[tri].sum(axis=1)==3\n tri = tri[~three_b_cell_mask]\n\n neigh_map = np.cumsum(~three_b_cell_mask)-1\n neigh_map[three_b_cell_mask] = -1\n neigh_map = np.concatenate((neigh_map,[-1]))\n\n neighbours = neighbours[~three_b_cell_mask]\n neighbours = neigh_map[neighbours]\n\n #6. Store outputs\n self.tris = tri\n self.n_v = tri.shape[0]\n self.Cents = x[self.tris]\n self.vs = self.get_vertex()\n\n\n #7. Manually calculate the neighbours. See doc_string for conventions.\n self.v_neighbours = neighbours\n self.neighbours = self.vs[neighbours]\n self.neighbours[neighbours == -1] = np.nan\n\n self.reset_k2s()",
"def bezier_surface(A):\r\n n, m, z = A.shape\r\n n, m = n-1, m-1\r\n res = 10\r\n B = np.zeros((res,res,3))\r\n\r\n u = np.linspace(0,1,res)\r\n v = np.linspace(0,1,res)\r\n for i in xrange(res):\r\n for j in xrange(res):\r\n B[i,j,:] = deCasteljua2(A,n,m,u[i],v[j]) \r\n\r\n return B",
"def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall",
"def _create_triangular_filterbank(\n all_freqs: jnp.array,\n f_pts: jnp.array,\n) -> jnp.array:\n # Adopted from Librosa\n # calculate the difference between each filter mid point and each stft freq point in hertz\n f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1)\n slopes = f_pts[jnp.newaxis, Ellipsis] - all_freqs[Ellipsis, jnp.newaxis] # (n_freqs, n_filter + 2)\n # create overlapping triangles\n down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter)\n up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter)\n fb = jnp.maximum(0., jnp.minimum(down_slopes, up_slopes))\n return fb",
"def householder_ls(A, b):\n m, n = A.shape\n Ahat = np.zeros((m, n+1))\n Ahat[:,:n] = 1.0*A\n Ahat[:, n] = 1.0*b\n\n Rhat = householder(Ahat)\n x = solve_triangular(Rhat[:n,:n], Rhat[:n,n])\n\n return x",
"def Bimat(self):\n a, b, c, alpha, beta, gamma = self.lattice_parameters\n alpha = alpha * radians\n beta = beta * radians\n gamma = gamma * radians\n B23 = c*(np.cos(alpha)-np.cos(beta)*np.cos(gamma))/np.sin(gamma)\n B33 = np.sqrt(c**2-(c*np.cos(beta))**2-B23**2)\n return np.matrix(((a, b*np.cos(gamma), c*np.cos(beta)),\n (0, b*np.sin(gamma), B23),\n (0, 0, B33)))"
] | [
"0.59415287",
"0.5810972",
"0.5460677",
"0.53912824",
"0.537836",
"0.5234978",
"0.521264",
"0.5190832",
"0.5172464",
"0.5166259",
"0.5152519",
"0.51436037",
"0.51390684",
"0.5131656",
"0.51265067",
"0.5109335",
"0.5104018",
"0.5094834",
"0.50901693",
"0.50835675",
"0.50709367",
"0.5054786",
"0.5050934",
"0.5049765",
"0.50353926",
"0.5013754",
"0.49978194",
"0.49975416",
"0.49856535",
"0.49826732"
] | 0.62006503 | 0 |
Return the massmatrix. The method returns the mass matrix of the triangular mesh. | def massmatrix(self):
# lazy evaluation, compute the mass matrix at the first request and
# store it until the triangular mesh or the discretization method
# is changed
if self._massmatrix is None:
self._massmatrix = self.triangsamples.massmatrix(mode='normal')
return self._massmatrix | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Mass_Matrix(self):\n self.mass_matrix = np.empty((self.N,self.N))\n for n1 in range(self.N):\n for n2 in range(self.N):\n self.mass_matrix[n1,n2] = integrate.quad(lambda x:self.basis[n1](x)*self.basis[n2](x),-1,1)[0]",
"def material_matrix(self):\n out = Tmatrix()\n out.translate(Vector([.5, .5, .5]))\n out.scale(Vector([self.radius, self.radius, self.radius]) *\n (.5 / (self.radius + self.thickness)))\n return out",
"def mass_matrix(self):\n if not self._fr or not self._frstar:\n raise ValueError('Need to compute Fr, Fr* first.')\n return Matrix([self._k_d, self._k_dnh])",
"def calculate_mass_matrix(self, m, jsa):\n\n rho_i = self.r_i.local_coordinates - self.r_g_loc\n tr_rho_i = rho_i.transpose()\n x = np.array([self.r_j.local_coordinates - self.r_i.local_coordinates,\n self.u.local_coordinates, self.v.local_coordinates])\n a = np.matmul(inv(x), self.r_g_loc - self.r_i.local_coordinates)\n j_i = jsa + m * np.outer(rho_i, tr_rho_i)\n z = inv(x).dot(j_i).dot(inv(x.transpose()))\n\n self.mass_matrix = mass_matrix_assembly(m, z, a)",
"def ComputeMassMatrix(m, Ix, Iy, Iz):\n M = zeros(6)\n M[0, 0] = M[1, 1] = M[2, 2] = m \n M[3, 3] = Ix\n M[4, 4] = Iy\n M[5, 5] = Iz\n return M",
"def create_mass_matrix(self, model):\n # Create list of mass matrices for each equation to be put into block\n # diagonal mass matrix for the model\n mass_list = []\n mass_inv_list = []\n\n # get a list of model rhs variables that are sorted according to\n # where they are in the state vector\n model_variables = model.rhs.keys()\n model_slices = []\n for v in model_variables:\n model_slices.append(self.y_slices[v][0])\n sorted_model_variables = [\n v for _, v in sorted(zip(model_slices, model_variables))\n ]\n\n # Process mass matrices for the differential equations\n for var in sorted_model_variables:\n if var.domain == []:\n # If variable domain empty then mass matrix is just 1\n mass_list.append(1.0)\n mass_inv_list.append(1.0)\n else:\n mass = (\n self.spatial_methods[var.domain[0]]\n .mass_matrix(var, self.bcs)\n .entries\n )\n mass_list.append(mass)\n if isinstance(\n self.spatial_methods[var.domain[0]],\n (pybamm.ZeroDimensionalSpatialMethod, pybamm.FiniteVolume),\n ):\n # for 0D methods the mass matrix is just a scalar 1 and for\n # finite volumes the mass matrix is identity, so no need to\n # compute the inverse\n mass_inv_list.append(mass)\n else:\n # inverse is more efficient in csc format\n mass_inv = inv(csc_matrix(mass))\n mass_inv_list.append(mass_inv)\n\n # Create lumped mass matrix (of zeros) of the correct shape for the\n # discretised algebraic equations\n if model.algebraic.keys():\n mass_algebraic_size = model.concatenated_algebraic.shape[0]\n mass_algebraic = csr_matrix((mass_algebraic_size, mass_algebraic_size))\n mass_list.append(mass_algebraic)\n\n # Create block diagonal (sparse) mass matrix (if model is not empty)\n # and inverse (if model has odes)\n if len(model.rhs) + len(model.algebraic) > 0:\n mass_matrix = pybamm.Matrix(block_diag(mass_list, format=\"csr\"))\n if len(model.rhs) > 0:\n mass_matrix_inv = pybamm.Matrix(block_diag(mass_inv_list, format=\"csr\"))\n else:\n mass_matrix_inv = None\n else:\n mass_matrix, mass_matrix_inv = None, None\n\n return mass_matrix, mass_matrix_inv",
"def get_mass(self):\n return self.m",
"def matrix_mass(C, B, ref_values, weights):\n check_transformations(C, B)\n check_ref_values(ref_values, weights=weights)\n mass = np.empty((21,21), dtype=np.float64)\n _ap.ap_matrix_mass(C, B, ref_values, weights, ref_values.shape[1], mass)\n return mass",
"def matrix(self):\n return self._matrix(*self.parameters)",
"def M(self):\n return _hypre.HypreParMatrix_M(self)",
"def getMatrix(self) -> CMatrix4:\n ...",
"def matrix(self):\n return self._matrix",
"def matrix(self):\n return self._matrix",
"def get_matrix(self):\n return self._matrix[:3, :]",
"def mass_matrix_full(self):\n if not self._fr or not self._frstar:\n raise ValueError('Need to compute Fr, Fr* first.')\n o = len(self.u)\n n = len(self.q)\n return ((self._k_kqdot).row_join(zeros(n, o))).col_join((zeros(o,\n n)).row_join(self.mass_matrix))",
"def calc_mass(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) *\n (star.rho[:-2:2, j] + 4 * star.rho[1:-1:2, j] +\n star.rho[2::2, j])) / 6\n\n mass = 0\n\n for j in range(0, N - 2, 2):\n mass += (r[j + 2] - r[j]) * (r[j]**2 * Q1(j) +\n 4 * r[j + 1]**2 * Q1(j + 1) +\n r[j + 2]**2 * Q1(j + 2))\n\n return 2 / 3 * np.pi * mass",
"def calc_mass(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] +\n 4 *\n star.rho[i + 1, j, k]\n + star.rho[i + 2, j, k])\n\n return 2 * sum\n\n def Q2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (Q1(j, k) + 4 * Q1(j + 1, k) + Q1(j + 2, k))\n\n return 2 * sum\n\n mass = 0\n\n for k in range(0, N - 2, 2):\n mass += (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * Q2(k) +\n 4 * r[k + 1]**2 * Q2(k + 1) +\n r[k + 2]**2 * Q2(k + 2))\n\n return mass",
"def calc_matrix(self, attribute=False, basis=None):\n matrix = mgfns.dynamical_matrix_magnetic_gyros(self, basis=basis)\n return matrix",
"def get_mass(self):\n _pal.lib.geometry_get_mass.restype = c.c_float\n return _pal.lib.geometry_get_mass(self._geometry)",
"def _get_unitary_matrix(self, unitary): # pylint: disable=no-self-use\n if unitary in diagonal_in_z_basis:\n return unitary.eigvals()\n\n return unitary.matrix()",
"def get_mmt():\r\n # M^T\r\n MT = np.array([[-1, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [1, -1, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 1, -1, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 1, -1, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 1, -1, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 1, -1, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 1, -1, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 1, -1, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 1, -1],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 1]])\r\n\r\n M = np.transpose(MT)\r\n return M, MT",
"def mass(self):\n return _cantera.reactor_mass(self.__reactor_id)",
"def matrix(self):\n return np.matrix(list(self._columns.values()))",
"def matrix(self):\n m = Matrix.identity(4, 4)\n\n m[:3, :3] = self.rotation.matrix.data\n m[:3, 3:4] = self.translation.matrix.data\n\n return m",
"def asTrilinosMeshMatrix(self):\n A = self.matrix.copy()\n values, irow, jcol = A.find()\n\n if not hasattr(self, 'trilinosMatrix'):\n if A.shape[0] == 0:\n bandwidth = 0\n else:\n bandwidth = int(numerix.ceil(float(len(values)) / float(A.shape[0])))\n bandwidth = 1\n from fipy.matrices.trilinosMatrix import _TrilinosMeshMatrixKeepStencil\n self.trilinosMatrix = _TrilinosMeshMatrixKeepStencil(mesh=self.mesh, bandwidth=bandwidth,\n numberOfVariables=self.numberOfVariables,\n numberOfEquations=self.numberOfEquations)\n\n self.trilinosMatrix.addAt(values, irow, jcol)\n self.trilinosMatrix.finalize()\n\n return self.trilinosMatrix",
"def get_matrix_rigidity_unitary(self):\r\n return self.flexural_rigidity/(self.length**3)*np.array([\r\n [12, 6*self.length, -12, 6*self.length],\r\n [6*self.length, 4*self.length**2, -6*self.length, 2*self.length**2],\r\n [-12, -6*self.length, 12, -6*self.length],\r\n [6*self.length, 2*self.length**2, -6*self.length, 4*self.length**2]\r\n ])",
"def mass(self):\n return self._mass",
"def mass(self):\n return self._mass",
"def getMass(self):\n return self.mass",
"def m1(self):\n return self.mass[0]"
] | [
"0.7246817",
"0.70758045",
"0.7053071",
"0.6931438",
"0.65270054",
"0.64959997",
"0.6477681",
"0.6448242",
"0.6388519",
"0.63806444",
"0.6350266",
"0.63291156",
"0.63291156",
"0.62858677",
"0.628041",
"0.6222232",
"0.621956",
"0.6169165",
"0.61493695",
"0.61330867",
"0.6062642",
"0.6062013",
"0.6029892",
"0.6026475",
"0.60088587",
"0.600691",
"0.60024506",
"0.60024506",
"0.5994804",
"0.5970733"
] | 0.8440911 | 0 |
Return a MIME bundle for display in Jupyter frontends. | def _repr_mimebundle_(self, include, exclude):
return renderers.get()(self.to_dict()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app_bundle(self) -> str:\n if self.minimize:\n js_url = f\"https://cdn.jsdelivr.net/gh/salesforce/cloudsplaining@{__version__}/cloudsplaining/output/dist/js/index.js\"\n bundle = f'<script type=\"text/javascript\" src=\"{js_url}\"></script>'\n return bundle\n else:\n with open(app_bundle_path, \"r\", encoding=\"utf-8\") as f:\n bundle_content = f.read()\n # bundle_content = app_bundle_path.read_text(encoding=\"utf-8\")\n bundle = f'<script type=\"text/javascript\">\\n{bundle_content}\\n</script>'\n return bundle",
"def get_bundle(conf, asset_type, bundle_name):\n \n content_type = 'application/javascript'\n content = []\n \n if asset_type == 'css':\n content_type = 'text/css'\n \n for asset in conf[asset_type][bundle_name]:\n content.append(open(os.path.join(conf['srcDir'], asset_type, asset)).read())\n \n content = ''.join(content)\n \n return '200 OK', content_type, content",
"def get_mimetype(data: bytes) -> str:\n f = magic.Magic(keep_going=True, mime=False)\n return f.from_buffer(data)",
"def _get_bundle(request):\n config = request.matchdict['config']\n bundle_name = request.matchdict['bundle']\n bundle = request.webpack(config).get_bundle(bundle_name)\n renderer = request.params.get('renderer')\n if renderer:\n return render_to_response(renderer, {})\n else:\n return bundle",
"def build_mimetype(self) -> None:\n logger.info(__('writing mimetype file...'))\n copy_asset_file(path.join(self.template_dir, 'mimetype'), self.outdir)",
"def __call__(self, environ, start_response):\n\n def set_content_type(status, headers, exc_info=None):\n header_map = wsgiref.headers.Headers(headers)\n header_map['Content-Type'] = mime_type\n start_response(status, headers, exc_info)\n\n resource_url = environ['PATH_INFO'].lstrip('/')\n resource_path = '/'.join([self.base_path, resource_url])\n mime_type, _ = mimetypes.guess_type(resource_url)\n req_method = environ['REQUEST_METHOD']\n if req_method not in ('GET', 'HEAD'):\n app = webob.exc.HTTPMethodNotAllowed(\"You cannot %s a file\" % req_method)\n else:\n try:\n res = pkg_resources.resource_stream(self.package_name, resource_path)\n # if 'wsgi.file_wrapper' in environ:\n # app = environ['wsgi.file_wrapper'](res, self.BLOCK_SIZE)\n # return app\n # else:\n app = (webob.Response(app_iter=webob.static.FileIter(res))\n .conditional_response_app)\n except:\n app = webob.exc.HTTPNotFound()\n return app(environ, set_content_type)",
"def mime(self):\n msg = MIMEMultipart('alternative')\n\n msg.attach(MIMEText(str(self), 'plain'))\n msg.attach(MIMEText(self.html, 'html'))\n\n return msg",
"def get_mimetype(self):\n if self.resource.get_mimetype():\n return self.resource.get_mimetype()\n # Give best guess at mimetype\n mimetype = mimetypes.guess_type(self.resource.name)\n if mimetype[0]:\n return mimetype[0]\n else:\n # Interpret as binary data\n return \"application/octet-stream\"",
"def ext_js_bundle(context, extension, name):\n return _render_js_bundle(context, extension, name)",
"def _jupyter_bundlerextension_paths():\n return [{\n 'name': 'jorts_bundler',\n 'label': 'Human Readable Report (.pdf)',\n 'module_name': 'jorts',\n 'group': 'download'\n }]",
"def get_content(self, renderer, data,\n accepted_media_type, renderer_context):\n if not renderer:\n return '[No renderers were found]'\n\n renderer_context['indent'] = 4\n content = renderer.render(data, accepted_media_type, renderer_context)\n\n render_style = getattr(renderer, 'render_style', 'text')\n assert render_style in ['text', 'binary'], 'Expected .render_style ' \\\n '\"text\" or \"binary\", but got \"%s\"' % render_style\n if render_style == 'binary':\n return '[%d bytes of binary content]' % len(content)\n\n return content",
"def getMimeType(self, extension): #$NON-NLS-1$\r",
"def vendor_bundle(self) -> str:\n\n if self.minimize:\n js_url = f\"https://cdn.jsdelivr.net/gh/salesforce/cloudsplaining@{__version__}/cloudsplaining/output/dist/js/chunk-vendors.js\"\n bundle = f'<script type=\"text/javascript\" src=\"{js_url}\"></script>'\n return bundle\n else:\n vendor_bundle_path = get_vendor_bundle_path()\n with open(vendor_bundle_path, \"r\", encoding=\"utf-8\") as f:\n bundle_content = f.read()\n # bundle_content = vendor_bundle_path.read_text(encoding=\"utf-8\")\n bundle = f'<script type=\"text/javascript\">\\n{bundle_content}\\n</script>'\n return bundle",
"def load_jupyter_server_extension(nbapp):\n route_pattern = url_path_join(nbapp.web_app.settings['base_url'], '/nbmessage')\n nbapp.web_app.add_handlers('.*', [\n (url_path_join(route_pattern, r'/render/(\\w+)'), ShowMessage),\n (url_path_join(route_pattern, r'/notify'), Notify),\n (route_pattern + '/(.*)', web.StaticFileHandler, {'path': f'{STATIC_DIR}'})\n ])\n # FIX ME move to top\n # nbapp.web_app.add_handlers('.*', [(route_pattern + '/(.*)', web.StaticFileHandler, {'path': f'{STATIC_DIR}'})])",
"def bundle(handler, model):\n\n notebook_filename = model['name']\n notebook_name = os.path.splitext(notebook_filename)[0]\n pdf_filename = '{}.pdf'.format(notebook_name)\n\n with io.BytesIO() as pdf_buffer:\n pdf_body = convert_notebook_to_pdf(model)\n pdf_buffer.write(pdf_body)\n\n handler.set_attachment_header(pdf_filename)\n handler.set_header('Content-Type', 'application/pdf')\n\n # Return the buffer value as the response\n handler.finish(pdf_buffer.getvalue())",
"def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")",
"def task_render():\n target = 'analysis.html'\n dep = 'analysis.ipynb'\n return {\n 'file_dep': [dep],\n 'targets': [target],\n 'actions': [\n f\"jupyter nbconvert --execute --to html {dep}\"\n ],\n 'clean': True\n }",
"def get_static_content(self) -> str | bytes:\n raise NotImplementedError()",
"def getMimeTypes(self): #$NON-NLS-1$\r",
"def _jupyter_nbextension_paths():\n return [{\n \"section\": \"notebook\",\n \"dest\": \"nbresuse\",\n \"src\": \"static\",\n \"require\": \"nbresuse/main\"\n }]",
"def mime(mime):\n\n def dfn(fn):\n fn.mime = mime\n return fn\n\n return dfn",
"def return_file(path):\n file_ext = path.split(\".\")[1]\n if file_ext != \"py\":\n with open(path, \"rb\") as file:\n content = file.read()\n if file_ext == \"txt\":\n mime_type = b\"text/plain\"\n elif file_ext == \"html\" or file_ext == \"htm\":\n mime_type = b\"text/html\"\n elif file_ext == \"png\":\n mime_type = b\"image/png\"\n elif file_ext == \"jpg\":\n mime_type = b\"image/jpeg\"\n else:\n content, mime_type = return_py_script(path)\n return content, mime_type",
"def mimetype(self, mimetype):\n return \"text/plain\"",
"def return_py_script(path):\n old_stdout = sys.stdout\n sys.stdout = mystdout = StringIO()\n exec(open(path).read())\n sys.stdout = old_stdout\n content = mystdout.getvalue()\n if \"<http>\" in content:\n mime_type = b\"text/html\"\n else:\n mime_type = b\"text/plain\"\n content = content.encode()\n return content, mime_type",
"def _best_mime():\n supported = []\n renders = {}\n for renderer_cls in app.config.get(\"RENDERERS\"):\n renderer = import_from_string(renderer_cls)\n for mime_type in renderer.mime:\n supported.append(mime_type)\n renders[mime_type] = renderer\n\n if len(supported) == 0:\n abort(\n 500,\n description=debug_error_message(\n \"Configuration error: no supported mime types\"\n ),\n )\n\n best_match = request.accept_mimetypes.best_match(supported) or supported[0]\n return best_match, renders[best_match]",
"def mime_type(path):\n cmd = ['/usr/bin/file', '-b', '--mime-type', path]\n return subprocess.check_output(cmd).rstrip()",
"def mimeData(self, indices):\n \n pass",
"def Mime(self):\n if self.force_auto_sync:\n self.get('Mime')\n return self._Mime",
"def bundle(self):\n return self._bundle",
"def get_inline_attachment(self) -> Attachment:\n file_path = os.path.join(os.getcwd(), \"assets/architecture-resize.png\")\n with open(file_path, \"rb\") as in_file:\n base64_image = base64.b64encode(in_file.read()).decode()\n\n return Attachment(\n name=\"architecture-resize.png\",\n content_type=\"image/png\",\n content_url=f\"data:image/png;base64,{base64_image}\",\n )"
] | [
"0.61310464",
"0.55794567",
"0.5460762",
"0.5446731",
"0.54235196",
"0.5399325",
"0.53911847",
"0.53861356",
"0.5315898",
"0.5268542",
"0.52289283",
"0.522581",
"0.5222383",
"0.5213547",
"0.51955634",
"0.5189331",
"0.51724833",
"0.5170243",
"0.51659167",
"0.5088028",
"0.5052184",
"0.50492585",
"0.50423795",
"0.503012",
"0.5025844",
"0.49760967",
"0.4948078",
"0.49348122",
"0.49325246",
"0.4922855"
] | 0.7133241 | 0 |
Used when creating the album, as it needs to be related to the creator | def add_album_with_contributor(title, username):
album = Album(title=title)
album.save()
ContributorAlbum(slug=album.slug, username=username).save()
return album | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_create(self, serializer):\n serializer.save(owner=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(owner=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(owner=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(owner=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(owner=self.request.user)",
"def perform_create(self, serializer):\n serializer.save(creator=self.request.user)",
"def perform_create(self, serializer):\r\n serializer.save(author=self.request.user)",
"def cmd_album_create(client, args):\n fields = data_fields(args, client.allowed_album_fields)\n album = client.create_album(fields)\n generate_output({'album': album})",
"def perform_create(self, serializer):\n serializer.save(author=self.request.user)",
"def creator(self, creator):\n self._creator = creator",
"def perform_create(self, serializer):\n user = self.request.user\n serializer.save(owner=user)\n return Response({}, status=status.HTTP_201_CREATED)",
"def add_contributor_album(slug, username):\n contrib = Contributor.get(username)\n album = Album.get(slug)\n ContributorAlbum(slug=album.slug, username=contrib.username).save()",
"def perform_create(self, serializer):\n if serializer.instance is None:\n profile = Profile.objects.get(user=self.request.user)\n #print profile\n serializer.save(owner=profile)",
"def perform_create(self, serializer):\n org = self.kwargs['org_guid']\n serializer.validated_data['organization'] = Organization.objects.get(\n org_guid=org)\n serializer.validated_data['author'] = self.request.user\n return super(OrganizationNoteListView, self).perform_create(serializer)",
"def creator(self, creator):\n\n self._creator = creator",
"def creator(self, creator):\n\n self._creator = creator",
"def creator(self, creator):\n\n self._creator = creator",
"def create(self):",
"def create(self):\n\n pass",
"def create(self):\n ...",
"def pre_save(self, obj):\n obj.owner = self.request.user",
"def creator(self):\n return self._creator",
"def creator(self):\n return self._creator",
"def create(self):\n pass",
"def create(self):\n pass",
"def create(self):\n pass",
"def test_create_photo_album(self, api_client, test_user):\n photo_1 = sample_photo(user=test_user, title='Home')\n photo_2 = sample_photo(user=test_user, title='Work')\n\n payload = {\n 'title': 'Holidays in LA',\n 'photos': [str(photo_1.id), str(photo_2.id)],\n }\n\n api_client.force_authenticate(test_user)\n res = api_client.post(PHOTO_ALBUM_URL, payload)\n print(res.data)\n assert res.status_code == status.HTTP_201_CREATED\n album = PhotoAlbum.objects.get(id=res.data['id'])\n photos = album.photos.all()\n assert photos.count() == 2",
"def sample_photo_album(user, **params):\n defaults = {\n 'title': 'Around the World',\n }\n defaults.update(params)\n album = PhotoAlbum.objects.create(user=user)\n album.photos.add(sample_photo(user))\n album.photos.add(sample_photo(user, title=\"Mount Everest\"))",
"def perform_create(self, serializer):\n\n attachment = serializer.save()\n attachment.user = self.request.user\n attachment.save()",
"def create(self, *args, **kwargs):\n pass"
] | [
"0.65831757",
"0.65831757",
"0.65831757",
"0.65831757",
"0.65831757",
"0.6488359",
"0.6439454",
"0.64283496",
"0.64183915",
"0.6306739",
"0.62202275",
"0.6189925",
"0.611416",
"0.6101035",
"0.6100576",
"0.6100576",
"0.6100576",
"0.6034475",
"0.59486526",
"0.59470713",
"0.5914194",
"0.59032005",
"0.59032005",
"0.590023",
"0.590023",
"0.590023",
"0.58300656",
"0.58188456",
"0.5796241",
"0.5774696"
] | 0.6782147 | 0 |
Used when adding an existent contributor to an existent album | def add_contributor_album(slug, username):
contrib = Contributor.get(username)
album = Album.get(slug)
ContributorAlbum(slug=album.slug, username=contrib.username).save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_album_with_contributor(title, username):\n album = Album(title=title) \n album.save()\n ContributorAlbum(slug=album.slug, username=username).save()\n return album",
"def test_post_add_album_contrib_as_owner(self):\n self.make_logged_in_owner()\n\n # get our manage page with form\n resp = self.client.get(reverse('manage_album', kwargs={'albumid': self.testalbum.id}))\n\n # get and populate form\n myform = resp.context['addcontributorsform']\n data = myform.initial\n data['idname'] = self.u2.id\n\n # construct our post\n self.addcontribpostrequest = self.factory.post(\n reverse(\"add_album_contrib\", kwargs={\"albumid\": self.testalbum.id}), data=data)\n self.addcontribpostrequest.user = self.u\n\n # we do not successfully add because not friends, but still redirect\n # todo: why did this not raise?\n resp = album.add_contrib(self.addcontribpostrequest, self.testalbum.id)\n assert resp.status_code == 302\n assert not self.u2.profile in collate_owner_and_contrib(self.testalbum)\n\n # make friends and we will succeed in adding\n complete_add_friends(self.u.id, self.u2.id)\n\n resp = album.add_contrib(self.addcontribpostrequest, self.testalbum.id)\n assert resp.status_code == 302\n assert self.u2.profile in collate_owner_and_contrib(self.testalbum)",
"def test_post_add_album_contrib_as_not_owner(self):\n complete_add_friends(self.u2.id, self.u3.id)\n\n self.make_logged_in_owner()\n\n # get our manage page with form (use self.u as self.u2 will not obtain the form)\n # using self.u will not affect our test later because we aren't using the client later\n resp = self.client.get(reverse('manage_album', kwargs={'albumid': self.testalbum.id}))\n\n # get and populate form\n myform = resp.context['addcontributorsform']\n data = myform.initial\n data['idname'] = self.u3.id\n\n # construct our post\n self.addcontribpostrequest = self.factory.post(\n reverse(\"add_album_contrib\", kwargs={\"albumid\": self.testalbum.id}), data=data)\n\n self.user_escalate_post_test_helper(self.addcontribpostrequest, self.u2, self.testalbum, self.testalbum.id,\n album.add_contrib, ALBUM_PRIVATE+1)",
"def add_contributor(username):\n try:\n get_contributor_by_username(username)\n raise AttributeError(\"%s username alredy exists\" % username)\n except DynamoDBKeyNotFoundError:\n pass\n \n # print( \"Username: %s\" % username)\n contrib = Contributor(username=username)\n contrib.save()\n return contrib",
"def post(request, slug, username):\n add_contributor_album(slug, username)\n response = redirect(reverse(\"resource_contributoralbum\", args=[slug, username]))\n response['Cache-Control'] = 'no-cache'\n return response",
"def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)",
"def add_album(self, album):\n self.albums.append(album)",
"def add_album(self, album):\n self.albums.append(album)",
"def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)",
"def add_album(self, album):\n\n self.album.append(album)",
"def add_album(self, album):\n\n self.albums.append(album)",
"def add_album(self, album):\n\n self.albums.append(album)",
"def add_album(self):\n item = self.clementine_albums.currentItem()\n albumname = item.text(0) if item else ''\n year = item.data(0, core.Qt.UserRole) if item else ''\n dlg = NewAlbumDialog(self, albumname, year).exec_()\n if dlg != qtw.QDialog.Accepted:\n return\n name, year, is_live = self.data\n if not item:\n result = self.clementine_albums.findItems(name,\n core.Qt.MatchFixedString, 0)\n if result:\n item = result[0]\n if not item:\n qtw.QMessageBox.information(self, self.appname, \"Album doesn't \"\n \"exist on the Clementine side\")\n return\n\n a_item = None\n results = self.albums_albums.findItems(name, core.Qt.MatchFixedString, 0)\n data = [build_album_name(x) for x in results]\n if results:\n selected, ok = qtw.QInputDialog.getItem(self, self.appname,\n 'Select Album', data,\n editable=False)\n if ok:\n a_item = results[data.index(selected)]\n if not a_item:\n a_item = qtw.QTreeWidgetItem([name, year, '0'])\n self.albums_albums.addTopLevelItem(a_item)\n tracklist = dmlc.list_tracks_for_album(dmlc.DB, self.c_artist,\n item.text(0))\n num = itertools.count(1)\n self.albums_to_save[self.c_artist].append(\n (name, year, 'X', is_live,\n [(next(num), x['title']) for x in tracklist if x['track'] > -1]))\n self.update_item(a_item, item)",
"def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def add_songs(self, name, year, title):\n\n album_found = find_object(name, self.album)\n if album_found is None:\n print(\"Not Found \" + name)\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n else:\n print(\"Found album \"+name)\n\n album_found.add_songs(title)",
"def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])",
"def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n album_found.add_song(title)",
"def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")",
"def test_get(self):\n self.perm_escalate_helper(self.albumcontrol, self.managepagerequest, self.testalbum, self.testalbum.id,\n self.u, album.manage_album_permissions, ALBUM_PRIVATE)\n # add u2 as contributor\n self.make_logged_in_contributor()\n\n self.perm_escalate_helper(self.albumcontrol, self.managepagerequest, self.testalbum, self.testalbum.id,\n self.u2, album.manage_album_permissions, ALBUM_PRIVATE)\n\n # contributor does not obtain addcontributorsform from get\n resp = self.client.get(reverse('manage_album', kwargs={'albumid': self.testalbum.id}))\n assert 'addcontributorsform' not in resp.context.keys()\n assert 'accesstypeform' not in resp.context.keys()\n # todo: elsewhere we need to add testing for group form if the access type is or is not groups\n\n # use u3 as control, not owner or contributor, pass 0 as permission, no access\n self.perm_escalate_helper(self.albumcontrol, self.managepagerequest, self.testalbum, self.testalbum.id,\n self.u3, album.manage_album_permissions, 0)",
"def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])",
"def _add_owner(parent_id, child_id):\n db.session.add(\n pam.BivAccess(\n source_biv_id=parent_id,\n target_biv_id=child_id\n )\n )",
"def AddRosterItem(self, fpb, username):\n pass",
"def test_channel_addowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })",
"def test_aid_creation_requires_contributor(client, user):\n\n client.force_login(user)\n form_url = reverse('aid_create_view')\n res = client.get(form_url, follow=True)\n assert res.status_code == 200\n assert len(res.redirect_chain) == 1\n assert res.redirect_chain[0][0].startswith('/comptes/profil-contributeur/')",
"def test_logged_in_contributor(self):\n self.make_logged_in_contributor()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PRIVATE)",
"def test_adding_album_twice(self):\n self.add_mp3(filename='1.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames, 'ep')\n self.assertEqual(added, False)\n self.assertIn('Would update to', status)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)",
"def add_administrator(self, project_id, name, email):\n self._run(\n url_path=\"contributors/add\",\n id=project_id,\n name=name,\n email=email,\n admin=True\n )\n return True",
"def test_2_addautor(self):\n for nome, email, grupo in ((\"Autor 1\", \"[email protected]\", \"grupo 1\"),\n (\"Autor 2\", \"[email protected]\", \"\")):\n self.app.addAutor(nome=nome,\n email=email,\n grupo=grupo)",
"def test_adding_album_twice_forced(self):\n self.add_mp3(filename='1.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames, 'ep', force_update=True)\n self.assertEqual(added, True)\n self.assertIn('Updated to', status)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'ep')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)"
] | [
"0.74803764",
"0.65657246",
"0.65415776",
"0.6436727",
"0.64360774",
"0.62320966",
"0.6168189",
"0.6168189",
"0.61587274",
"0.6062143",
"0.595339",
"0.595339",
"0.5950093",
"0.5899336",
"0.5891353",
"0.5877098",
"0.58582014",
"0.5845494",
"0.58208936",
"0.58176625",
"0.57979894",
"0.5769017",
"0.57678956",
"0.5751591",
"0.5734827",
"0.56716573",
"0.5658617",
"0.56378025",
"0.55644274",
"0.5522112"
] | 0.7505874 | 0 |
Album deletion can only be done based on slug, thus no conflicts | def delete_album_by_slug(slug):
album = get_album_by_slug(slug)
[x.delete() for x in ContributorAlbum.scan({"slug": condition.EQ(album.slug)})]
album.delete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(request, slug, username):\n delete_album_contributor(slug, username)\n \n response = HttpResponse(status=204)\n response['Cache-Control'] = 'no-cache'\n return response",
"def cmd_album_delete(client, args):\n delete_album = client.album_delete(args.album_id)\n generate_output({'delete_album': delete_album})",
"def test_remove_category_from_asset(self):\n pass",
"def remove_book(request, slug):\n\n user = CustomUser.objects.get(\n id=request.user.id\n )\n book_name = Book.objects.get(\n slug=slug\n )\n book = get_object_or_404(\n Book,\n customuser=user,\n book_name=book_name,\n )\n book.delete()\n\n return redirect('favorite')",
"def __delete__(self, instance):\n instance.doc.pop(self.slug, None)",
"def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)",
"def test_delete_asset(self):\n pass",
"def test_api_can_delete_music(self):\n music = Music.objects.get()\n response = self.client.delete(\n reverse('details', kwargs={'pk': music.id}),\n format = \"json\",\n follow = True\n )\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_delete_album_all_fails(self):\n\n web.app.config['READONLY'] = False\n\n # Delete all albums\n response = self.client.delete('/album/')\n self.assertEqual(response.status_code, 405)\n\n # Note: if this fails, all albums have gone and rest of\n # tests will fail!",
"def cmd_album_remove_images(client, args):\n remove_images = client.album_remove_images(args.album_id, args.ids)\n generate_output({'remove_images': remove_images})",
"def remove_empty_albums(aid):\n\t\tprint \"aid\"\n\t\tprint aid\n\t\tif aid is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT count(*) from fileuploader_picture WHERE album_id=%s\" % (aid)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\t# there is no picture in this album\n\t\t\tprint \"len(data)\"\n\t\t\tprint len(data)\n\t\t\tif len(data) == 0:\n\t\t\t\tquery = \"DELETE from fileuploader_album WHERE id=%s\" % (aid)\n\t\t\t\tprint query\n\t\t\t\tcur = con.cursor()\n\t\t\t\tcur.execute(query)",
"def delete_thumbnail(self, thumbnail_name):",
"def get_album_by_slug(slug):\n\n album_key = get_album_key(slug)\n album = album_key.get()\n return album",
"def test_delete_collection_image(self):\n pass",
"def test_remove_asset(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Delete the asset from the section.\n section_asset.delete()\n # Confirm that the asset is NOT in the section's list\n self.assertFalse(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())",
"def test_search_by_deleted_relation(self):\n db.session.delete(self.rel)\n db.session.commit()\n self.assertSlugs(\"comment\", self.comment.description, [])",
"def cmd_gallery_remove(client, args):\n gallery_remove = client.remove_from_gallery(args.item_id)\n generate_output({'gallery_remove': gallery_remove})",
"def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)",
"def remove_from_gallery(self):\n url = \"https://api.imgur.com/3/gallery/{0}\".format(self.id)\n self._imgur._send_request(url, needs_auth=True, method='DELETE')\n if isinstance(self, Image):\n item = self._imgur.get_image(self.id)\n else:\n item = self._imgur.get_album(self.id)\n _change_object(self, item)\n return self",
"def photo_delete(sender, instance, **kwargs):\n\tinstance.photo.delete(False)",
"def clean_up(self, graph):\n # Delete albums associated with place\n if len(self.albums) != 0:\n for album in self.albums:\n album.clean_up()\n album.delete(graph)",
"def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n delete(kwargs['file'])",
"def test_delete_category(self):\n pass",
"def test_delete_a_song(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.delete_a_post(1)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n # test with invalid data\n response = self.delete_a_post(100)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def delete(self, *args, **kwargs):\n super(Image, self).delete(*args, **kwargs)",
"def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)",
"def test_two_tracks_mismatched_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_album=True, album='Album 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('changed to', status)\n self.assertEqual(self.get_album_count(), 0)",
"def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)",
"def removeAlias(self, alias):\r\n for k, v in self.aliases.iteritems():\r\n if v.title == alias:\r\n del self.aliases[k]\r\n return True\r\n return False",
"def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)"
] | [
"0.6888736",
"0.6556271",
"0.6163622",
"0.5952389",
"0.59495986",
"0.5918944",
"0.58567643",
"0.58391935",
"0.5832632",
"0.5774268",
"0.57660323",
"0.5679342",
"0.5658493",
"0.5637132",
"0.5582927",
"0.5533151",
"0.54873204",
"0.54847586",
"0.5484405",
"0.54693687",
"0.54595584",
"0.5453982",
"0.5451408",
"0.5444439",
"0.54392487",
"0.54351497",
"0.5427256",
"0.54168737",
"0.54116863",
"0.5411325"
] | 0.7940032 | 0 |
Get the items in a collection using limitoffset paging. | def get_items(
self,
limit: Optional[int] = None,
offset: int = 0,
fields: Iterable[str] = None
) -> 'BoxObjectCollection':
return LimitOffsetBasedObjectCollection(
self.session,
self.get_url('items'),
limit=limit,
fields=fields,
offset=offset,
return_full_pages=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_slice(self, limit, offset):\r\n if limit == 0:\r\n return self.objects[offset:]\r\n\r\n return self.objects[offset:offset + limit]",
"def get_slice(self, limit, offset):\n # Always get the first page\n return super(NoLimitPaginator, self).get_slice(0, 0)",
"def get_all(self, start_at, limit, order=None):",
"def fetch(self, limit, offset=0):\r\n self.limit = limit\r\n self.offset = offset\r\n return self",
"def get_page_list(self, offset=0, limit=50):\n return self._telegraph.method('getPageList', {\n 'offset': offset,\n 'limit': limit\n })",
"def Get(self, limit, offset=0):\n count = 1\n result = []\n\n iterator = self.Run()\n\n try:\n for i in xrange(offset):\n val = iterator.next()\n except StopIteration:\n pass\n\n try:\n while count <= limit:\n val = iterator.next()\n result.append(val)\n count += 1\n except StopIteration:\n pass\n return result",
"def get_next_page(\n cls,\n collection: Dict[str, Any],\n page_size: int,\n api_key: Optional[str] = None,\n optional_params: Optional[Dict[str, Any]] = None,\n ) -> List[Any]:\n requestor = Requestor(local_api_key=api_key)\n url = cls.class_url()\n collection_array = collection.get(url[1:])\n\n if collection_array is None or len(collection_array) == 0 or not collection.get(\"has_more\"):\n raise Error(message=\"There are no more pages to retrieve.\")\n\n params = {\n \"before_id\": collection_array[-1].id,\n \"page_size\": page_size,\n }\n\n if optional_params:\n params.update(optional_params)\n\n response, api_key = requestor.request(method=RequestMethod.GET, url=url, params=params)\n response_array: List[Any] = response.get(url[1:]) # type: ignore\n if response is None or len(response_array) == 0 or not response.get(\"has_more\"):\n raise Error(message=\"There are no more pages to retrieve.\")\n\n return convert_to_easypost_object(response=response, api_key=api_key)",
"def skiplimit(page_size, page_num):\n # Calculate number of documents to skip\n skips = page_size * (int(page_num/page_size) - 1)\n\n if skips < 0:\n skips = 0\n \n print('skip', skips, 'pages', page_size,'num', page_num )\n # Skip and limit\n cursor = db['farms'].find().skip(skips).limit(page_size)\n print('curs', [x for x in cursor])\n\n # Return documents\n return [x for x in cursor]",
"def get_all(\n self,\n offset: int = 0,\n extra_properties: Union[list, str, None] = None,\n limit: int = -1,\n **options,\n ):\n finished = False\n output = []\n limited = limit > 0\n\n # Default properties to fetch\n properties = [\"name\", \"price\", \"quantity\"]\n\n # append extras if they exist\n if extra_properties:\n if isinstance(extra_properties, list):\n properties += extra_properties\n if isinstance(extra_properties, str):\n properties.append(extra_properties)\n\n while not finished:\n batch = self._call(\n \"paged\",\n method=\"GET\",\n params=ordered_dict({\"offset\": offset, \"properties\": properties}),\n doseq=True,\n **options,\n )\n output.extend(\n [\n prettify(line_item, id_key=\"objectId\")\n for line_item in batch[\"objects\"]\n if not line_item[\"isDeleted\"]\n ]\n )\n finished = not batch[\"hasMore\"] or (limited and len(output) >= limit)\n offset = batch[\"offset\"]\n\n return output if not limited else output[:limit]",
"def _all_offset_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_offset = 0\n is_truncated = True\n while is_truncated:\n page = page_function(offset=next_offset, **kwargs)\n next_offset = page.offset + page.limit\n is_truncated = page.total > next_offset\n for data in page.page_data:\n yield data",
"def get_pages(offset=None, limit=None):\n articles = list(pages)\n # assign section value if none was provided in the metas\n for article in articles:\n if not article.meta.get(\"section\"):\n article.meta[\"section\"] = article.path.split(\"/\")[0]\n\n # filter unpublished article\n if not app.debug:\n articles = [p for p in articles if p.meta.get(\"draft\") is not True]\n\n articles = sorted(articles, reverse=True, key=lambda p: p.meta[\"date\"])\n\n if offset and limit:\n return articles[offset:limit]\n elif limit:\n return articles[:limit]\n elif offset:\n return articles[offset:]\n else:\n return articles",
"def paginate(self, page_size=20, **q_options):\n cursor = self._get_cursor(self.page, page_size, **q_options)\n results, cursor, more = self.query.fetch_page(page_size,\n start_cursor=cursor,\n **q_options)\n self.has_next = more\n return results, cursor, more",
"def test_collection_pagination(mocker, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n page_size = 8\n CollectionSetPagination.page_size = page_size\n client, user = logged_in_apiclient\n collections = CollectionFactory.create_batch(20, owner=user)\n url = reverse(\"models-api:collection-list\")\n result = client.get(url)\n assert len(result.data[\"results\"]) == min(page_size, len(collections))\n for i in range(1, 3):\n paged_url = url + \"?page={}\".format(i)\n result = client.get(paged_url)\n assert len(result.data[\"results\"]) == min(\n page_size, max(0, len(collections) - page_size * (i - 1))\n )",
"def get_list(self, **kwargs):\n self.fields = self.get_fields(**kwargs)\n fields = \", \".join(self.fields)\n kwargs[\"query\"] = 'SELECT {0}'.format(fields)\n start = kwargs.pop(\"offset\", None)\n end = kwargs.pop(\"count\", None)\n data = self.filter(**kwargs)\n\n return self.paginate(data, start=start, end=end)",
"def get_objects(obj, offset=0):\n r = requests.get(BASE_URL + '/api/{}'.format(obj), params={\n 'offset': offset,\n 'limit': 100,\n 'all': 1\n })\n r.raise_for_status()\n return r",
"def searchCollection(self, limit=100, **kwargs):\n results = self._CMR.get_search_results(url=self._SEARCH_COLLECTION_URL, limit=limit, **kwargs)\n return [Collection(result, self._MAAP_HOST) for result in results][:limit]",
"def offset(self, offset: int) -> MongoDBQuerySet:\n\n self.cursor = self.cursor.skip(offset)\n return self",
"def test_limit(db_session):\n query_params = {\"limit\": \"1\"}\n parser = ModelQueryParamParser(query_params)\n album_resource = AlbumResource(session=db_session)\n offset_limit_info = parser.parse_offset_limit(page_max_size=30)\n offset = offset_limit_info.offset\n limit = offset_limit_info.limit\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts(),\n limit=limit,\n offset=offset\n )\n assert len(result) == 1",
"def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]",
"def test_get_documents_offset_optional_params(index_with_documents):\n index = index_with_documents()\n response = index.get_documents()\n assert isinstance(response.results, list)\n assert len(response.results) == 20\n response_offset_limit = index.get_documents({\"limit\": 3, \"offset\": 1, \"fields\": \"title\"})\n assert len(response_offset_limit.results) == 3\n assert hasattr(response_offset_limit.results[0], \"title\")\n assert response_offset_limit.results[0].title == response.results[1].title",
"def test_offset(db_session):\n query_params = {\"offset\": \"1\"}\n parser = ModelQueryParamParser(query_params)\n album_resource = AlbumResource(session=db_session)\n offset_limit_info = parser.parse_offset_limit(page_max_size=30)\n offset = offset_limit_info.offset\n limit = offset_limit_info.limit\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts(),\n limit=limit,\n offset=offset\n )\n assert result[0][\"album_id\"] == 2",
"def paginate(cls, papers, page=0, limit=30):\n offset = page * limit\n end = offset + limit\n if offset > len(papers):\n return []\n if end > len(papers):\n return papers[offeset:]\n return papers[offset:end]",
"def page(self):\r\n limit = self.get_limit()\r\n offset = self.get_offset()\r\n count = self.get_count()\r\n objects = self.get_slice(limit, offset)\r\n meta = {\r\n 'offset': offset,\r\n 'limit': limit,\r\n 'total_count': count}\r\n\r\n if limit:\r\n meta['previous'] = self.get_previous(limit, offset)\r\n meta['next'] = self.get_next(limit, offset, count)\r\n\r\n return {\r\n self.collection_name: objects, 'meta': meta}",
"def get_current_page(request, objects, **kwargs):\n # Show 25 items per page by default\n paginator = Paginator(objects, kwargs.get('slice', 25))\n page = request.GET.get('page')\n paginator._count = kwargs.get('count')\n try:\n objects = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n objects = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n objects = paginator.page(paginator.num_pages)\n\n return objects",
"def get_records(self, collection_name,num_records):\n\n try:\n self.logger.info('in get_records()')\n collection = self.get_db()[collection_name]\n records = collection.find().limit(num_records)\n self.logger.info('out get_records()')\n return records\n except Exception as e:\n self.logger.error(f'Error occurred while getting record {e}')",
"def load_paginated(per_page=25, page_num=1):\n def item_from_entity(entity):\n return {\n 'id': entity.id,\n 'partner_code': entity.partner.partner_code,\n 'added_at': entity.date_time.strftime(\n utils.FORMAT_US_DATE_TIME)\n }\n\n pagination = LinkageEntity.query.paginate(page_num, per_page, False)\n items = map(item_from_entity, pagination.items)\n return items, pagination.pages",
"def page_query(q):\n\toffset = 0\n\twhile True:\n\t\tr = False\n\t\tfor elem in q.limit(1000).offset(offset):\n\t\t r = True\n\t\t yield elem\n\t\toffset += 1000\n\t\tif not r:\n\t\t\tbreak",
"def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data",
"def fetch(\n self,\n limit=None,\n offset=0,\n start_cursor=None,\n end_cursor=None,\n client=None,\n eventual=False,\n retry=None,\n timeout=None,\n read_time=None,\n ):\n if client is None:\n client = self._client\n\n return CustomIterator(\n self.model_type,\n self,\n client,\n limit=limit,\n offset=offset,\n start_cursor=start_cursor,\n end_cursor=end_cursor,\n eventual=eventual,\n retry=retry,\n timeout=timeout,\n read_time=read_time\n )",
"def prefetch_all(self) -> PaginationIterator[T]:\n iterator = cast(PaginationIterator[T], iter(self))\n\n if not self._has_next_page():\n return iterator\n\n # tweak per_page setting to make fetching as fast as possible\n old_per_page = self._per_page\n self._per_page = PER_PAGE_LIMIT\n\n self._page = (self._yielded_items // PER_PAGE_LIMIT) + 1\n to_skip = (self._yielded_items % PER_PAGE_LIMIT) + len(self._queue)\n\n self._fetch_next_page(skip_first=to_skip)\n\n while self._has_next_page():\n self._fetch_next_page()\n\n self._per_page = old_per_page\n\n return iterator"
] | [
"0.69478124",
"0.6779926",
"0.67551893",
"0.6744978",
"0.66462934",
"0.66391194",
"0.6632975",
"0.64881206",
"0.645165",
"0.63969266",
"0.63845307",
"0.6366552",
"0.6357993",
"0.632409",
"0.632126",
"0.6318585",
"0.6283258",
"0.6261407",
"0.62570524",
"0.6219417",
"0.62056285",
"0.6202338",
"0.61366993",
"0.61155427",
"0.6107072",
"0.608923",
"0.6080783",
"0.60657406",
"0.6018344",
"0.60074925"
] | 0.76009315 | 0 |
Load a Bert Client | def load_bert_client() -> BertClient:
return BertClient() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_breadboard_client():\n\n import json\n import sys\n import os\n with open(os.path.join(os.path.dirname(__file__), \"breadboard_path_config.json\")) as my_file:\n breadboard_dict = json.load(my_file)\n breadboard_repo_path = breadboard_dict.get(\"breadboard_repo_path\")\n if(breadboard_repo_path is None):\n raise KeyError(\n \"The .json config does not contain variable breadboard_repo_path\")\n breadboard_API_config_path = breadboard_dict.get(\n \"breadboard_API_config_path\")\n if(breadboard_API_config_path is None):\n raise KeyError(\n \"The .json config does not contain variable breadboard_API_config_path\")\n sys.path.insert(0, breadboard_repo_path)\n try:\n from breadboard import BreadboardClient\n except ModuleNotFoundError:\n raise ValueError(\n \"Unable to import breadboard using specified value of breadboard_repo_path\")\n bc = BreadboardClient(breadboard_API_config_path)\n return bc",
"def client():\n\n client = Client()\n return client",
"def load_ckanclient(self):\n user = get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})\n api_url = urlparse.urljoin(config.get('ckan.site_url'), 'api')\n ckan = ckanclient.CkanClient(\n base_location=api_url,\n api_key=user.get('apikey'),\n is_verbose=True,\n )\n\n return ckan",
"def get_client():\n return Client(__address, authkey='strumamor')",
"def client_setup(self):\n self.client = Client()",
"def test_load_client_ca(self, context, ca_file):\n context.load_client_ca(ca_file)",
"def load_cogs(client):\n client.load_extension(\"cogs.message_listener\")",
"def load_bco(options):\n\n # Declare source of BioCompute Object\n print('\\nRemote BCO supplied: ', url_valid(options.bco), \\\n '\\t Local BCO supplied: ', os.path.exists(options.bco))\n\n if url_valid(options.bco):\n try:\n bco_dict = json.loads(requests.get(options.bco).content)\n print('Remote BioCompute loaded as ', bco_dict['provenance_domain']['name'])\n\n except ValueError: # includes simplejson.decoder.JSONDecodeError\n sys.exit('Loading remote JSON has failed \\U0001F61E\\nExiting')\n\n elif os.path.exists(options.bco):\n print(options.bco)\n try:\n with open(options.bco, 'r') as data:\n bco_dict = json.load(data)\n print('Local BioCompute loaded as ', bco_dict['provenance_domain']['name'])\n\n except ValueError: # includes simplejson.decoder.JSONDecodeError\n sys.exit(\"Importing local JSON has failed \\U0001F61E\\nExiting\")\n\n # If options.bco is not a valid FILE or URI program will exit\n else:\n print('BioCompute loading FAILED \\n')\n sys.exit(\"Please provide a valid URI or PATH\")\n\n return bco_dict",
"def client():",
"def get_client(client_mgr):\n manager = getattr(client_mgr, 'manager', client_mgr)\n net_client = getattr(manager, 'networks_client')\n try:\n _params = manager.default_params_with_timeout_values.copy()\n except Exception:\n _params = {}\n client = LoadBalancersClient(net_client.auth_provider,\n net_client.service,\n net_client.region,\n net_client.endpoint_type,\n **_params)\n return client",
"def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client",
"def init_client(self, client):\n self.client = client",
"def __init__(self, client):\n self.client = client\n self.config = {}\n self.load_config(False)\n self.default_prompts = read_default_prompts()\n\n self.sess = gpt2.start_tf_sess()\n try:\n gpt2.load_gpt2(self.sess, model_name=self.config['model_name'])\n except ValueError:\n self.sess = gpt2.reset_session(self.sess)\n gpt2.load_gpt2(self.sess, model_name=self.config['model_name'])",
"def establish_connection() -> storage.client.Client:\n storage_client = storage.Client.from_service_account_json(find('Agriculture.json', '/home'))\n return storage_client",
"def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client",
"def load_bert(bert_path: str) -> DistilBertModel:\n \n return transformers.DistilBertModel.from_pretrained(bert_path.split(\"/\")[-1],)",
"def test_vogeler_client_init(self):\n c = VogelerClient(callback_function=self.echo, role='client', dsn=self.good_amqp_dsn)\n self.assertType(c, 'vogeler.vogeler.VogelerClient')\n c.close()",
"def load_bert(filepath):\n print('Loading BERT tokenizer...')\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n print('Loading BERT model...')\n model = BertForSequenceClassification.from_pretrained(filepath)\n\n return model, tokenizer",
"def __init__(self, client):\n super().__init__(client)",
"def __init__(self, client):\n self.client = client",
"def test_load_taric(self):\n\n c = Client()\n response = c.get('/taric_books/taric/')\n\n self.assertEqual(response.status_code, 200)",
"def init_client():\n init_config()\n begin_sending_packets()",
"def test_get_client(self):\n pass",
"def __init__(self, client):\n\n self.client = client",
"def create_rbclient(self):\n return RBClient(url=self.TEST_SERVER_URL,\n transport_cls=URLMapTransport)",
"def load_portal_client():\n # return globus_sdk.ConfidentialAppAuthClient(\n # app.config['PORTAL_CLIENT_ID'], app.config['PORTAL_CLIENT_SECRET'])\n return globus_sdk.ConfidentialAppAuthClient(\n app.config['PORTAL_CLIENT_ID'], app.config['PORTAL_CLIENT_SECRET'])",
"def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n self.login()",
"def client(self):\n response = requests.get(self._url(self._CLIENT_PATH), headers=self._headers)\n return response.json()",
"async def load(self) -> None:\n pass",
"def fetch_boto3_client(service_name: str):\n region_name = load_aws_region_name()\n cache_key = f\"{region_name}-{service_name}\"\n\n if CLIENT_CACHE.get(cache_key):\n return CLIENT_CACHE[cache_key]\n\n config = Config(\n region_name=region_name,\n signature_version=\"v4\",\n retries={\"max_attempts\": 10, \"mode\": \"standard\"},\n )\n client = boto3.client(service_name, config=config) # type: ignore\n\n CLIENT_CACHE[cache_key] = client\n\n return client"
] | [
"0.64598465",
"0.5767674",
"0.56796074",
"0.5656902",
"0.56332827",
"0.5606863",
"0.55343646",
"0.5477369",
"0.5460385",
"0.5430609",
"0.5386382",
"0.5313879",
"0.53099513",
"0.529109",
"0.52668875",
"0.5258861",
"0.52381545",
"0.52267784",
"0.5216198",
"0.5202173",
"0.5200876",
"0.51874703",
"0.51646155",
"0.51620054",
"0.51581776",
"0.5150005",
"0.51379275",
"0.5129709",
"0.5083893",
"0.50823647"
] | 0.83506197 | 0 |
Find target number from list of ints | def find_num(target: int, numbers: List[int]) -> int:
for i, num in enumerate(numbers):
if num == target:
return i
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def linearSearch(values: list, target: int) -> int:\n for i in range(len(values)):\n if target == values[i]:\n return i\n \n return -1",
"def pick(self, target: int) -> int:\n\t\tans = None\n cnt = 0\n for i, x in enumerate(self.nums): \n if x == target: \n cnt += 1\n if randint(1, cnt) == cnt: ans = i # prob 1/cnt\n return ans",
"def map_target(x):\n if x == 11:\n return x\n elif x == 12:\n return x\n elif x == 13:\n return x\n elif x == 14:\n return x\n else:\n return -1",
"def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc",
"def linear_search(list, target):\n for i in range (0, len(list)):\n if list[i] == target:\n return i\n\n\n return None",
"def last_occurance(target: int, numbers: List[int]) -> int:\n\n pass",
"def twoSum(self, nums: List[int], target: int) -> List[int]:\n dic = {target-n : i for i, n in enumerate(nums)}\n return next(([i, dic[n]] for i, n in enumerate(nums) if n in dic and i != dic[n]), [0, 0])",
"def return_indices(nums, target):\n indices = []\n i = 0\n number_found = False\n while not number_found:\n my_target = nums[i]\n \n for j in range(i+1,len(nums)):\n my_target += nums[j]\n if my_target == target:\n number_found = True\n indices = [i, j]\n break\n my_target = nums[i]\n \n i+=1\n return indices",
"def resolve(self, nums: List[int]) -> int:\n visit = 0\n for val in nums:\n if visit & (1<<val):\n return val\n visit = visit | (1<<val)",
"def search_linear(xs, target):\n for (i, v) in enumerate(xs):\n if v == target: # Is referred to as a probe.\n return i\n return -1",
"def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]",
"def twoSum(self, nums: List[int], target: int) -> List[int]:\n diffRec = {}\n for i, v in enumerate(nums):\n if v in diffRec:\n return [diffRec[v], i]\n else:\n diffRec[target - v] = i\n return -1",
"def two_sum(self, nums: List[int], target: int) -> List[int]:\n found = {}\n\n for idx, value in enumerate(nums):\n rest = target - nums[idx]\n if rest in found:\n return [idx, found[rest]]\n else:\n found[value] = idx",
"def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n for i, n in enumerate(nums):\n d[n]=i\n \n for i, n in enumerate(nums):\n m = target - n\n if m in d and d[m] != i:\n return [i,d[m]]\n return []",
"def binary_search(numbers: List[int], target: int) -> int:\n low = 0\n up = len(numbers) - 1\n while low <= up:\n mid = (low + up) // 2\n if numbers[mid] == target:\n return mid\n elif numbers[mid] > target:\n up = mid - 1\n else:\n low = mid + 1\n return -1",
"def searchRange4(self, nums: List[int], target: int) -> List[int]:\n def bisearch_l() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] >= target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n def bisearch_r() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n return [bisearch_l(), bisearch_r()]",
"def find_dst_value2(target: int, targets: list):\n targets.sort()\n i, j = 0, len(targets)-1\n while i < j:\n left_value = targets[i]\n right_value = targets[j]\n if left_value + right_value == target:\n return left_value, right_value\n if left_value + right_value > target:\n j -= 1\n elif left_value + right_value < target:\n i += 1",
"def find_pair(numbers, target_sum):\n for num in numbers:\n partner_num = target_sum - num\n if partner_num in numbers:\n return num * partner_num",
"def twoSum(self, nums: List[int], target: int) -> List[int]:\n # Use a dict to record visited numbers\n d = {}\n for i, n in enumerate(nums):\n m = target - n\n if m in d:\n return [d[m], i]\n else:\n d[n] = i",
"def max_index_of_smaller_number(list, number):\n for i, element in enumerate(list):\n if element >= number:\n return i - 1",
"def search(self, nums, target):\n\t\tl, r = 0, len(nums) - 1\n\t\twhile l <= r:\n\t\t\tmid = l + (r - l) // 2\n\t\t\tif nums[mid] == target:\n\t\t\t\treturn mid\n\t\t\telif nums[mid] < target:\n\t\t\t\tl = mid + 1\n\t\t\telse:\n\t\t\t\tr = mid - 1\n\n\t\treturn -1",
"def closest_match(num,num_list):\n\tdiffs = np.abs(np.subtract(num,num_list))\n\treturn num_list[np.argmin(diffs)]",
"def bin_search(target, low, high, int_list): # must use recursion\n if int_list is None:\n raise ValueError\n\n if not int_list:\n return None\n\n if len(int_list) == 1:\n if int_list[0] == target:\n return 0\n else:\n return None\n \"\"\" Commenting it out so I can try a different method to see if I can improve my score\n \n searches for target in int_list[low..high] and returns index if found\n If target is not found returns None. If list is None, raises ValueError \n \n if len(int_list[low:high]) >= 1:\n mid = 1 + int(high - 1 / 2) # In order to get the middle value\n if int_list[mid] == target: # Checks if the middle value matches the target\n return mid # returns the index, not the value\n elif int_list[mid] < target: # Checks if the mid value is less than the target\n # so that we can then decide whether to ignore the first half or the second\n # half\n return bin_search(target, mid + 1, high, int_list)\n else: # If the mid value is greater than the target, then we look at the bottom\n # half of the list and move our values accordingly\n return bin_search(target, low, mid - 1, int_list)\n else:\n return None # If target isn't found in the list, this makes sure to\n # return None, since the only way to get to the else statement is\n # to not have triggered any of the other bits of code that \"\"\"\n\n if high >= low:\n mid = low + int((high - low) / 2) # This gets the middle index as an int, not a float\n\n # If the target is in the middle, this is to catch it right away.\n if int_list[mid] == target:\n return mid\n\n # If element is smaller than mid, then it must be\n # in the lower half of the list\n elif int_list[mid] > target:\n return bin_search(target, low, mid - 1, int_list)\n\n # If not, then the target can only be in the upper half,\n # so we adjust our checkpoints for the next call to the function.\n else:\n return bin_search(target, mid + 1, high, int_list)\n\n else:\n # If the target is not present in the array\n return None",
"def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1",
"def solve_p2(target: int) -> int:\n mh = 1 + int(target / 10)\n npresents = [0] * mh\n for elf in range(1, mh):\n for h in range(elf, min(mh, elf*50), elf):\n npresents[h] += 11*elf\n if DEBUG:\n print(target, [(i, n) for i, n in enumerate(npresents)])\n for h, np in enumerate(npresents):\n if np >= target:\n return h\n return -1",
"def _find(self, candidates, target, lb, rb):\n # we'v made sure there's no duplicate in candidates\n li, ri = lb, rb\n while li < ri:\n mi = (li + ri) // 2\n if candidates[mi] < target:\n li = mi + 1\n elif candidates[mi] > target:\n ri = mi - 1\n else:\n return mi\n\n if li == ri:\n if candidates[li] <= target:\n return li\n else:\n return li - 1\n\n if ri < lb:\n return ri\n\n if li == rb:\n return rb - 1\n\n # now it's like c[ri] < target < c[li]\n # actually these 3 cases are all ri...\n return ri",
"def twoSum(self, nums: List[int], target: int) -> List[int]:\n \n # Given nums=[2,7,11,15],target=9\n \n d={}\n for i in range(len(nums)):\n x = target-nums[i]\n if x in d:\n return [d[x],i]\n\n d[nums[i]]=i\n\n return []",
"def _get_index_closest_val(list, val):\n\n return min(range(len(list)), key=lambda i: abs(list[i]-val))",
"def twoSumSorted1(nums: List[int], target: int) -> List[int]:\n pass",
"def bi_search(l: int, r: int) -> (int, bool):\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n elif nums[m] == target:\n return m, True\n else:\n l = m + 1\n \n return -1, False"
] | [
"0.6897303",
"0.685705",
"0.6728179",
"0.6685797",
"0.668036",
"0.6587649",
"0.658512",
"0.6526802",
"0.65027165",
"0.6445933",
"0.6339752",
"0.62850904",
"0.6272045",
"0.6240765",
"0.62220865",
"0.6179218",
"0.6173291",
"0.6168471",
"0.61567307",
"0.61140484",
"0.6111365",
"0.6110277",
"0.6060345",
"0.60523915",
"0.60400885",
"0.6014533",
"0.600691",
"0.599738",
"0.5987926",
"0.5935713"
] | 0.8206071 | 0 |
Find the last occurrence of a number from a list of ints | def last_occurance(target: int, numbers: List[int]) -> int:
    # Largest index whose value equals target; -1 if the target is absent.
    return max((i for i, num in enumerate(numbers) if num == target), default=-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def last_index_of(my_list, my_value):\n return len(my_list) - my_list[::-1].index(my_value)",
"def findlastindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)",
"def latest(scores: list) -> int:\n return scores[-1]",
"def max_index_of_smaller_number(list, number):\n for i, element in enumerate(list):\n if element >= number:\n return i - 1",
"def find_max(list):\n return find_value_at(list, 0)",
"def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval",
"def find_greatest_number(incoming_list):\n #return_value = max(incoming_list)\n #return return_value\n\n MAGIC_LOW_NUMBER = None\n retval = MAGIC_LOW_NUMBER\n\n # 1,2,3,4,5,1\n # MAGIC_LOW_NUMBER, 1 ->STORE 1\n #1 , 2 ->STORE 2\n #2, , 3 ->STORE 3\n #3, , 4 ->STORE 4 \n #4, , 5 ->STORE 5\n #5, , 1 ->??? nothing \n for value in incoming_list:\n if not retval:\n retval = value\n if value > retval:\n retval = value",
"def find_peak(list_of_integers):\n if list_of_integers == []:\n return None\n\n list_of_integers.sort()\n return list_of_integers[-1]",
"def _get_and_increment_last(l):\n if len(l) > 0:\n return l[-1] + 1\n else:\n return 0",
"def find_peak(list_of_integer):\n if not list_of_integer:\n return (None)\n return R_fp(list_of_integer, 0, len(list_of_integer) - 1,\n len(list_of_integer))",
"def question_24(list_num: int) -> int:\n return max(list_num, key=list_num.count)",
"def find_peak(list_of_integers):\n if list_of_integers:\n list_of_integers.sort()\n return list_of_integers[-1]\n else:\n return None",
"def find_greatest_number(incoming_list: list):\n return max(incoming_list)",
"def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number",
"def find_peak(list_of_integers):\n max_i = None\n for ele in list_of_integers:\n if max_i is None or max_i < ele:\n max_i = ele\n return max_i",
"def last_index(list_, value):\n\n found = None\n for index, val in enumerate(list_):\n if val == value:\n found = index\n if found is None:\n raise ValueError(\"{} is not in list {}\".format(value, list_))\n return found",
"def greatest_difference(num_list):",
"def search_for_nums(data):\n index = None\n for i in range(len(data)-1,0, -1): #count backwards through the loop\n if data[i] != None: #found most recent input\n print(\"index found...data: %s\" % (data[i]))\n return i\n #END IF\n #END FOR\n return index",
"def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)",
"def recurrent_max_value_in_list(lst, max_value):\n if len(lst) == 0:\n return max_value\n elif lst[0] > max_value:\n max_value = lst[0]\n return recurrent_max_value_in_list(lst[1:], max_value)",
"def find_num(target: int, numbers: List[int]) -> int:\n \n for i, num in enumerate(numbers):\n if num == target:\n return i\n return -1",
"def find_peak(list_of_integers):\n\n if list_of_integers is None or len(list_of_integers) == 0:\n return None\n list_of_integers.sort()\n peak = list_of_integers[-1]\n \"\"\"\n for number in list_of_integers:\n if number > peak:\n peak = number\"\"\"\n return peak",
"def findMaxConsecutiveOnes(nums: List[int]) -> int:\n count = maxCount = 0\n for num in nums:\n if num == 1:\n count += 1\n else:\n maxCount = max([count, maxCount])\n count = 0\n return max([count, maxCount])",
"def max_known_number(self):\n return len(self.number_list)-1",
"def find_peak(list_of_integers):\n\n x = list_of_integers\n\n high = None\n\n for index, number in enumerate(x):\n if index == 0 or x[index - 1] < number:\n left = True\n else:\n left = False\n if index == len(x) - 1 or x[index + 1] < number:\n right = True\n else:\n right = False\n if right and left:\n return number\n if high is None or number > high:\n high = number\n\n return high",
"def find_peak(list_of_integers):\n if not list_of_integers:\n return None\n # I'll check first and last element separately\n if len(list_of_integers) == 1:\n return list_of_integers[0]\n first = list_of_integers[0]\n last = list_of_integers[-1]\n if first > list_of_integers[1]:\n return abs(first)\n if last > list_of_integers[-2]:\n return abs(last)\n\n # Now the rest of the elements\n npos = 2\n for n in list_of_integers[1:-1]:\n if max(n, first, list_of_integers[npos]) == n:\n return abs(n)\n npos = npos + 1\n return None",
"def find_largest_number_in_list(self, list_with_numbers):\n return 0",
"def max_in_list(list):\n x=list[0] #set x be the first number in the list\n for i in range(0,len(list)):#go over the number in the list\n if x<=list[i]: #if the second one is bigger than the first\n x=list[i] #assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x",
"def nextMax(value,lista):\n for i in lista:\n if i>value:\n return i\n raise NameError('No value')",
"def binary_search_find_last(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n\n if value < mid_value:\n end = mid - 1\n elif value > mid_value:\n start = mid + 1\n else:\n if mid == len(arr) - 1 or arr[mid + 1] != value:\n return mid\n else:\n start = mid + 1\n\n return -1"
] | [
"0.67861885",
"0.6606951",
"0.65986973",
"0.6564986",
"0.64735246",
"0.64663184",
"0.64490235",
"0.6440646",
"0.6426665",
"0.6395515",
"0.63938034",
"0.6376038",
"0.63716",
"0.6364949",
"0.6308313",
"0.624321",
"0.6206382",
"0.61868846",
"0.6134968",
"0.6130991",
"0.6116956",
"0.6114415",
"0.6110965",
"0.60968673",
"0.6090607",
"0.60661006",
"0.60479337",
"0.60242134",
"0.6004849",
"0.60026795"
] | 0.7794958 | 0 |
Return information about FB User ID | def get_user_info(self, user_id):
uri = '{}/?fields={}&access_token={}&appsecret_proof={}'.format(
user_id, FB_USER_FIELDS, self.access_token, self.app_secret_proof)
try:
response = requests.get(self.url + uri)
except Exception:
LOGGER.exception('Error connecting to Facebook Server')
raise HTTPMethodError('Error connecting to Facebook Server')
else:
status = response.status_code
data = response.json()
if response.ok:
data.update({
'facebook': self._get_facebook_link(data)
})
return data
else:
LOGGER.warning('Error: %d - %s', status, data)
raise HTTPMethodError(data, status) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fb_id(self):\n social_auth = self.social_auth.latest('id')\n return social_auth.uid",
"def user_info(user_id):\n return User.query.filter_by(id=user_id).first()",
"def user_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time()*1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/users/me', param, self.timeout)",
"def get_user_info_by_id(self, user_id: int) -> dict:",
"def get_user_id(self, details, response):\n return response['uid']",
"def get_user_info(self, access_token, openid):\n url = get_config(\"login.wechat.user_info_url\") % (access_token, openid)\n return self._access_wxapi_or_raise(url)",
"def get_user_id(self, details, response):\n return details[\"user_id\"]",
"def get_user_id(self, details, response):\n return details['username']",
"def getUserInfo(self):\r\n userJson = self.httpGet(ReaderUrl.USER_INFO_URL)\r\n result = json.loads(userJson, strict=False)\r\n self.userId = result['userId']\r\n return result",
"def get_user_id():\n csc_name = get_user_csc_name()\n if csc_name:\n return csc_name\n haka_id = get_user_haka_identifier()\n if haka_id:\n return haka_id\n return None",
"def get_facebook_user_info(access_token):\n required_data_list = []\n for per in settings.FACEBOOK_EXTENDED_PERMISSIONS:\n required_data_list.append(per.replace(\"user_\",\"\"))\n \n required_data_list.append(\"picture.type(large)\")\n required_data = (\", \").join([data for data in required_data_list])\n \n graph_url = \"https://graph.facebook.com/me?access_token=%s&fields=%s\" % (access_token,required_data)\n public_info_url = \"https://graph.facebook.com/me?access_token=%s\" % access_token\n \n profile = json.load(urllib.urlopen(graph_url))\n profile_info = json.load(urllib.urlopen(public_info_url))\n \n profile_response_dict = {}\n profile_response_dict.update(profile)\n profile_response_dict.update(profile_info)\n profile_response_json = json.dumps(profile_response_dict)\n\n return (profile_response_json, profile_response_dict)",
"def get_user_info(self, token, openid, client_id):\n\n url = get_config(\"login.qq.user_info_url\") % (token, client_id, openid)\n user_info_resp = get_remote(url)\n user_info = convert(json.loads(user_info_resp))\n\n if user_info.get(\"ret\") != 0:\n raise Exception(user_info)\n\n return user_info",
"def getUserInfo(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n displayName = j['displayName']\n name = j['name']\n uid = j['id']\n isBanned = j['isBanned']\n joinDate = j['created']\n description = j['description']\n return displayName,name,uid,isBanned,joinDate,description",
"def get_user_info(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user",
"def get_user_info(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user",
"def get_numeric_id(self, user_name):\n url = 'https://www.facebook.com/' + user_name\n self.get(url)\n source = self.page_source\n try:\n match = re.search(r\"profile_id=(\\d*)\", source)\n numeric_id = match.group(1)\n return numeric_id\n except (AttributeError, TypeError, KeyError, ValueError):\n log.error(\"Numeric ID not found, returning 0\")\n return 0",
"def getUserID(self):\n\t\treturn self.UserID",
"def getUserInfo(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user",
"def getUserInfo(userId):\n if(not searchForUser(userId)):\n raise RuntimeError('The user id not exist, the user id=> ' + userId)\n else:\n return client.service.getUser(userid=userId)['return']['user']",
"def get_user_details():\n rv = query_db('select * from user')\n return rv[0] if rv else None",
"def get_user_details(client):\n\n try:\n return client.user(user_id='me').get(fields=['login'])\n # print(f\"The email of the user is: {me['login']}\")\n\n except Exception as e:\n print(f\"Error has occurred: {e}\")\n return None",
"def getUserInfo(user_id):\n\n user = session.query(User).filter_by(id=user_id).one()\n return user",
"def getUserInfo(user_id):\n\n user = session.query(User).filter_by(id=user_id).one()\n return user",
"def user_info(self):\n return self.auth.get_user_by_session()",
"def user_id(self) -> str:\n return self.app_config()[\"metadata.user.id\"]",
"def get_user(id):\n pass",
"def getUserInfo(user_id):\r\n user = session.query(User_info).filter_by(id=user_id).one()\r\n return user",
"def get_user_id():\n user_id = session.get(\"user_id\")\n return user_id if user_id else None",
"def user_id(self):\n return json_loads(self.user_json).get('id')",
"def get_facebook_user(cls, client, facebook_id):\n\n try:\n fbu = cls.objects.get(facebook_id__exact=facebook_id, deleted=False)\n except cls.DoesNotExist:\n try:\n user_data = client.obj_id(\n facebook_id,\n fields='id,name,first_name,middle_name,last_name,link,username,gender,locale,picture',\n )\n except FacebookGenericError:\n user_data = client.obj_id(\n facebook_id,\n fields='id,name,link,picture',\n )\n\n person = Person(\n name=user_data[u'name'] if u'name' in user_data else None,\n )\n person.save()\n\n fbu = cls(\n person=person,\n facebook_id=user_data[u'id'],\n name=user_data[u'name'] if u'name' in user_data else None,\n link=user_data[u'link'] if u'link' in user_data else None,\n page=True,\n )\n\n if u'picture' in user_data:\n fbu.picture = user_data[u'picture'][u'data'][u'url']\n else:\n person = Person(\n name=user_data[u'name'] if u'name' in user_data else None,\n gender=user_data[u'gender'] if u'gender' in user_data else None,\n )\n person.save()\n\n fbu = cls(\n person=person,\n facebook_id=user_data[u'id'],\n name=user_data[u'name'] if u'name' in user_data else None,\n first_name=user_data[u'first_name'] if u'first_name' in user_data else None,\n middle_name=user_data[u'middle_name'] if u'middle_name' in user_data else None,\n last_name=user_data[u'last_name'] if u'last_name' in user_data else None,\n link=user_data[u'link'] if u'link' in user_data else None,\n username=user_data[u'username'] if u'username' in user_data else None,\n gender=user_data[u'gender'] if u'gender' in user_data else None,\n locale=user_data[u'locale'] if u'locale' in user_data else None,\n picture=user_data[u'picture'][u'data'][u'url'] if u'picture' in user_data else None,\n page=False,\n )\n\n fbu.save()\n return fbu"
] | [
"0.70822185",
"0.6976761",
"0.69598633",
"0.6939922",
"0.6908411",
"0.68796605",
"0.6850435",
"0.6764032",
"0.67587334",
"0.66086555",
"0.66081285",
"0.6605946",
"0.65709525",
"0.65701866",
"0.65701866",
"0.6562627",
"0.6548991",
"0.65472597",
"0.6517094",
"0.64992124",
"0.6499186",
"0.6496496",
"0.6496496",
"0.6492769",
"0.64917845",
"0.64593154",
"0.64522827",
"0.6447103",
"0.6426997",
"0.6422439"
] | 0.70641524 | 1 |
Extracts all nouns from the sentence_string. | def get_nouns(self):
word_punct_token = WordPunctTokenizer().tokenize(self.sentence_string)
clean_tokens = []
for token in word_punct_token:
token = token.lower()
# remove any value that are not alphabetical
new_token = re.sub(r"[^a-zA-Z]+", "", token)
# remove empty value and single character value
if new_token != "" and len(new_token) >= 2:
vowels = len([v for v in new_token if v in "aeiou"])
if vowels != 0: # remove line that only contains consonants
clean_tokens.append(new_token)
noun_types = ["NN", "NNS", "NNP", "NNPS", "N"]
is_noun = lambda pos: pos in noun_types
nouns = [word for (word, pos) in nltk.pos_tag(clean_tokens) if is_noun(pos)]
if nouns:
return nouns
else:
raise InvalidSentenceError(self.sentence_string) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())",
"def __get_relevant_words(sentence):\n nouns = None\n try:\n if sentence:\n tokens = nltk.word_tokenize(sentence)\n pos = nltk.pos_tag(tokens)\n nouns = [x[0] for x in pos if x[1].startswith('N') or x[1].startswith('F')]\n except Exception as e:\n nouns = None\n return ' '.join(nouns) if nouns else None",
"def parse(string):\n doc = nlp(string)\n return [str(n) for n in doc.noun_chunks]",
"def get_nouns(root):\n nouns = []\n for child in root.findall(\"./xdrs/taggedtokens/tagtoken/tags\"):\n noun = False\n for grandchildren in child.findall(\"./tag[@type='pos']\"):\n if grandchildren.text == 'NN' or grandchildren.text == 'NNS':\n noun = True\n if noun == True:\n for grandchildren in child.findall(\"./tag[@type='lemma']\"):\n nouns.append(grandchildren.text)\n return nouns",
"def _get_nouns(self, review):\n review_features = []\n for sent in review:\n doc = self.nlp(sent)\n # noun_phrase = [np.text for np in doc.noun_chunks]\n nouns = [unicode(lemma(str(word).lower())) for word in doc if word.pos == NOUN]\n review_features.append(nouns)\n return review_features",
"def process_sentence(sentence: str) -> list:\r\n return [process_word(word) for word in sentence.split()][:-1]",
"def extract_from_sentence(self, sent):\n # code_elements = self.extract_code_element(sent)\n\n domain_terms = set()\n doc = self.nlp(sent)\n for chunk in doc.noun_chunks:\n chunk = self.clean_chunk(chunk)\n\n if len(chunk) == 0:\n continue\n if len(chunk) == 1 and self.is_word_common(chunk.text):\n continue\n # if chunk.text in code_elements:\n # continue\n domain_terms.add(self.__chunk_lemmatize(chunk))\n domain_terms.update(self.extract_abbreviation_from_chunk(chunk))\n domain_terms.update(self.extract_NNPs_from_chunk(chunk))\n domain_terms.update(self.extract_np_of_np(doc))\n # print('sent: ' + sent)\n # print('result: ', result)\n domain_terms = self.__post_process(domain_terms)\n return domain_terms",
"def get_noun_phrases(blob):\n return blob.noun_phrases",
"def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result",
"def parse(text):\n parts = text.split(' ')\n noun = Noun(parts[0], int(parts[1]))\n\n parts = parts[2:]\n while len(parts) > 0:\n noun.add_adjectives(Word(parts[0], int(parts[1])))\n parts = parts[2:]\n return noun",
"def find_features(sentence: str) -> Set[str]:\n sent_dict = set()\n sentence = _NLP(sentence)\n for token in sentence:\n # check if the word is an opinion word, then assign sentiment\n if token.text in _OPINION_WORDS:\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n if (token.dep_ == \"advmod\"):\n continue\n elif (token.dep_ == \"amod\"):\n sent_dict.add(token.head.text.lower())\n # for opinion words that are adjectives, adverbs, verbs...\n else:\n for child in token.children:\n # if verb, check if there's a direct object\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n sent_dict.add(child.text.lower())\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text == \"and\":\n conj=1\n if (conj == 1) and (subchild.text != \"and\"):\n subchildren.append(subchild.text)\n conj = 0\n for subchild in subchildren:\n sent_dict.add(subchild)\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text + \" \" + noun\n sent_dict.add(noun)\n return set(word.lower() for word in sent_dict)",
"def _get_words_and_punctiation(sentence: str) ->List[str]:\n return sum((_separate_word_and_punctiation(word) for word in sentence.strip().split()), [])",
"def _get_words(self, sentence):\n _uniq_words = set()\n for word in sentence.split():\n word = normed_word(re.sub(\"\\W\", \"\", word)).lower()\n _uniq_words.add(word)\n return _uniq_words",
"def anlSentence(self, sentence):\n cleanStr = re.sub(self._wrdSeps, \" \",\n re.sub(self._stcSeps, \"\", sentence))\n for word in cleanStr.split():\n self._wordCounter[word] += 1\n self._totalWords += 1\n else:\n self._totalSentences += 1",
"def is_noun(tag_string):\n result = True if tag_string in POS.POS_tags.noun_tags else False\n return result",
"def get_nouns(txt):\n query = 'https://api.textgain.com/1/tag?q='\n query += urllib.parse.quote(txt, safe='')\n query += '&lang=fr&key=***'\n resp = requests.get(query)\n\n body = json.loads(resp.text)['text'][0]\n\n nouns = {}\n for iterable_elem in body:\n for elem in iterable_elem:\n if elem['tag'] == 'NOUN':\n word = elem['word']\n if word in nouns.keys():\n nouns[word] += 1\n else:\n nouns[word] = 1\n print(nouns)\n return nouns",
"def noun_lemma(word):\n if word.endswith(\"s\"):\n if word.endswith(\"ss\"):\n return word.lower()\n elif word.endswith(\"ies\"):\n return word[:-3].lower() + (\"y\")\n else:\n return word[:-1].lower()\n if word.endswith(\"men\"):\n return word[:-2].lower() + (\"an\")\n else:\n return word.lower()",
"def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words",
"def process_content(text_body, stopwords):\n\n tokenizer = PunktSentenceTokenizer()\n tokenized = tokenizer.tokenize(text_body)\n all_nouns = []\n for sentence in tokenized:\n words = sentence.split()\n # get the information of the word (noune, verb,etc..)\n tagged = nltk.pos_tag(words)\n for w in tagged:\n if ((w[1] == \"NN\")or(w[1] == \"JJ\")) and (w[0] not in stopwords):\n all_nouns.append(w[0])\n return all_nouns",
"def get_sentiments(self, d):\n\t\twords = tuple(d.split())\n\t\ttagged = nltk.pos_tag(words)\n\n\t\tnouns = 0\n\t\tadjectives = 0\n\t\tverbs = 0\n\t\tadverbs = 0\n\n\t\tfor word, pos_tag in tagged:\n\n\t\t\tif pos_tag.startswith(\"NN\"):\n\t\t\t\tnouns += 1\n\n\t\t\tif pos_tag.startswith(\"JJ\"):\n\t\t\t\tadjectives += 1\n\n\t\t\tif pos_tag.startswith(\"VB\"):\n\t\t\t\tverbs += 1\n\n\t\t\tif pos_tag.startswith(\"RB\"):\n\t\t\t\tadverbs += 1\n\t\t# adding one to consider empty document\n\t\tl = len(words) + 1\n\t\treturn [nouns / l, adjectives / l, verbs / l, adverbs / l]",
"def test_returns_nouns_for_string(self):\r\n test_value = 'google drives autonomous cars'\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))",
"def sentence_to_words(sentence: str) -> List[str]:\n return list(map(clean_word, sentence.split(\" \")))",
"def find_noun(sent):\n noun = None\n\n if not noun:\n for w, p in sent.pos_tags:\n if p == 'NN': # This is a noun\n noun = w\n break\n if noun:\n #logger.info(\"Found noun: %s\", noun)\n pprint(\"FOUND NOUN\")\n pprint(noun)\n\n return noun",
"def part_of_speech(text):\n temp = nltk.pos_tag(text)\n return [word for word, tag in temp if \n (tag == \"NN\") or \n (tag == \"NNS\") or\n (tag == \"NNP\") or \n (tag == \"NNPS\")]",
"def getWords(speech):\r\n return speech.split()",
"def extract_nouns(tagged_abstracts_list, def_tags_per_abs = 0.3):\n\n noun_counter = []\n per_abstract_counts = []\n per_abstract_counts_list = []\n normalized_all_counts = {}\n per_abstract_tag_counts = []\n\n for tags in tagged_abstracts_list:\n\n for tag in tags:\n\n if tag[1] == \"NN\" or tag[1] == \"NNS\" or tag[1] == \"NNP\" or tag[1] == \"NNPS\":\n\n per_abstract_tag_counts.append(str(tag[0].encode('ascii', 'ignore')))\n\n noun_counter.append(str(tag[0].encode('ascii', 'ignore')))\n\n per_abstract_dict = dict(Counter(per_abstract_tag_counts))\n per_abstract_counts_list.append(per_abstract_dict)\n\n all_counts = dict(Counter(noun_counter))\n\n num_abstracts = float(len(tagged_abstracts_list))\n\n for key in all_counts.keys():\n\n if key in all_counts:\n\n total_occurrences = float(all_counts[key])\n else:\n\n total_occurrences = 0\n\n for abstract_ in per_abstract_counts_list:\n #print abstract_\n\n if key in abstract_ and key in all_counts:\n\n single_abstract_count = float(abstract_[key])\n\n if (single_abstract_count/total_occurrences) < def_tags_per_abs:\n normalized_all_counts[key] = float(all_counts[key])/num_abstracts\n\n return (normalized_all_counts)",
"def preprocess_sentence(sentence):\n def _wordnet_pos(tag):\n if tag.startswith('J'):\n return wordnet.ADJ\n elif tag.startswith('V'):\n return wordnet.VERB\n elif tag.startswith('R'):\n return wordnet.ADV\n else: \n # Default for WordNet is NOUN\n return wordnet.NOUN\n\n stop = stopwords.words(\"english\") + list(string.punctuation)\n\n sentence = sentence.lower()\n words = [word for word in word_tokenize(sentence) if word not in stop]\n tagged_words = pos_tag(words)\n return ' '.join([ \n lemmatizer.lemmatize(word, _wordnet_pos(tag)) for word,tag in tagged_words \n ])",
"def get_nouns(lemmas_tags):\r\n nouns = []\r\n for lemma in lemmas_tags:\r\n \"\"\"si la etiqueta es None porque no tiene lemma o es un sustantivo\"\"\"\r\n if lemma[1] == None or lemma[1][0] == 'n':\r\n \"\"\"se agrega solamente el lemma\"\"\"\r\n nouns.append(lemma[0])\r\n return nouns",
"def sentencing(any_text, nlp):\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n doc = nlp(any_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n return sentences",
"def parse_sentence(self, text):\n l = []\n tokens = word_tokenize(text)\n print(tokens)\n skip = 0\n i = -1 # index of token in tokens list\n for token in tokens:\n i += 1\n if skip:\n skip -= 1\n # CORONA TERMS:\n elif token.lower() in corona_words:\n l.append('covid')\n elif is_flag_emoji(token):\n try:\n l.append(flag.ISO3166[flag.dflagize(token)[1:3]])\n except:\n continue\n # HASHTAGS:\n elif token == '#' and i+1 < len(tokens):\n parse_hashtage(tokens[i+1], l, tokens)\n skip += 1\n # TAGS:\n elif token == '@' and i+1 < len(tokens):\n parst_tag(tokens[i+1], l)\n skip = True\n # Size AS A WORD:\n elif token.lower() in sizes.keys():\n l.append(parse_number('1', token))\n elif check_if_term_is_fraction(token):\n if i < len(tokens)-1 and tokens[i+1].lower() in percent:\n l.append(token + '%')\n skip += 1\n else:\n l.append(token)\n # NUMBERS:\n elif isNumber(token):\n token = clean_number(token)\n if (i < len(tokens) - 2) and (tokens[i+1].lower() in sizes.keys()) and (tokens[i+2].lower() in percent):\n l.append(parse_number(token, tokens[i+1]) + '%')\n skip += 2\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in percent:\n l.append(parse_number(token) + '%')\n skip += 1\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in sizes.keys():\n l.append(parse_number(token, tokens[i+1]))\n skip += 1\n elif (i < len(tokens) - 1) and check_if_term_is_fraction(tokens[i+1]):\n l.append(token +' '+ tokens[i+1])\n skip += 1\n else:\n l.append(parse_number(token))\n elif isNumber(token[0:len(token) - 1]) and token[len(token)-1].lower() in sizes:\n tokens.append(token[0:len(token) - 1])\n tokens.append(token[len(token)-1])\n # OTHER TOKENS:\n else:\n cleaning(token, tokens, l)\n\n text_tokens_without_stopwords = [w for w in l if w.lower() not in stop_words]\n print(text_tokens_without_stopwords)\n return text_tokens_without_stopwords"
] | [
"0.704041",
"0.6643563",
"0.65388167",
"0.63166046",
"0.62674236",
"0.6093397",
"0.59018886",
"0.5853606",
"0.5843458",
"0.5821036",
"0.581984",
"0.56462425",
"0.55715865",
"0.553758",
"0.54761577",
"0.54706633",
"0.54552656",
"0.54124594",
"0.5382601",
"0.53791887",
"0.53776276",
"0.5376508",
"0.53333807",
"0.53315735",
"0.5329216",
"0.5328143",
"0.5326994",
"0.53242904",
"0.53137445",
"0.5311654"
] | 0.7327907 | 0 |
Return the number of columns of this Kakuro instance. | def ncolumns(self):
return self.__ncols | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def column_count(self):\n return self.column_length",
"def GetNumColumns(self):\n return len(self.columns)",
"def num_cols(self):\n return len(self.rows[0])",
"def ncolumns(self):\n return len(self.__column_list)",
"def n_cols(self):\n\n return len(self.plaincolumns)",
"def num_cols(self):\n return len(self.column_names())",
"def columns_count(self):\n if self.value.count != 0:\n return len(self.value[0])\n else:\n return 0",
"def num_cols(self):\n return (len(self.rows[0]))",
"def number_of_columns(self):\n return len(self._columns)",
"def get_num_cols(self):\n return self._num_cols",
"def GetNumberCols(self):\n return len(self.__colsKey)",
"def getNbColumns(self):\n return self.data.shape[0]",
"def getNoOfCols(self):\n return _patchExtractor.patchExtractor_getNoOfCols(self)",
"def columnCount( self ):\n if not self.o_data is None:\n if self.isItemMultiDimensional():\n return len(self.o_data)\n else:\n return 1\n else:\n return 1",
"def columns(self) -> int:\n return self.__squares[0].__len__()",
"def col_count(self):\n return self.__col_count",
"def getColumnCount(self) -> int:\n ...",
"def size(self) -> int:\n num_columns = len(self._internal.data_spark_columns)\n if num_columns == 0:\n return 0\n else:\n return len(self) * num_columns # type: ignore[arg-type]",
"def no_of_columns(self): \n return len(self.columns) + (1 if self.serialize else 0)",
"def cols(self) -> int:\n if self.is_empty():\n raise ValueError(\"Empty DataView contains no columns\")\n\n return len(self[0])",
"def getNumCols(self):\n return self.__cols",
"def GetColumnCount(self):\r\n\r\n return len(self._columns)",
"def n_cols(self):\n ch = self.children\n return 1 if not ch else sum([c.n_cols for c in ch])",
"def columnCount(self, _parent=None):\n return len(self._attr_cols)",
"def __len__(self):\n return self.num_rows * self.num_cols",
"def columnCount(self, index):\n return 4",
"def size(self):\n return self.__row_count * self.__col_count",
"def columnCount(self):\n return abs(self.minCol) + abs(self.maxCol)",
"def n_rows(self) -> int:\n\n return len(self.plaincolumns[0].values)",
"def columnCount(self,\n parent=QtCore.QModelIndex()) -> int:\n return len(self.Column);"
] | [
"0.8542264",
"0.8457889",
"0.8427655",
"0.84257317",
"0.84095126",
"0.84091353",
"0.8408543",
"0.83684427",
"0.83465314",
"0.8269028",
"0.8212129",
"0.81745464",
"0.80785227",
"0.80070645",
"0.7957829",
"0.7950505",
"0.7938426",
"0.79372364",
"0.79028684",
"0.79005444",
"0.7897698",
"0.7882654",
"0.78786975",
"0.7873058",
"0.7833377",
"0.779904",
"0.77858037",
"0.7771119",
"0.7740136",
"0.7706773"
] | 0.8535313 | 1 |
Return a string representing the kakuro instance with the given solution. | def str_with_solution(self, solution):
bak = self.__data.copy()
for k, v in solution.items():
self.__data[k] = v
res = str(self)
self.__data = bak
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n return f'{self.text}: {self.chs}, correct answer: {self.solution}'",
"def __str__(self):\n output = \"Solution for \" + self.vrpdata.InstanceName + \":\\n\"\n output += \"Total distance: \" + str(round(self.objective, 2)) + \"\\n\"\n output += \"Solution valid: \" + str(self.solutionValid) + \"\\n\\n\"\n count = 1 # count routes\n for r in self.routes:\n output += \"Route #\" + str(count) + \"\\n\" + str(r) + \"\\n\" + str(round(r.distance, 2)) + \"\\n\" + str(r.quantity) + \"\\n\"\n count += 1\n return output",
"def __str__(self):\n return self.name + \"-\" + SatSolver.getName(self.satSolver)",
"def ukko_str (self):\n return 'k=%d p=%d k..p=\"%s\"' % (self.k, self.p, self)",
"def __repr__(self) -> str:\n context = \" \".join(\"{}={}\".format(k, v) for k, v in self._solution.items())\n return \"<Twilio.Supersim.V1.FleetInstance {}>\".format(context)",
"def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value",
"def __str__(self):\n return f\"\"\"\n SilentSubstitutionProblem:\n Calibration: {self.calibration.shape},\n Calibration Wavelengths: {self.calibration_wavelengths},\n Primary Resolutions: {self.primary_resolutions},\n Primary Colors: {self.primary_colors},\n Observer: {self.observer},\n Name: {self.name}\n \"\"\"",
"def __str__(self):\n return \"<aospy.Calc instance: \" + ', '.join(\n (self.name, self.proj.name, self.model.name, self.run.name)\n ) + \">\"",
"def __str__(self):\n # define the prefix\n prefix = \"py_SEX2GOL: \"\n\n # compose the feedback\n big_str = \"{0:s} Setup:\\n\".format(prefix)\n big_str += \"{0:s} Input g/prism image: {0:s} \\n\".format(prefix, self.grisim)\n big_str += \"{0:s} Configuration file name: {0:s} \\n\".format(prefix, self.config)\n big_str += \"{0:s} Direct image: {0:s} \\n\".format(prefix, self.dirname)\n big_str += \"{0:s} G/Prism extension: {0:s} \\n\".format(prefix, self.grism_extinfo['axe_ext'])\n big_str += \"{0:s} Direct image extension: {0:s} \\n\".format(prefix, self.dirname_extinfo['axe_ext'])\n big_str += \"{0:s} Input catalog name: {0:s} \\n\".format(prefix, self.in_sex)\n big_str += \"{0:s} Output catalog name: {0:s} \".format(prefix, self.out_sex)\n\n # return the string\n return big_str",
"def __str__( self ) :\n\n return( '%s with projectile \"%s\", target \"%s\", evaluation \"%s\", path \"%s\" and interaction \"%s\".' % \n ( self.moniker, self.projectile, self.target, self.evaluation, self.path, self.interaction ) )",
"def __repr__(self) -> str:\n context = \" \".join(\"{}={}\".format(k, v) for k, v in self._solution.items())\n return \"<Twilio.Supersim.V1.FleetContext {}>\".format(context)",
"def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))",
"def __str__( self ) :\n\n return( '%s with projectile \"%s\", target \"%s\", evaluation \"%s\", path \"%s\", standardTarget \"%s\" and standardEvaluation \"%s\".' % \n ( self.moniker, self.projectile, self.target, self.evaluation, self.path, self.standardTarget, self.standardEvaluation ) )",
"def __str__(self):\n\n rep = 'Generalized Syllogism:\\n'\n rep += '\\ttask: {}\\n'.format(self.task)\n rep += '\\tencoded_task: {}\\n'.format(self.encoded_task)\n rep += '\\tp1: {}\\n'.format(self.p1)\n rep += '\\tp2: {}\\n'.format(self.p2)\n rep += '\\tquantifier_p1: {}\\n'.format(self.quantifier_p1)\n rep += '\\tquantifier_p2: {}\\n'.format(self.quantifier_p2)\n rep += '\\tfigure: {}\\n'.format(self.figure)\n rep += '\\tTerms:\\n'\n rep += '\\t\\tA: {}\\n'.format(self.A)\n rep += '\\t\\tB: {}\\n'.format(self.B)\n rep += '\\t\\tC: {}\\n'.format(self.C)\n return rep",
"def __str__(self):\n if not self.has_converged or self.parameters is None:\n log.warning(\"The fit has not converged. Try again!\")\n return ''\n\n result = []\n for parameter in self.parameters.keys():\n if parameter in self.fit_for:\n parameter_string = self.get_parameter_string(parameter)\n if parameter_string is not None:\n result.append(parameter_string)\n\n rms = self.get_parameter_format('kelvin') % self.rms\n result.append(f\"[{rms} K rms]\")\n return '\\n'.join(result)",
"def __str__(self) -> str:\n return self.name + \" has a population of \" + str(self.population) + \" and is \" + str(self.area) + \" square \" \\\n \"kilometres.\"",
"def __str__(self):\r\n out = \"##\"*(self.width+1)+\"\\n\"\r\n for i in range(self.height):\r\n out += \"#\"\r\n for j in range(self.width):\r\n if self.grid[i][j] == 0:\r\n out += \"##\"\r\n else:\r\n if not self.showSolution:\r\n out += \" \"\r\n elif (i,j) in self.solution:\r\n out += \"**\"\r\n else:\r\n out += \" \"\r\n out += \"#\\n\"\r\n return out + \"##\"*(self.width+1)",
"def __str__(self):\n return '<Keplerian object: a={:e} m, e={:f}, i={:f} rad, '. \\\n format(self.a, self.e, self.i) + \\\n 'w={:f} rad, Node={:f} rad, M0={:f} rad, '. \\\n format(self.w, self.Node, self.M0) + \\\n 't0={:f} (MJD), GM={:e} m**3/kg/s**2>'. \\\n format(self.t0, self.GM)",
"def __str__(self):\n return str((self.code, self.fitness,))",
"def __str__(self) -> str:\n return f\"K{self._index_to_unicode(self.index)}\"",
"def __str__(self):\n s = 'hit '+str(self.hit)+'\\n'\n s+= 'states '+str(self.states)+'\\n'\n s+= 'chi2 '+str(self.chi2)\n return s",
"def __repr__(self) -> str:\n context = \" \".join(\"{}={}\".format(k, v) for k, v in self._solution.items())\n return \"<Twilio.FlexApi.V1.InsightsQuestionnairesQuestionInstance {}>\".format(\n context\n )",
"def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(UmbrellaSampling.key, self.x0, self.kf, self.n_upd)\n\n return strme",
"def __str__(self):\n d = {}\n d[\"tuner_number\"] = self.tuner_number\n d[\"output_format\"] = self.output_format\n d[\"output_source\"] = self.output_source\n return str(d)",
"def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr",
"def __str__(self) -> str:\n return '\\n'.join([f'{hp}: {self.hyperparams[hp]}'\n for hp in self.hyperparams])",
"def __str__(self):\n return self.get_equation()",
"def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string",
"def __str__(self):\n s = \"Survey class for {0}:\".format(self.surveyName)\n s = '\\n\\t'.join([s, \"beta = {0}\".format(self.beta)])\n s = '\\n\\t'.join([s, \"gain = {0}\".format(self.gain)])\n s = '\\n\\t'.join([s, \"tobs = {0} s\".format(self.tobs)])\n s = '\\n\\t'.join([s, \"tsamp = {0} ms\".format(self.tsamp)])\n s = '\\n\\t'.join([s, \"Tsys = {0} K\".format(self.tsys)])\n s = '\\n\\t'.join([s, \"Centre frequency = {0} MHz\".format(self.freq)])\n s = '\\n\\t'.join([s, \"Bandwidth = {0} MHz\".format(self.bw)])\n s = '\\n\\t'.join([s, \"Chan BW = {0} MHz\".format(self.bw_chan)])\n s = '\\n\\t'.join([s, \"Num polarisations = {0}\".format(self.npol)])\n s = '\\n\\t'.join([s, \"FWHM = {0} arcmin\".format(self.fwhm)])\n s = '\\n\\t'.join([s, \"SNR limit = {0}\".format(self.SNRlimit)])\n\n return s",
"def __str__(self):\n\n return \"[b:{} t:{}]\".format(self.obtem_bag_pass(), self.obtem_ciclo_in())"
] | [
"0.6657387",
"0.63881916",
"0.6257972",
"0.6257151",
"0.6128489",
"0.61164916",
"0.59598804",
"0.5901943",
"0.59001094",
"0.5859933",
"0.5855426",
"0.58453375",
"0.5817911",
"0.57616097",
"0.57536525",
"0.574048",
"0.5737571",
"0.5736057",
"0.5731489",
"0.5730198",
"0.5717515",
"0.56801087",
"0.5679035",
"0.5668739",
"0.5659282",
"0.56399804",
"0.56344503",
"0.56253797",
"0.55869776",
"0.55858785"
] | 0.6712024 | 0 |
This function will make out generators to provide our model with both train and validation data. | def make_generators():
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
TRAIN_DATA_PATH,
target_size= (150, 150),
batch_size= 20,
class_mode= 'sparse')
validation_generator = test_datagen.flow_from_directory(
VAL_DATA_PATH,
target_size= (150, 150),
batch_size= 20,
class_mode= 'sparse')
return train_generator, validation_generator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )",
"def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras",
"def train_model_generator(mdl, training_list, validation_list, epochs, batch_size):\n spe_train = calc_samples_per_epoch(len(training_list), batch_size)\n spe_val = calc_samples_per_epoch(len(validation_list), batch_size)\n\n mdl.compile(\n loss='mean_squared_error',\n optimizer=Adam(),#lr=LEARNING_RATE\n )\n\n history = mdl.fit_generator(my_generator(training_list, batch_size),\n samples_per_epoch=spe_train,\n nb_epoch=epochs)\n\n score_eval = mdl.evaluate_generator(generator=my_generator(validation_list, batch_size),\n val_samples=spe_val)\n\n loss = score_eval\n\n print(\"[Evaluation]%s: %.2f%%\" % (mdl.metrics_names, loss))\n\n return mdl, loss",
"def setGenerators(self):\n shape = (self.input_shape[0],self.input_shape[1])\n self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,\n self.dataPath,\n shape,\n self.classMap,\n self.regression)",
"def build_generators(folder_path, train_batch_size, val_batch_size, height, width):\n train_image_generator, train_mask_generator = create_train_generator(folder_path,\n train_batch_size,\n (height, width),\n preprocessing_masks)\n val_image_generator, val_mask_generator = create_validation_generator(folder_path,\n val_batch_size,\n (height, width),\n preprocessing_masks)\n my_train_generator = my_image_mask_generator(train_image_generator, train_mask_generator)\n my_val_generator = my_image_mask_generator(val_image_generator, val_mask_generator)\n\n return my_train_generator, my_val_generator",
"def train(self, train_gen, valid_gen=None, optimizer=SGD(lr=0.001, momentum=0.9, decay=0.00004, nesterov=True), classifier_dropout=0.7,\n steps_per_epoch=100, validation_steps=100, epochs=1, out_prefix='', out_period=1, fix_extractor=False):\n \n self.classifier().trainable = True\n self.extractor().trainable = not fix_extractor \n \n self.classifier().layers[1].rate = classifier_dropout\n \n train_model = Sequential([self.extractor(), self.classifier()])\n train_model.summary()\n \n train_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])\n \n out_dir = os.path.dirname(out_prefix)\n if out_dir != '' and not os.path.exists(out_dir):\n os.mkdir(out_dir)\n \n callbacks = []\n callbacks.append(TensorBoard())\n if out_prefix is not None:\n callbacks.append(self.SaveWeightsCallback(target_models=[self.extractor(), self.classifier()], out_prefix=out_prefix, period=out_period)) \n history = train_model.fit_generator(train_gen, steps_per_epoch=steps_per_epoch, epochs=epochs+self.current_epochs, callbacks=callbacks, workers=0, validation_data=valid_gen, validation_steps=validation_steps, initial_epoch=self.current_epochs)\n self.current_epochs += epochs\n \n return history",
"def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None",
"def trainval_generators(indices, indices_aug, model_type, folder, batch_size, labels, kfold_bool=False):\n #which data to load\n if model_type == \"aug_padding\":\n file_data = os.path.join(absPath, 'data/', folder, 'aug_data.h5')\n indices = indices_aug\n else:\n file_data = os.path.join(absPath, 'data/', folder, 'data.h5')\n h5f = h5py.File(file_data, 'r')\n #now creating batches\n if kfold_bool == False:\n i_train, i_val, i_test = indices\n train_generator = batch_generator(batch_size, file_data, model_type, i_train, labels)\n val_generator = batch_generator(batch_size, file_data, model_type, i_val, labels)\n generators = (train_generator, val_generator)\n else:\n generators = []\n for k_fold in indices:\n i_train, i_val, i_test = k_fold\n train_generator = batch_generator(batch_size, file_data, model_type, i_train, labels)\n val_generator = batch_generator(batch_size, file_data, model_type, i_val, labels)\n generators.append((train_generator, val_generator))\n return generators",
"def get_data_generator(train_data, validation_data):\n\n def batch_generator(mode=\"train\", batch_size=100):\n assert mode in [\"train\", \"val\"], \"The mode should be in {train, val}.\"\n if mode == \"train\":\n data = train_data.copy()\n elif mode == \"val\":\n data = validation_data.copy()\n\n while True:\n indices = np.random.permutation(np.arange(len(data)))\n data = data[indices]\n\n for i in range(len(data) // batch_size):\n yield data[i * batch_size:(i + 1) * batch_size]\n\n return batch_generator",
"def train_generator(self, train, validation=None, epochs=20, class_weight=None):\n history = self.model.fit_generator(\n generator=train, validation_data=validation,\n epochs=epochs, shuffle=True, class_weight=class_weight)\n self.training_history.append(\n ({\"epochs\": epochs, \"class_weight\": class_weight}, history)\n )\n self.data_ids = {\n \"train\": train.dataset.labels,\n \"validation\": validation.dataset.labels if validation else [],\n }\n return history",
"def get_training_and_validation_generators(data_file, batch_size, n_labels, training_keys_file, validation_keys_file,\n data_split=0.8, overwrite=False):\n training_list, validation_list = get_validation_split(data_file, data_split=data_split, overwrite=overwrite,\n training_file=training_keys_file,\n testing_file=validation_keys_file)\n training_generator = data_generator(data_file, training_list, batch_size=batch_size, n_labels=n_labels)\n validation_generator = data_generator(data_file, validation_list, batch_size=1, n_labels=n_labels)\n # Set the number of training and testing samples per epoch correctly\n num_training_steps = len(training_list)//batch_size\n num_validation_steps = len(validation_list)\n return training_generator, validation_generator, num_training_steps, num_validation_steps",
"def train(args):\r\n print('Create generators')\r\n generators = train_valid_test_generators(\r\n valid_proportion=args.valid_proportion,\r\n test_proportion=args.test_proportion,\r\n seed=args.seed,\r\n shape=(args.height, args.width),\r\n batch_size=args.batch_size,\r\n shuffle=True\r\n )\r\n print('Create model')\r\n model = create_mobilenetv2(\r\n input_shape=(args.height, args.width, 3),\r\n alpha=args.alpha,\r\n depth_multiplier=args.depth_multiplier,\r\n l2_reg=args.l2_reg,\r\n seed=args.seed\r\n )\r\n\r\n print('Training freezed model')\r\n freeze_model(model, 'global_max_pooling2d_1')\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'early_stopping',\r\n 'tensorboard',\r\n ],\r\n model_mask='mobilenetv2_multiclassification_freezed'\r\n )\r\n model = train_pipeline(\r\n model,\r\n generators['hard_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n epochs=args.epochs\r\n )\r\n\r\n print('Training unfreezed model')\r\n unfreeze_model(model)\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'best_model_checkpoint',\r\n 'early_stopping',\r\n 'tensorboard',\r\n 'learning_rate_scheduler'\r\n ],\r\n model_mask='mobilenetv2_multiclassification'\r\n )\r\n model = train_pipeline(\r\n model,\r\n generators['easy_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n epochs=3 * args.epochs\r\n )\r\n\r\n print('Save test evaluation')\r\n results = model.evaluate_generator(generators['test_generator'])\r\n pd.DataFrame({\r\n 'MetricsNames': model.metrics_names,\r\n 'Results': results\r\n }).to_csv(os.path.join('../logs/solution_1_test_generator_evaluation.csv'), index=False)",
"def __build_generators(self, x, y, split=0.9):\n\n # Sanity check\n assert len(x) == len(y)\n\n # Split dataset into train and validation sets\n cut = int(split * len(x))\n x_train = x[:cut]\n x_valid = x[cut:]\n y_train = y[:cut]\n y_valid = y[cut:]\n\n if self.input_type == \"mols\":\n self.__train_gen = HetSmilesGenerator(\n x_train,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n self.__valid_gen = HetSmilesGenerator(\n x_valid,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n else:\n self.__train_gen = DescriptorGenerator(\n x_train,\n y_train,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n self.__valid_gen = DescriptorGenerator(\n x_valid,\n y_valid,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n # Calculate number of batches per training/validation epoch\n train_samples = len(x_train)\n valid_samples = len(x_valid)\n self.__steps_per_epoch = train_samples // self.batch_size\n self.__validation_steps = valid_samples // self.batch_size\n\n print(\n \"Model received %d train samples and %d validation samples.\"\n % (train_samples, valid_samples)\n )",
"def generate(self, train_loader, test_loader, **kwargs):\n self.parse_params(**kwargs)\n\n torch.manual_seed(100)\n device = torch.device(self.device)\n\n optimizer = optim.Adam(self.model.parameters(), self.lr)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[75, 100], gamma = 0.1)\n save_model = True\n for epoch in range(1, self.epoch + 1):\n print('Training epoch: ', epoch, flush = True)\n self.train(self.device, train_loader, optimizer, epoch)\n self.test(self.model, self.device, test_loader)\n\n if (self.save_model and epoch % self.save_per_epoch == 0):\n if os.path.isdir(str(self.save_dir)):\n torch.save(self.model.state_dict(), os.path.join(self.save_dir, self.save_name + '_epoch' + str(epoch) + '.pth'))\n print(\"model saved in \" + str(self.save_dir))\n else:\n print(\"make new directory and save model in \" + str(self.save_dir))\n os.mkdir('./' + str(self.save_dir))\n torch.save(self.model.state_dict(), os.path.join(self.save_dir, self.save_name + '_epoch' + str(epoch) + '.pth'))\n\n scheduler.step()\n\n return self.model",
"def generate(args):\n\n # Using the data Augmentation in traning data\n\n normalizer = Normalizer()\n\n train_aug = tf.keras.preprocessing.image.ImageDataGenerator(\n #rescale=1. / 255.,\n shear_range=args.shear_range,\n zoom_range=args.zoom_range,\n rotation_range=args.rotation_range,\n width_shift_range=args.width_shift_range,\n height_shift_range=args.height_shift_range,\n horizontal_flip=args.horizontal_flip,\n vertical_flip=args.vertical_flip,\n preprocessing_function=normalizer)\n\n\n validation_aug = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=normalizer)\n\n train_generator = train_aug.flow_from_directory(\n args.train_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical',\n shuffle=True)\n\n mean, std = [], []\n if args.mean is None or args.std is None:\n mean, std = normalizer.get_stats(args.train_dir, train_generator.filenames, (args.input_size, args.input_size))\n else:\n mean = [float(m.strip()) for m in args.mean.split(',')]\n std = [float(s.strip()) for s in args.std.split(',')]\n normalizer.set_stats(mean, std)\n\n if not os.path.exists('model'):\n os.makedirs('model')\n with open('model/stats.txt', 'w') as stats:\n stats.write(\"Dataset mean [r, g, b] = {}\\n\".format(mean))\n\n\n label_map = (train_generator.class_indices)\n label_map = dict((v,k) for k,v in label_map.items())\n\n with open('model/labels.csv', 'w') as csv_file:\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n csv_writer.writerows(label_map.items())\n\n validation_generator = validation_aug.flow_from_directory(\n args.validation_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical')\n\n return train_generator, validation_generator, train_generator.samples, validation_generator.samples, len(label_map)",
"def train_model(model, train, validation):\n # Add your code here\n\n monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, restore_best_weights=True)\n\n model.fit_generator(\n generator=train,\n validation_data=validation,\n epochs=1000,\n callbacks=monitor\n\n )\n # Preprocessing (Enrichment)\n # Preprocessing (Normalisation)\n\n return model",
"def create_data_pipeline(self):\n # unpack the params dictionary\n mag_files = self.params['mag_files']\n ss_file = self.params['ss_file']\n val_size = self.params['val_size']\n batch_size = self.params['batch_size']\n data_interval = self.params['data_interval']\n prediction_interval = self.params['prediction_interval']\n # create the generators\n train_gen = large_dset_gen(mag_files[:-1], ss_file, data_interval, prediction_interval)\n # the validation generator is created from the final file in the input file list\n val_gen = small_dset_gen(mag_files[-1], ss_file, data_interval, prediction_interval, val_size)\n\n # creates the dataset using the generator, puts the data into padded batches (to accomodate variable n stations)\n train_dataset = MagNN._create_dataset(train_gen, data_interval, batch_size)\n # shuffles up the training data\n train_dataset.shuffle(batch_size * 4).prefetch(2)\n\n val_dataset = MagNN._create_dataset(val_gen, data_interval, batch_size)\n\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n\n # these iterators need to be run in order to select which dataset the model is using\n train_init = iterator.make_initializer(train_dataset)\n val_init = iterator.make_initializer(val_dataset)\n\n return iterator, train_init, val_init",
"def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )",
"def generate(self):\n self.training_data.gen_x(self.x_func)\n self.training_data.gen_a(self.a_func)\n self.training_data.gen_y(self.y_func)\n \n self.testing_data.gen_x(self.x_func)\n self.testing_data.gen_ys(self.y_func)\n self.testing_data.gen_azero(self.ytotal_func)",
"def train_model(train_generator, validation_generator):\n # we build a test generator to benchmark the model on unseen data\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n test_generator = test_datagen.flow_from_directory(\n test_path,\n target_size=(200, 200),\n color_mode=\"rgb\",\n shuffle=True,\n class_mode='sparse',\n batch_size=batch_size)\n model = build_model()\n filepath = join(save_path, weights_path)\n checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=True, mode='max')\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=epochs // 5, verbose=1, restore_best_weights=True)\n log_dir = join(home, save_path, 'logs', 'fit_smart', datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)\n callbacks_list = [early_stopping, checkpoint, tensorboard_callback]\n # origin [sessions] models each [epochs] times\n max_acc = 0.0\n for i in range(sessions):\n # model training and evaluation\n history = model.fit(\n train_generator,\n steps_per_epoch=train_generator.samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.samples // batch_size\n , verbose=2, callbacks=callbacks_list, workers=multiprocessing.cpu_count(),\n use_multiprocessing=False)\n model.load_weights(join(save_path, weights_path))\n test_loss, test_acc = model.evaluate(test_generator, steps=len(test_generator))\n # save model if it performed better\n if test_acc > max_acc:\n max_acc = test_acc\n model.save(join(home, save_path, model_name))\n print(\"accuracy: \", test_acc, \"\\n Loss:\", test_loss)",
"def setup_training_generator(model):\n train_dir = os.path.join(FLAGS.log_root, \"train-generator\")\n if not os.path.exists(train_dir): os.makedirs(train_dir)\n\n model.build_graph() # build the graph\n\n saver = tf.train.Saver(max_to_keep=20) # we use this to load checkpoints for decoding\n sess = tf.Session(config=util.get_config())\n #sess.run(tf.train.Saver(max_to_keep=20))\n #init = tf.global_variables_initializer()\n #sess.run(init)\n\n # Load an initial checkpoint to use for decoding\n util.load_ckpt(saver, sess, ckpt_dir=\"train-generator\")\n\n\n return sess, saver,train_dir",
"def trainModel(model, train_raw, validation_raw):\n # compile and train the model using the generator function\n train_generator = generator(train_raw, batch_size=32)\n validation_generator = generator(validation_raw, batch_size=32)\n\n # model checkpoint\n now = datetime.datetime.now()\n datenow = now.strftime(\"%Y-%m-%d-\")\n #file_path_model = \"Model_checkpoints/\" + datenow + \"model-weights-{epoch:02d}-{val_loss:0.2f}.hdf5\"\n file_path_model = \"Model_checkpoints/\" + datenow + \"model-weights.hdf5\"\n checkpoint = ModelCheckpoint(file_path_model, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')\n callbacks_list = [checkpoint]\n model.compile(loss='mse', optimizer='adam')\n # left/center/right images, and all flipped\n ntrain = len(train_raw)*3*2\n nvalid = len(validation_raw)*3*2\n history_object = model.fit_generator(train_generator, samples_per_epoch= \\\n ntrain, validation_data=validation_generator, \\\n nb_val_samples=nvalid, nb_epoch=10, \\\n callbacks = callbacks_list, verbose=1)\n #history_object = model.fit_generator(train_generator, steps_per_epoch= ntrain, \\\n # validation_data=validation_generator, validation_steps=nvalid, \\\n # callbacks = callbacks_list, epochs=5, verbose = 1) \n return history_object",
"def create_gens(train_path, gen):\n\n _logger.debug(\"Creating Data Generators\")\n image_files = glob(train_path + '/*/*.jp*g')\n try:\n train_generator = gen.flow_from_directory(\n train_path,\n target_size=input_size,\n shuffle=True,\n batch_size=batch_size,\n subset = \"validation\"\n )\n test_generator = gen.flow_from_directory(\n train_path,\n target_size=input_size,\n shuffle=True,\n batch_size=batch_size,\n subset = \"training\"\n )\n class_indices = train_generator.class_indices\n except FileNotFoundError:\n _logger.error(\"data generators invalid\")\n train_generator = None\n test_generator = None\n image_files = None\n class_indices= None\n return train_generator, test_generator, image_files, class_indices",
"def get_train_generators(cf, logger):\n config_file = os.environ[CONFIG_ENV_VAR]\n config = load_config(config_file)\n\n all_sections = find_all_subdir_sections(config)\n\n # separate into training and validation folds randomly\n fold_ratios = config[\"train_validation_splits\"]\n # rng = np.random.default_rng(seed=config[\"split_random_seed\"])\n # rng.shuffle(all_sections)\n rnd = random.Random(config[\"split_random_seed\"])\n rnd.shuffle(all_sections)\n split_idx = round(fold_ratios[0] * len(all_sections))\n train_sections = all_sections[:split_idx]\n val_sections = all_sections[split_idx:]\n\n logger.info(\n \"Loaded %d annotation sections, using %d train, %d val\"\n % (len(all_sections), len(train_sections), len(val_sections))\n )\n\n train_pipeline = create_data_gen_pipeline(\n train_sections, cf=cf, annotation_config=config, is_training=True\n )\n val_pipeline = create_data_gen_pipeline(\n val_sections, cf=cf, annotation_config=config, is_training=False\n )\n batch_gen = {\n \"train\": train_pipeline,\n \"val_sampling\": val_pipeline,\n \"n_val\": len(val_sections),\n }\n # batch_gen[\"val_patient\"] = create_data_gen_pipeline(\n # val_sections, cf=cf, annotation_config=config, is_training=False\n # )\n\n return batch_gen",
"def setup_training_generator(model):\n train_dir = os.path.join(FLAGS.log_root, \"train-generator\")\n if not os.path.exists(train_dir): os.makedirs(train_dir)\n\n model.build_graph() # build the graph\n\n saver = tf.train.Saver(max_to_keep=20) # we use this to load checkpoints for decoding\n sess = tf.Session(config=util.get_config())\n init = tf.global_variables_initializer()\n\n sess.run(init)\n #tf.get_variable_scope().reuse_variables()\n\n # Load an initial checkpoint to use for decoding\n #util.load_ckpt(saver, sess, ckpt_dir=\"train-generator\")\n\n\n return sess, saver,train_dir",
"def train_pipeline(model, train_generator, valid_generator, callbacks, optimizer_lr, optimizer_decay, epochs):\r\n model.compile(\r\n optimizer=Adam(\r\n lr=optimizer_lr,\r\n decay=optimizer_decay\r\n ),\r\n loss={\r\n 'smile_output': 'binary_crossentropy',\r\n 'open_mouth_output': 'binary_crossentropy'\r\n },\r\n loss_weights={\r\n 'smile_output': 0.5,\r\n 'open_mouth_output': 0.5\r\n },\r\n metrics=[f1_score]\r\n )\r\n model.fit_generator(\r\n train_generator,\r\n epochs=epochs,\r\n callbacks=callbacks,\r\n validation_data=valid_generator,\r\n verbose=1,\r\n workers=4,\r\n use_multiprocessing=False,\r\n )\r\n\r\n return model",
"def fit_model(model, generators, training_step, val_step, epoch=1):\n # Compile your model\n model.compile(optimizer=tf.keras.optimizers.Adam(),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n # Train your model here\n history = model.fit(generators[0],\n steps_per_epoch=training_step,\n epochs=epoch,\n verbose=1,\n validation_data=generators[1],\n validation_steps=val_step)\n\n return history",
"def get_data_generators(config_tuple):\n\n (_, flags) = config_tuple\n # Configure training data flow\n train_generator = init_data_generator(config_tuple, flags.train_dir)\n # Configure validation data flow\n validation_generator = init_data_generator(config_tuple, flags.val_dir)\n # Configure test data flow\n test_generator = init_data_generator(config_tuple, flags.test_dir)\n\n return (train_generator, validation_generator, test_generator)",
"def generate(batch, size=32):\n\n # Using the data Augmentation in traning data\n ptrain = 'data224/train'\n pval = 'data224/test'\n\n datagen1 = ImageDataGenerator(\n samplewise_center=True,\n samplewise_std_normalization=True,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(samplewise_center=True,\n samplewise_std_normalization=True,)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, validation_generator, count1, count2",
"def train_model(model, data_train, y_train, data_test, y_test, ARGS):\n callback_list = create_callbacks(model, (data_test, y_test), ARGS)\n train_generator = SequenceBuilder(data_train, ARGS, target=y_train, target_out=True)\n test_generator = SequenceBuilder(data_test, ARGS, target=y_test, target_out=True)\n history = model.fit_generator(generator=train_generator,\n epochs=ARGS.epochs, verbose=2,\n validation_data=test_generator,\n # validation_freq=[1, 5, 10],\n callbacks=callback_list\n # ,max_queue_size=15, use_multiprocessing=False,\n # workers=3, initial_epoch=0\n )\n return history"
] | [
"0.70969266",
"0.7074017",
"0.70614856",
"0.70605785",
"0.699151",
"0.69356424",
"0.6889953",
"0.6871072",
"0.68467623",
"0.680157",
"0.6774476",
"0.67295635",
"0.6717706",
"0.67161566",
"0.6681554",
"0.66807485",
"0.66589004",
"0.665683",
"0.66468203",
"0.6622943",
"0.6589832",
"0.6583555",
"0.65778047",
"0.6576777",
"0.6545765",
"0.64984465",
"0.64754236",
"0.6423636",
"0.64214134",
"0.6402967"
] | 0.76590663 | 0 |
This function will plot and save the history of the training of model to two external png file. One for the model loss over the epochs and one for the model accuracy over the epochs. | def plotHistory(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
# Make and save the plot for our accuracy
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.savefig("trainValAccSecond.png")
# Make and save the plots for our loss
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
plt.savefig("trainValLossSecond.png") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_train_history(modell=config.model):\n history = pd.read_csv(f'model/this/{modell}_history.csv')\n epochs = len(history.epoch)\n\n plt.style.use(\"ggplot\")\n plt.rcParams['figure.figsize'] = (5, 9)\n plt.plot(np.arange(0, epochs), history[\"accuracy\"], label=\"model accuracy\", color=\"red\", zorder=10, linewidth=2)\n plt.plot(np.arange(0, epochs), history[\"loss\"], label=\"training loss\", color=\"blue\", zorder=9, linewidth=2)\n plt.plot(np.arange(0, epochs), history[\"val_accuracy\"], label=\"validation accuracy\", color=\"red\", zorder=1, linewidth=1, alpha= 0.4)\n plt.plot(np.arange(0, epochs), history[\"val_loss\"], label=\"validation loss\", color=\"blue\", zorder=2, linewidth=1, alpha= 0.4)\n plt.hlines(1.0,0, epochs, colors=\"black\", linestyles=\"dotted\")\n plt.title(f'Trening av modell: {modell}')\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy / Loss\")\n plt.ylim(0, 2.)\n plt.yticks(np.append(np.arange(0, 1., 0.05), (np.arange(1, 2., 0.2) )))\n\n plt.xlim(0, epochs)\n plt.legend(loc=\"upper right\")\n plt.tight_layout(True)\n\n xint = []\n locs, labels = plt.xticks()\n for each in locs:\n xint.append(int(each))\n plt.xticks(xint)\n\n plt.savefig(f'model/this/{modell}.png')\n plt.show()",
"def plot_history(history, config):\n\n # Plot training and validation history\n train_acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n train_prec = history.history['precision']\n val_prec = history.history['val_precision']\n train_rec = history.history['recall']\n val_rec = history.history['val_recall']\n train_auc = history.history['auc']\n val_auc = history.history['val_auc']\n\n plt.figure(figsize=(8, 8))\n plt.subplot(2, 2, 1)\n plt.plot(train_acc, label='Training')\n plt.plot(val_acc, label='Validation')\n plt.legend(loc='lower left')\n plt.ylabel('Accuracy')\n plt.ylim([0, 1.0])\n # plt.title('Accuracy')\n\n plt.subplot(2, 2, 2)\n plt.plot(train_prec, label='Training')\n plt.plot(val_prec, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('Precision')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation Precision')\n plt.xlabel('epoch')\n\n plt.subplot(2, 2, 3)\n plt.plot(train_rec, label='Training')\n plt.plot(val_rec, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('Recall')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation Recall')\n\n plt.subplot(2, 2, 4)\n plt.plot(train_auc, label='Training')\n plt.plot(val_auc, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('AUC')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation AUC')\n plt.xlabel('epoch')\n\n plt.savefig(f\"{config['model_label']}.png\")",
"def draw_keras_history(history, output_dir='.', output_name='loss.pdf'):\n\n fig = plt.figure(1, figsize=(6, 6), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n if 'loss' in history.history.keys():\n training_losses = history.history['loss']\n epochs = np.arange(0, len(training_losses))\n l1 = ax.plot(epochs, training_losses, '-', color='#8E2800', lw=2, label=\"Training loss\")\n \n if 'val_loss' in history.history.keys():\n validation_losses = history.history['val_loss']\n epochs = np.arange(0, len(validation_losses))\n l2 = ax.plot(epochs, validation_losses, '-', color='#468966', lw=2, label=\"Validation loss\")\n\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(\"Loss\")\n \n # training_acc = history.history['acc']\n # validation_acc = history.history['val_acc']\n\n # ax2 = ax.twinx()\n # l3 = ax2.plot(epochs, training_acc, '--', color='#8E2800', lw=2, label=\"Training accuracy\")\n # l4 = ax2.plot(epochs, validation_acc, '--', color='#468966', lw=2, label=\"Validation accuracy\")\n # ax2.set_ylabel(\"Accuracy\")\n\n ax.margins(0.05)\n # ax2.margins(0.05)\n\n fig.set_tight_layout(True)\n\n # lns = l1 + l2 + l3 + l4\n #lns = l1 + l2\n #labs = [l.get_label() for l in lns]\n #ax.legend(lns, labs, loc='best', numpoints=1, frameon=False)\n ax.legend(loc='best', numpoints=1, frameon=False)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()",
"def save_accuracy_chart(self):\n history = self.model.history.history\n fig = plt.figure()\n plt.plot(history['accuracy'], label='Training Accuracy')\n plt.plot(history['val_accuracy'],label='Validation Set Accuracy')\n plt.legend()\n fig.savefig('model_accuracy.png')",
"def plot_eval_2(trained_model, image_name):\n # Get training evaluation data\n train_accuracy = trained_model.history['acc']\n train_val_accuracy = trained_model.history['val_acc']\n train_loss = trained_model.history['loss']\n train_val_loss = trained_model.history['val_loss']\n \n # Generate accuracy plot\n epochs = range(len(train_accuracy))\n plt.figure()\n plt.plot(epochs, train_accuracy, 'bo', label='Training accuracy')\n plt.plot(epochs, train_val_accuracy, 'b', label='Validation accuracy')\n plt.title('Training and validation accuracy')\n plt.legend()\n \n # Save accuracy plot\n plot_file = os.path.join(OUTPUT_DIR,\n \"{}_training_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')\n \n # Generate loss plot\n plt.figure()\n plt.plot(epochs, train_loss, 'bo', label='Training loss')\n plt.plot(epochs, train_val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n \n # Save loss plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_training_loss\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')",
"def plot_loss(model_fit, save_folder): \n train_loss = model_fit.history['loss']\n val_loss = model_fit.history['val_loss']\n epoch_axis = np.arange(1, len(train_loss) + 1)\n plt.title('Train vs Validation Loss')\n plt.plot(epoch_axis, train_loss, 'b', label='Train Loss')\n plt.plot(epoch_axis, val_loss,'r', label='Val Loss')\n plt.xlim([1, len(train_loss)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_loss) / 10) + 0.5)))\n plt.legend(loc='upper right')\n plt.ylabel('Loss')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/loss.png')\n plt.show()\n plt.close()",
"def draw_keras_history(history, output_dir='.', output_name='loss.pdf'):\n\n fig = plt.figure(1, figsize=(7, 7), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n training_losses = history.history['loss']\n validation_losses = history.history['val_loss']\n epochs = np.arange(0, len(training_losses))\n\n l1 = ax.plot(epochs, training_losses, '-', color='#8E2800', lw=2, label=\"Training loss\")\n l2 = ax.plot(epochs, validation_losses, '-', color='#468966', lw=2, label=\"Validation loss\")\n\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(\"Loss\")\n \n # training_acc = history.history['acc']\n # validation_acc = history.history['val_acc']\n\n # ax2 = ax.twinx()\n # l3 = ax2.plot(epochs, training_acc, '--', color='#8E2800', lw=2, label=\"Training accuracy\")\n # l4 = ax2.plot(epochs, validation_acc, '--', color='#468966', lw=2, label=\"Validation accuracy\")\n # ax2.set_ylabel(\"Accuracy\")\n\n ax.margins(0.05)\n # ax2.margins(0.05)\n\n fig.set_tight_layout(True)\n\n # lns = l1 + l2 + l3 + l4\n lns = l1 + l2\n labs = [l.get_label() for l in lns]\n ax.legend(lns, labs, loc='best', numpoints=1, frameon=False)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()",
"def save_history(history, save_folder_path=None, params=['acc', 'val_acc']):\n \n print(\"[INFO] Showing train and test accuracy plot...\")\n\n # Plot all lines in parameters\n for param in params:\n plt.plot(history.history[param])\n \n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n\n if save_folder_path is not None:\n save_path = \"{}/{}\".format(save_folder_path, \"train_history.png\")\n history_fig = plt.gcf() # get current figure\n history_fig.savefig(save_path)\n print(\"[INFO] Plot saved to {0}\".format(save_path))\n\n plt.show()",
"def plot_eval_1(trained_model, image_name):\n # Get training evaluation data\n train_accuracy = trained_model.history['acc']\n train_val_accuracy = trained_model.history['val_acc']\n train_loss = trained_model.history['loss']\n train_val_loss = trained_model.history['val_loss']\n \n # Generate accuracy plot\n epochs = range(len(train_accuracy))\n plt.figure()\n plt.plot(epochs, train_accuracy, 'bo', label='Training accuracy')\n plt.plot(epochs, train_val_accuracy, 'b', label='Validation accuracy')\n plt.title('Training and validation accuracy')\n plt.legend()\n \n # Save accuracy plot\n plot_file = os.path.join(OUTPUT_DIR,\n \"{}_training_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')\n \n # Generate loss plot\n plt.figure()\n plt.plot(epochs, train_loss, 'bo', label='Training loss')\n plt.plot(epochs, train_val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n \n # Save loss plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_training_loss\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')",
"def plot_loss(history, name):\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n # plt.show()\n plt.savefig(name, format=\"png\")",
"def plot_training_history(history):\n fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(15, 5))\n ax_loss.plot(history.epoch, history.history[\"loss\"], label=\"Train loss\")\n ax_loss.plot(history.epoch, history.history[\"val_loss\"], label=\"Validation loss\")\n ax_loss.legend()\n ax_acc.plot(history.epoch, history.history[\"iou_score\"], label=\"Train iou\")\n ax_acc.plot(history.epoch, history.history[\"val_iou_score\"], label=\"Validation iou\")\n ax_acc.legend()",
"def plot_history(self, model_history, n_epochs):\n # Visualize performance\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, n_epochs), model_history.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, n_epochs), model_history.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, n_epochs), model_history.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, n_epochs), model_history.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(\"..\", \"output\", \"model_loss_accuracy_history.png\"))",
"def plot_loss_vs_epoch(history, var_train, var_val, show=False):\n plt.figure(figsize=(10, 8))\n plt.grid(True)\n plt.plot(history.history['loss']/var_train, marker=\"o\")\n plt.plot(history.history['val_loss']/var_val, marker=\"o\")\n plt.title('Model Loss')\n plt.ylabel('Loss (Normalised to variance of dataset)')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'])\n # plt.ylim(bottom=0)\n filename = \"img/\"\n filename += datetime.now().strftime(\"%y%m%d_%H%M\")\n filename += \"_model_loss.png\"\n plt.savefig(filename)\n\n if show:\n plt.show()",
"def plots(self, history):\n print(history.history.keys())\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train','test'], loc='upper left')\n plt.show()\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train','test'], loc='upper left')\n plt.show()",
"def save_learning_curves(history, run_name, base_path=\"plots/\"):\n path = os.path.join(base_path, run_name)\n if not os.path.isdir(path):\n os.makedirs(path)\n losses = {k: history[k] for k in ['loss', 'val_loss']}\n accuracies = {k: history[k] for k in ['acc', 'val_acc']}\n x = range(len(losses['loss']))\n fn_losses = os.path.join(path, \"loss.png\")\n fn_accuracies = os.path.join(path, \"accuracy.png\")\n ut.save_plot(x, ys=losses, xlabel=\"epoch\", ylabel=\"loss\",\n title=run_name, path=fn_losses)\n ut.save_plot(x, ys=accuracies, xlabel=\"epoch\", ylabel=\"accuracy\",\n title=run_name, path=fn_accuracies)",
"def plot_history(H, epochs):\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, epochs), H.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.show()",
"def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):\n\n green = '#72C29B'\n orange = '#FFA577'\n\n with plt.xkcd():\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n ax1.set_title('Model loss through #epochs', fontweight='bold')\n\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n ax2.set_title('Model accuracy through #epochs', fontweight='bold')\n\n plt.tight_layout()\n plt.show()\n fig.savefig(save_figure_path)\n plt.close(fig)",
"def visualize_train_history(history):\n cat_acc = history.history['categorical_accuracy']\n val_cat_acc = history.history['val_categorical_accuracy']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(cat_acc) + 1)\n\n plt.plot(epochs, cat_acc, 'bo', label='Training cat_acc')\n plt.plot(epochs, val_cat_acc, 'b', label='Validation cat_acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n\n plt.show()",
"def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)",
"def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. epoches')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()",
"def save_history_plot(history, base_dir):\n\n plt.figure(figsize=(10, 10))\n\n plt.subplot(211)\n plt.plot(history.history['sparse_categorical_accuracy'], color='b', label='Acc')\n plt.plot(history.history['val_sparse_categorical_accuracy'], '--', color='r', label='Val Acc')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n\n plt.subplot(212)\n plt.plot(history.history['loss'], color='b', label='Loss')\n plt.plot(history.history['val_loss'], '--', color='r', label='Val Loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n\n plt.savefig(f'{base_dir}\\\\history.png')",
"def plot_training_info(case, metrics, save, history):\n val = False\n if 'val_accuracy' in history and 'val_loss' in history:\n val = True\n plt.ioff()\n if 'accuracy' in metrics:\n fig = plt.figure()\n plt.plot(history['accuracy'])\n if val:\n plt.plot(history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'accuracy.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)\n\n # summarize history for loss\n if 'loss' in metrics:\n fig = plt.figure()\n plt.plot(history['loss'])\n if val:\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # plt.ylim(1e-3, 1e-2)\n plt.yscale(\"log\")\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'loss.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)",
"def plot_train_history(self):\n plt.figure()\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.plot(self.train_history.history['loss'])\n plt.plot(self.train_history.history['val_loss'])\n plt.legend(['Training', 'Validation'])\n\n plt.show()",
"def plot_loss_and_acc(history):\n hist = history.history\n x_arr = np.arange(len(hist['loss'])) + 1\n fig = plt.figure(figsize=(12,4))\n ax = fig.add_subplot(1,2,1)\n ax.plot(x_arr, hist['loss'], '-o', label='Train loss')\n ax.plot(x_arr, hist['val_loss'], '--<', label='Validation loss')\n ax.legend(fontsize=15)\n ax.set_xlabel('Epoch', size=15)\n ax.set_ylabel('Loss', size=15)\n\n ax = fig.add_subplot(1,2,2)\n ax.plot(x_arr, hist['accuracy'], '-o', label='Train acc.')\n ax.plot(x_arr, hist['val_accuracy'], '--<', label='Validation acc.')\n ax.legend(fontsize=15)\n ax.set_xlabel('Epoch', size=15),\n ax.set_ylabel('Accuracy', size=15)\n plt.show()",
"def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()",
"def plot_loss(path, current_epoch, train_loss, test_loss):\n plotname = os.path.join(path, \"training_loss_curve.png\")\n fig = plt.figure()\n plt.axes().set_facecolor(\"#fbc9bc\")\n plt.plot(\n range(1, current_epoch + 1), train_loss, color=\"#ff6050\", label=\"Training Loss\"\n )\n plt.plot(range(1, current_epoch + 1), test_loss, color=\"#19214e\", label=\"Test Loss\")\n plt.xlabel(\"Epoch Count\")\n plt.ylabel(\"Model Loss\")\n plt.legend()\n fig.savefig(plotname, bbox_inches=\"tight\")\n plt.close()",
"def show_training(history: tf.keras.callbacks.History) -> None:\n hist = history.history\n\n if \"loss\" not in hist:\n print(\"Error: 'loss' values not found in the history\")\n return\n\n # plot training\n plt.figure(figsize=(14, 4))\n plt.subplot(121)\n plt.plot(hist[\"loss\"], label=\"Training\")\n if \"val_loss\" in hist:\n plt.plot(hist[\"val_loss\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend()\n\n if \"accuracy\" in hist:\n plt.subplot(122)\n plt.plot(hist[\"accuracy\"], label=\"Training\")\n if \"val_accuracy\" in hist:\n plt.plot(hist[\"val_accuracy\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.legend()\n\n plt.suptitle(\"Training history\")\n plt.show()\n\n # show final results\n print(\"\\nTraining loss: \\t{:.4f}\".format(hist[\"loss\"][-1]))\n if \"val_loss\" in hist:\n print(\"Validation loss: \\t{:.4f}\".format(hist[\"val_loss\"][-1]))\n if \"accuracy\" in hist:\n print(\"\\nTraining accuracy: \\t{:.3f}\".format(hist[\"accuracy\"][-1]))\n if \"val_accuracy\" in hist:\n print(\"Validation accuracy:\\t{:.3f}\".format(hist[\"val_accuracy\"][-1]))",
"def plot_accuracy(model_fit, save_folder): \n train_acc = model_fit.history['binary_accuracy']\n val_acc = model_fit.history['val_binary_accuracy']\n epoch_axis = np.arange(1, len(train_acc) + 1)\n plt.title('Train vs Validation Accuracy')\n plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')\n plt.plot(epoch_axis, val_acc,'r', label='Val Acc')\n plt.xlim([1, len(train_acc)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/accuracy.png')\n plt.show()\n plt.close()",
"def plot_training_history(history, metric):\n \n val_metric = 'val_'+metric\n acc = history.history[metric]\n val_acc = history.history[val_metric]\n \n loss = history.history['loss']\n val_loss = history.history['val_loss']\n \n epochs_range = history.epoch\n \n plt.figure(figsize=(8, 8))\n plt.subplot(2, 1, 1)\n plt.plot(epochs_range, acc, label='Training Acc.')\n plt.plot(epochs_range, val_acc, label='Validation Acc.')\n plt.legend(loc='best',)\n plt.title('Training and Validation Accuracy')\n \n plt.subplot(2, 1, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='best')\n plt.title('Training and Validation Loss')\n plt.show()",
"def train_model_and_plot_results(**kwargs):\n epochs = kwargs[\"epochs\"]\n\n history, model = train_model(**kwargs)\n\n dice = history.history['dice_loss']\n val_dice = history.history['val_dice_loss']\n\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs_range = range(epochs)\n\n plt.figure(figsize=(16, 8))\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, dice, label='Training Dice Loss')\n plt.plot(epochs_range, val_dice, label='Validation Dice Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Dice Loss')\n\n plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Loss')\n\n plt.show()"
] | [
"0.7977149",
"0.78650296",
"0.78495806",
"0.7803552",
"0.7779729",
"0.776749",
"0.7753306",
"0.77321535",
"0.77168226",
"0.77015525",
"0.7658724",
"0.76476336",
"0.76449656",
"0.76299965",
"0.7609301",
"0.75890297",
"0.7568557",
"0.7522234",
"0.7485536",
"0.74618995",
"0.7457487",
"0.7419651",
"0.7412116",
"0.74015576",
"0.7373021",
"0.73678005",
"0.7361447",
"0.73299754",
"0.7328668",
"0.7326124"
] | 0.82532877 | 0 |
This function will create, compile, train, plot the history of, and save the model that will distinguish between many bass guitar notes. | def main():
model = make_model()
train_generator, validation_generator = make_generators()
history = model.fit_generator(
train_generator,
steps_per_epoch=350,
epochs=30,
validation_data=validation_generator,
validation_steps=70)
model.save('2ndMelSpecModel30Epochs.h5')
plotHistory(history) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n \n # Load the notes used to train the model\n notes = pickle.load(open('data/notes', 'rb'))\n \n # Load the notes from all video games combined\n all_notes = pickle.load(open('data/all_notes', 'rb'))\n \n # Get number of unique notes, rests, and chords in the midi files\n n_vocab = len(set(all_notes))\n\n # Generate Network Inputs (list of lists containing note sequences)\n # Generate Normalized Network Input\n network_input, normalized_input = prepare_sequences(notes, all_notes, n_vocab)\n \n # Generate the Keras model with final dense layer having n_vocab number of nodes\n model = create_network(normalized_input, n_vocab)\n \n # Generate the note outputs from the model, and random sequence of notes for network input\n prediction_output = generate_notes(model, network_input, all_notes, n_vocab)\n \n # Create the Midi file from the generated note output\n create_midi(prediction_output)",
"def load_our_model():\n \n model = load_model('2ndMelSpecModel.h5')\n train_generator, validation_generator = make_generators()\n \n history = model.fit_generator(\n train_generator,\n steps_per_epoch= 145,\n epochs= 30,\n validation_data=validation_generator,\n validation_steps=50)\n plotHistory(history)",
"def generate(train_data_path, trained_model_path, num_output_files):\n # load the notes used to train the model\n\n train_data = data_preprocess.load_from_pickle(train_data_path)\n training_notes = train_data[\"data\"]\n note_translator = train_data[\"note_translator\"]\n\n net = networks.TransformerNet.load_checkpoint(trained_model_path)\n\n for i in range(num_output_files):\n prediction_output = generate_notes(net, training_notes, note_translator)\n create_midi(prediction_output, file_suffix=i)",
"def main():\n\n if not os.path.exists( os.path.join(os.getcwd(), 'Plots') ):\n os.mkdir('Plots')\n\n # Initialise the canvas and set aesthetics\n canv = TCanvas(\"canv\", \"canv\", 800, 600)\n canv.SetLogy()\n gStyle.SetOptStat(0)\n gStyle.SetOptTitle(0)\n\n # Initialise legend and set colours\n leg_height = len(models) * 0.06 # make y-length of legend dependent on n_models\n myLeg = TLegend(0.6, 0.9 - leg_height, 0.9, 0.9)\n myLeg.SetTextSize(0.02)\n\n # Initialise histogram arrays\n nJetHist = [None] * len(models)\n jetPtHist = [None] * len(models)\n leadJetPtHist = [None] * len(models)\n metPtHist = [None] * len(models)\n dPhiJJHist = [None] * len(models)\n\n # x-axis labels for plots\n nJetLabel = \"#it{n}_{jet}\"\n jetPtLabel = \"#it{p}_{T}^{jet}\"\n leadJetPtLabel = \"#it{p}_{T}^{j_{1}}\"\n metPtLabel = \"#it{E}_{T}^{miss}\"\n dPhiJJLabel = \"#Delta#it{#phi}_{j_{1} j_{2}}\"\n\n # Initialise histograms here so I can use them later\n for i, model in enumerate(models):\n nJetHist[i] = TH1F(\"nJet\"+model, \"nJet dist \"+model, 30, 0, 29)\n jetPtHist[i] = TH1F(\"jetPt\"+model, \"Jet pT dist \"+model, 30, 0, 3000)\n leadJetPtHist[i] = TH1F(\"leadJetPt\"+model, \"Lead jet pT dist \"+model, 30, 0, 3000)\n metPtHist[i] = TH1F(\"met\"+model, \"MET dist \"+model, 30, 0, 3000)\n dPhiJJHist[i] = TH1F(\"dPhijj\"+model, \"DPhi dist \"+model, 20, -1*(pi+0.1), pi+0.1)\n \n\n # Open root files, then draw individual histograms\n for i, model in enumerate(models):\n print Fore.MAGENTA + \"Running over model {0}/{1}.\".format(i+1, len(models))\n openFile = TFile(files[i])\n tree = openFile.Get(\"Events\")\n nEntries = tree.GetEntries()\n\n # Initialise progress bar\n widgets = [Percentage(), Bar('>'), ETA()]\n pbar = ProgressBar(widgets = widgets, maxval = nEntries).start() \n\n for entry in xrange(nEntries):\n treeEntry = tree.GetEntry(entry)\n nJetHist[i].Fill(tree.nJet)\n \n for jet in xrange( len(tree.Jet_pt) ):\n jetPtHist[i].Fill(tree.Jet_pt[jet])\n\n if len(tree.Jet_pt) > 0: leadJetPtHist[i].Fill(tree.Jet_pt[0])\n metPtHist[i].Fill(tree.MET_pt)\n\n if len(tree.Jet_phi) >= 2:\n deltaPhi = tree.Jet_phi[0] - tree.Jet_phi[1]\n dPhiJJHist[i].Fill(deltaPhi) \n\n pbar.update(entry+1)\n \n pbar.finish()\n\n # Normalise histograms\n nJetHist[i].Scale(1./nEntries)\n jetPtHist[i].Scale(1./nEntries)\n leadJetPtHist[i].Scale(1./nEntries)\n metPtHist[i].Scale(1./nEntries)\n dPhiJJHist[i].Scale(1./nEntries)\n\n # Draw individual histograms and save\n drawIndivHistos(model, nJetHist[i], canv, myLeg, nJetLabel, \"nJet\", index=i)\n drawIndivHistos(model, jetPtHist[i], canv, myLeg, jetPtLabel, \"jetPT\", index=i)\n drawIndivHistos(model, leadJetPtHist[i], canv, myLeg, leadJetPtLabel, \"leadJetPT\", index=i)\n drawIndivHistos(model, metPtHist[i], canv, myLeg, metPtLabel, \"MET\", index=i)\n drawIndivHistos(model, dPhiJJHist[i], canv, myLeg, dPhiJJLabel, \"dPhi\", index=i)\n \n\n # Draw histograms for different models overlaid\n drawMultipleHistos(nJetHist, canv, myLeg, nJetLabel, \"nJet\")\n drawMultipleHistos(jetPtHist, canv, myLeg, jetPtLabel, \"jetPT\")\n drawMultipleHistos(leadJetPtHist, canv, myLeg, leadJetPtLabel, \"leadJetPT\")\n drawMultipleHistos(metPtHist, canv, myLeg, metPtLabel, \"MET\")\n drawMultipleHistos(dPhiJJHist, canv, myLeg, dPhiJJLabel, \"dPhi\")",
"def main():\n nn = CarsClassifierModel()\n train_x, train_y, test_x, test_y = nn.load_data_preprocess()\n history = nn.run(train_x,train_y)\n nn.evaluate(test_x, test_y)\n nn.save(\"keras_nn_5\")\n #nn.plots(history)\n #print(train_x.shape)\n #plt.imshow(train_x[52])\n #plt.title(\"Car\")\n #plt.show()\n #print(train_y[52])",
"def drawOutput():\n global lastPlayedCoordinates\n notes = notes_text.get(\"1.0\", \"end-1c\")\n\n tempDir = \".bt_temp\"\n\n # Create the directory only if it does not exist\n try:\n os.makedirs(tempDir)\n except FileExistsError:\n pass\n\n tempNotesFile = tempDir + \"/currentNotes.btu\"\n tempNewBTI = tempDir + \"/generatedNotes.bti\"\n\n with open(tempNotesFile, 'w') as f:\n f.write(str(notes))\n\n BitTuneDrawingLanguage.generateBitTuneImageFromFile(tempNotesFile,tempNewBTI)\n clearCanvas()\n\n loadFromFile(tempNewBTI)\n os.remove(tempNewBTI)\n os.remove(tempNotesFile)\n lastPlayedCoordinates = []",
"def run(self):\n model = self.model\n self.summary_cards(model)\n self.hospitalizations_chart(model)\n self.available_beds_chart(model)\n self.write_population_info(model)\n self.write_age_distribution_chart(model)\n self.write_fatalities_chart(model)\n self.write_healthcare_parameters(model)\n self.write_epidemiological_parameters(model)\n self.write_footnotes(model)",
"def save_information(self, path: utils.URLPath):\n # Text summary of model\n with (path / \"model_summary.txt\").open(\"w\") as summary_file:\n def print_file(*args, **kwargs):\n print(*args, **kwargs, file=summary_file)\n self.model.summary(print_fn=print_file)\n\n # Image plotting structure of model\n keras.utils.plot_model(self.model, to_file=str(path / \"model_plot.png\"))\n\n # plot all training history\n for i, (meta, history) in enumerate(self.training_history):\n training_output = path / f\"train_{i}\"\n io_functions.save_json(meta, training_output / \"info.json\")\n plot_training_history(history, training_output / \"training.png\")",
"def model_architecture_to_file(model, save_path, show_shapes=True):\n plot_model(model, to_file=save_path + \"_model_architecture.png\", show_shapes=show_shapes)",
"def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()",
"def main():\n \n data_base = '/local/duman/SIMULATIONS/many_polymers_5/'\n save_base = '/usr/users/iff_th2/duman/RolfData/many_polymers_5'\n \n \n ## Index the data\n# density = [0.08, 0.2, 0.4]\n# xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n# Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0, 8000.0, 10000.0]\n# kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n# fp = [0.0, 0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n \n density = [0.2]\n kappa = [1.25, 2.5, 5.0, 25.0, 62.5, 125.0, 200.0, 400.0]\n xi_L = [0.05, 0.1, 0.2, 1.0, 2.5, 5.0, 8.0, 16.0]\n fp = [0.0048, 0.0112, 0.024, 0.08, 0.24, 0.8, 1.2, 2.4, 7.0]\n Pe = [3.0, 7.0, 15.0, 50.0, 150.0, 500.0, 750.0, 1500.0, 4375.0]\n \n ## Create points\n points = []\n for i, x in enumerate(xi_L):\n for j, p in enumerate(Pe):\n points.append( Phase(x, p, kappa[i], fp[j], 'short') ) \n \n for point in points:\n point.analyse_type()\n point.set_plot_props()\n \n long_xil = [0.05, 0.2, 1.0, 2.5, 16.0]\n long_pe = [3.0, 150.0, 750.0, 8000.0, 10000.0]\n long_kappa = [5.0, 20.0, 100.0, 250.0, 1600.0]\n long_fp = [0.0003, 0.015, 0.075, 0.0, 0.0]\n long_points = []\n for i, x in enumerate(long_xil):\n for j, p in enumerate(long_pe):\n long_points.append( Phase(x, p, long_kappa[i], long_fp[j], 'long') ) \n \n for point in long_points:\n point.determine_type()\n point.set_plot_props()\n \n plot_data(points, long_points, save_base, xi_L, Pe)",
"def main():\n\tmodel = load_model(args.model)\n\tif not args.retrain:\n\t\twhile True:\n\t\t\tvideos = glob.glob(args.input + '/*.mov')\n\t\t\tif len(videos) > 0:\n\t\t\t\tlabel(videos, model)\n\t\t\telse:\n\t\t\t\tsleep(10)\n\telif args.retrain:\n\t\t\tpath, file = os.path.split(args.model)\n\t\t\tmodel.save(path + '/old_' + file)\n\n\t\t\tX, Y = process_train_data(args.train)\n\t\t\tX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state = 24)\n\n\t\t\tes_cb = EarlyStopping(monitor='val_loss', patience=2, mode='auto', restore_best_weights=True)\n\t\t\thistory = model.fit(X_train, y_train, batch_size=32, epochs=50, callbacks=[es_cb], validation_data=(X_test, y_test), verbose=int(args.debug))\n\t\t\tmodel.save(args.model)\n\n\t\t\tmaintain(args.train, 600)",
"def generate_notes(model, training_notes, note_translator):\n\n # pick a random sequence from the input as a starting point for the prediction\n sequence_length = model.sequence_length\n note_reverse_translator = {index: note for note, index in note_translator.items()}\n init_state = data_preprocess.prepare_predict_init_state(training_notes, sequence_length)\n\n # create lists for model samples and actual notes created\n X = [note_translator[note] for note in init_state]\n prediction_output = init_state\n\n # copy model to computation device and set to evaluation mode\n device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n model.eval()\n\n # generate notes\n notes_to_generate = 512\n for i in range(notes_to_generate):\n input_tensor = torch.tensor(X).view(1, -1).to(device)\n all_next_notes_logits = model(input_tensor)\n next_note_logits = all_next_notes_logits[:, -1, :] # take the last vector of the sequence\n\n next_note_index = sample_prediction(next_note_logits, \"\")\n # _, next_note_index = torch.max(next_note_logits, dim=2)\n # next_note_index = next_note_index.cpu().item()\n\n # next_notes = [note_reverse_translator[note] for note in next_notes_index.cpu().numpy().flatten()]\n next_note_index = next_note_index.cpu().item()\n next_note = note_reverse_translator[next_note_index]\n prediction_output.append(next_note)\n X.append(next_note_index)\n X = X[1:] # advance to next prediction using generated note\n\n return prediction_output",
"def main(args):\n\n\n train_file = args.train_data\n truth_file = args.truth_file\n path_to_model = args.path_to_save_model\n\n with open(train_file, \"r\") as f:\n task = [np.array(x.split(\";\"), dtype=\"float64\") for x in f.readlines()]\n task = np.array([x[1:] for x in task]) \n task = task[:,]\n\n with open(truth_file, \"r\") as f:\n ref = [x.split(\";\") for x in f.readlines()]\n ref_cat = np.array([float(x[1].replace(\"\\n\", \"\")) for x in ref])\n\n max_length = 500\n task_HomLen = seqq.pad_sequences(task, maxlen=max_length, dtype=\"float64\")\n task_HomLenRes = task_HomLen.reshape((-1, max_length, 1)) \n\n n_train = 1800000\n n_trainval = 2000000\n trainX, testX = task_HomLenRes[:n_train, :], task_HomLenRes[n_train:n_trainval, :]\n trainY, testY = ref_cat[:n_train], ref_cat[n_train:n_trainval]\n\n # define model\n optimizer = Adam(lr=0.001, decay=1e-6)\n\n model = Sequential()\n model.add(Conv1D(filters=64, kernel_size=8, activation='relu', input_shape=(max_length,1)))\n model.add(Conv1D(filters=64, kernel_size=8, activation='relu'))\n model.add(Bidirectional(LSTM(32,return_sequences=True)))\n model.add(Dropout(0.1))\n model.add(Bidirectional(LSTM(32,return_sequences=True)))\n model.add(Dropout(0.1))\n model.add(Bidirectional(LSTM(32,return_sequences=True)))\n model.add(Dropout(0.1))\n model.add(Bidirectional(LSTM(32,return_sequences=False))) \n model.add(Dense(1, activation='linear'))\n\n model.compile(loss=\"mean_absolute_error\", optimizer=optimizer, metrics=[\"mean_absolute_error\"])\n \n es = EarlyStopping(monitor='val_mean_absolute_error', mode='min', verbose=1, patience=20)\n mc = ModelCheckpoint(path_to_model + 'task1_len_400_500-{epoch:03d}-{val_mean_absolute_error:.4f}.h5', monitor='val_mean_absolute_error', mode='min', verbose=0, save_best_only=True)\n\n print('------------------------------------------------------------------------')\n print(f'Training ...')\n \n history = model.fit(trainX, trainY, validation_data=(testX, testY), epochs=200, verbose=1, callbacks=[es, mc])\n\n print(\"Program finished.\")\n sys.stdout.flush()",
"def main(args):\n # read data from disk\n X_train, X_val, X_test, Y_train, Y_val, Y_test = util_fruit.get_fruit_data()\n\n # build model\n model = get_model(model_name=\"fruit_classifier\")\n model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n )\n # augment training and testing data\n datagen, X_train, X_val = augment_data(X_train, X_val)\n\n # save model and training history\n model_folder_path = util_fruit.make_model_folder(model_name=MODEL_NAME)\n\n # Callbacks\n callbacks=[]\n cp_callback = ModelCheckpoint(\"{}/{}_callback.hdf5\".format(model_folder_path, MODEL_NAME),\n monitor='val_loss',\n verbose=0,\n save_best_only=True,\n save_weights_only=False,\n period=10)\n callbacks.append(cp_callback)\n\n # train model and save train history \n batch_size = args.batch_size\n epochs = args.epochs\n history = train(model, X_train, X_val, Y_train, Y_val, callbacks, datagen, batch_size=batch_size, epochs=epochs)\n\n # winsound.Beep(5000, 10) # play sound after training completes\n \n \n save_model(model=model, save_folder_path=model_folder_path, model_name=MODEL_NAME, model_ext=MODEL_EXT)\n save_history(history=history, save_folder_path=model_folder_path)\n\n # evaluate model and save results-\n evaluate(model=model, datagen=datagen, X_test=X_test, Y_test=Y_test, batch_size=batch_size, save_folder_path=model_folder_path)",
"def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)",
"def prediction():\r\n\r\n loaded_model = load_model('imageTrainedModel.h5')\r\n print(loaded_model.summary())\r\n\r\n # retrieve history also:\r\n f = open('history.pckl', 'rb')\r\n history = pickle.load(f)\r\n f.close()\r\n\r\n print(history.keys())\r\n print(history)\r\n\r\n epochs = len(history['loss']) # length of the list stored at 'loss'\r\n # Plot losses for train and validation\r\n plt.figure()\r\n plt.title('Loss as training progresses')\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.plot(history['loss'], label='Train Error')\r\n plt.plot(history['val_loss'], label='Val Error')\r\n plt.legend()\r\n plt.show()\r\n\r\n # Plot metrics\r\n plt.plot(history['acc']) # use same metric that was used for training. 'history' is a dictionary.\r\n plt.title('Accuracy as training progresses')\r\n plt.ylabel('Accuracy (%)')\r\n plt.xlabel('Epoch')\r\n ymax = max(history['acc'])\r\n xpos = history['acc'].index(ymax)\r\n xmax = xpos\r\n plt.annotate('Maximum accuracy: %s' % round(ymax, 3),\r\n xy=(xmax, ymax), xycoords='data',\r\n xytext=(0.5, 0.5), textcoords='axes fraction',\r\n fontsize=12)\r\n plt.show()\r\n\r\n # make predictions using x_test\r\n test_y_predictions = loaded_model.predict(x_test, batch_size=None, verbose=1, steps=None)\r\n test_y_predictions = np.around(test_y_predictions, decimals=0) # round to whole integers\r\n true_false_array = np.equal(y_test, test_y_predictions) # test of equality.\r\n true_count = np.sum(true_false_array) # number of correctly categorised images\r\n false_count = true_false_array.shape[0] - true_count # number of images not correctly categorised\r\n\r\n # Plot predicted and actual image categories\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111)\r\n plt.title('Classification of Image Categories')\r\n plt.ylabel('Number of Images')\r\n plt.xlabel('Image Classification')\r\n label = ['Correct', 'Incorrect']\r\n index = np.arange(len(label))\r\n plt.xticks(index, label, fontsize=10, rotation=0)\r\n ax1.bar(index, [true_count, false_count])\r\n plt.show()",
"def main(\n model=Sequential(),\n train_data=([], []), # (X, Y) or batch generator\n val_data=([], []), # (X, Y) or batch generator (no generator when using tensorboard).\n epochs=10,\n batch_size=32,\n n_train_batches=None,\n n_val_batches=None,\n loss=\"binary_crossentropy\",\n metrics=[\"accuracy\"], # None if not needed/wanted\n optimizer_name='rmsprop',\n lr=0.001,\n epsilon=1e-8,\n decay=0.0, # suggested: lr / epochs\n class_weight=None, # e.g., {0: 1., 1: 50., 2: 2.}\n save_to_dir='.temp_log', # empty: saves to tb_logs/current_datetime\n datetime_subdir=True,\n use_tensorboard=False,\n tensorboard_histogram_freq=10,\n ylabels=[],\n verbose=True,\n):\n optimizer = get_optimizer(\n optimizer_name, lr, epsilon, decay\n )\n compile_model(\n model, optimizer, loss, metrics\n )\n log_dir = setup_logdir(\n save_to_dir, datetime_subdir\n )\n callbacks_list = setup_callbacks(\n log_dir, use_tensorboard, tensorboard_histogram_freq,\n )\n history = fit_model(\n model, train_data, val_data, epochs,\n batch_size, n_train_batches, n_val_batches,\n class_weight,\n callbacks_list,\n False,\n )\n\n # if verbose:\n model_filepath = get_model_path(log_dir)\n train_score, val_score = evaluate_model(\n model_filepath, train_data, val_data,\n batch_size,\n n_train_batches, n_val_batches,\n )\n # plot_training_results()# todo\n\n save_history(history, log_dir)",
"def createModel(data, eta, epoch, file_name, plot_name, no_of_input=2):\n logging.info(f\"\\n\\n>>>>>>>>>>Starting training>>>>>>>>>>>>>>>>{file_name}\")\n logging.info(f\"eta = {str(eta)} epochs ={str(epoch)}\\n\")\n\n df = pd.DataFrame(data)\n\n X,y = prepare_data(df)\n logging.info(f\"X={X}\")\n logging.info(f\"Y={y}\") \n\n model = Perceptron(eta, epoch, no_of_input)\n\n model.fit(X, y)\n\n model.predict(X)\n\n model.total_loss()\n\n save_model(model, file_name)\n\n \n logging.info(f\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n\n if no_of_input == 2:\n save_plot(df, plot_name, model)",
"def retrain_dl(self):\n if self.print_sequential:\n print(\"Opening files...\")\n data=self.open_files()\n if self.print_sequential:\n print(\"Generating training data and labels...\")\n train_data, label_data=self.transpose_load_concat(**data)\n if self.print_sequential:\n print(\"Removing nans...\")\n train_data, label_data=self.omit_nans(train_data, label_data)\n if self.print_sequential:\n print(\"Open previously trained model...\")\n model=load_model(f'{self.working_directory}/model_{self.model_num}_current.h5')\n if self.print_sequential:\n print(\"Retrain previously trained model...\") \n history=model.fit(x=train_data, \n y=label_data, \n validation_split=self.validation_split, \n batch_size=self.batch_size, \n epochs=self.epochs, \n shuffle=True)\n pd.DataFrame(history.history).to_csv(f'/{self.working_directory}/model_{self.retrain_model_num}_{self.climate}.csv')\n save_model(model, f\"/{self.working_directory}/model_{self.retrain_model_num}_{self.climate}.h5\")\n data=None\n train_data=None\n label_data=None",
"def main():\n if args.file and not args.nomodel:\n text = read_file(args.file)\n trained_model = train_char_model(text, args.prev)\n save_model(trained_model, args.file)\n sys.exit()\n if args.model:\n trained_model = load_model(args.model)\n if args.nomodel and args.file:\n trained_model = train_char_model(read_file(args.file), args.prev)\n # generate some random text\n history = check_history(trained_model, args.prev)\n gentext = generate_text(trained_model, history, args.gen)\n print(gentext)",
"def main(database_filepath,model_filepath):\n X_train, X_test, y_train, y_test = load_data(database_filepath)\n \n print(X_train.shape,y_train.shape)\n \n print('Building model...')\n model = build_pipeline()\n \n print('Training model...')\n model.fit(X_train, y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, y_test)\n\n print('Saving model...')\n save_model(model, model_filepath)\n\n print('Trained model saved!')",
"def train_dl(self, model, data, label):\n history=model.fit(x=data, \n y=label, \n validation_split=self.validation_split, \n batch_size=self.batch_size, \n epochs=self.epochs, \n shuffle=True)\n pd.DataFrame(history.history).to_csv(f'/{self.working_directory}/model_{self.model_num}_{self.climate}.csv')\n save_model(model, f\"/{self.working_directory}/model_{self.model_num}_{self.climate}.h5\")",
"def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return",
"def save_model_history_manual(loss, val_loss, fpath):\n\n\t# Open file\n\tfid = open(fpath, 'a')\n\tlogging.info('trained at {}'.format(datetime.datetime.utcnow()))\n\tprint('iteration\\tloss\\tval_loss', file=fid)\n\n\ttry:\n\t\t# Iterate through\n\t\tfor i in range(len(loss)):\n\t\t\tprint('{}\\t{}\\t{}'.format(i + 1, \n\t\t\t\t\t\t\tloss[i], val_loss[i]),\n\t\t\t\t\t\t\tfile = fid)\n\texcept KeyError:\n\t\tprint('<no history found>', file = fid)\n\n\t# Close file\n\tfid.close()",
"def __init__(self, cell_index, stimulus_type, loss, optimizer, mean_adapt):\n\n # compile the model\n with notify('Compiling'):\n self.model.compile(loss=loss, optimizer=optimizer)\n\n # save architecture as a json file\n self.savedir = mksavedir(prefix=str(self))\n with notify('Saving architecture'):\n with open(join(self.savedir, 'architecture.json'), 'w') as f:\n f.write(self.model.to_json())\n\n # function to write data to a CSV file\n self.save_csv = partial(tocsv, join(self.savedir, 'performance'))\n self.save_csv(['Epoch', 'Iteration', 'Training CC', 'Test CC'])\n # load experimental data\n self.stimulus_type = stimulus_type\n if str(self) == 'lstm':\n numTime = self.stim_shape[0]\n self.holdout = loadexpt(cell_index, self.stimulus_type, 'test', self.stim_shape[1], mean_adapt=mean_adapt)\n self.training = loadexpt(cell_index, self.stimulus_type, 'train', self.stim_shape[1], mean_adapt=mean_adapt)\n X_train = self.training.X\n y_train = self.training.y\n X_test = self.holdout.X\n y_test = self.holdout.y\n numTrain = (int(X_train.shape[0]/numTime))*numTime\n numTest = (int(X_test.shape[0]/numTime))*numTime\n X_train = X_train[:numTrain]\n y_train = y_train[:numTrain]\n X_test = X_test[:numTest]\n y_test = y_test[:numTest]\n X_train = np.reshape(X_train, (int(numTrain/numTime), numTime, self.stim_shape[1], self.stim_shape[2], self.stim_shape[3]))\n y_train = np.reshape(y_train, (int(numTrain/numTime), numTime, 1))\n X_test = np.reshape(X_test, (int(numTest/numTime), numTime, self.stim_shape[1], self.stim_shape[2], self.stim_shape[3]))\n y_test = np.reshape(y_test, (int(numTest/numTime), numTime, 1))\n\t self.training = Batch(X_train, y_train)\n\t self.holdout = Batch(X_test, y_test)\n else:\n self.holdout = loadexpt(cell_index, self.stimulus_type, 'test', self.stim_shape[0], mean_adapt=mean_adapt)\n self.training = loadexpt(cell_index, self.stimulus_type, 'train', self.stim_shape[0], mean_adapt=mean_adapt)\n # save model information to a markdown file\n if 'architecture' not in self.__dict__:\n self.architecture = 'No architecture information specified'\n\n metadata = ['# ' + str(self), '## ' + strftime('%B %d, %Y'),\n 'Started training on: ' + strftime('%I:%M:%S %p'),\n '### Architecture', self.architecture,\n '### Stimulus', 'Experiment 10-07-15', stimulus_type, 'Mean adaptation: ' + str(mean_adapt),\n 'Cell #{}'.format(cell_index),\n '### Optimization', str(loss), str(optimizer)]\n tomarkdown(join(self.savedir, 'README'), metadata)",
"def save_training_history(hist):\n if os.path.exists(\"history/history\") != True:\n os.mknod(\"history/history\")\n with open(\"history/history\", \"wb\") as file_pi:\n pickle.dump(hist.history, file_pi)\n print(\"Training History saved\")",
"def save_history(history):\n cat_acc = history.history['categorical_accuracy']\n val_cat_acc = history.history['val_categorical_accuracy']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n np.savetxt(\n os.path.join(dbc.MODEL_PATH, \"cat_acc.txt\"), cat_acc, delimiter=\",\")\n np.savetxt(\n os.path.join(dbc.MODEL_PATH, \"val_cat_acc.txt\"), val_cat_acc,\n delimiter=\",\")\n np.savetxt(os.path.join(dbc.MODEL_PATH, \"loss.txt\"), loss, delimiter=\",\")\n np.savetxt(\n os.path.join(dbc.MODEL_PATH, \"val_loss.txt\"), val_loss, delimiter=\",\")",
"def run(_store):\n\n\t# ###################################################### FRAMEWORK SETTINGS ######################################################\n\n\t# TODO: Play with these settings\n\t#data_column_names =['RLAaccx', 'RLAaccy', 'RLAaccz']\n\tdata_column_names =['RLAaccx', 'RLAaccy', 'RLAaccz','RLAgyrx', 'RLAgyry', 'RLAgyrz','RLAmagx', 'RLAmagy', 'RLAmagz','RLAroll', 'RLApitch', 'RLAyaw'] # Right hand accelerometer; check dataset.py for feature names\n\tp_TM = 0.6 # p(TM) - the entry probability of the garbage HMM; p(G) = (1 - p(TM)); See Lee & Kim paper\n\textension_len = 0 # Additional samples before and after the training label to compensate for cut-offs\n\n\t# No need to change these\n\toutput_dir = 'img/' # Directory is needed for intermediate output when plotting\n\tdata_dimension = len(data_column_names)\n\t# Available labels:\n\t# - 17: nested label, separates training data from free-living data, this should NOT be used directly\n\t# - 18: drinking gesture\n\t# - 19: drinking gesture\n\t# - 20: drinking gesture\n\t# - 21: drinking gesture\n\tspotter_label = 90 # Virtual label for marking spotted gestures\n\n\t# Label color lookup table; 'spotter_label' is for the spotted events\n\tlabel_to_color = {17: 'c', 18: 'b', 19: 'g', 20: 'r', 21: 'm', spotter_label: 'y'}\n\n\t# ###################################################### TRAINING DATA 18 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [18] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs18 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, _extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs18):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs18]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n# ###################################################### TRAINING DATA 19 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [19] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs19 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, 
_extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs19):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs19]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n\n\n# ###################################################### TRAINING DATA 20 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [20] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs20 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, _extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs20):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs20]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n\n# ###################################################### TRAINING DATA 21 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [21] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs21 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, _extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs21):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs21]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), 
np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n\t# ###################################################### TESTING DATA ######################################################\n\n\n\tprint ('\\nExtracting testing data ...')\n\n\t# The test data only serves for testing the model and to get a quick look at the data. It is NOT intended for evaluation\n\n\t# Fetch TEST data; segments of labels and raw sample data\n\ttest_participant = 15\n\ttest_labels = [18] # The labels associated with the gesture\n\ttest_type = 'free-living' # Can be initial 'training' data, 'free-living' data or 'both'\n\ttest_seg = list(dataset.gen_seg_table(_store, _participants=test_participant, _labels=test_labels, _type=test_type)) # For for each subject\n\tassert test_seg # Ensure we got data, otherwise the query was invalid\n\ttest_seg = test_seg[0][1] # Fetch single frame; there is only one\n\ttest_seg = test_seg.iloc[1:2] # Pick the first 10 segments\n\ttest_min_start, test_max_stop = test_seg.iloc[0][0], test_seg.iloc[-1][1] # Limit TEST data frame to region of interest\n\ttest_min_start, test_max_stop = test_min_start - 300, test_max_stop + 300 # Add some margin for visibility\n\t\n\ttest_df = dataset.get_frame(_store, _participant=test_participant, _start=test_min_start, _stop=test_max_stop, columns=data_column_names)\n\n\t# Plot TEST data stream\n\t# TODO: try this\n\tif False:\n\t\tax = test_df.plot()\n\t\tplot_labels(test_seg, label_to_color)\n\t\tlut = {key: col for key, col in label_to_color.iteritems() if key in test_labels} # Reduce legend to used labels\n\t\tadd_labels_to_legend(_ax=ax, _label_to_color=lut)\n\t\tplt.title('TEST set with ground truth; participant=%d' % test_participant)\n\t\tplt.xlabel('Samples')\n\t\tplt.ylabel('Raw data')\n\t\tplt.tight_layout()\n\t\tplt.show()\n\n\t# ###################################################### BUILD HMM 18 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel = Model(name='Drink18')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel.add_states(model_states)\n\tmodel.add_transition(model.start, model_states[0], 1) # Entry transition\n\tmodel.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel.add_transitions(model_states, model_states[1:] + [model.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n# ###################################################### BUILD HMM 19 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel19 = Model(name='Drink19')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model19.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel19.add_states(model_states)\n\tmodel19.add_transition(model19.start, model_states[0], 1) # Entry transition\n\tmodel19.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel19.add_transitions(model_states, model_states[1:] + [model19.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel19.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model19, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n# ###################################################### BUILD HMM 20 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel20 = Model(name='Drink20')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model20.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel20.add_states(model_states)\n\tmodel20.add_transition(model20.start, model_states[0], 1) # Entry transition\n\tmodel20.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel20.add_transitions(model_states, model_states[1:] + [model20.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel20.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model20, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n# ###################################################### BUILD HMM 21 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel21 = Model(name='Drink21')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model21.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel21.add_states(model_states)\n\tmodel21.add_transition(model21.start, model_states[0], 1) # Entry transition\n\tmodel21.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel21.add_transitions(model_states, model_states[1:] + [model21.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel21.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model21, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n\t# ###################################################### TRAIN HMM 18 ######################################################\n\tprint ('\\nTraining the gesture model 18 ...')\n\tmodel18=model\n\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs18]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel18.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model18)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model18, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### TRAIN HMM 19 ######################################################\n\tprint ('\\nTraining the gesture model 19 ...')\n\t\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs19]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel19.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model19)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model19, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### TRAIN HMM 20 ######################################################\n\tprint ('\\nTraining the gesture model 20 
...')\n\t\n\t\n\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs20]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel20.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model20)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model20, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### TRAIN HMM 21 ######################################################\n\tprint ('\\nTraining the gesture model 21 ...')\n\t\n\t\n\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs21]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel21.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model21)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model21, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### Merger Models ######################################################\n\tprint ('\\nMerging Models ...')\n\tmodelG = Model(name='MergedModel')\n\tmodelG.add_model(model18)\n\tmodelG.add_model(model19)\n\tmodelG.add_model(model20)\n\tmodelG.add_model(model21)\n\n\t# Introduce silent states for loops\n\tmodelG_meta_entry = State(None, name=modelG.name + ' start-meta')\n\tmodelG_meta_exit = State(None, name=modelG.name + ' end-meta')\n\tmodelG.add_transition(modelG.start, modelG_meta_entry, 1)\n\tmodelG.add_transition(modelG_meta_exit, modelG.end, 1)\n\n\t# p_enter_tm specifies the probability for choosing the threshold model over the gesture model\n\tmodelG.add_transition(modelG_meta_entry, model18.start, 0.1) # Enter model 18\n\tmodelG.add_transition(modelG_meta_entry, model19.start, 0.4) # Enter model 19\n\tmodelG.add_transition(modelG_meta_entry, model20.start, 0.2) # Enter model 20\n\tmodelG.add_transition(modelG_meta_entry, model21.start, 0.3) # Enter model 21\n\tmodelG.add_transition(model18.end, modelG_meta_exit, 1)\n\tmodelG.add_transition(model19.end, modelG_meta_exit, 1)\n\tmodelG.add_transition(model20.end, modelG_meta_exit, 1)\n\tmodelG.add_transition(model21.end, modelG_meta_exit, 1)\n\n\tmodelG.bake(merge='None')\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(modelG, _output_file=output_dir + 'mergedmodel.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Merged Model')\n\t\tplt.show()\t\n\n\n\n# ###################################################### BUILD THRESHOLD HMM ######################################################\n\tprint ('\\nBuilding the threshold model ...')\n\n\t# The threshold model contains all emitting states from the gesture model; the threshold model is ergodic\n\tmodel_tm = Model(name=\"threshold\")\n\n\t# Collect all states and their self-loop 
transition probabilities\n\ttm_states = {}\n\ttrans_mat = modelG.dense_transition_matrix() # The transition matrix is created during the baking process\n\tfor i, s in enumerate(modelG.states):\n\t\tp = trans_mat[i, i]\n\t\tif not math.isinf(p):\n\t\t\ts_ = s.tied_copy() # New state but same distribution\n\t\t\ts_.name += '-tm'\n\t\t\ttm_states[s_] = math.exp(p)\n\t#exit(-1)\n\t# TODO: try to improve the threshold model by adding states\n#\t# Add a noise state\n#\tdummy = State(make_nd_distribution(data_dimension, NormalDistribution, [0, 47]), name='1337')\n#\ttm_states[dummy] = 0.6 # 60% loop probability\n\n\t# Create the ergodic graph\n\tmodel_tm_meta = State(None, name=model_tm.name + '-meta') # Virtual node to model ergodic graph\n\tfor s, p in tm_states.items():\n\t\tmodel_tm.add_state(s)\n\t\tmodel_tm.add_transition(s, s, p) # Loop\n\t\tmodel_tm.add_transition(s, model_tm_meta, 1 - p) # Return to virtual node\n\t\tmodel_tm.add_transition(model_tm_meta, s, 1) # Enter state\n\n\t# We cannot create a 'start -> meta' as that could lead to a non-emitting 'start -> meta -> end' sequence\n\t# and consequently, a non-emitting loop in the top level HMM. Thus, force a 'start -> emitting-state' transition\n\tmodel_tm.add_transitions(model_tm.start, tm_states.keys(), [1] * len(tm_states))\n\tmodel_tm.add_transition(model_tm_meta, model_tm.end, 1)\n\n\t# Normalize transition probabilities, do not merge silent states\n\tmodel_tm.bake(merge='None')\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model_tm, _output_file=output_dir + 'threshold_model.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Threshold HMM')\n\t\tplt.show()\n\n\t# ###################################################### BUILD TOP LEVEL HMM ######################################################\n\tprint ('\\nBuilding the top level model ...')\n\n\t# (1 - p_TM) (model)\n\t# /-----------> gesture_hmm -----\\\n\t# TLD_entry --> meta_entry --O O---> meta_exit --> TLD_exit\n\t# ^ \\-----------> threshold_hmm --/ |\n\t# | (p_TM) (model_tm) |\n\t# \\__________________________________________________/\n\n\t# Build the TLD model which just combines the gesture and threshold models\n\tmodel_tld = Model(name='TLD')\n\tmodel_tld.add_model(modelG) # Import gesture model as instance\n\tmodel_tld.add_model(model_tm) # Import threshold model as instance\n\n\t# Introduce silent states for loops\n\tmodel_tld_meta_entry = State(None, name=model_tld.name + ' start-meta')\n\tmodel_tld_meta_exit = State(None, name=model_tld.name + ' end-meta')\n\tmodel_tld.add_transition(model_tld.start, model_tld_meta_entry, 1)\n\tmodel_tld.add_transition(model_tld_meta_exit, model_tld.end, 1)\n\n\t# Loopback to start of model; this allows for capturing multiple gestures in one stream\n\tmodel_tld.add_transition(model_tld_meta_exit, model_tld_meta_entry, 1)\n\n\t# p_enter_tm specifies the probability for choosing the threshold model over the gesture model\n\tmodel_tld.add_transition(model_tld_meta_entry, model_tm.start, p_TM) # Enter threshold model\n\tmodel_tld.add_transition(model_tld_meta_entry, modelG.start, 1 - p_TM) # Enter gesture model\n\tmodel_tld.add_transition(model_tm.end, model_tld_meta_exit, 1)\n\tmodel_tld.add_transition(modelG.end, model_tld_meta_exit, 1)\n\n\t# Normalize transition probabilities, do not merge silent states\n\tmodel_tld.bake(merge='None')\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model_tld, _output_file=output_dir + 'tld_model.png', 
_output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Top level HMM')\n\t\tplt.show()\n\n\t# ###################################################### TEST HMMs ######################################################\n\tprint ('\\nTesting the top level model ...')\n\n\t# Hide some of the silent states to increase readability\n\tdrop_states = [model_tld_meta_entry, model_tld_meta_exit, model_tm_meta]\n\n\t# Test with limited TRAINING data; only use sets\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs):\n\n\t\t\t# Decode evaluation set using the top level model; omit meta states for readability when plotting\n\t\t\tpath = output_dir + 'TRAINING set %d' % i\n\t\t\ttest_decoded = from_data_make_sequence(train_df, _models=model_tld, _drop_states=drop_states, _output_prefix=path, _show=True)\n\t\t\tplt.title('Testing with TRAINING set %d' % i)\n\n\t\t\t# Convert the sequence to segments (intervals); result is compatible with plot_labels()\n\t\t\t# If we capture a sequence part of the gesture HMM 'model' assign the label ID 'spotter_label'\n\t\t\ttest_spotted_seg = from_sequence_make_segments(test_decoded, _model_to_label={model: spotter_label})\n\t\t\ttest_spotted_seg = test_spotted_seg[0] # We only have a single model to evaluate\n\n\t\t\t# Next plot the data stream\n\t\t\tax = train_df.plot()\n\n\t\t\t# The whole set is ground truth, no need for coloring, just color the spotted segment\n\t\t\tplot_labels(test_spotted_seg, _label_to_color=label_to_color, _alpha=0.5)\n\t\t\tlut = {key: col for key, col in label_to_color.iteritems() if key in test_labels + [spotter_label]} # Reduce legend to used labels\n\t\t\tadd_labels_to_legend(_ax=ax, _label_to_color=lut)\n\n\t\t\tplt.title('Testing with TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\n\t\t\tplt.tight_layout()\n\t\t\tplt.savefig(output_dir + 'testing with TRAINING set %d.png' % i, bbox_inches='tight')\n\t\t\tplt.show()\n\n\t# ###################################################### EVALUATE HMM ######################################################\n\tprint ('\\nEvaluating the top level model ...')\n\n\t# Demonstrate evaluation with test set\n\t# TODO: Increase the test set to cover more participants.\n\teval_labels = test_labels\n\teval_dfs = [test_df] # We can evaluate more than one test DataFrame\n\teval_segs = [test_seg]\n\t\n\n\t# Evaluate each frame individually\n\teval_spotted_segs = []\n\tfor i, (eval_df, eval_seg) in enumerate(zip(eval_dfs, eval_segs)):\n\t\n\t\t# Decode evaluation set using the top level model; omit meta states for readability when plotting\n\t\t# TODO: try '_show=True' to plot the decoded sequence as graph\n\t\tpath = output_dir + 'EVALUATION set %d' % i\n\t\teval_decoded = from_data_make_sequence(eval_df, _models=model_tld, _drop_states=drop_states, _output_prefix=path, _show=True)\n\t\tplt.title('EVALUATION set %d' % i)\n\n\t\t# Convert the sequence to segments (intervals); result is compatible with plot_labels()\n\t\t# If we capture a sequence part of the gesture HMM 'model' assign the label ID 'spotter_label'\n\t\teval_spotted_seg = from_sequence_make_segments(eval_decoded, _model_to_label={modelG: spotter_label})\n\t\t\t\t\n\t\teval_spotted_seg = eval_spotted_seg[0] # We only have a single model to evaluate\n\t\t\n\t\teval_spotted_segs.append(eval_spotted_seg)\n\n\t\t# TODO: Implement spotter performance evaluation per frame (based on eval_spotted_seg and eval_seg)\n\t\tprint ('Evaluation results for set %d ...') % 
i\n\t\tprint ('Ground truth')\n\t\tprint (eval_seg)\n\t\tprint ('Spotted segments')\n\t\tprint (eval_spotted_seg)\n\t\tprint ('')\n\t\t\n\t\t#Evaluation metrics (recall and precision) computation\n\t\tif(not(eval_spotted_seg.empty) and len(eval_spotted_seg)==1):\n\t\t\tif ((int(eval_spotted_seg['End']) <= int(eval_seg['Begin'])) or (int(eval_spotted_seg['Begin']) >= int(eval_seg['End']))):\n\t\t\t\tprint \"Precision: 0\"\n\t\t\t\tprint \"Recall: 0\"\n\t\t\telif (int(eval_spotted_seg['Begin'])<=int(eval_seg['Begin'])):\n\t\t\t\tprint \"Precision: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_seg['Begin']))/\t(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin'])))\n\t\t\t\tprecision = float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_seg['Begin']))/(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin']))\n\t\t\t\tprint \"Recall: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_seg['Begin']))/(int(eval_seg['End'])-int(eval_seg['Begin'])))\n\t\t\n\t\t\telif ((int(eval_spotted_seg['Begin'])<=int(eval_seg['End'])) and (int(eval_spotted_seg['Begin'])>=int(eval_seg['Begin']))):\n\t\t\t\tprint \"Precision: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_spotted_seg['Begin']))/(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin'])))\t\n\t\t\t\tprecision = float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_spotted_seg['Begin']))/(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin']))\n\t\t\t\tprint \"Recall: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_spotted_seg['Begin']))/(int(eval_seg['End'])-int(eval_seg['Begin'])))\n\n\t\t# Plot the evaluation data set with labels\n\t\tif True:\n\t\t\tax = eval_df.plot()\n\t\t\tplt.tight_layout()\n\t\t\tplt.title('EVALUATION set %d, label of spotted events=%d' % (i, spotter_label))\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplot_labels(eval_seg, _label_to_color=label_to_color)\n\t\t\tplot_labels(eval_spotted_seg, _label_to_color=label_to_color, _alpha=0.5)\n\n\t\t\t# Reduce legend to used labels\n\t\t\tlut = {key: col for key, col in label_to_color.iteritems() if key in eval_labels + [spotter_label]}\n\t\t\tadd_labels_to_legend(_ax=ax, _label_to_color=lut)\n\n\t\t\tplt.tight_layout()\n\t\t\tplt.savefig(output_dir + 'evaluation samples %d.png' % i, bbox_inches='tight')\n\n\t\tplt.show()\n\n\t# TODO: Implement spotter performance evaluation over all frames (based on eval_spotted_segs and eval_segs)",
"def plot_history(history, config):\n\n # Plot training and validation history\n train_acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n train_prec = history.history['precision']\n val_prec = history.history['val_precision']\n train_rec = history.history['recall']\n val_rec = history.history['val_recall']\n train_auc = history.history['auc']\n val_auc = history.history['val_auc']\n\n plt.figure(figsize=(8, 8))\n plt.subplot(2, 2, 1)\n plt.plot(train_acc, label='Training')\n plt.plot(val_acc, label='Validation')\n plt.legend(loc='lower left')\n plt.ylabel('Accuracy')\n plt.ylim([0, 1.0])\n # plt.title('Accuracy')\n\n plt.subplot(2, 2, 2)\n plt.plot(train_prec, label='Training')\n plt.plot(val_prec, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('Precision')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation Precision')\n plt.xlabel('epoch')\n\n plt.subplot(2, 2, 3)\n plt.plot(train_rec, label='Training')\n plt.plot(val_rec, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('Recall')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation Recall')\n\n plt.subplot(2, 2, 4)\n plt.plot(train_auc, label='Training')\n plt.plot(val_auc, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('AUC')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation AUC')\n plt.xlabel('epoch')\n\n plt.savefig(f\"{config['model_label']}.png\")"
] | [
"0.6482984",
"0.6248759",
"0.6107442",
"0.60848117",
"0.6071356",
"0.6057766",
"0.59756935",
"0.596948",
"0.5928477",
"0.58596635",
"0.58493274",
"0.5839817",
"0.5836883",
"0.5824653",
"0.5790684",
"0.5768602",
"0.57625675",
"0.5759772",
"0.5756882",
"0.57428855",
"0.5741854",
"0.57225573",
"0.57067764",
"0.5705681",
"0.5702729",
"0.56925106",
"0.56922007",
"0.5682308",
"0.5679966",
"0.5661924"
] | 0.6890652 | 0 |
BPM file with PROVHISTORY (old name for HISTORY) | def BPM_PROVHISTORY():
return download_from_archive("bpm_20220128_gmos-s_Ham_11_full_12amp.fits") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def history():",
"def phist():\n history = hist();\n for line in history:\n print(line, \":\", history[line])",
"def get_history(hdr):\n return hdr['HISTORY']",
"def history():\n return apology(\"TODO\")",
"def history():\n return apology(\"TODO\")",
"def history():\n return apology(\"TODO\")",
"def save_history(cube, field, filename): \n\n history.append(cube.attributes['history'])",
"def save_training_history(hist):\n if os.path.exists(\"history/history\") != True:\n os.mknod(\"history/history\")\n with open(\"history/history\", \"wb\") as file_pi:\n pickle.dump(hist.history, file_pi)\n print(\"Training History saved\")",
"def test_historyReport(self):\n history = self.o.getInterface(\"history\")\n history.interactBOL()\n history.interactEOL()\n testLoc = self.o.r.core.spatialGrid[0, 0, 0]\n testAssem = self.o.r.core.childrenByLocator[testLoc]\n fileName = history._getAssemHistoryFileName(testAssem)\n actualFilePath = os.path.join(THIS_DIR, fileName)\n expectedFileName = os.path.join(THIS_DIR, fileName.replace(\".txt\", \"-ref.txt\"))\n # copy from fast path so the file is retrievable.\n shutil.move(fileName, os.path.join(THIS_DIR, fileName))\n\n self.compareFilesLineByLine(expectedFileName, actualFilePath)\n\n # test that detailAssemblyNames() is working\n self.assertEqual(len(history.detailAssemblyNames), 1)\n history.addAllFuelAssems()\n self.assertEqual(len(history.detailAssemblyNames), 51)",
"def _grab_history(self):\n self.data['history_lines'] = []\n self.data['history_file'] = None\n self.data['history_encoding'] = None\n self.data['headings'] = []\n self.data['history_last_release'] = ''\n self.data['history_insert_line_here'] = 0\n default_location = None\n config = self.setup_cfg.config\n if config and config.has_option('zest.releaser', 'history_file'):\n default_location = config.get('zest.releaser', 'history_file')\n history_file = self.vcs.history_file(location=default_location)\n self.data['history_file'] = history_file\n if not history_file:\n logger.warn(\"No history file found\")\n return\n logger.debug(\"Checking %s\", history_file)\n history_lines, history_encoding = read_text_file(history_file)\n history_lines = history_lines.split('\\n')\n headings = utils.extract_headings_from_history(history_lines)\n if not headings:\n logger.warn(\"No detectable version heading in the history \"\n \"file %s\", history_file)\n return\n self.data['history_lines'] = history_lines\n self.data['history_encoding'] = history_encoding\n self.data['headings'] = headings\n\n # Grab last header.\n start = headings[0]['line']\n if len(headings) > 1:\n # Include the next header plus underline, as this is nice\n # to show in the history_last_release.\n end = headings[1]['line'] + 2\n else:\n end = len(history_lines)\n history_last_release = '\\n'.join(history_lines[start:end])\n self.data['history_last_release'] = history_last_release\n\n # Add line number where an extra changelog entry can be inserted. Can\n # be useful for entry points. 'start' is the header, +1 is the\n # underline, +2 is probably an empty line, so then we should take +3.\n # Or rather: the first non-empty line.\n insert = start + 2\n while insert < end:\n if history_lines[insert].strip():\n break\n insert += 1\n self.data['history_insert_line_here'] = insert",
"def generate_history(self):\n self.reporter.generate()",
"def _write_history(self):\n if self.data['history_file'] is None:\n return\n contents = '\\n'.join(self.data['history_lines'])\n history = self.data['history_file']\n write_text_file(\n history, contents, encoding=self.data['history_encoding'])\n logger.info(\"History file %s updated.\", history)",
"def save_history():\n\n mid = get_mid()\n back_file = contact_name + \"_\" + today\n\n if not os.path.isdir(back_path):\n print('WARNING: o {} directory found, creating.').format(back_path)\n os.mkdir(back_path)\n else:\n print(\"OK: {} found.\".format(back_path))\n\n os.chdir(back_path)\n with open(back_file, 'w') as bf:\n for mes in get_todays_history(mid):\n data = \"{}\\n\".format(mes)\n bf.write(data)",
"def rnase_p_model_info(filename, db_url, output):\n r2dt.write_rfam(filename, db_url, output)",
"def updateMdrizVerHistory(self,build,versions):\n _plist = self.assoc.parlist[0]\n if build == True: _output = _plist['output']\n else: _output = _plist['outdata']\n \n fhdu = pyfits.open(_output,mode='update')\n prihdr = fhdu[0].header\n \n ver_str = \"MultiDrizzle product generated using: \"\n prihdr.add_history(ver_str)\n \n for key in versions:\n if versions[key].find('\\n') < 0:\n prihdr.add_history(key+versions[key])\n else:\n # This will accomodate multi-line comments\n _ver_str = versions[key].split('\\n')\n prihdr.add_history(key)\n for val in _ver_str:\n if val.strip() != '':\n prihdr.add_history(val)\n \n #ver_str = ' MultiDrizzle Version '+str(version)\n #prihdr.add_history(ver_str)\n \n fhdu.close()\n del fhdu",
"def write_queue_history(songname: str) -> NoReturn:\n last_n = read_queue_history()\n last_n.append('{0}'.format(songname))\n last_n = last_n[-PLAYLIST_NO_REPEAT_LEN:]\n with open(os.path.join(RADIOPYO_PATH, QUEUE_HISTORY_FILE), 'w') \\\n as queue_hist:\n queue_hist.write('\\n'.join(last_n))\n return None",
"def save_history(histfile):\n readline.write_history_file(histfile)",
"def read_history (filename) :\n \n col_names = ['i', 't', 'M', 'log_L', 'log_R', 'log_T_s', \n 'log_T_c', 'log_rho_c', 'log_P_c', 'Psi_c', \n 'X_c', 'Y_c', 'X_Cc', 'X_Nc', 'X_Oc', \n 'tau_dyn', 'tau_KH', 'tau_nuc', 'L_PP',\n 'L_CNO', 'L_3a', 'L_Z', 'L_nu', 'M_He',\n 'M_C', 'M_O', 'R_He', 'R_C', 'R_O']\n\n return read_ez_zip(filename, 'summary.txt', col_names)",
"def write_history_file(config):\n readline.set_history_length(int(config.get('history', 'length')))\n readline.write_history_file(config.rh_get_data('historyFile'))",
"def get_history_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, histories.DEFAULT_FILENAME_HISTORY)",
"def make_all_files_historical():\n sql=\"UPDATE files SET is_history=1\"\n execute_query(sql)\n sql=\"UPDATE files SET should_instrument=0\"\n execute_query(sql)",
"def changelog(count, name):\n for n in name:\n print(f\"\\n \\033[1m{n} changelog\\033[0m\")\n print(head(rpm(\"-q\", \"--changelog\", n), \"-n\", count))",
"def run_save_historical_data():\n data = get_historical_submissions(TEST_SUBREDDIT, TEST_MAX)\n save_historical_submission_comments(data, TEST_SUBREDDIT + '_past_30_months_comments.csv')",
"def _append_history(ds):\n try:\n history = ds.attrs['history']\n except KeyError:\n history = \"\"\n now = datetime.datetime.now()\n prog = __file__ # os.path.basename(__file__)\n history = (now.strftime(\"%a %b %d %H:%M:%S %Y\") +\n \": {} {}\\n\".format(prog, \" \".join(sys.argv[1:])) +\n history)\n ds.attrs['history'] = history",
"def appendProcessingHistoryItem(context, item):\n projectDir = context.projectDir\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps'])\n except KeyError:\n idx = 0\n idx += 1\n \n idxStr = str(idx)\n key = GenericMetadata.HISTORY_PROTO + idxStr\n GenericMetadata._writeEntriesToSection(projectDir, GenericMetadata.HISTORY_SECTION, [key, 'numsteps'], [item, idxStr])",
"def rnase_p_model_info(filename, output):\n r2dt.write_rnase_p(filename, output)",
"def load_pt_file(fname):\n fname = str(Path(fname).expanduser())\n data = torch.load(fname)\n if \"history\" not in data:\n data[\"history\"] = {}\n return data",
"def create_new_hist(gi, galaxyemail, galaxypass, server, workflowid, files, new_hist):\n if workflowid != \"0\":\n if len(filter(None, files)) > 0:\n workflow = gi.workflows.show_workflow(workflowid)\n if new_hist is None or new_hist == \"\":\n new_hist_name = strftime(workflow['name'] + \"_%d_%b_%Y_%H:%M:%S\", gmtime())\n else:\n new_hist_name = new_hist\n gi.histories.create_history(name=new_hist_name)\n history_id = get_history_id(galaxyemail, galaxypass, server)\n else:\n pass\n else:\n if len(filter(None, files)) > 0:\n if new_hist is None or new_hist == \"\":\n new_hist_name = strftime(\"Use_Galaxy_%d_%b_%Y_%H:%M:%S\", gmtime())\n else:\n new_hist_name = new_hist\n gi.histories.create_history(name=new_hist_name)\n history_id = get_history_id(galaxyemail, galaxypass, server)\n else:\n pass\n return history_id",
"def create_readme(histfile, vb):\n\tme = \"Utils.create_readme: \"\n\treadmefile = os.path.dirname(histfile)+\"/README.txt\"\n\ttry:\n\t\tassert os.path.isfile(readmefile)\n\texcept AssertionError:\n\t\tnow = str(datetime.now().strftime(\"%Y-%m-%d %H.%M\"))\n\t\tcommit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])\n\t\theader = \"Time:\\t\"+now+\"\\nCommit hash:\\t\"+commit+\"\\n\\n\"\n\t\twith open(readmefile,\"w\") as f:\n\t\t\tf.write(header)\n\t\tif vb: print me+\"Created readme file \"+readmefile\n\treturn",
"def read_history(self):\n if path.isfile(self.HISTORY_FILE_PATH):\n return pd.read_csv(self.HISTORY_FILE_PATH)\n\n df = pd.DataFrame({}, columns=self.HISTORY_COLS)\n df.to_csv(self.HISTORY_FILE_PATH, index=False)\n return df"
] | [
"0.57269067",
"0.5609817",
"0.5568867",
"0.55137694",
"0.55137694",
"0.55137694",
"0.55124533",
"0.5498292",
"0.54757226",
"0.5428406",
"0.5398429",
"0.5387263",
"0.5287365",
"0.5286495",
"0.52550834",
"0.5235499",
"0.5219656",
"0.5213459",
"0.51678",
"0.516655",
"0.51558787",
"0.51328313",
"0.50717306",
"0.50569546",
"0.5047231",
"0.50452477",
"0.5037894",
"0.4988814",
"0.49881217",
"0.49828196"
] | 0.7360491 | 0 |
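A minimal usage sketch for the BPM_PROVHISTORY record above, assuming the function is registered as a pytest fixture and that download_from_archive() returns the local path of the cached FITS file; the test name and assertions are hypothetical and not taken from the source.

import os

def test_bpm_provhistory_downloaded(BPM_PROVHISTORY):
    # Assumption: BPM_PROVHISTORY is exposed as a pytest fixture whose
    # return value is the local path written by download_from_archive().
    assert os.path.exists(BPM_PROVHISTORY)
    assert BPM_PROVHISTORY.endswith(".fits")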
Convert AST to C{NetworkX.DiGraph} for graphics. | def ast_to_labeled_graph(tree, detailed):
g = nx.DiGraph()
for u in tree:
if hasattr(u, 'operator'):
label = u.operator
elif hasattr(u, 'value'):
label = u.value
else:
raise TypeError(
'AST node must be Operator or Terminal, '
'got instead: {u}'.format(u=u) +
', of type: {t}'.format(t=type(u)))
# show both repr and AST node class in each vertex
if detailed:
label += '\n' + str(type(u).__name__)
g.add_node(id(u), label=label)
for u, v, k in tree.edges(keys=True):
g.add_edge(id(u), id(v), label=k)
return g | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def onnx_to_graphviz(g, include_attrs=False):\n\n def get_attribute_value(attr):\n # pylint: disable=len-as-condition, no-else-return\n # For Python2:\n # - str(long) has 'L' as suffix, cast it to int\n # - always decode from utf-8 bytes to avoid 'b' prefix\n if attr.HasField('f'):\n return attr.f\n elif attr.HasField('i'):\n return int(attr.i) if six.PY2 else attr.i\n elif attr.HasField('s'):\n return attr.s.decode(\"utf-8\")\n elif attr.HasField('t'):\n return attr.t\n elif attr.HasField('g'):\n return attr.g\n elif len(attr.floats):\n return list(attr.floats)\n elif len(attr.ints):\n return [int(i) for i in attr.ints] if six.PY2 else list(attr.ints)\n elif len(attr.strings):\n return [s.decode(\"utf-8\") for s in attr.strings]\n elif len(attr.tensors):\n return list(attr.tensors)\n elif len(attr.graphs):\n return list(attr.graphs)\n else:\n raise ValueError(\"Unsupported ONNX attribute: {}\".format(attr))\n\n g2 = gv.Digraph()\n for node in g.get_nodes():\n kwarg = {}\n attr = node.attr\n if include_attrs:\n for a in attr:\n kwarg[a] = \"{}\".format(get_attribute_value(attr[a]))\n else:\n if \"shape\" in attr:\n kwarg[\"shape\"] = \"{}\".format([int(i) for i in attr[\"shape\"].ints])\n if \"broadcast\" in attr:\n kwarg[\"broadcast\"] = \"{}\".format(int(attr[\"broadcast\"].i))\n\n # display domain if it is not onnx domain\n if node.domain:\n kwarg[\"domain\"] = node.domain\n\n g2.node(node.name, op_type=node.type, **kwarg)\n for node in g.get_nodes():\n for i in node.input:\n if i:\n g2.edge(i, node.name)\n return \" \".join(g2.source.split())",
"def to_DiGraph(program):\r\n grid = {}\r\n\r\n for idx, op in enumerate(program.operations):\r\n dependencies = set(op['modes'])\r\n\r\n if 'args' in op:\r\n\r\n for a in op['args']:\r\n if isinstance(a, RegRefTransform):\r\n dependencies |= set(a.regrefs)\r\n\r\n for _, v in op['kwargs'].items():\r\n if isinstance(v, RegRefTransform):\r\n dependencies |= set(v.regrefs)\r\n else:\r\n op['args'] = []\r\n op['kwargs'] = {}\r\n\r\n cmd = Command(name=op['op'], args=op['args'], kwargs=op['kwargs'], modes=tuple(op['modes']))\r\n\r\n for q in dependencies:\r\n # Add cmd to the grid to the end of the line r.ind.\r\n if q not in grid:\r\n # add a new line to the circuit\r\n grid[q] = []\r\n\r\n grid[q].append([idx, cmd])\r\n\r\n G = nx.DiGraph()\r\n\r\n for q, cmds in grid.items():\r\n if cmds:\r\n # add the first operation on the wire that does not depend on anything\r\n attrs = cmds[0][1]._asdict()\r\n G.add_node(cmds[0][0], **attrs)\r\n\r\n for i in range(1, len(cmds)):\r\n # add the edge between the operations, and the operation nodes themselves\r\n if cmds[i][0] not in G:\r\n attrs = cmds[i][1]._asdict()\r\n G.add_node(cmds[i][0], **attrs)\r\n\r\n G.add_edge(cmds[i-1][0], cmds[i][0])\r\n\r\n return G",
"def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new",
"def _to_dot(self, detailed=False):\n g = ast_to_labeled_graph(self, detailed)\n import tulip.graphics as _graphics\n return _graphics.networkx_to_graphviz(g)",
"def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n for nd1, nd2, attrs in G.edges(data=True):\n # G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n G_new.add_edge(str(nd1), str(nd2))\n\n return G_new",
"def spn_to_digraph(root: Node) -> nx.DiGraph:\n # Check the SPN\n nodes = topological_order(root)\n if nodes is None:\n raise ValueError(\"SPN structure is not a directed acyclic graph (DAG)\")\n graph = nx.DiGraph()\n\n # Add nodes to the graph\n for node in nodes:\n if isinstance(node, Sum):\n weights = [round(float(w), 8) for w in node.weights]\n attr = {'class': Sum.__name__, 'scope': node.scope, 'weights': weights}\n elif isinstance(node, Product):\n attr = {'class': Product.__name__, 'scope': node.scope}\n elif isinstance(node, Leaf):\n params = node.params_dict()\n for name, value in params.items():\n if isinstance(value, np.ndarray): # Convert Numpy arrays into lists\n if value.dtype in [np.float32, np.float64]:\n value = value.astype(np.float64)\n params[name] = np.around(value, 8).tolist()\n else:\n params[name] = value.tolist()\n elif isinstance(value, (np.float32, np.float64)): # Convert Numpy floats into Python float\n params[name] = round(float(value), 8)\n elif isinstance(value, float): # Round Python floats\n params[name] = round(value, 8)\n attr = {'class': node.__class__.__name__, 'scope': node.scope, 'params': params}\n else:\n raise ValueError(\"Unknown node of type {}\".format(node.__class__.__name__))\n graph.add_node(node.id, **attr)\n\n # Add edges to the graph\n for node in nodes:\n for i, c in enumerate(node.children):\n graph.add_edge(c.id, node.id, idx=i)\n\n return graph",
"def _dag_to_dot(dag: DAGNode):\n # Step 0: check dependencies and init graph\n _check_pydot_and_graphviz()\n import pydot\n\n graph = pydot.Dot(rankdir=\"LR\")\n\n # Step 1: generate unique name for each node in dag\n nodes, edges = _get_nodes_and_edges(dag)\n name_generator = _DAGNodeNameGenerator()\n node_names = {}\n for node in nodes:\n node_names[node] = name_generator.get_node_name(node)\n\n # Step 2: create graph with all the edges\n for edge in edges:\n graph.add_edge(pydot.Edge(node_names[edge[0]], node_names[edge[1]]))\n # if there is only one node\n if len(nodes) == 1 and len(edges) == 0:\n graph.add_node(pydot.Node(node_names[nodes[0]]))\n\n return graph",
"def tree2gv(tree: TreeNode) -> graphviz.Graph:\n result = graphviz.Graph(\"ni\")\n # result.attr(size='12,0')\n tree2gv_helper(tree, result, \"\")\n return result",
"def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()",
"def _construct_graph(self):\n raise NotImplementedError",
"def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]",
"def build_graph(self):\n raise NotImplementedError",
"def get_graph_drawing(self):\r\n graph_drawing = graphviz.Digraph(comment=\"Directed Graph\", format=\"png\")\r\n for vertex in self.get_all_vertices():\r\n graph_drawing.node(str(vertex))\r\n for _from, _to, _cost in self.get_all_edges():\r\n graph_drawing.edge(str(_from), str(_to), label=str(_cost))\r\n return graph_drawing",
"def build_graph(self):\n pass",
"def generate_dag_graph(self):\n # generate ranom graph\n G = nx.DiGraph()\n G.add_nodes_from(range(self.nodes))\n return self.fix_graph(G)",
"def graph_cmd(args):\n\n # charge graphviz\n Digraph = load_graphviz()\n\n view = True\n for i in args:\n if i == \"noview\":\n view = False\n args.remove(i)\n break\n\n if len(args) > 0:\n if len(args) >= 2:\n r = requete('NeMo.Intf.%s:getMIBs' % args[0], { \"traverse\":args[1], \"mibs\":\"base\" })\n else:\n r = requete('NeMo.Intf.%s:getMIBs' % args[0], { \"mibs\":\"base\" })\n else:\n r = requete('NeMo.Intf.lo:getMIBs', { \"traverse\":\"all\", \"mibs\":\"base\" })\n if r is None: return\n if not 'status' in r or not 'base' in r['status']: return\n r = r['status']['base']\n\n dot = Digraph(name='NeMo.Intf', format='svg', engine='dot')\n\n dot.attr('node', fontname='Helvetica')\n #dot.attr('node', fontname='Times-Roman')\n\n for i, node in r.items():\n #dot.attr('node', tooltip=v['Flags'] if 'Flags' in v else '')\n if 'Enable' in node:\n if node['Enable'] == True:\n dot.node(i, shape='box')\n else:\n dot.node(i, shape='ellipse', color='lightgrey')\n else:\n dot.node(i, shape='box', color='lightgrey')\n\n for i, v in r.items():\n for j in v['LLIntf']:\n dot.edge(i, j)\n\n dot.render(filename=\"nemo_intf.gv\", view=view)",
"def to_graph(cls, local=False):\n try:\n from .codegen.symbolic import VariableAnalyzer\n except ImportError as e:\n raise e(\"'to_graph' requires 'pycodegen'\")\n\n g = VariableAnalyzer(cls)\n return g.to_graph(local=local)",
"def as_graph(self, graph=None):\n # at this level it works but what if we have nested structures?\n # What is a graph if not a set of links? Why do not we put all into a graph?\n if not graph:\n graph = nx.Graph()\n\n for link in self.sequence:\n logging.info(link)\n (l, r) = link.value\n (ln, rn) = link.name\n logging.info (\"Node: %s %s \" % (l.name, str(l.shannon)))\n graph.add_node(l.name, shannon=l.shannon, IC=l.IC)\n logging.info (\"Node: %s %s \" % (r.name, str(r.shannon)))\n graph.add_node(r.name, shannon=r.shannon, IC=r.IC)\n logging.info (\"Edge: %s %s %s \" % (l.name, r.name, str(link.PMI)))\n graph.add_edge(l.name, r.name, pmi=link.PMI)\n\n return graph",
"def parse_graph(self):\n\t\tnx_graph = nx.Graph()\n\t\tfor node in self.vertices:\n\t\t\tnx_graph.add_node(node)\n\n\t\tfor edge in self.edges:\n\t\t\tnode1, node2, weight = edge\n\t\t\tnx_graph.add_edge(node1, node2, weight=weight)\n\n\t\treturn nx_graph",
"def gen_graph(self):",
"def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G",
"def _build_graph(self):\n pass",
"def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g",
"def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'",
"def draw_graph(self):\n\t\tif None in self.graph:\n\t\t\tdel self.graph[None]\n\n\t\tfor vs in self.graph.itervalues():\n\t\t\tto_delete = []\n\t\t\tfor i in xrange(len(vs)):\n\t\t\t\tif vs[i] is None:\n\t\t\t\t\tto_delete.append(i)\n\n\t\t\tfor i in reversed(to_delete):\n\t\t\t\tdel vs[i]\n\n\t\tself.G=nx.Graph(self.graph)\n\n\t\tfor k,v in self.labels.iteritems():\n\t\t\tif v[:6] == 'Module':\n\t\t\t\troot = k\n\t\t\t\tbreak\n\n\t\treturn self.__dfs_plot(root)",
"def fsm2graph(fsm: FiniteStateMachine) -> Digraph:\n\n def hash_state(state):\n return hashlib.md5(state.encode()).hexdigest()\n\n dot = Digraph()\n\n for state in fsm.states:\n dot.node(\n f\"{hash_state(state)}\",\n label=f\"{state}\",\n shape=\"circle\" if state != fsm.init_state else \"doublecircle\",\n )\n\n for frm in fsm.states:\n for input in fsm.transition[frm]:\n to = fsm.transition[frm][input]\n output = fsm.emit[frm][input]\n dot.edge(\n f\"{hash_state(frm)}\", f\"{hash_state(to)}\", label=f\"{input}/{output}\"\n )\n\n return dot",
"def nx_graph_from_dot_file(dot_file_path):\n # this does not understand dot statements like X->Y,Z;\n # nx_graph = nx.nx_pydot.read_dot(dot_file_path)\n\n nodes, edges = DotTool.read_dot_file(dot_file_path)\n g = nx.DiGraph()\n g.add_edges_from(edges)\n\n return g",
"def graph(self) -> rx.PyDiGraph:\n return self._graph",
"def assemble_graph():\n response = request.body.read().decode('utf-8')\n body = json.loads(response)\n stmts_json = body.get('statements')\n stmts = stmts_from_json(stmts_json)\n ga = GraphAssembler(stmts)\n model_str = ga.make_model()\n res = {'model': model_str}\n return res",
"def network_to_dgraph_json(network):\n\n uid_prefix = \"_:\"\n node_uid_prefix = uid_prefix + \"node\"\n\n json_mutations = []\n for node, attributes in network.nodes.items():\n json_mutations.append({\n \"uid\": node_uid_prefix + str(attributes.get(\"osmid\", attributes.get(\"osmids\"))),\n \"osmids\": str(attributes.get(\"osmid\", attributes.get(\"osmids\"))),\n \"location\": {\n \"type\": \"Point\",\n \"coordinates\": [\n attributes[\"x\"],\n attributes[\"y\"]\n ]\n }\n })\n for node, neighbors in network.adj.items():\n for neighbor, edges in neighbors.items():\n json_mutations.append({\n \"uid\": node_uid_prefix + str(network.nodes[node].get(\"osmid\", network.nodes[node].get(\"osmids\"))),\n \"connects_to\": {\n \"uid\": node_uid_prefix + str(network.nodes[neighbor].get(\"osmid\", network.nodes[neighbor].get(\"osmids\"))),\n \"connects_to|osmids\": \":\".join(str(edge_attributes.get(\"osmid\", edge_attributes.get(\"osmids\"))) for edge, edge_attributes in edges.items() if \"osmid\" in edge_attributes or \"osmids\" in edge_attributes)\n }\n })\n return json_mutations"
] | [
"0.6587515",
"0.63597286",
"0.6294866",
"0.627144",
"0.6201967",
"0.6074532",
"0.60174185",
"0.6013258",
"0.5817947",
"0.58172935",
"0.57610655",
"0.5738357",
"0.57218426",
"0.5716704",
"0.5716428",
"0.5704641",
"0.5683666",
"0.5681228",
"0.5667547",
"0.5666186",
"0.5664986",
"0.5642197",
"0.5642018",
"0.5616511",
"0.5603741",
"0.5602288",
"0.5595296",
"0.55753094",
"0.55692244",
"0.5564855"
] | 0.7188409 | 0 |
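A hedged usage sketch for the ast_to_labeled_graph record above: a toy AST is modelled as a networkx MultiDiGraph whose nodes carry either an operator or a value attribute, matching the Operator/Terminal split the function expects. The Term and Op helper classes are hypothetical stand-ins, and the function itself is passed in as an argument rather than imported, since its module path is not given in the record.

import networkx as nx

class Term:
    def __init__(self, value):
        self.value = value
        self.type = 'var'

class Op:
    def __init__(self, operator):
        self.operator = operator

def demo_ast_to_labeled_graph(ast_to_labeled_graph):
    # Hypothetical toy AST for the formula (x & y); edge keys name the operand slots.
    tree = nx.MultiDiGraph()
    root, x, y = Op('&'), Term('x'), Term('y')
    tree.add_edge(root, x, key='l')
    tree.add_edge(root, y, key='r')
    g = ast_to_labeled_graph(tree, detailed=False)
    # Expect one labeled node per AST node and one labeled edge per operand slot.
    assert sorted(d['label'] for _, d in g.nodes(data=True)) == ['&', 'x', 'y']
    assert sorted(k for _, _, k in g.edges(data='label')) == ['l', 'r']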
Check that types in C{tree} are incompatible with C{domains}. | def check_for_undefined_identifiers(tree, domains):
for u in tree:
if u.type == 'var' and u.value not in domains:
var = u.value
raise ValueError(
('Undefined variable "{var}" missing from '
'symbol table:\n\t{doms}\n'
'in subformula:\n\t{f}').format(
var=var, f=tree.to_recursive_ast(), doms=domains))
if u.type not in {'str', 'num'}:
continue
# is a Const or Num
var, c = pair_node_to_var(tree, u)
if c.type == 'str':
dom = domains[var]
if not isinstance(dom, list):
raise Exception(
('String constant "{c}" assigned to non-string '
'variable "{var}" with domain:\n\t{dom}').format(
var=var, c=c, dom=dom))
if c.value not in domains[var.value]:
raise ValueError(
('String constant "{c}" is not in the domain '
'of variable "{var}"').format(var=var, c=c))
if c.type == 'num':
dom = domains[var]
if not isinstance(dom, tuple):
raise Exception(
                    ('Number: {c}, assigned to non-integer '
'variable "{var}" with domain:\n\t{dom}').format(
var=var, c=c, dom=dom))
if not dom[0] <= c.value <= dom[1]:
raise Exception(
('Integer variable "{var}", is assigned the '
'value: {c}, that is out of its domain:'
'{dom[0]} ... {dom[1]}').format(
var=var, c=c, dom=dom)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_tree_type(tree):\n return tree.type in ref",
"def test_change_domain_type_assignment_rule(self):\n pass",
"def enforce_node_consistency(self):\n # Loop over each variable (space for word) in the crossword\n # Use copy to prevent domains from being modified while looping\n for var in self.domains.copy():\n # Get all unary constraints for this variable\n for value in self.domains[var].copy():\n # Check if the value is consistent with all unary constraints\n if len(value) != var.length:\n # If not, remove the value from the domain\n self.domains[var].remove(value)\n # No return value is necessary",
"def _check_domain_additional(cls, domain: D) -> bool:\n action_space = domain.get_action_space().unwrapped()\n observation_space = domain.get_observation_space().unwrapped()\n\n if not isinstance(action_space, Iterable) and not isinstance(action_space, gym.spaces.Tuple):\n action_space = [action_space]\n if not isinstance(observation_space, Iterable) and not isinstance(observation_space, gym.spaces.Tuple):\n observation_space = [observation_space]\n\n flat_action_space = list(flatten(action_space))\n flat_observation_space = list(flatten(observation_space))\n\n print(flat_action_space)\n print(flat_observation_space)\n\n valide_action_space = True\n for x in flat_action_space:\n valide_action_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n validate_observation_space = True\n for x in flat_observation_space:\n validate_observation_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n return valide_action_space and validate_observation_space",
"def type_check(self):\n self.link_all_refs()\n self.check_ast()",
"def _is_denies_valid(self):\n if not isinstance(self.denies_, list):\n raise TypeError(\n 'denies type is %s but expected type is list: %s' % (\n type(self.denies_), self.denies_))\n\n for i, deny in enumerate(self.denies_):\n if not isinstance(deny, str):\n raise TypeError(\n 'denies[%s] type is %s but expected type is str: %s' % (\n i, type(deny), deny))",
"def test_add_domain_type_assignment_rule(self):\n pass",
"def _validate_model_domain(self, model, domain):\n if model.model_config != domain.domain_config:\n raise TypeError('Model configuration ' +\n model.model_config +\n ' not compatible with domain configuration ' +\n domain.domain_config)\n if model.version[0:2] != domain.compatible_version[0:2]:\n raise TypeError('Model version ' +\n model.version +\n ' not compatible with domain version ' +\n domain.compatible_version)\n elif model.version != domain.compatible_version:\n warnings.warn('Model minor versions ' +\n model.version +\n ' do not match domain minor versions ' +\n domain.compatible_version)",
"def _check_integrity(self):\n try:\n for q in self:\n pass\n except TreeIntegrityError:\n raise ValueError('Invalid \"tree\" parameter.')",
"def test_entity_domain() -> None:\n schema = vol.Schema(cv.entity_domain(\"sensor\"))\n\n for value in (\n \"invalid_entity\",\n \"cover.demo\",\n \"cover.demo,sensor.another_entity\",\n \"\",\n ):\n with pytest.raises(vol.MultipleInvalid):\n schema(value)\n\n assert schema(\"sensor.LIGHT\") == \"sensor.light\"\n\n schema = vol.Schema(cv.entity_domain((\"sensor\", \"binary_sensor\")))\n\n for value in (\"invalid_entity\", \"cover.demo\"):\n with pytest.raises(vol.MultipleInvalid):\n schema(value)\n\n assert schema(\"sensor.LIGHT\") == \"sensor.light\"\n assert schema(\"binary_sensor.LIGHT\") == \"binary_sensor.light\"",
"def are_two_trees_incompatible(tree1, tree2):\n leaves1 = get_leaf_set(tree1)\n leaves2 = get_leaf_set(tree2)\n shared = list(leaves1.intersection(leaves2))\n\n taxa = dendropy.TaxonNamespace(shared) # CRITICAL!!!\n\n # No topological information\n if len(shared) < 4:\n return False\n\n # Move trees onto shared leaf set\n tree1.retain_taxa_with_labels(shared)\n tree1.migrate_taxon_namespace(taxa)\n tree1.is_rooted = False\n tree1.collapse_basal_bifurcation()\n tree1.update_bipartitions()\n\n tree2.retain_taxa_with_labels(shared)\n tree2.migrate_taxon_namespace(taxa)\n tree2.is_rooted = False\n tree2.collapse_basal_bifurcation()\n tree2.update_bipartitions()\n\n # Check for compatibility\n [fp, fn] = false_positives_and_negatives(tree1, tree2)\n if fp > 0 or fn > 0:\n return True\n else:\n return False",
"def enforce_node_consistency(self):\n for node in self.domains:\n #creates a list of words per node to remove since we cannot remove the elements in a set while it is iterating\n words_to_remove= []\n\n for word in self.domains[node]:\n if len(word) != node.length:\n words_to_remove.append(word)\n\n for word in words_to_remove:\n self.domains[node].remove(word)",
"def test_validate_valid_org(self):\r\n assert self.org_tree != 0",
"def validate(tree):\n return rvalidate(tree.root, None, None, None, None, 0, set())",
"def node_type_validator(field, presentation, context, node_value, node_obj):\n the_child_nodetypes = []\n the_parent_capability_type_name = _get_requirement_in_type(context, presentation).\\\n capability\n the_parent_node_type_name = _get_requirement_in_type(context, presentation).node\n\n node_type = get_type_by_name(context, node_value, 'node_types')\n if node_type is None:\n context.validation.report(\n '\"%s\" refers to an unknown node type in \"%s\"'\n % (presentation._name, presentation._container._fullname),\n locator=presentation._get_child_locator(field.name),\\\n level=Issue.BETWEEN_FIELDS)\n return\n\n if the_parent_node_type_name:\n if not _is_parent(context, node_obj, the_parent_node_type_name, 'node_types'):\n context.validation.report(\n '\"%s\" refers to an unknown/inappropriate node type in \"%s\"'\n % (presentation._name, presentation._container._fullname),\n locator=presentation._get_child_locator(field.name),\\\n level=Issue.BETWEEN_FIELDS)\n return\n\n for the_node_type in context.presentation.presenter.service_template.node_types.\\\n iteritems():\n if the_node_type[1]._get_capabilities(context):\n the_capabilities = the_node_type[1]._get_capabilities(context)\n for the_capability in the_capabilities.iteritems():\n if _is_parent(context, the_capability[1]._get_type(context),\\\n the_parent_capability_type_name, 'capability_types'):\n the_child_nodetypes.append(the_node_type)\n\n for the_child_node_type in the_child_nodetypes:\n if _is_parent(context, the_child_node_type[1], node_obj._name, 'node_types'):\n return\n\n context.validation.report(\n '\"%s\" refers to a node type that does not match the capability requirement in \"%s\"'\n % (presentation._name, presentation._container._fullname),\n locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_FIELDS)\n return",
"def isValidCompatible(cls,root):\n valid = True\n # the order of node types in chains is restricted\n # (this would be easier if the data was in a Corpus-instance)\n allowed = NX.XDiGraph(selfloops=True)\n\n # continue from here!\n allowed.add_edge('Physical','Physical')\n allowed.add_edge('Property','Physical')\n allowed.add_edge('Process','Physical')\n allowed.add_edge('Regulation','Physical')\n\n allowed.add_edge('Property','Property')\n allowed.add_edge('Process','Property')\n allowed.add_edge('Regulation','Property')\n\n allowed.add_edge('Property','Process')\n# allowed.add_edge('Process','Process')\n allowed.add_edge('Regulation','Process')\n\n allowed.add_edge('Property','Regulation')\n# allowed.add_edge('Process','Regulation')\n allowed.add_edge('Regulation','Regulation')\n\n mapping = {}\n for a in root.find(\"ontologies\").findall(\"ontology\"):\n if a.attrib['id']=='interaction':\n for x in a.getiterator(\"ontnode\"):\n if x.attrib.has_key('effect') and x.attrib['effect'].endswith('regulation'):\n t = 'Regulation'\n else:\n t = x.attrib['onttype']\n mapping[x.attrib['id']] = t\n \n for a in root.getiterator(\"relannotation\"):\n t2type = dict( [(x.attrib['id'],x.attrib['type'])\n for x in a.findall(\"reltoken\")] )\n n2t = dict( [(x.attrib['id'],x.attrib['token'])\n for x in a.findall(\"relnode\")] )\n for x in a.findall(\"reledge\"):\n bt = t2type[n2t[x.attrib['bgn']]]\n et = t2type[n2t[x.attrib['end']]]\n bgn = mapping[bt]\n end = mapping[et]\n if not allowed.has_edge(bgn,end):\n printError(cls,inspect.stack()[1][3],\n \"%s -- %s (%s) -> %s (%s) is not a valid edge\"%\n (x.attrib['id'].split('.')[1],bgn,bt,end,et))\n valid = False\n \n return(valid)",
"def CheckLeafType(leaf):\n if leaf not in ['upper', 'lower', 'both']:\n raise ValueError()\n return leaf",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, CudaNdarrayType):\r\n raise NotImplementedError()",
"def list_node_type_or_group_type_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if values is not None:\n for value in values:\n if (get_type_by_name(context, value, 'node_types') is None) and \\\n (get_type_by_name(context, value, 'group_types') is None):\n report_issue_for_unknown_type(context, presentation, 'node type or group type',\n field.name, value)",
"def data_type_constraints_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n value = getattr(presentation, field.name)\n if value is not None:\n if presentation._get_primitive_ancestor(context) is None:\n context.validation.report(\n u'data type \"{0}\" defines constraints but does not have a primitive ancestor'\n .format(presentation._fullname),\n locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_TYPES)",
"def enforce_node_consistency(self):\n # print(\"Entered enforce_node_consistency Function\")\n # print(\"self.domains\")\n # print(self.domains)\n for mystery in self.domains:\n # print(\"!!!!!!!!!!!!\")\n # print(mystery)\n # print(self.domains[mystery])\n keep_list = set()\n while self.domains[mystery]:\n word = self.domains[mystery].pop()\n if(len(word) == mystery.length):\n keep_list.add(word)\n for word in keep_list:\n self.domains[mystery].add(word)\n # print(self.domains[mystery])\n\n # raise NotImplementedError",
"def bad_tree():\n t = graph.oval_graph.OvalNode(\n 1, \"value\", \"true\", False, [\n graph.oval_graph.OvalNode(\n 2, \"operator\", \"and\", False, [\n graph.oval_graph.OvalNode(\n 3, \"value\", \"true\", False)])])\n return",
"def test_validate_invalid(self):\r\n self.assertEqual(get_tree_and_validate(self.invalid_xml, open(self.SCHEMA, 'r').read()), 0)",
"def tree_type_checker(*ref):\n ref = tuple(ref)\n if len(ref) == 1 and isinstance(ref[0], tuple):\n # if `ref` is passed as a tuple of types\n ref = ref[0]\n # validate that all values are of NeuriteType\n for t in ref:\n NeuriteType(t)\n if NeuriteType.all in ref:\n def check_tree_type(_):\n \"\"\"Always returns true.\"\"\"\n return True\n else:\n def check_tree_type(tree):\n \"\"\"Check whether tree has the same type as ref.\n\n Returns:\n True if ref in the same type as tree.type or ref is NeuriteType.all\n \"\"\"\n return tree.type in ref\n\n return check_tree_type",
"def _validate_node(self, node):\n if not isinstance(node, self._Node):\n raise TypeError('Invalid object type!')\n if node._container != self:\n raise ValueError('Node does not belong to this list!')\n if node._index < 0 or node._index >= self._size:\n raise ValueError('Invalid node!')",
"def validate(t):\n return _dispatcher[type(t)](t)",
"def validate(self, node):",
"def _parse_domain(self, f_domain):\n\n parse_tree = PDDL_Tree.create(f_domain)\n\n assert \"domain\" in parse_tree, \"Domain must have a name\"\n self.domain_name = parse_tree [\"domain\"].named_children ()[0]\n\n # must read types before constants\n if \":types\" in parse_tree:\n if \"-\" in parse_tree[\":types\"].named_children():\n type_hierarchy = PDDL_Utils.read_type(parse_tree[\":types\"])\n self.parent_types = {subtype: parent for subtype, parent in type_hierarchy}\n self.types = set(parse_tree[\":types\"].named_children())\n self.types.discard(\"-\")\n else:\n self.types = set(parse_tree[\":types\"].named_children())\n self.parent_types = {t: None for t in self.types}\n else:\n self.types = set([Predicate.OBJECT])\n self.parent_types = {Predicate.OBJECT: None}\n\n self.agents = [a.name for a in parse_tree[\":agents\"].children]\n self.types.add('agent')\n self.parent_types['agent'] = None\n self._add_objects([(ag, 'agent') for ag in self.agents])\n\n # must read in constants before actions or predicates\n if \":constants\" in parse_tree:\n object_list = PDDL_Utils.read_type(parse_tree[\":constants\"])\n self._add_objects(object_list)\n\n #TODO this may not be correct, depending on the type hierarchy\n const_map = dict()\n for const in self.objects:\n if len(self.obj_to_type[const]) == 0:\n raise RuntimeError(\"No type for constant object %s\" % const)\n else:\n const_map[const] = list(self.obj_to_type[const])[0]\n\n self.predicates = [self.to_predicate(c, map=const_map) for c in parse_tree[\":predicates\"].children]\n\n # some predicates have this property: they are untyped.\n for predicate in self.predicates:\n if Predicate.OBJECT not in self.types and any([arg[1] == Predicate.OBJECT for arg in predicate.args]):\n for t in self.types:\n if self.parent_types[t] is None:\n self.parent_types[t] = Predicate.OBJECT\n\n self.parent_types[Predicate.OBJECT] = None\n self.types.add(Predicate.OBJECT)\n self.type_to_obj[Predicate.OBJECT] = set([])\n for obj, type_list in self.obj_to_type.items():\n type_list.add(Predicate.OBJECT)\n self.type_to_obj[Predicate.OBJECT].add(obj)\n\n # only need to do this once, obviously\n break\n\n self.actions = [self.to_action(c) for c in parse_tree.find_all(\":action\")]",
"def test_domain_and_target_type(self):\n t = Identity()\n assert t.domain_type is None\n assert t.target_type is None\n\n t = Identity(\"mpogias\")\n assert t.domain_type == \"mpogias\"\n assert t.target_type == \"mpogias\"",
"def test_domain_and_target_type(self):\n t = Linearize()\n assert t.domain_type == \"real\"\n assert t.target_type == \"real\""
] | [
"0.60200727",
"0.597314",
"0.5825701",
"0.5775754",
"0.57678396",
"0.5748894",
"0.5740573",
"0.56898326",
"0.5574514",
"0.5513598",
"0.5471777",
"0.5386306",
"0.53780425",
"0.53614146",
"0.5352406",
"0.53448987",
"0.53350884",
"0.53203106",
"0.52765816",
"0.5265692",
"0.52645195",
"0.5256449",
"0.5246905",
"0.52450705",
"0.52379614",
"0.5204361",
"0.52042115",
"0.51803976",
"0.5170588",
"0.5154672"
] | 0.6137138 | 0 |
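A simplified, self-contained sketch of the domain-checking logic in the record above, assuming a flat assignment of one variable to one constant rather than a full AST; here domains maps a variable name to either a list of strings (finite domain) or an (int, int) tuple (integer interval). The helper name and error messages are illustrative only.

def check_assignment(var, value, domains):
    # Simplified stand-in for check_for_undefined_identifiers: validate a
    # single (variable, constant) pair against the symbol table `domains`.
    if var not in domains:
        raise ValueError('Undefined variable "{v}"'.format(v=var))
    dom = domains[var]
    if isinstance(value, str):
        if not isinstance(dom, list) or value not in dom:
            raise ValueError('"{c}" is not in the domain of "{v}"'.format(c=value, v=var))
    elif isinstance(value, int):
        if not isinstance(dom, tuple) or not dom[0] <= value <= dom[1]:
            raise ValueError('{c} is out of the domain {d} of "{v}"'.format(c=value, d=dom, v=var))

# e.g. check_assignment('mode', 'on', {'mode': ['on', 'off']}) passes silently,
# while check_assignment('count', 7, {'count': (0, 5)}) raises ValueError.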
Find variable under L{Binary} operator above given node. First move up from C{nd}, stop at first L{Binary} node. Then move down, until first C{Var}. This assumes that only L{Unary} operators appear between a L{Binary} and its variable and constant operands. May be extended in the future, depending on what the tools support and is thus needed here. | def pair_node_to_var(tree, c):
# find parent Binary operator
while True:
old = c
c = next(iter(tree.predecessors(c)))
if c.type == 'operator':
if len(c.operands) == 2:
break
p, q = tree.successors(c)
v = p if q == old else q
# go down until terminal found
# assuming correct syntax for gr1c
while True:
if not tree.succ.get(v):
break
v = next(iter(tree.successors(v)))
    # now: c is the operator and v is the variable
return v, c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _expr2bddnode(expr):\n\t# print(\"_expr2bddnode\")\n\tif expr.is_zero():\n\t\treturn BDDNODEZERO\n\telif expr.is_one():\n\t\treturn BDDNODEONE\n\telse:\n\t\ttop = expr.top\n\n\t\t# Register this variable\n\t\t_ = bddvar(top.names, top.indices)\n\n\t\troot = top.uniqid\n\t\tlo = _expr2bddnode(expr.restrict({top: 0}))\n\t\thi = _expr2bddnode(expr.restrict({top: 1}))\n\t\treturn _bddnode(root, lo, hi)",
"def getVar(tree):\n if(tree.data == \"string_expression\"):\n if(tree.children[0].data == \"string\"):\n return tree.children[0].children[0]\n elif(tree.children[0].data == \"variable\"):\n return getValue(tree.children[0].children[0])\n elif(tree.children[0].data == \"string_expression\"):\n # if the child is a string expression apply getVar again on the child\n if(len(tree.children)== 2):\n return getVar(tree.children[0])+getVar(tree.children[1])\n return getVar(tree.children[0])\n elif(tree.data == \"integer\"):\n return evalInteger(tree) \n \n elif(tree.data == \"string_list\"):\n return getStringInterior(tree.children[0],[])\n return \"ERROR\"",
"def make_binary(sv, piece, o, op):\r\n here=piece.rfind(op) # look for last occurrence\r\n there=here+len(op)\r\n t1=piece[:here].strip(Space) # first term (sometimes omitted)\r\n t2=piece[there:].strip(Space) # second term must be present\r\n if not t2: \r\n print(\"\\n\", Err_op_syntax, o) # *** Syntax error in operator ***\r\n print(\" \", piece)\r\n raise ReferenceError\r\n first=tree_build(sv, t1) # process each term RECURSIVE\r\n second=tree_build(sv, t2)\r\n return (o, first, second)",
"def visit_BinOpNode(self, node: BinOpNode, symbol_table: SymbolTable) -> Union[Number, String]:\n left_node = self.visit(node.left_node, symbol_table)\n right_node = self.visit(node.right_node, symbol_table)\n if type(left_node).__name__ == 'Number' and \\\n type(right_node).__name__ == 'Number':\n if node.op_tok.token_type == TokenType.PLUS:\n return Number(left_node) + Number(right_node)\n elif node.op_tok.token_type == TokenType.MINUS:\n return Number(left_node) - Number(right_node)\n elif node.op_tok.token_type == TokenType.MULT:\n return Number(left_node) * Number(right_node)\n elif node.op_tok.token_type == TokenType.DIV:\n return Number(left_node) / Number(right_node)\n elif node.op_tok.token_type == TokenType.EXP:\n return Number(left_node) ** Number(right_node)\n elif node.op_tok.token_type == TokenType.N_EQ:\n return Number(left_node != right_node)\n elif node.op_tok.token_type == TokenType.IS_EQ:\n return Number(left_node == right_node)\n elif node.op_tok.token_type == TokenType.LTE:\n return Number(left_node <= right_node)\n elif node.op_tok.token_type == TokenType.GTE:\n return Number(left_node >= right_node)\n elif node.op_tok.token_type == TokenType.LT:\n return Number(left_node < right_node)\n elif node.op_tok.token_type == TokenType.GT:\n return Number(left_node > right_node)\n elif node.op_tok.value == 'and':\n return Number(left_node).anded_by(right_node)\n elif node.op_tok.value == 'or':\n return Number(left_node).ored_by(right_node)\n\n elif type(left_node).__name__ == 'String' and \\\n type(right_node).__name__ == 'String':\n if node.op_tok.token_type == TokenType.PLUS:\n return String(left_node.value) + String(right_node.value)\n elif node.op_tok.token_type == TokenType.MINUS:\n return String(left_node) - String(right_node)\n elif node.op_tok.token_type == TokenType.N_EQ:\n return Number(left_node != right_node)\n elif node.op_tok.token_type == TokenType.IS_EQ:\n return Number(left_node == right_node)\n elif node.op_tok.token_type == TokenType.LTE:\n return Number(left_node <= right_node)\n elif node.op_tok.token_type == TokenType.GTE:\n return Number(left_node >= right_node)\n elif node.op_tok.token_type == TokenType.LT:\n return Number(left_node < right_node)\n elif node.op_tok.token_type == TokenType.GT:\n return Number(left_node > right_node)\n\n elif (type(left_node).__name__ == 'String' and \\\n isinstance(right_node.value, int)) or \\\n type(right_node).__name__ == 'String' and \\\n isinstance(left_node.value, int):\n if node.op_tok.token_type == TokenType.MULT:\n return String(left_node.value * Number(right_node).value)\n elif type(left_node).__name__ == 'List' and \\\n type(right_node).__name__ == 'List':\n if node.op_tok.token_type == TokenType.N_EQ:\n return Number(left_node != right_node)\n elif node.op_tok.token_type == TokenType.IS_EQ:\n return Number(left_node == right_node)\n else:\n return Number(0)",
"def find_node(node, v):\n while node.value != v:\n node = node.right\n return node",
"def __recursive_backtrack(back, i, j, var_or_term):\n root = ParseTreeNode(var_or_term)\n\n if PCFG.is_variable(var_or_term):\n backpointer = back[i, j, var_or_term]\n curr_node = root\n route = backpointer[\"route\"]\n for rule_index in range(len(route) - 1):\n rule = route[rule_index]\n curr_node.rule = rule\n curr_node.children = [ParseTreeNode(rule.derivation[0])]\n curr_node = curr_node.children[0]\n\n rule = route[-1]\n if backpointer[\"type\"] == ORDINARY_BACK_POINTER:\n k = backpointer[\"k\"]\n curr_node.children = [NearCNF.__recursive_backtrack(back, i, k, rule.derivation[0]),\n NearCNF.__recursive_backtrack(back, k, j, rule.derivation[1])]\n else:\n curr_node.children = [NearCNF.__recursive_backtrack(back, i, j, rule.derivation[0])]\n\n return root",
"def move_not_inwards(s):\n if s.op == '~':\n NOT = lambda b: move_not_inwards(~b)\n a = s.args[0]\n if a.op == '~': return move_not_inwards(a.args[0]) # ~~A ==> A\n if a.op =='&': return associate('|', map(NOT, a.args))\n if a.op =='|': return associate('&', map(NOT, a.args))\n return s\n elif is_symbol(s.op) or not s.args:\n return s\n else:\n return Expr(s.op, *map(move_not_inwards, s.args))",
"def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n return result\n self.raise_error()",
"def _UnaryOperatorVariable(operatorClass=None):\n\n class unOp(operatorClass):\n def _calcValue_(self):\n return self.op(self.var[0].value)\n\n @property\n def unit(self):\n assert(hasattr(self, \"_unit\") == True)\n if self._unit is None:\n try:\n var = self._varProxy\n return self._extractUnit(self.op(var[0]))\n except:\n return self._extractUnit(self._calcValue())\n else:\n return self._unit\n\n return unOp",
"def backward_var_iter_nodup(start):\n seen = set()\n seen.add(id(start))\n \n for rank,func,var in backward_var_iter(start):\n if id(var) in seen:\n continue\n yield (rank,func,var)\n seen.add(id(var))",
"def find_variablerefs(self, node, parent, userdata):\n #- get all variable references/used\n #if node.kind.is_reference():\n #if node.kind.is_statement() :\n #if (not node.kind.is_invalid()) and (not clang.cindex.Cursor_displayname(node) == \"\") and (not node.kind == clang.cindex.CursorKind.VAR_DECL) and (not node.kind == clang.cindex.CursorKind.FUNCTION_DECL) and (not node.kind == clang.cindex.CursorKind.CALL_EXPR):\n if not ( (node.kind.is_invalid()) or (clang.cindex.Cursor_displayname(node) == \"\") or (node.kind == clang.cindex.CursorKind.VAR_DECL) or (node.kind == clang.cindex.CursorKind.FUNCTION_DECL) or (node.kind == clang.cindex.CursorKind.CALL_EXPR) or node.kind == clang.cindex.CursorKind.PARM_DECL or node.kind == clang.cindex.CursorKind.STRUCT_DECL or node.kind == clang.cindex.CursorKind.UNION_DECL or node.kind == clang.cindex.CursorKind.ENUM_DECL or node.kind == clang.cindex.CursorKind.FIELD_DECL ):\n #print \" node=%s= [line=%s, col=%s] \" % ( clang.cindex.Cursor_displayname(node) , node.location.line, node.location.column )\n self.add_entry(clang.cindex.Cursor_displayname(node) , node.location.line, node.location.column)\n \n # Recurse for children of this node\n return 2\n #for c in node.get_children():\n # find_variablerefs(c)",
"def correct_subscript(sv, tree):\r\n if not tree or tree[0]!=Special: return tree\r\n O, A, B=tree # subscripting ( e.g. cumul(L)(0) ) \r\n O=tree_join(A) # make operator from first term \r\n A, B = B, None # make subscript from second term\r\n return O, A, B",
"def make_unary(sv, piece, o, op):\r\n there=len(op) # start position of last part\r\n # if the object is subscripted / has args\r\n if piece[there:].startswith(Special+Bloc): \r\n here=piece[there+1:].find(Special) # find ending delimiter\r\n key=piece[there+1:there+here+1] # extract key for the block\r\n if piece[there+here+2:].strip(Space): # something after the block (some other subscript)\r\n first=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE \r\n last=tree_build(sv, piece[there+here+2:]) # build other subscript RECURSIVE\r\n res=(Special, first, last) # code for a subscripted object\r\n else:\r\n res=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE\r\n return res\r\n # the object is not subscripted but may have parts separated by space\r\n if Space in piece.strip(Space): return (o, tree_build(sv, piece[there:]), None) # Build RECURSIVE\r\n return make_leaf(sv, piece.strip(Space))",
"def tree_find(T, x):\n if T.is_empty:\n return False\n if x == T.label:\n return True\n if x < T.label:\n return tree_find(T.left, x)\n else:\n return tree_find(T.right, x)",
"def find(self, var):\n if var in self: return self\n elif self.outer: return self.outer.find(var)\n else:\n raise Exception(\"Unresolved symbol: %s\", var)",
"def get_value_var(self, var, data):\n \n #special case if the operand is boolean return it\n if isinstance(var, bool):\n return var\n \n try:\n #find the value for the key \n for key in str(var).split('.'):\n data = data[key]\n \n except (KeyError):\n # if key doesnt exist rather than returning None return the key as it is. This would be helpful for operands as strings\n return var\n else:\n return data",
"def _forward_search_related_op(self, graph, param):\n assert isinstance(param, VarWrapper)\n visited = {}\n for op in graph.ops():\n visited[op.idx()] = False\n stack = []\n for op in graph.ops():\n if (not op.is_bwd_op()) and (param in op.all_inputs()):\n stack.append(op)\n visit_path = []\n while len(stack) > 0:\n top_op = stack[len(stack) - 1]\n if visited[top_op.idx()] == False:\n visit_path.append(top_op)\n visited[top_op.idx()] = True\n next_ops = None\n if top_op.type() == \"conv2d\" and param not in top_op.all_inputs():\n next_ops = None\n elif top_op.type() == \"mul\":\n next_ops = None\n else:\n next_ops = self._get_next_unvisited_op(graph, visited, top_op)\n if next_ops == None:\n stack.pop()\n else:\n stack += next_ops\n return visit_path",
"def visit_BinOp(self, node):\n self.generic_visit(node)\n return to_call(self.op_to_function(node.op), [node.left, node.right])",
"def visit_UnaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.right)\n if token.type == MINUS:\n return -1 * self.visit(node.right)\n self.raise_error()",
"def pull_out_quantifications_from_left_across_binary_operator(formula):\n assert type(formula) == Formula and is_binary(formula.root)\n # Task 11.6.1\n if not is_quantifier(formula.first.root):\n return formula, proof_of_formula_eq_formula(formula)\n\n inner_formula = Formula(formula.root, formula.first.predicate, formula.second)\n eq_bin_pred, proof_eq_bin_pred = pull_out_quantifications_from_left_across_binary_operator(inner_formula)\n\n new_quantifier, axiom = get_new_quantifier_and_axioms(formula.root, formula.first.root, True)\n x = formula.first.variable\n\n eq_formula = new_quantifier + x + '[' + str(eq_bin_pred) + ']'\n want_to_proof = EQUIVALENCE_FORMAT.format(formula, eq_formula)\n prover = Prover(DEFAULT_PROOF_ASSUMPTIONS, want_to_proof)\n endl_proof = prover.add_proof(proof_eq_bin_pred.conclusion, proof_eq_bin_pred)\n\n formula_with_bin_inside = Formula(new_quantifier, x, inner_formula)\n\n endl = apply_15_or_16_axiom(formula_with_bin_inside, prover, eq_bin_pred, formula_with_bin_inside.predicate, x, x)\n endl_first = prover.add_tautological_inference(prover.proof.lines[endl].formula.second, [endl_proof, endl])\n\n instantiation_map = {\n 'R(v)': prover.substitute_term_to_formal_param(formula.first.predicate, x),\n 'Q()': str(formula.second),\n 'x': x,\n }\n endl_second = prover.add_instantiated_assumption(axiom.instantiate(instantiation_map), axiom, instantiation_map)\n prover.add_tautological_inference(want_to_proof, [endl_first, endl_second])\n\n return Formula.parse(eq_formula), prover.proof",
"def get_loop_vars(rule):\n traverse_list = [rule]\n loop_vars = []\n while(traverse_list):\n one_rule = traverse_list.pop()\n operator = one_rule[0]\n operate_nums = one_rule[1:]\n # print(\"operate_nums: %s\" % operate_nums)\n for operate_num in operate_nums:\n if isinstance(operate_num, str):\n loop_vars.append(operate_num)\n elif isinstance(operate_num, list):\n traverse_list.append(operate_num)\n else:\n continue\n # remove redundant vars, and keep the order\n ans_vars = list(set(loop_vars))\n ans_vars.sort(key=loop_vars.index)\n return ans_vars",
"def find(self, var):\n return self if (var in self) else self.outer.find(var)",
"def visit_UnaryOpNode(self, node: UnaryOpNode, symbol_table: SymbolTable) -> Number:\n number = self.visit(node.node, symbol_table)\n\n if node.op_tok.token_type == TokenType.MINUS:\n return number * Number(-1)\n elif node.op_tok.token_type == TokenType.PLUS:\n return number\n elif node.op_tok.value == 'not':\n return number.notted_by()",
"def special_binary(sv, tree):\r\n o, A, B=tree\r\n # solve unary minus\r\n if o==Minus:\r\n x,y,z=B\r\n if isnumber(x) or isduration(x): return ('-'+x, None, None) # just insert '-' prefix to numbers and durations\r\n return (Mult, ('-1', None, None), B) # replace with -1 * B\r\n \r\n # solve unary pick and sort\r\n if o in [Pick, Sort]:\r\n return (o, B, B)\r\n\r\n if o==Isnot:\r\n if B==(Empty, None, None): return (Within, B, A)\r\n return tree\r\n \r\n print(Err_missing_args) # *** Error: missing argument ***\r\n print(\"???\", o, tree_join(B))\r\n raise ReferenceError",
"def relop_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n if node.op == '<':\n value = left_value < right_value\n elif node.op == '<=':\n value = left_value <= right_value\n elif node.op == '>':\n value = left_value > right_value\n elif node.op == '>=':\n value = left_value >= right_value\n elif node.op == '==':\n value = left_value == right_value\n elif node.op == '!=':\n value = left_value != right_value\n else:\n raise RuntimeError('unknown operator: ' + node.op)\n return value",
"def pull_out_quantifications_from_left_across_binary_operator(formula:\r\n Formula) -> \\\r\n Tuple[Formula, Proof]:\r\n assert has_uniquely_named_variables(formula)\r\n assert is_binary(formula.root)\r\n # Task 11.7.1\r\n\r\n\r\n prover = Prover(Prover.AXIOMS.union(ADDITIONAL_QUANTIFICATION_AXIOMS))\r\n\r\n # Basic Case - No quantifier to change n = 0 and no n = 1\r\n if not is_quantifier(formula.first.root):\r\n ccl = equivalence_of(formula, formula)\r\n prover.add_tautology(ccl)\r\n return formula, prover.qed()\r\n\r\n\r\n # Without the predicate\r\n form = Formula(formula.root, formula.first.predicate, formula.second)\r\n pred, proof = pull_out_quantifications_from_left_across_binary_operator(form)\r\n\r\n my_quantifier = formula.first.root\r\n\r\n # Define (or change) the quantifier and define the axioms depending on the binary operator\r\n if formula.root == \"->\":\r\n if my_quantifier == \"A\":\r\n my_quantifier = \"E\"\r\n axiom_scd = 10\r\n else: # \"E\"\r\n my_quantifier = \"A\"\r\n axiom_scd = 11\r\n\r\n elif formula.root == \"&\":\r\n axiom_scd = 2 if my_quantifier == \"A\" else 3\r\n\r\n else: # \"|\" or\r\n axiom_scd = 6 if my_quantifier == \"A\" else 7\r\n\r\n\r\n\r\n # proof for changing quantifier\r\n # because add_proof() is my friend\r\n step1 = prover.add_proof(proof.conclusion, proof)\r\n\r\n form2 = Formula(\"->\", proof.conclusion, equivalence_of(Formula(my_quantifier, formula.first.variable, form),\r\n Formula(my_quantifier, formula.first.variable, pred)))\r\n my_map2 = {'R': str(form.substitute({formula.first.variable: Term(\"_\")})),\r\n 'Q': str(pred.substitute({formula.first.variable: Term(\"_\")})), \"x\": formula.first.variable, \"y\": formula.first.variable}\r\n\r\n step2 = prover.add_instantiated_assumption(form2,\r\n ADDITIONAL_QUANTIFICATION_AXIOMS[14 if my_quantifier==\"A\" else 15], my_map2)\r\n\r\n step3 = prover.add_mp(equivalence_of(Formula(my_quantifier, formula.first.variable, form),\r\n Formula(my_quantifier, formula.first.variable, pred)), step1, step2)\r\n\r\n\r\n my_map4 = {'R': str(formula.first.predicate.substitute({formula.first.variable: Term(\"_\")})), \"x\": formula.first.variable, \"Q\" : str(formula.second)}\r\n form4 = equivalence_of(formula, Formula(my_quantifier, formula.first.variable, form))\r\n step4 = prover.add_instantiated_assumption(form4,\r\n ADDITIONAL_QUANTIFICATION_AXIOMS[axiom_scd], my_map4)\r\n\r\n prover.add_tautological_implication(equivalence_of(formula, Formula(my_quantifier, formula.first.variable, pred)), [step3, step4])\r\n\r\n return Formula(my_quantifier, formula.first.variable, pred), prover.qed()",
"def extract_var(node):\n if (node[\"nodeType\"] == \"VariableDeclarationStatement\"):\n for var_decl in node[\"declarations\"]:\n if(var_decl[\"nodeType\"] == \"VariableDeclaration\"):\n return var_decl\n else:\n return node",
"def expression(self, rbp=0):\n t = self.token\n self.token = next(self.token_gen)\n left = t.nud()\n while rbp < self.token.lbp:\n t = self.token\n self.token = next(self.token_gen)\n left = t.led(left)\n return left",
"def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)",
"def _pick_variable(self, variable_and_offsets):\n\n if isinstance(self.operand, MemoryOperand):\n if len(variable_and_offsets) > 1:\n log.error(\"Instruction %#x has two memory operands. Please report it on GitHub.\", self.insn.addr)\n return variable_and_offsets[0]\n\n elif isinstance(self.operand, RegisterOperand):\n # there might be multiple register-type variables for an instruction. pick the right one is... not easy\n\n the_reg = self.operand.register\n if the_reg is None:\n # huh, it does not have a Register child\n return None, None\n\n reg_name = the_reg.reg\n arch = self.instance.project.arch\n\n if len(variable_and_offsets) == 1:\n # only one candidate...\n var, offset = variable_and_offsets[0]\n if arch.registers[reg_name][0] == var.reg:\n return var, offset\n return None, None\n\n if self.operand_index > 0:\n # this is the source operand\n # which variable is read here?\n for var, offset in variable_and_offsets:\n if arch.registers[reg_name][0] == var.reg:\n if self._variable_has_access(var, self.insn.addr, \"read\"):\n return var, offset\n\n log.debug(\n \"Cannot find any source variable for operand %d at instruction %#x.\",\n self.operand_index,\n self.insn.addr,\n )\n return None, None\n\n # this is the destination operand\n # which variable is written here?\n for var, offset in variable_and_offsets:\n if arch.registers[reg_name][0] == var.reg and self._variable_has_access(var, self.insn.addr, \"write\"):\n return var, offset\n\n log.debug(\n \"Cannot find any destination variable for operand %d at instruction %#x.\",\n self.operand_index,\n self.insn.addr,\n )\n # just return the first one\n return None, None\n\n else:\n # what's this type? why am I here?\n log.error(\"_pick_variable: Unsupported operand type %s.\", self.operand.__class__)\n\n return None, None"
] | [
"0.5339228",
"0.52295554",
"0.50677377",
"0.5041437",
"0.5031985",
"0.49616277",
"0.48720962",
"0.48576695",
"0.4832301",
"0.48263368",
"0.47855213",
"0.4772769",
"0.47136208",
"0.46906525",
"0.46764454",
"0.46512195",
"0.46261036",
"0.46183684",
"0.46052608",
"0.45750728",
"0.45705682",
"0.45703688",
"0.45495152",
"0.45419198",
"0.45385164",
"0.45379034",
"0.45237297",
"0.45171976",
"0.45168546",
"0.45042253"
] | 0.59162974 | 0 |
Raise exception if set intersects existing variable name, or values. Values refers to arbitrary finite data types. | def _check_var_conflicts(s, variables):
# check conflicts with variable names
vars_redefined = {x for x in s if x in variables}
if vars_redefined:
raise Exception('Variables redefined: {v}'.format(v=vars_redefined))
# check conflicts with values of arbitrary finite data types
for var, domain in variables.items():
# not arbitrary finite type ?
if not isinstance(domain, list):
continue
# var has arbitrary finite type
conflicting_values = {x for x in s if x in domain}
if conflicting_values:
raise Exception(
'Values redefined: {v}'.format(v=conflicting_values)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_badxvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {1, 2, 3}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_badyvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {1, 2, 3}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_bad_setitem(self):\n space = Space()\n\n # The name of an integer must be a of `str` type.\n # Integers are reversed for indexing the OrderedDict.\n with pytest.raises(TypeError) as exc:\n space[5] = Integer(\"yolo\", \"uniform\", -3, 6)\n assert \"string\" in str(exc.value)\n\n # Only object of type `Dimension` are allowed in `Space`.\n with pytest.raises(TypeError) as exc:\n space[\"ispis\"] = \"nope\"\n assert \"Dimension\" in str(exc.value)\n\n # Cannot register something with the same name.\n space.register(Integer(\"yolo\", \"uniform\", -3, 6))\n with pytest.raises(ValueError) as exc:\n space.register(Real(\"yolo\", \"uniform\", 0, 6))\n assert \"another name\" in str(exc.value)",
"def _validate_set(val):\n if not isinstance(val, set):\n raise ValueError(\"Passed value {} is not a set\".format(val))\n if not all([isinstance(char, str) for char in val]):\n raise ValueError(\"Passed overrides of non-string to overrides\")\n return val",
"def _variable(self, name, vars_set):\n if not re.match(r\"[_a-zA-Z][_a-zA-Z0-9]*$\", name):\n self._syntax_error(\"Not a valid name\", name)\n vars_set.add(name)",
"def _set_values(self, sel_rows, sel_cols, value):\n global get_missing\n \n varvals = self._varvals\n typlist = self._typlist\n varlist = self._varlist\n # copy typlist to check changes against later\n old_typlist = [typlist[i] for i in sel_cols] \n \n alt_missing = False\n \n for row_num, i in zip(sel_rows, range(len(sel_rows))):\n row = value[i]\n for col_num, k in zip(sel_cols, range(len(sel_cols))):\n val = row[k]\n st_type = typlist[col_num]\n if st_type == 32768:\n if all(not isinstance(val, t) \n for t in (str, bytes, bytearray)):\n msg = (\"values in \\\"\" + varlist[col_num] + \n \"\\\" must be str or bytes\")\n raise TypeError(msg)\n elif st_type <= 2045:\n if isinstance(val, str):\n val_len = len(val)\n typlist[col_num] = (32768 if val_len > 2045 \n else max(st_type, val_len))\n elif val is None or isinstance(val, MissingValue):\n val = ''\n alt_missing = True\n elif isinstance(val, bytes) or isinstance(val, bytearray):\n typlist[col_num] = 32768\n else:\n msg = (\"\\\"\" + varlist[col_num] + \"\\\" cannot \" + \n \"take non-string values\")\n raise TypeError(msg)\n else:\n if any(isinstance(val, t) \n for t in (str, bytes, bytearray)):\n msg = (\"\\\"\" + varlist[col_num] + \"\\\" cannot take \" + \n \"string or bytes values; has Stata type \" + \n self._get_type_name(st_type))\n raise TypeError(msg)\n elif val is None:\n val = MISSING\n alt_missing = True\n elif isinstance(val, MissingValue):\n pass\n elif (not isinstance(val, float) and \n not isinstance(val, int)):\n msg = (\"value in right-hand position \" + \n \"{},{} is not of recognized type\".format(i, k))\n raise TypeError(msg)\n elif (-1.7976931348623157e+308 > val or\n val > 8.988465674311579e+307):\n val = get_missing(val)\n alt_missing = True\n elif st_type >= 65528: # int types\n if (val != int(val) or -2147483647 > val or \n val > 2147483620): \n # val is not int or is outside of bounds of long\n typlist[col_num] = 65526 # double\n elif st_type >= 65529 and not (-32767 <= val <= 32740):\n # st_type int, but val outside of bounds\n typlist[col_num] = 65528 # long\n elif st_type == 65530 and not (-127 <= val <= 100): \n # st_type byte, but val outside of bounds\n typlist[col_num] = 65529 # int\n else: # was float or double and will continue to be\n if (st_type == 65527 and \n (-1.7014117331926443e+38 > val or\n val > 1.7014117331926443e+38)): \n # st_type float, but outisde of bounds\n typlist[col_num] = 65526 # double\n # This should maybe just set value to missing?\n # Stata sets value to missing, \n # does not promote float to double.\n \n varvals[row_num][col_num] = val\n \n if not self._quiet: \n # Record seen columns. \n # Use a set because same column can appear multiple times.\n seen_cols = set()\n smcl = \"{txt}\" if IN_STATA else \"\"\n msg = smcl + \"Stata type for {} was {}, now {}\"\n for old_type,c in zip(old_typlist, sel_cols):\n new_type = typlist[c]\n if old_type != new_type and c not in seen_cols:\n old_name = self._get_type_name(old_type)\n new_name = self._get_type_name(new_type)\n print(msg.format(varlist[c], old_name, new_name))\n seen_cols.add(c)\n \n smcl = \"{err}\" if IN_STATA else \"\"\n if alt_missing:\n print(smcl + \"warning: some missing values inserted\")",
"def _raise_value_error(is_gt, tracker, seq):\n if is_gt:\n raise TrackEvalException(\n 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)\n else:\n raise TrackEvalException(\n 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '\n 'Is data corrupted?' % (tracker, seq))",
"def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError, 'unknown keys in values'",
"def value_error(var, _list):\n\n #if not any(r):\n if len(_list) == 2:\n divisor = \" or \"\n elif len(_list) > 2:\n divisor = \", \"\n\n print(_list)\n print(len(_list))\n raise ValueError(\"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n _list)), var_type=var))",
"def verify(self, values):\n s = set(values)\n if not s.issubset(self.keys):\n raise ValueError('unknown keys in values')",
"def checkValName(self):\n valLength = len(self.val)\n if valLength == 0:\n try:\n valsLength = len(self.val)\n if valsLength == 0:\n self.val = self.vals\n except Exception:\n print \"No value set\"",
"def test_select_var_failed_if_bad_var_name(self):\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[]):\n with self.assertRaises(ValueError):\n fix_metadata(\n cubes=[\n self._create_mock_cube('not_me'),\n self._create_mock_cube('me_neither')\n ],\n short_name='short_name',\n project='CMIP6',\n dataset='model',\n mip='mip',\n )",
"def test_validation_modes(member, set_values, values, raising_values):\n\n class MemberTest(Atom):\n m = member\n\n tester = MemberTest()\n for sv, v in zip(set_values, values):\n tester.m = sv\n assert tester.m == v\n\n for rv in raising_values:\n with pytest.raises(\n OverflowError\n if (isinstance(member, Int) and isinstance(rv, float) and rv > 2**32)\n else ValueError\n if isinstance(member, Enum)\n else TypeError\n ):\n tester.m = rv",
"def _set_values(self, sel_rows, sel_cols, value):\n global get_missing\n \n varvals = self._varvals\n typlist = self._typlist\n varlist = self._varlist\n old_typlist = [typlist[i] for i in sel_cols]\n \n str_clipped = False\n alt_missing = False\n \n for row_num, i in zip(sel_rows, range(len(sel_rows))):\n row = value[i]\n for col_num, k in zip(sel_cols, range(len(sel_cols))):\n val = row[k]\n st_type = typlist[col_num]\n if st_type <= 244:\n if isinstance(val, str):\n val_len = len(val)\n if val_len > 244:\n val = val[:244]\n val_len = 244\n str_clipped = True\n if val_len > st_type:\n typlist[col_num] = val_len\n elif val is None or isinstance(val, MissingValue):\n val = ''\n alt_missing = True\n else:\n msg = (\"\\\"\" + varlist[col_num] + \"\\\" cannot \" + \n \"take non-string values\")\n raise TypeError(msg)\n else:\n if isinstance(val, str):\n msg = (\"\\\"\" + varlist[col_num] + \"\\\" cannot take \" + \n \"string values; has Stata type \" + \n self._get_type_name(st_type))\n raise TypeError(msg)\n elif val is None:\n val = MISSING\n alt_missing = True\n elif isinstance(val, MissingValue):\n pass\n elif not (isinstance(val, float) or isinstance(val, int)):\n msg = (\"value in right-hand position \" + \n \"{},{} is not of recognized type\".format(i, k))\n raise TypeError(msg)\n elif (-1.7976931348623157e+308 > val or\n val > 8.988465674311579e+307):\n val = get_missing(val)\n alt_missing = True\n elif st_type <= 253: # int types\n if (val != int(val) or -2147483647 > val or\n val > 2147483620):\n typlist[col_num] = 255 # double\n elif st_type <= 252 and not (-32767 <= val <= 32740):\n typlist[col_num] = 253 # long\n elif st_type == 251 and not (-127 <= val <= 100):\n typlist[col_num] = 252 # int\n else: # was float and will continue to be\n if (st_type == 254 and \n (-1.7014117331926443e+38 > val or\n val > 1.7014117331926443e+38)):\n typlist[col_num] = 255 # double\n # This should maybe just set value to missing?\n # Stata sets value to missing, \n # does not promote float to double.\n varvals[row_num][col_num] = val\n \n if not self._quiet:\n seen_cols = set() # same column can appear multiple times\n smcl = \"{txt}\" if IN_STATA else \"\"\n msg = smcl + \"Stata type for {} was {}, now {}\"\n for old_type,c in zip(old_typlist, sel_cols):\n new_type = typlist[c]\n if old_type != new_type and c not in seen_cols:\n old_name = self._get_type_name(old_type)\n new_name = self._get_type_name(new_type)\n print(msg.format(varlist[c], old_name, new_name))\n seen_cols.add(c)\n \n smcl = \"{err}\" if IN_STATA else \"\"\n if str_clipped:\n msg = \"warning: some strings were shortened to 244 characters\"\n print(smcl + msg)\n if alt_missing:\n print(smcl + \"warning: some missing values inserted\")",
"def test_interval_load_duplicate_name_raises(self, months):\n register = NDimensionalRegister()\n register.register(IntervalSet(\"months\", months))\n with raises(ValueError):\n register.register(IntervalSet(\"months\", months))",
"def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing",
"def _check_determinancy(self, values, errors, combo):\n val, err = self.used_vars(values, errors, combo)\n n, m = len(val), len(self.get_equations(combo))\n\n if n != m:\n if m > n:\n s = '>'\n t = 'remove'\n v = err\n else:\n s = '<'\n t = 'add'\n v = val\n\n a = abs(n - m)\n\n raise ValueError('Indeterminant system:: Number of equations ({}) '\n '{} number of unknowns ({}). To correct, {} ({}) errors in {} '\n 'or adjust the input equations.'.format(m, s, n, t, a, v))",
"def check_set_command(self, line):\n self.E_str = \"check_set_command\"\n err_msg = \"The set command takes the syntax 'set <set type> <data name>\"\n err_msg += \" to <set value>' e.g. 'set system data to pentacene'\"\n\n # Check syntax\n words = line.split()\n\n if len(words) != 5:\n self.print_error(\"Too many words found!\\n\\n\" + err_msg)\n\n # Check undeclared variables\n _, set_folder, var_name, _, set_name = words\n if var_name not in self.variables:\n self.print_error(f\"Undeclared variable {var_name}\" + \"\\n\\n\" + err_msg)\n\n # Run set command for error checking variables later\n self.parse_set_cmd(line)",
"def check_for_undefined_identifiers(tree, domains):\n for u in tree:\n if u.type == 'var' and u.value not in domains:\n var = u.value\n raise ValueError(\n ('Undefined variable \"{var}\" missing from '\n 'symbol table:\\n\\t{doms}\\n'\n 'in subformula:\\n\\t{f}').format(\n var=var, f=tree.to_recursive_ast(), doms=domains))\n\n if u.type not in {'str', 'num'}:\n continue\n\n # is a Const or Num\n var, c = pair_node_to_var(tree, u)\n\n if c.type == 'str':\n dom = domains[var]\n\n if not isinstance(dom, list):\n raise Exception(\n ('String constant \"{c}\" assigned to non-string '\n 'variable \"{var}\" with domain:\\n\\t{dom}').format(\n var=var, c=c, dom=dom))\n\n if c.value not in domains[var.value]:\n raise ValueError(\n ('String constant \"{c}\" is not in the domain '\n 'of variable \"{var}\"').format(var=var, c=c))\n\n if c.type == 'num':\n dom = domains[var]\n\n if not isinstance(dom, tuple):\n raise Exception(\n ('Number: {c}, assigned to non-integer ' + str(c) +\n 'variable \"{var}\" with domain:\\n\\t{dom}').format(\n var=var, c=c, dom=dom))\n\n if not dom[0] <= c.value <= dom[1]:\n raise Exception(\n ('Integer variable \"{var}\", is assigned the '\n 'value: {c}, that is out of its domain:'\n '{dom[0]} ... {dom[1]}').format(\n var=var, c=c, dom=dom))",
"def _checkValues(set_):\n if len(set_)<3: return False\n x = set_[2]\n # TODO: OPT: need optimization\n if (x is None) or len(x) == 0: return False # undefined\n for v in x:\n try:\n if Nlabels <= 2 and N.isscalar(v):\n continue\n if (isinstance(v, dict) or # not dict for pairs\n ((Nlabels>=2) and len(v)!=Nlabels) # 1 per each label for multiclass\n ): return False\n except Exception, e:\n # Something else which is not supported, like\n # in shogun interface we don't yet extract values per each label or\n # in pairs in the case of built-in multiclass\n if __debug__:\n debug('ROC', \"Exception %s while checking \"\n \"either %s are valid labels\" % (str(e), x))\n return False\n return True",
"def check_value_error_exception_raised(correct_parameters: dict, unexpected_values: list, class_or_function) -> None:\n for key, value in unexpected_values:\n incorrect_parameters_dict = dict(correct_parameters)\n incorrect_parameters_dict[key] = value\n with pytest.raises(ValueError):\n class_or_function(**incorrect_parameters_dict)",
"def test_members_are_set_when_args_are_invalid(self):\n\n self.assertRaises(ValueError, Vec3, \"abc\", 6, \"q\")",
"def _check(self):\n for molname in self.options.keys():\n for key in self.options[molname].keys():\n if key in [\"Ncopies\"]:\n try:\n self.options[molname][key]=int(self.options[molname][key])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n if key in [\"Cutoff\"]:\n try:\n self.options[molname][key]=float(self.options[molname][key])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n if key in [\"Addon\"]: # test the addon part and convert variables\n for item in self.options[molname][key]: # Iterate over all attachments\n if item is not None:\n # attachment point\n dtypes={\"attachment\":int}\n try:\n item[\"attachment\"]=int(item[\"attachment\"])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n # position\n #~ try:\n #~ print self.options[molname][key][\"position\"]\n #~ self.options[molname][key][\"position\"]=int(self.options[molname][key][\"position\"])\n #~ except:\n #~ raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))",
"def test_badxvaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, (1, 2), 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )",
"def try_wrong_types(self, p, name, type_):\n for x in (1, 1.0, \"x\", True, np.ndarray,):\n if type(x) != type_:\n with self.assertRaises(TypeError, msg=f\"{name} {type_} {x}\"):\n setattr(p, name, x)",
"def test_set_bad_name(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4, shape=(4, 4))\n with pytest.raises(TypeError):\n dim.name = 4",
"def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")",
"def check_variable_line(self, line):\n self.E_str = \"check_variable_line\"\n line, any_vars = self.find_vars_in_str(line)\n words = [i for i in line.split('=') if i]\n words = self.fix_words(words)\n\n if len(words) < 2:\n self.print_error(\"The syntax for declaring variables is: \"\n + \"'<name> = <value>'\")"
] | [
"0.61296064",
"0.5936811",
"0.577528",
"0.5726689",
"0.56829995",
"0.5656397",
"0.5629316",
"0.55581087",
"0.5551229",
"0.55102354",
"0.54137504",
"0.53667116",
"0.534853",
"0.5334453",
"0.5313276",
"0.530166",
"0.53014976",
"0.52841115",
"0.52704513",
"0.5249702",
"0.5235442",
"0.5230851",
"0.51984304",
"0.5180844",
"0.5170272",
"0.51570493",
"0.5151438",
"0.514572",
"0.51453215",
"0.51441854"
] | 0.6406304 | 0 |
Setter method for peer_group_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/config/peer_group_name (string) | def _set_peer_group_name(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="peer-group-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="string",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """peer_group_name must be of a type compatible with string""",
"defined-type": "string",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="peer-group-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=True)""",
}
)
self.__peer_group_name = t
if hasattr(self, "_set"):
self._set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_evpn_neighbor_peergroup_name(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\\\\\\\\\\\\\\\@#\\\\+\\\\*\\\\(\\\\)=\\\\{~\\\\}%<>=$_\\\\[\\\\]\\\\|]{0,62})'}), is_leaf=True, yang_name=\"evpn-neighbor-peergroup-name\", rest_name=\"evpn-neighbor-peergroup-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Word:1-63;;Peer Group Name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='bgp-peergroup', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"evpn_neighbor_peergroup_name must be of a type compatible with bgp-peergroup\"\"\",\n 'defined-type': \"brocade-bgp:bgp-peergroup\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\\\\\\\\\\\\\\\@#\\\\+\\\\*\\\\(\\\\)=\\\\{~\\\\}%<>=$_\\\\[\\\\]\\\\|]{0,62})'}), is_leaf=True, yang_name=\"evpn-neighbor-peergroup-name\", rest_name=\"evpn-neighbor-peergroup-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Word:1-63;;Peer Group Name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='bgp-peergroup', is_config=True)\"\"\",\n })\n\n self.__evpn_neighbor_peergroup_name = t\n if hasattr(self, '_set'):\n self._set()",
"def _get_peer_group_name(self):\n return self.__peer_group_name",
"def _get_peer_group_name(self):\n return self.__peer_group_name",
"def set_group_name(self, name):\n params = [('groupname', name, 'cdata')]\n\n self.get(COMMAND_UIC, 'SetGroupName', params)",
"def set_group_name(self, name):\n self.groupname = name",
"def get_group_name(self, group_id):\n group = self.table.query.filter_by(group_id=group_id).first()\n return group.name",
"def _get_evpn_neighbor_peergroup_name(self):\n return self.__evpn_neighbor_peergroup_name",
"def dunning_group_name(self, dunning_group_name):\n\n self._dunning_group_name = dunning_group_name",
"def RenameGroup(self, group, new_name, reason=None):\n body = {\n \"new_name\": new_name,\n }\n\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/rename\" %\n (GANETI_RAPI_VERSION, group)), query, body)",
"def error_group_name(self, error_group_name):\n\n self._error_group_name = error_group_name",
"def group_name(self):\n\n return self._group_name",
"def signing_group_name(self, signing_group_name):\n\n self._signing_group_name = signing_group_name",
"def participants_group_name(self):\n return self.short_name+\"_participants\"",
"def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")",
"def get_group_name(self):\n return self.groupname",
"def ad_group_name(self):\n\n return self._ad_group_name",
"def ad_group_name(self):\n\n return self._ad_group_name",
"def set_group(self, group: str) -> None:\n self.group = group",
"def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def subnet_group_name(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_group_name\")",
"def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")",
"def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")",
"def group_name_hierarchy(self, group_name_hierarchy):\n\n self._group_name_hierarchy = group_name_hierarchy",
"def get_group_name(name: str) -> str:\n if is_shortcut_name(name):\n return name.split(config.name_separator)[0]\n raise CHCShortCutNameError(name)",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def parameter_group_name(self) -> Optional[str]:\n return pulumi.get(self, \"parameter_group_name\")"
] | [
"0.6388651",
"0.55820066",
"0.55820066",
"0.555519",
"0.5260953",
"0.48957533",
"0.48141342",
"0.48051596",
"0.47079813",
"0.4597745",
"0.45123902",
"0.44226515",
"0.43617657",
"0.43595254",
"0.43145773",
"0.43107888",
"0.43107888",
"0.42116663",
"0.42069572",
"0.41871294",
"0.41818482",
"0.41762957",
"0.41762957",
"0.41177392",
"0.4102843",
"0.40838936",
"0.40838936",
"0.40838936",
"0.40838936",
"0.40741286"
] | 0.78954 | 1 |
Setter method for route_flap_damping, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/config/route_flap_damping (boolean) | def _set_route_flap_damping(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="route-flap-damping",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """route_flap_damping must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="route-flap-damping", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__route_flap_damping = t
if hasattr(self, "_set"):
self._set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_route_flap_damping(self):\n return self.__route_flap_damping",
"def _get_route_flap_damping(self):\n return self.__route_flap_damping",
"def flap(self) -> None:\n self.delta_time = 0.2\n self.velocity = 37",
"def disable_bgp_route_propagation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_bgp_route_propagation\")",
"def disable_bgp_route_propagation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_bgp_route_propagation\")",
"def disable_bgp_route_propagation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disable_bgp_route_propagation\")",
"def _set_lsp_config_frr_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-configured\", rest_name=\"lsp-config-frr-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-configured\", rest_name=\"lsp-config-frr-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_adaptive(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-adaptive\", rest_name=\"lsp-config-adaptive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_adaptive must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-adaptive\", rest_name=\"lsp-config-adaptive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_adaptive = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_frr_bandwidth_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-bandwidth-configured\", rest_name=\"lsp-config-frr-bandwidth-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_bandwidth_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-bandwidth-configured\", rest_name=\"lsp-config-frr-bandwidth-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_bandwidth_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_frr_forwarding_protected_up(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-frr-forwarding-protected-up\", rest_name=\"lsp-frr-forwarding-protected-up\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_frr_forwarding_protected_up must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-frr-forwarding-protected-up\", rest_name=\"lsp-frr-forwarding-protected-up\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_frr_forwarding_protected_up = t\n if hasattr(self, '_set'):\n self._set()",
"def flap(self):\n\n if self.pos_y > -2 * IMAGES['player'][0].get_height():\n self.vel_y = self.acc_flap\n self.flapped = True\n self.last_flapped = time()",
"def setSolverDamping(*argv):",
"def load_conf_permban_with_frozensand(self):\n self._permban_with_frozensand = False\n if self.config.has_option('server', 'permban_with_frozensand'):\n try:\n self._permban_with_frozensand = self.config.getboolean('server', 'permban_with_frozensand')\n except ValueError, err:\n self.warning(err)\n\n self.info(\"Send permbans to Frozen Sand : %s\" % ('yes' if self._permban_with_frozensand else 'no'))",
"def eval_damping():\n # Environment\n env = WAMBallInCupSim(num_dof=7, max_steps=1500)\n\n # Policy (random init)\n policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))\n policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)\n\n # Do the rolllouts\n t_all = []\n qpos_all = []\n dp_vals = [0.0, 0.01, 0.1, 0.5, 1.0]\n print_cbt(f\"Run policy for damping coefficients: {dp_vals}\")\n for dpv in dp_vals:\n env.reset(\n domain_param=dict(\n joint_1_damping=dpv,\n joint_2_damping=dpv,\n joint_3_damping=dpv,\n joint_4_damping=dpv,\n joint_5_damping=dpv,\n joint_6_damping=dpv,\n joint_7_damping=dpv,\n )\n )\n ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)\n t_all.append(ro.time[:-1])\n qpos_all.append(ro.env_infos[\"qpos\"])\n\n # Plot\n fig, ax = plt.subplots(nrows=env.num_dof, sharex=\"all\", figsize=(16, 7))\n for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):\n ax[i].set_prop_cycle(color=plt.get_cmap(\"cividis\")(np.linspace(0, 1, env.num_dof)))\n ax[i].set_ylabel(f\"joint {idx_joint+1} pos [rad]\")\n for j in range(len(dp_vals)):\n ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls=\"--\", label=f\"d = {dp_vals[j]}\")\n if i == 0:\n ax[i].legend(ncol=len(dp_vals))\n ax[-1].set_xlabel(\"time [s]\")\n plt.suptitle(\"Evaluation of joint damping coefficients\")\n plt.show()",
"def _set_lsp_config_frr_priority_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-priority-configured\", rest_name=\"lsp-config-frr-priority-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_priority_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-priority-configured\", rest_name=\"lsp-config-frr-priority-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_priority_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def calc_force_from_damping(v, damping, masses):\n F = masses*damping*np.diff(v, 0)\n\n return F",
"def lap(self, lap=\"__lap__\"):\n t = time.time()\n self.laps[lap] = t\n return t",
"def _set_lsp_config_notify_ospf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-notify-ospf\", rest_name=\"lsp-config-notify-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_notify_ospf must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-notify-ospf\", rest_name=\"lsp-config-notify-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_notify_ospf = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_frr_admin_group_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-admin-group-configured\", rest_name=\"lsp-config-frr-admin-group-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_admin_group_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-admin-group-configured\", rest_name=\"lsp-config-frr-admin-group-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_admin_group_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_shortcut_ospf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-shortcut-ospf\", rest_name=\"lsp-config-shortcut-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_shortcut_ospf must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-shortcut-ospf\", rest_name=\"lsp-config-shortcut-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_shortcut_ospf = t\n if hasattr(self, '_set'):\n self._set()",
"def _get_lsp_config_frr_configured(self):\n return self.__lsp_config_frr_configured",
"def _set_lsp_config_frr_hop_limit_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-hop-limit-configured\", rest_name=\"lsp-config-frr-hop-limit-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_hop_limit_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-hop-limit-configured\", rest_name=\"lsp-config-frr-hop-limit-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_hop_limit_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_isis_announce_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-isis-announce-configured\", rest_name=\"lsp-config-isis-announce-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_isis_announce_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-isis-announce-configured\", rest_name=\"lsp-config-isis-announce-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_isis_announce_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_path_select_mode_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-path-select-mode-configured\", rest_name=\"lsp-config-path-select-mode-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_path_select_mode_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-path-select-mode-configured\", rest_name=\"lsp-config-path-select-mode-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_path_select_mode_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def lap(self):\n current_time = time.perf_counter()\n ret = current_time - self._lap\n if abs(ret) != ret:\n ret = self._time_corruption\n self._lap = current_time\n return ret",
"def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()",
"def prevent_drift(self) -> Optional[bool]:\n return pulumi.get(self, \"prevent_drift\")",
"def set_fedcm_delay_enabled(self, enabled):\n pass",
"def spBellmanFord(self, node, returnPaths = False):\n # Initialize working dictionaries and next edges to check\n curr = dict(map(lambda k: (k, (0, k) if k == node else (self.Inf, None)),\\\n self.__nodes.keys()))\n prev = {}\n edges = self.__nodes[node][\"tails\"]\n \n # Iterate through maximum number of edges in valid paths\n for i in xrange(1, self.__nodeCount):\n # Break if no edges to check (i.e. no nodes updated in previous\n # iteraton)\n if len(edges) == 0: break\n \n # Swap working dictionaries and initialize empty list of nodes\n # updated this iteration\n prev = curr\n curr = prev.copy()\n updated = []\n \n # Iterate through edges to check\n for e in edges:\n # Get edge components\n tail, head, weight = self.__edges[e]\n \n # Update head node if new path length less than previous\n if prev[tail][0] + weight < curr[head][0]:\n curr[head] = (prev[tail][0] + weight, tail)\n updated.append(head)\n \n # Determine edges to check in next iteration\n edges = []\n for node in set(updated): edges.extend(self.__nodes[node][\"tails\"])\n \n # Return None if negative cycle found\n for e in edges:\n tail, head, weight = self.__edges[e]\n if curr[tail][0] + weight < curr[head][0]: return None\n \n # Return shortest paths\n return self.__reconstructPath(curr, returnPaths)",
"def _set_lsp_config_ospf_aaf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-ospf-aaf\", rest_name=\"lsp-config-ospf-aaf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_ospf_aaf must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-ospf-aaf\", rest_name=\"lsp-config-ospf-aaf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__lsp_config_ospf_aaf = t\n if hasattr(self, '_set'):\n self._set()"
] | [
"0.6670155",
"0.6670155",
"0.50464815",
"0.49564946",
"0.49564946",
"0.4852906",
"0.47725207",
"0.45903733",
"0.44792357",
"0.42650697",
"0.4256708",
"0.42126364",
"0.41603813",
"0.4116638",
"0.41036993",
"0.40746707",
"0.40386954",
"0.4013948",
"0.40102044",
"0.40004253",
"0.39728242",
"0.39094612",
"0.38987383",
"0.3898457",
"0.3898119",
"0.38976932",
"0.38859314",
"0.3885012",
"0.3838198",
"0.38203302"
] | 0.8421709 | 1 |
Setter method for peer_group_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/config/peer_group_name (string) | def _set_peer_group_name(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="peer-group-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="string",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """peer_group_name must be of a type compatible with string""",
"defined-type": "string",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="peer-group-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=True)""",
}
)
self.__peer_group_name = t
if hasattr(self, "_set"):
self._set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_evpn_neighbor_peergroup_name(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\\\\\\\\\\\\\\\@#\\\\+\\\\*\\\\(\\\\)=\\\\{~\\\\}%<>=$_\\\\[\\\\]\\\\|]{0,62})'}), is_leaf=True, yang_name=\"evpn-neighbor-peergroup-name\", rest_name=\"evpn-neighbor-peergroup-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Word:1-63;;Peer Group Name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='bgp-peergroup', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"evpn_neighbor_peergroup_name must be of a type compatible with bgp-peergroup\"\"\",\n 'defined-type': \"brocade-bgp:bgp-peergroup\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\\\\\\\\\\\\\\\@#\\\\+\\\\*\\\\(\\\\)=\\\\{~\\\\}%<>=$_\\\\[\\\\]\\\\|]{0,62})'}), is_leaf=True, yang_name=\"evpn-neighbor-peergroup-name\", rest_name=\"evpn-neighbor-peergroup-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Word:1-63;;Peer Group Name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='bgp-peergroup', is_config=True)\"\"\",\n })\n\n self.__evpn_neighbor_peergroup_name = t\n if hasattr(self, '_set'):\n self._set()",
"def _get_peer_group_name(self):\n return self.__peer_group_name",
"def _get_peer_group_name(self):\n return self.__peer_group_name",
"def set_group_name(self, name):\n params = [('groupname', name, 'cdata')]\n\n self.get(COMMAND_UIC, 'SetGroupName', params)",
"def set_group_name(self, name):\n self.groupname = name",
"def get_group_name(self, group_id):\n group = self.table.query.filter_by(group_id=group_id).first()\n return group.name",
"def _get_evpn_neighbor_peergroup_name(self):\n return self.__evpn_neighbor_peergroup_name",
"def dunning_group_name(self, dunning_group_name):\n\n self._dunning_group_name = dunning_group_name",
"def RenameGroup(self, group, new_name, reason=None):\n body = {\n \"new_name\": new_name,\n }\n\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/rename\" %\n (GANETI_RAPI_VERSION, group)), query, body)",
"def error_group_name(self, error_group_name):\n\n self._error_group_name = error_group_name",
"def group_name(self):\n\n return self._group_name",
"def signing_group_name(self, signing_group_name):\n\n self._signing_group_name = signing_group_name",
"def participants_group_name(self):\n return self.short_name+\"_participants\"",
"def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")",
"def get_group_name(self):\n return self.groupname",
"def ad_group_name(self):\n\n return self._ad_group_name",
"def ad_group_name(self):\n\n return self._ad_group_name",
"def set_group(self, group: str) -> None:\n self.group = group",
"def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def subnet_group_name(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_group_name\")",
"def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")",
"def group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_name\")",
"def group_name_hierarchy(self, group_name_hierarchy):\n\n self._group_name_hierarchy = group_name_hierarchy",
"def get_group_name(name: str) -> str:\n if is_shortcut_name(name):\n return name.split(config.name_separator)[0]\n raise CHCShortCutNameError(name)",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()",
"def parameter_group_name(self) -> Optional[str]:\n return pulumi.get(self, \"parameter_group_name\")"
] | [
"0.63907355",
"0.5583477",
"0.5583477",
"0.55552965",
"0.5261759",
"0.48967224",
"0.48166996",
"0.48066276",
"0.4708673",
"0.45989397",
"0.45126435",
"0.4424181",
"0.43623227",
"0.43602124",
"0.43156803",
"0.43112826",
"0.43112826",
"0.42138124",
"0.42072046",
"0.41854343",
"0.41826075",
"0.41771603",
"0.41771603",
"0.41178408",
"0.4104014",
"0.40823245",
"0.40823245",
"0.40823245",
"0.40823245",
"0.40754884"
] | 0.7895534 | 0 |
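Note: the generated setter in the row above boils down to a validate-then-assign pattern around a private attribute. The sketch below is a minimal stand-in, not the pyangbind class: PeerGroupConfig, its example value, and the paired getter (mirroring the _get_peer_group_name negatives) are illustrative only; the real code wraps the value in YANGDynClass and carries the YANG metadata shown above.

class PeerGroupConfig:
    """Simplified stand-in for the pyangbind-generated config class."""

    def __init__(self):
        self.__peer_group_name = None

    def _set_peer_group_name(self, v):
        # Validate first, assign only on success -- same shape as the try/except above.
        if not isinstance(v, str):
            raise ValueError({
                "error-string": "peer_group_name must be of a type compatible with string",
                "defined-type": "string",
            })
        self.__peer_group_name = v

    def _get_peer_group_name(self):
        return self.__peer_group_name

cfg = PeerGroupConfig()
cfg._set_peer_group_name("EBGP-TRANSIT")  # example value only
print(cfg._get_peer_group_name())         # EBGP-TRANSIT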
Setter method for route_flap_damping, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/config/route_flap_damping (boolean) | def _set_route_flap_damping(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="route-flap-damping",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """route_flap_damping must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="route-flap-damping", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__route_flap_damping = t
if hasattr(self, "_set"):
self._set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_route_flap_damping(self):\n return self.__route_flap_damping",
"def _get_route_flap_damping(self):\n return self.__route_flap_damping",
"def flap(self) -> None:\n self.delta_time = 0.2\n self.velocity = 37",
"def disable_bgp_route_propagation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_bgp_route_propagation\")",
"def disable_bgp_route_propagation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_bgp_route_propagation\")",
"def disable_bgp_route_propagation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disable_bgp_route_propagation\")",
"def _set_lsp_config_frr_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-configured\", rest_name=\"lsp-config-frr-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-configured\", rest_name=\"lsp-config-frr-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_adaptive(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-adaptive\", rest_name=\"lsp-config-adaptive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_adaptive must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-adaptive\", rest_name=\"lsp-config-adaptive\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_adaptive = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_frr_bandwidth_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-bandwidth-configured\", rest_name=\"lsp-config-frr-bandwidth-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_bandwidth_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-bandwidth-configured\", rest_name=\"lsp-config-frr-bandwidth-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_bandwidth_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_frr_forwarding_protected_up(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-frr-forwarding-protected-up\", rest_name=\"lsp-frr-forwarding-protected-up\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_frr_forwarding_protected_up must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-frr-forwarding-protected-up\", rest_name=\"lsp-frr-forwarding-protected-up\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_frr_forwarding_protected_up = t\n if hasattr(self, '_set'):\n self._set()",
"def flap(self):\n\n if self.pos_y > -2 * IMAGES['player'][0].get_height():\n self.vel_y = self.acc_flap\n self.flapped = True\n self.last_flapped = time()",
"def setSolverDamping(*argv):",
"def load_conf_permban_with_frozensand(self):\n self._permban_with_frozensand = False\n if self.config.has_option('server', 'permban_with_frozensand'):\n try:\n self._permban_with_frozensand = self.config.getboolean('server', 'permban_with_frozensand')\n except ValueError, err:\n self.warning(err)\n\n self.info(\"Send permbans to Frozen Sand : %s\" % ('yes' if self._permban_with_frozensand else 'no'))",
"def eval_damping():\n # Environment\n env = WAMBallInCupSim(num_dof=7, max_steps=1500)\n\n # Policy (random init)\n policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))\n policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)\n\n # Do the rolllouts\n t_all = []\n qpos_all = []\n dp_vals = [0.0, 0.01, 0.1, 0.5, 1.0]\n print_cbt(f\"Run policy for damping coefficients: {dp_vals}\")\n for dpv in dp_vals:\n env.reset(\n domain_param=dict(\n joint_1_damping=dpv,\n joint_2_damping=dpv,\n joint_3_damping=dpv,\n joint_4_damping=dpv,\n joint_5_damping=dpv,\n joint_6_damping=dpv,\n joint_7_damping=dpv,\n )\n )\n ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)\n t_all.append(ro.time[:-1])\n qpos_all.append(ro.env_infos[\"qpos\"])\n\n # Plot\n fig, ax = plt.subplots(nrows=env.num_dof, sharex=\"all\", figsize=(16, 7))\n for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):\n ax[i].set_prop_cycle(color=plt.get_cmap(\"cividis\")(np.linspace(0, 1, env.num_dof)))\n ax[i].set_ylabel(f\"joint {idx_joint+1} pos [rad]\")\n for j in range(len(dp_vals)):\n ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls=\"--\", label=f\"d = {dp_vals[j]}\")\n if i == 0:\n ax[i].legend(ncol=len(dp_vals))\n ax[-1].set_xlabel(\"time [s]\")\n plt.suptitle(\"Evaluation of joint damping coefficients\")\n plt.show()",
"def _set_lsp_config_frr_priority_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-priority-configured\", rest_name=\"lsp-config-frr-priority-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_priority_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-priority-configured\", rest_name=\"lsp-config-frr-priority-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_priority_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def calc_force_from_damping(v, damping, masses):\n F = masses*damping*np.diff(v, 0)\n\n return F",
"def lap(self, lap=\"__lap__\"):\n t = time.time()\n self.laps[lap] = t\n return t",
"def _set_lsp_config_notify_ospf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-notify-ospf\", rest_name=\"lsp-config-notify-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_notify_ospf must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-notify-ospf\", rest_name=\"lsp-config-notify-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_notify_ospf = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_frr_admin_group_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-admin-group-configured\", rest_name=\"lsp-config-frr-admin-group-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_admin_group_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-admin-group-configured\", rest_name=\"lsp-config-frr-admin-group-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_admin_group_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_shortcut_ospf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-shortcut-ospf\", rest_name=\"lsp-config-shortcut-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_shortcut_ospf must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-shortcut-ospf\", rest_name=\"lsp-config-shortcut-ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_shortcut_ospf = t\n if hasattr(self, '_set'):\n self._set()",
"def _get_lsp_config_frr_configured(self):\n return self.__lsp_config_frr_configured",
"def _set_lsp_config_frr_hop_limit_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-hop-limit-configured\", rest_name=\"lsp-config-frr-hop-limit-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_hop_limit_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-frr-hop-limit-configured\", rest_name=\"lsp-config-frr-hop-limit-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_hop_limit_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_path_select_mode_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-path-select-mode-configured\", rest_name=\"lsp-config-path-select-mode-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_path_select_mode_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-path-select-mode-configured\", rest_name=\"lsp-config-path-select-mode-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_path_select_mode_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_lsp_config_isis_announce_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-isis-announce-configured\", rest_name=\"lsp-config-isis-announce-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_isis_announce_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-isis-announce-configured\", rest_name=\"lsp-config-isis-announce-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_isis_announce_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()",
"def lap(self):\n current_time = time.perf_counter()\n ret = current_time - self._lap\n if abs(ret) != ret:\n ret = self._time_corruption\n self._lap = current_time\n return ret",
"def prevent_drift(self) -> Optional[bool]:\n return pulumi.get(self, \"prevent_drift\")",
"def set_fedcm_delay_enabled(self, enabled):\n pass",
"def spBellmanFord(self, node, returnPaths = False):\n # Initialize working dictionaries and next edges to check\n curr = dict(map(lambda k: (k, (0, k) if k == node else (self.Inf, None)),\\\n self.__nodes.keys()))\n prev = {}\n edges = self.__nodes[node][\"tails\"]\n \n # Iterate through maximum number of edges in valid paths\n for i in xrange(1, self.__nodeCount):\n # Break if no edges to check (i.e. no nodes updated in previous\n # iteraton)\n if len(edges) == 0: break\n \n # Swap working dictionaries and initialize empty list of nodes\n # updated this iteration\n prev = curr\n curr = prev.copy()\n updated = []\n \n # Iterate through edges to check\n for e in edges:\n # Get edge components\n tail, head, weight = self.__edges[e]\n \n # Update head node if new path length less than previous\n if prev[tail][0] + weight < curr[head][0]:\n curr[head] = (prev[tail][0] + weight, tail)\n updated.append(head)\n \n # Determine edges to check in next iteration\n edges = []\n for node in set(updated): edges.extend(self.__nodes[node][\"tails\"])\n \n # Return None if negative cycle found\n for e in edges:\n tail, head, weight = self.__edges[e]\n if curr[tail][0] + weight < curr[head][0]: return None\n \n # Return shortest paths\n return self.__reconstructPath(curr, returnPaths)",
"def _set_lsp_config_ospf_aaf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-ospf-aaf\", rest_name=\"lsp-config-ospf-aaf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_ospf_aaf must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-ospf-aaf\", rest_name=\"lsp-config-ospf-aaf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__lsp_config_ospf_aaf = t\n if hasattr(self, '_set'):\n self._set()"
] | [
"0.6670411",
"0.6670411",
"0.50461954",
"0.495629",
"0.495629",
"0.4852504",
"0.47718793",
"0.45903903",
"0.4479416",
"0.4264815",
"0.4255928",
"0.4213232",
"0.41593775",
"0.41157565",
"0.41016784",
"0.40750417",
"0.40383345",
"0.4013707",
"0.4010021",
"0.39996752",
"0.39727268",
"0.39090574",
"0.38990456",
"0.38990277",
"0.38981107",
"0.38977146",
"0.38847834",
"0.3884556",
"0.38380656",
"0.38204002"
] | 0.8421727 | 0 |
Setter method for description, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/config/description (string) | def _set_description(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="description",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="string",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """description must be of a type compatible with string""",
"defined-type": "string",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=True)""",
}
)
self.__description = t
if hasattr(self, "_set"):
self._set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def description(self, description: ConfigNodePropertyString):\n\n self._description = description",
"def get_description(self):\n if CONFIG_KEY not in self:\n return\n if hasattr(self[CONFIG_KEY], DESC_KEY):\n desc_str = str(self[CONFIG_KEY][DESC_KEY])\n if not isinstance(desc_str, str):\n try:\n desc_str = str(desc_str)\n except Exception as e:\n raise InvalidConfigFileException(\n \"Could not convert the specified Project description \"\n \"({}) to string. Caught exception: {}\".\n format(desc_str, getattr(e, 'message', repr(e))))\n return desc_str",
"def setDescription(self, value):\n return self.getDbRecord().setColumnValue(DESCRIPTION_COLUMN, value)",
"def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc",
"def description(self, value):\n self._update_values('description', value)",
"def description(self, description):\n\n self._set_field(\"description\", description)",
"def description(self) -> ConfigNodePropertyString:\n return self._description",
"def set_description(self, sNewVmDescription):\n\t\tcall_sdk_function('PrlVmCfg_SetDescription', self.handle, sNewVmDescription)",
"def set_description(self, sDescription):\n\t\tcall_sdk_function('PrlVirtNet_SetDescription', self.handle, sDescription)",
"def description(self, description):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\")\n\n self._description = description",
"def description(self, newDescription=None):\n if newDescription != None:\n self._setValue('description', newDescription)\n return self._getValue('description')",
"def add_description(self, desc):\n self.description = desc",
"def dunning_group_description(self, dunning_group_description):\n\n self._dunning_group_description = dunning_group_description",
"def set_description(self, sNewDescription):\n\t\tcall_sdk_function('PrlVmDev_SetDescription', self.handle, sNewDescription)",
"def description(self, description) :\n\t\ttry :\n\t\t\tself._description = description\n\t\texcept Exception as e:\n\t\t\traise e",
"def set_description(self, description):\n self.description = description",
"def description(self, description: str):\n\n self._description = description",
"def description(self, description: str):\n\n self._description = description",
"def description(self, description: str):\n\n self._description = description",
"def description(self, description: str):\n\n self._description = description",
"def description(self, description):\n if description is not None and len(description) > 255:\n raise ValueError(\"Invalid value for `description`, length must be less than or equal to `255`\")\n\n self._description = description",
"def description(self, value):\n self.definition.description = value",
"def set_description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def set_description(self, description):\n self.__description = description",
"def get_config_descr(self, name):\n return self.configs[name][1]",
"def description(self, description: str):\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description"
] | [
"0.61069083",
"0.56617916",
"0.5597504",
"0.55908495",
"0.5535336",
"0.5373763",
"0.53228736",
"0.5321155",
"0.5290115",
"0.52707696",
"0.5243526",
"0.52357316",
"0.52219355",
"0.5220431",
"0.5206525",
"0.5178723",
"0.51558536",
"0.51558536",
"0.51558536",
"0.51558536",
"0.51421905",
"0.5137174",
"0.5116379",
"0.5111103",
"0.5111103",
"0.5111103",
"0.5111103",
"0.5104515",
"0.5093274",
"0.5088531"
] | 0.5761193 | 1 |
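Note: the three setters in the rows above (peer_group_name, route_flap_damping, description) back a single OpenConfig peer-group config subtree. A hedged sketch of one instance of that subtree, using only the leaves and types named in these rows -- every value below is made up for illustration:

# Instance data for /network-instances/.../bgp/peer-groups/peer-group/config,
# restricted to the three leaves shown above.
peer_group_config = {
    "peer-group-name": "EBGP-TRANSIT",        # string
    "description": "External transit peers",  # string
    "route-flap-damping": False,              # boolean, modeled default "false"
}
print(peer_group_config["peer-group-name"])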
Get IP from a long int | def get_ip_from_long(long_ip):
return socket.inet_ntoa(struct.pack('!L', long_ip)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def longToIp(longIp):\n stringIp = socket.inet_ntoa(struct.pack(\"!L\", longIp))\n return stringIp",
"def ipToLong(ip):\n packedIP = socket.inet_aton(ip)\n return struct.unpack(\"!L\", packedIP)[0]",
"def long2ip(lint):\n return socket.inet_ntoa(struct.pack(\"!I\", lint))",
"def ip_to_long(ip):\n return int(IPAddress(ip))",
"def ip2long(ip):\n return struct.unpack(\"!I\", socket.inet_aton(ip))[0]",
"def ip2long(ip):\n packedIP = socket.inet_aton(ip)\n return struct.unpack(\"!L\", packedIP)[0]",
"def ip2long(ip):\r\n\t\tpackedIP = socket.inet_aton(ip)\r\n\t\treturn struct.unpack(\"!L\", packedIP)[0]",
"def int2ip(n: int) -> str:\n return socket.inet_ntoa(struct.pack(\"!I\", n))",
"def ip_to_int(self, addr):\n return struct.unpack(\"!I\", socket.inet_aton(addr))[0]",
"def int_to_ip(self, ip_int):\n return socket.inet_ntoa(struct.pack(\"=I\", ip_int))",
"def int2ip(ipint: int) -> str:\n try:\n return socket.inet_ntoa(struct.pack(\"!I\", ipint))\n except struct.error:\n return socket.inet_ntop(\n socket.AF_INET6,\n struct.pack(\"!QQ\", ipint >> 64, ipint & 0xFFFFFFFFFFFFFFFF),\n )",
"def int2ip(int_num):\n try:\n return inet_ntoa(pack(\"!I\", int_num))\n except Exception:\n return False",
"def test_Int_to_IP(self):\n self.assertEqual(helpers.int_to_IP(0), '00000000000000000000000000000000')\n self.assertEqual(helpers.int_to_IP(2291809961), '10001000100110100011111010101001')",
"def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])",
"def get_primary_ip(options, index):\n\n second_octet = 160 + index\n return \"192.%s.1.1\" % second_octet",
"def getLong(self, addr: ghidra.program.model.address.Address) -> long:\n ...",
"def get_ip(ifn):\n sck = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(\n fcntl.ioctl(sck.fileno(), 0x8915, struct.pack(\"256s\", ifn[:15]))[20:24]\n )",
"def int_2_ip_str(ip_int):\n return socket.inet_ntoa(struct.pack(\"!I\", ip_int))",
"def int32_to_ip(int32):\n return str(ipaddress.IPv4Address(int32))",
"def pkcs_os2ip(x):\n return int(x.encode(\"hex\"), 16)",
"def test_IP_to_Int(self):\n self.assertEqual(helpers.IP_to_int('00000000000000000000000000000000'), 0)\n self.assertEqual(helpers.IP_to_int('10001000100110100011111010101001'), 2291809961)",
"def ip2int(ipstr: AnyStr) -> int:\n if isinstance(ipstr, bytes):\n data = ipstr.decode()\n else:\n data = ipstr\n try:\n return cast(int, struct.unpack(\"!I\", socket.inet_aton(data))[0])\n except socket.error:\n val1: int\n val2: int\n val1, val2 = struct.unpack(\n \"!QQ\",\n socket.inet_pton(socket.AF_INET6, data),\n )\n return (val1 << 64) + val2",
"def getIP():\n try:\n page = urlopen(\"http://www.whatismyip.com/automation/n09230945.asp\")\n IP = page.read()\n page.close()\n return IP\n except:\n return \"Could not retrieve the IP address.\"",
"def getLong(self, addr: ghidra.program.model.address.Address, bigEndian: bool) -> long:\n ...",
"def get_ip(self):",
"def _parse_inet(line):\n tokens = line.split()\n return netaddr.IPNetwork(tokens[1])",
"def bytes_to_addr(hh, ll):\n return (int(hh, 16) << 8) + int(ll, 16)",
"def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')",
"def nToIP(bytes):\n for klass in (V4Address, V6Address):\n try:\n return klass.fromBytes(bytes)\n except ValueError, ve:\n error = ve\n else:\n return ip\n\n raise error",
"def getIP():\n data = _get_page(\"http://myip.cz\")\n data = data.split(\"Your IP Address is: <b>\")[-1].split(\"</b>\")[0]\n return data.strip()"
] | [
"0.7891864",
"0.78559524",
"0.7835024",
"0.7710035",
"0.76549864",
"0.7576421",
"0.7519363",
"0.7221749",
"0.71280265",
"0.69111276",
"0.6889263",
"0.6782304",
"0.6636306",
"0.6530735",
"0.63172203",
"0.6299375",
"0.6286875",
"0.62551147",
"0.6231494",
"0.6223852",
"0.62189",
"0.6216702",
"0.61940444",
"0.61529136",
"0.61433375",
"0.6131114",
"0.6124157",
"0.61002666",
"0.60639787",
"0.60604924"
] | 0.8287169 | 0 |
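Note: the one-liner in the row above relies on socket and struct imports that the excerpt does not show. A self-contained usage sketch (the example integer is 192.168.1.1 in network byte order):

import socket
import struct

def get_ip_from_long(long_ip):
    """Convert an IPv4 address stored as an unsigned 32-bit int to dotted quad."""
    return socket.inet_ntoa(struct.pack('!L', long_ip))

print(get_ip_from_long(3232235777))  # 192.168.1.1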
Convert OpenFlow Datapath ID to human format | def datapath_id(a):
if isinstance(a, str):
dpid = "%s:%s:%s:%s:%s:%s:%s:%s" % (
a[0:2], a[2:4], a[4:6], a[6:8], a[8:10], a[10:12], a[12:14], a[14:16])
else:
string = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"
if isinstance(a, bytes):
a = a.decode("latin")
dpid = string % (ord(a[0]), ord(a[1]), ord(a[2]), ord(a[3]),
ord(a[4]), ord(a[5]), ord(a[6]), ord(a[7]))
return dpid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_lacp_sys_id(device):\n res = device.execute(\"show lacp sys-id\")\n #cli output for 'show lacp sys-id' example res: 32768, 70d3.7984.aa80\n res = ''.join([res[i] for i in range(len(res)) if i > 6])\n #Now the value in res: 70d3.7984.aa80\n res1 = ''.join([res[i] for i in range(len(res)) if res[i] != '.'])\n #Now the value in res1 : 70d37984aa80\n sys_id = ':'.join(res1[i:i + 2] for i in range(0, len(res1), 2))\n #After adding dots at required places sys id as 70:d3:79:84:aa:80\n return sys_id",
"def internal_id_to_display_id(i_id: int) -> str:\n i_id = str(i_id).zfill(9)\n return ''.join(i_id[x - 1] for x in [1, 5, 9, 6, 3, 8, 2, 4, 7])",
"def getid(data):\n return int(data.split('/')[-1])",
"def internal2spice(self,asteroid):\n return(str(2444444+asteroid.id))",
"def id_text(self) -> str:\n return self.source_system + \" - \" + self.external_id + \" (\" + str(self.internal_id) + \")\"",
"def id(self):\n return \"{model:s}--{serial:08x}\".format(model=self.model.replace('-',''), serial=self.serial_number).lower()",
"def format_unique_id(address: str) -> str:\n return address.replace(\":\", \"\").lower()",
"def idToMQTT(id:str) -> str:\n\treturn f'{id.lstrip(\"/\").replace(\"/\", \":\")}'",
"def stringifyID(point: dict, uid: Union[int, str]) -> str:\n # ordinal time for begin (b) and end (e)\n b = dt.datetime.fromisoformat(point['TBTimestamp']).strftime('%s')\n e = dt.datetime.fromisoformat(point['TETimestamp']).strftime('%s')\n # string concat of all sensor labels\n values = \"-\".join([str(sens[\"Scaled\"]) for sens in point[\"Channels\"]])\n\n idString = f\"{uid}-{b}-{e}_{values}\" # actual id string\n return idString",
"def _format_id(ns, id):\n label = '%s:%s' % (ns, id)\n label = label.replace(' ', '_')\n url = get_identifiers_url(ns, id)\n return (label, url)",
"def _short_id(video_id):\n return '-'.join(video_id.split('-')[0:2])",
"def full_id(schema_obj):\n\n return '0x%08x' % ((schema_obj.parent.number << 16) | schema_obj.number)",
"def id_format(param, **kwa):\n try:\n ns, ti = param.split('-')\n if ns and ti:\n return param\n else:\n raise ValueError\n except ValueError:\n raise ValueError('Supplied id is invalid.')",
"def id(self): # pylint: disable=invalid-name\n return \"{}:{}\".format(self.switch.dpid, self.port_number)",
"def get_device_id(self) -> str:\n return hexlify(self.message)[36:42].decode()",
"def format_id(i):\n return f'{i:>{ID_SIZE}}'",
"def mininet_dpid(int_dpid):\n return str(\"%x\" % int(int_dpid))",
"def get_id(self): # real signature unknown; restored from __doc__\n return \"\"",
"def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]",
"def shorten_id(id):\n if id.startswith('CN'):\n id = id[2:]\n if not id[-1].isdigit():\n id = id[:-1]\n return id",
"def autoid(self) -> str:",
"def autoid(self) -> str:",
"def idn(self):\n hname = (ct.c_char * 100)()\n self.lib.GetHeadModel(ct.pointer(hname))\n hname = str(hname.value)[2:-1]\n sn = ct.c_uint()\n self.lib.GetCameraSerialNumber(ct.pointer(sn))\n return 'Andor ' + hname + ', serial number ' + str(sn.value)",
"def get_debug_firmware_id_string(self):\n # Read the address via get_var_strict; this will fetch the value\n # from chipdata as well, but we can ignore it.\n chip_str = self.chipdata.get_var_strict('$_build_identifier_string')\n rawstr = self.debuginfo.get_dm_const(chip_str.address, chip_str.size)\n\n decoded_str = \"\"\n for chars in rawstr:\n if Arch.addr_per_word == 4:\n # The raw string is encoded with four chars per word\n string = cu.get_string_from_word(Arch.addr_per_word, chars)\n stop_decoding = False\n for char in string:\n if char != '\\0':\n decoded_str += char\n else:\n stop_decoding = True\n break\n if stop_decoding:\n break\n else:\n # The raw string is encoded with two chars per word\n upper_part = (chars & 0xFF00) >> 8\n lower_part = chars & 0x00FF\n # strip the null terminator.\n if upper_part != 0:\n decoded_str += chr(upper_part)\n else:\n break\n if lower_part != 0:\n decoded_str += chr(lower_part)\n else:\n break\n\n return decoded_str.strip() # Strip any leading/trailing whitespace",
"def get_short_task_id(task_id: str) -> str:\n return task_id.split(MESOS_TASK_SPACER)[-1]",
"def make_trace_id(trace_id: bytes) -> str:\n return base64.b64encode(trace_id).decode(\"utf-8\")",
"def test_convert_id():",
"def external_id(self) -> str:\n return self._search_in_properties(ATTR_GUID)",
"def normalize_osd_id(osd_id):\n if not isinstance(osd_id, str) or not osd_id.startswith('osd.'):\n osd_id = 'osd.' + str(osd_id)\n return osd_id",
"def id(self):\n return str(self.get_data(\"id\"))"
] | [
"0.64157075",
"0.61956793",
"0.6142641",
"0.61137575",
"0.61104363",
"0.60973483",
"0.59823877",
"0.5975986",
"0.59632874",
"0.59317726",
"0.59248674",
"0.5884124",
"0.586361",
"0.5790779",
"0.5767062",
"0.5758811",
"0.5721861",
"0.5718079",
"0.5702139",
"0.5702121",
"0.56987417",
"0.56987417",
"0.5688502",
"0.56473804",
"0.56384075",
"0.5630337",
"0.56298053",
"0.56134284",
"0.55895275",
"0.55842155"
] | 0.6462999 | 0 |
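Note: a usage sketch for the datapath_id() function in the row above, exercising both accepted input shapes -- a 16-character hex string and an 8-byte value. The inputs are examples only, and the function itself is assumed to be in scope.

print(datapath_id("000000000000abcd"))                    # 00:00:00:00:00:00:ab:cd
print(datapath_id(b"\x00\x00\x00\x00\x00\x00\xab\xcd"))   # 00:00:00:00:00:00:ab:cd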
Print TCP/IP header. It uses command line option p to print 'minimal' or 'full' headers | def print_headers(pkt, overwrite_min=0):
if PrintingOptions().is_minimal_headers() and overwrite_min == 0:
print_minimal(pkt.position, pkt.l1.time, pkt.l1.caplen, pkt.l3,
pkt.l4)
else:
print_position(pkt.position)
print_layer1(pkt.l1.time, pkt.l1.caplen, pkt.l1.truncate)
print_layer2(pkt.l2)
print_layer3(pkt.l3)
print_tcp(pkt.l4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def displayTCP(tcp):\n\n print \"[TCP Header]\"\n print \"\\t Source Port: \" + str(tcp.sport)\n print \"\\t Destination Port: \" + str(tcp.dport)\n print \"\\t Sequence Number: \" + str(tcp.seq)\n print \"\\t Acknowledgment Number: \" + str(tcp.ack)\n print \"\\t Data Offset: \" + str(tcp.dataofs)\n print \"\\t Reserved: \" + str(tcp.reserved)\n print \"\\t Flags: \" + tcp.underlayer.sprintf(\"%TCP.flags%\")\n print \"\\t Window Size: \" + str(tcp.window)\n print \"\\t Checksum: \" + str(tcp.chksum)\n if (tcp.flags & URG):\n print \"\\t Urgent Pointer: \" + str(tcp.window)\n if (tcp.dataofs > 5):\n print \"\\t Options: \" + str(tcp.options)",
"def print_minimal(position, date, getlen, ip_addr, tcp):\n string = 'Packet #%s - %s %s:%s -> %s:%s Size: %s Bytes'\n\n source = OFProxy().get_name(ip_addr.s_addr, tcp.source_port)\n dest = OFProxy().get_name(ip_addr.d_addr, tcp.dest_port)\n\n print(string % (position, date, cyan(source), cyan(tcp.source_port),\n cyan(dest), cyan(tcp.dest_port), getlen))",
"def print_header(message):\n print('-----------')\n print(message)\n print('-----------')",
"def displayIP(ip) :\n print \"[IP HEADER]\"\n print \"\\t Version: \" + str(ip.version)\n print \"\\t IHL: \" + str(ip.ihl * 4) + \" bytes\"\n print \"\\t ToS: \" + str(ip.tos)\n print \"\\t Total Length: \" + str(ip.len)\n print \"\\t Identification: \" + str(ip.id)\n print \"\\t Flags: \" + str(ip.flags)\n print \"\\t Fragment Offset: \" + str(ip.frag)\n print \"\\t TTL: \" + str(ip.ttl)\n print \"\\t Protocol: \" + str(ip.proto)\n print \"\\t Header Checksum: \" + str(ip.chksum)\n print \"\\t Source: \" + str(ip.src)\n print \"\\t Destination: \" + str(ip.dst)\n if (ip.ihl > 5):\n print \"\\t Options: \" + str(ip.options)",
"def header_print(output):\n print(\"\\n----------------------------------------------------------------\")\n print(output)\n print(\"----------------------------------------------------------------\")",
"def displayUDP(udp):\n print \"[UDP Header]\"\n print \"\\t Source Port: \" + str(udp.sport)\n print \"\\t Destination Port: \" + str(udp.dport)\n print \"\\t Length: \" + str(udp.len)\n print \"\\t Checksum: \" + str(udp.chksum)",
"def print_header():\n\n def get_dashes(perc):\n dashes = \"|\" * int((float(perc) / 10 * 4))\n empty_dashes = \" \" * (40 - len(dashes))\n return dashes, empty_dashes\n\n # cpu usage\n percs = psutil.cpu_percent(interval=0, percpu=True)\n for cpu_num, perc in enumerate(percs):\n dashes, empty_dashes = get_dashes(perc)\n line = (\" CPU%-2s [%s%s] %5s%%\" % (cpu_num, dashes, empty_dashes,\n perc))\n print_line(line)\n\n # cpu usage\n mem = psutil.virtual_memory()\n dashes, empty_dashes = get_dashes(mem.percent)\n line = \" Mem [%s%s] %5s%% %6s / %s\" % (\n dashes, empty_dashes,\n mem.percent,\n str(int(mem.used / 1024 / 1024)) + \"M\",\n str(int(mem.total / 1024 / 1024)) + \"M\"\n )\n print_line(line)\n\n # swap usage\n swap = psutil.swap_memory()\n dashes, empty_dashes = get_dashes(swap.percent)\n line = \" Swap [%s%s] %5s%% %6s / %s\" % (\n dashes, empty_dashes,\n swap.percent,\n str(int(swap.used / 1024 / 1024)) + \"M\",\n str(int(swap.total / 1024 / 1024)) + \"M\"\n )\n print_line(line)",
"def print_header(message: str):\n print_with_color(message, constant.Color.HEADER)",
"def print_header(fitsfile, ext=0, ofileh=sys.stdout):\n\n hdr = fitsio.read_header(fitsfile, ext=ext)\n ofileh.write(f\"{hdr}\")\n ofileh.write(\"\\n\")",
"def _print_header():\n print()\n print(\n \" ┌─────────────────────── Measurements in BPM ─────────────────────┐\"\n )\n print(\n \"ID Date Activity Distance Elevation Start Duration 5s 30s 60s 5m 10m 20m 30m 60m 90m 120m\"\n )\n _print_separator()",
"def _header(self, pam=False):\r\n if pam or self.magicnum == b'P7':\r\n header = \"\\n\".join((\r\n \"P7\",\r\n \"HEIGHT %i\" % self.height,\r\n \"WIDTH %i\" % self.width,\r\n \"DEPTH %i\" % self.depth,\r\n \"MAXVAL %i\" % self.maxval,\r\n \"\\n\".join(\"TUPLTYPE %s\" % unicode(i) for i in self.tupltypes),\r\n \"ENDHDR\\n\"))\r\n elif self.maxval == 1:\r\n header = \"P4 %i %i\\n\" % (self.width, self.height)\r\n elif self.depth == 1:\r\n header = \"P5 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n else:\r\n header = \"P6 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n if sys.version_info[0] > 2:\r\n header = bytes(header, 'ascii')\r\n return header",
"def print_header():\n print()\n print(\"*\" * 45)\n print(\"Please, select algorithm:\")\n print(\"*\" * 45)",
"def print_header():\n\n print(\"\"\"\n _____ _ ____ _____ ____ ____ _____ ____ _____\n /__ __\\/ \\/ _\\ /__ __\\/ _ \\/ _\\ /__ __\\/ _ \\/ __/ 1 | 2 | 3\n / \\ | || / _____ / \\ | / \\|| / _____ / \\ | / \\|| \\ 4 | 5 | 6\n | | | || \\_\\____\\| | | |-||| \\_\\____\\| | | \\_/|| /_ 7 | 8 | 9\n \\_/ \\_/\\____/ \\_/ \\_/ \\|\\____/ \\_/ \\____/\\____|\n\n To play Tic-Tac-Toe, you need to get three in a row...\n Your choices are defined, they must be from 1 to 9...\n \"\"\")",
"def print_header():\n \n print_from_file(\"html/header.html\")",
"def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")",
"def header(name, value):\n print '%s: %s\\n' % (name, value)",
"def displayIPv6(ip) :\n\n #TODO\n print \"[IP HEADER]\"\n print \"\\t Version: \" + str(ip.version)\n print \"\\t Header Length: \" + str(40) + \" bytes\"\n print \"\\t Flow Label: \" + str(ip.fl)\n print \"\\t Traffic Class: \" + str(ip.tc)\n print \"\\t Source: \" + str(ip.src)\n print \"\\t Destination: \" + str(ip.dst)",
"def __print_header():\n __collen[\"id\"] = max(__collen[\"id\"], 2) # min is \"ID\"\n __collen[\"name\"] = max(__collen[\"name\"], 14) # min is \"Subvolume Name\"\n __collen[\"used_lim\"] = max(__collen[\"used_lim\"], 10) # min is \"Max (Used)\"\n __collen[\"excl_lim\"] = max(__collen[\"excl_lim\"], 11) # min is \"Max (Excl.)\"\n print(\"ID{:s} | Subvolume Name{:s} | {:s}Used | {:s}Max (Used) | {:s}Exclusive | {:s}Max (Excl.)\".format(\n \" \"*(__collen[\"id\"]-2),\n \" \"*(__collen[\"name\"]-14),\n \" \"*(MAX_SIZE-4),\n \" \"*(__collen[\"used_lim\"]-10),\n \" \"*(MAX_SIZE-9),\n \" \"*(__collen[\"excl_lim\"]-11)))",
"def view_request_headers(line):\n args = shlex.split(line)\n if not args:\n raise PappyException(\"Request id is required\")\n reqid = args[0]\n\n reqs = yield load_reqlist(reqid)\n for req in reqs:\n if len(reqs) > 1:\n print 'Request %s:' % req.reqid\n view_full_message(req, True)\n if len(reqs) > 1:\n print '-'*30\n print ''",
"def header(msg, header_char='x'):\n header_chars = header_char * 50\n sys.stdout.write('%s\\n\\n %s\\n\\n%s\\n' % (header_chars, msg, header_chars))",
"def print_header(module, fd):\n module_name = str(module.arg)\n header = OrderedDict()\n header['swagger'] = '2.0'\n header['info'] = {\n 'description': '%s API generated from %s' % (\n module_name, module.pos.ref.rsplit('/')[-1]),\n 'version': '1.0.0',\n 'title': str(module_name + ' API')\n }\n header['host'] = 'localhost:8080'\n # TODO: introduce flexible base path. (CLI options?)\n header['basePath'] = '/restconf'\n header['schemes'] = ['http']\n return header",
"def print_header(name, texfile):\n texfile.write('\\n')\n texfile.write('%--------------------\\n')\n texfile.write('%---' + name.upper() + ('-' * (17 - len(name))) + '\\n')\n texfile.write('%--------------------\\n')",
"def print_header(level, msg):\n\n\tunderline = ['', '#', '=', '-', '', '', '']\n\n\tprint(''.join(['\\n' for _ in range(3 - level)]))\n\tprint(''.join(['*' for _ in range(4 - level)]) + ' ' + msg)\n\tprint(''.join([underline[level] for _ in range(80)]))",
"def print_header(msg):\n\n tf.print(BColors.BOLD + msg + BColors.ENDC, output_stream=sys.stderr)",
"def print_connection_being_established(pkt):\n print_headers(pkt, overwrite_min=0)\n print(green(\"!!!! New TCP/OpenFlow Connection being established!!\\n\"))",
"def print_header(header, msgInfo, expected_cksum=None):\n magic, command_hex = header[:4], header[4:16]\n payload_size, cksum = header[16:20], header[20:]\n\n command = str(bytearray([b for b in command_hex if b != 0]), encoding='utf-8')\n psz = unmarshal_uint(payload_size)\n if expected_cksum is None:\n verified = ''\n elif expected_cksum == cksum:\n verified = '(verified)'\n else:\n verified = '(WRONG!! ' + expected_cksum.hex() + ')'\n\n # If checksum is verified, process these\n if verified[1:6] != WRONG:\n print(msgInfo)\n prefix = ' '\n print(prefix + 'HEADER')\n print(prefix + '-' * 56)\n prefix *= 2\n print('{}{:32} magic'.format(prefix, magic.hex()))\n print('{}{:32} command: {}'.format(prefix, command_hex.hex(), command))\n print('{}{:32} payload size: {}'.format(prefix, payload_size.hex(), psz))\n print('{}{:32} checksum {}'.format(prefix, cksum.hex(), verified))\n else:\n return WRONG\n return command",
"def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"",
"def print_the_header():\n print('-------------------')\n print(' Weather APP')\n print('-------------------')\n print()",
"def print_header():\n print('------------------------------------')\n print(' Lesson04')\n print(' Kata Fourteen Assignment')\n print('------------------------------------\\n')",
"def displayMatchedTCP(tcp, rule):\n\n print \"[TCP Header]\"\n if (hasattr(rule.srcPorts, \"listPorts\") and len(rule.srcPorts.listPorts) == 1):\n print RED + \"\\t Source Port: \" + str(tcp.sport) + ENDC\n else:\n print \"\\t Source Port: \" + str(tcp.sport)\n if (hasattr(rule.dstPorts, \"listPorts\") and len(rule.dstPorts.listPorts) == 1):\n print RED + \"\\t Destination Port: \" + str(tcp.dport) + ENDC\n else:\n print \"\\t Destination Port: \" + str(tcp.dport)\n if (hasattr(rule, \"seq\")):\n print RED + \"\\t Sequence Number: \" + str(tcp.seq) + ENDC\n else:\n print \"\\t Sequence Number: \" + str(tcp.seq)\n if (hasattr(rule, \"ack\")):\n print RED + \"\\t Acknowledgment Number: \" + str(tcp.ack) + ENDC\n else:\n print \"\\t Acknowledgment Number: \" + str(tcp.ack)\n print \"\\t Data Offset: \" + str(tcp.dataofs)\n print \"\\t Reserved: \" + str(tcp.reserved)\n if (hasattr(rule,\"flags\")):\n print RED + \"\\t Flags:\" + tcp.underlayer.sprintf(\"%TCP.flags%\") + ENDC\n else:\n print \"\\t Flags:\" + tcp.underlayer.sprintf(\"%TCP.flags%\")\n print \"\\t Window Size: \" + str(tcp.window)\n print \"\\t Checksum: \" + str(tcp.chksum)\n if (tcp.flags & URG):\n print \"\\t Urgent Pointer: \" + str(tcp.window)\n if (tcp.dataofs > 5):\n print \"\\t Options: \" + str(tcp.options)"
] | [
"0.68259215",
"0.68247175",
"0.63290894",
"0.62992567",
"0.60704964",
"0.60693145",
"0.5978117",
"0.5895332",
"0.585004",
"0.5826223",
"0.58216",
"0.5788322",
"0.5776013",
"0.573323",
"0.5729823",
"0.57296735",
"0.5714077",
"0.5712118",
"0.56988066",
"0.5692439",
"0.5672327",
"0.56458306",
"0.5636687",
"0.5615126",
"0.5603884",
"0.55894613",
"0.5581801",
"0.55542535",
"0.5550906",
"0.55490804"
] | 0.6886594 | 0 |
Print TCP/IP header with minimal information | def print_minimal(position, date, getlen, ip_addr, tcp):
string = 'Packet #%s - %s %s:%s -> %s:%s Size: %s Bytes'
source = OFProxy().get_name(ip_addr.s_addr, tcp.source_port)
dest = OFProxy().get_name(ip_addr.d_addr, tcp.dest_port)
print(string % (position, date, cyan(source), cyan(tcp.source_port),
cyan(dest), cyan(tcp.dest_port), getlen)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def displayTCP(tcp):\n\n print \"[TCP Header]\"\n print \"\\t Source Port: \" + str(tcp.sport)\n print \"\\t Destination Port: \" + str(tcp.dport)\n print \"\\t Sequence Number: \" + str(tcp.seq)\n print \"\\t Acknowledgment Number: \" + str(tcp.ack)\n print \"\\t Data Offset: \" + str(tcp.dataofs)\n print \"\\t Reserved: \" + str(tcp.reserved)\n print \"\\t Flags: \" + tcp.underlayer.sprintf(\"%TCP.flags%\")\n print \"\\t Window Size: \" + str(tcp.window)\n print \"\\t Checksum: \" + str(tcp.chksum)\n if (tcp.flags & URG):\n print \"\\t Urgent Pointer: \" + str(tcp.window)\n if (tcp.dataofs > 5):\n print \"\\t Options: \" + str(tcp.options)",
"def displayIP(ip) :\n print \"[IP HEADER]\"\n print \"\\t Version: \" + str(ip.version)\n print \"\\t IHL: \" + str(ip.ihl * 4) + \" bytes\"\n print \"\\t ToS: \" + str(ip.tos)\n print \"\\t Total Length: \" + str(ip.len)\n print \"\\t Identification: \" + str(ip.id)\n print \"\\t Flags: \" + str(ip.flags)\n print \"\\t Fragment Offset: \" + str(ip.frag)\n print \"\\t TTL: \" + str(ip.ttl)\n print \"\\t Protocol: \" + str(ip.proto)\n print \"\\t Header Checksum: \" + str(ip.chksum)\n print \"\\t Source: \" + str(ip.src)\n print \"\\t Destination: \" + str(ip.dst)\n if (ip.ihl > 5):\n print \"\\t Options: \" + str(ip.options)",
"def print_headers(pkt, overwrite_min=0):\n if PrintingOptions().is_minimal_headers() and overwrite_min == 0:\n print_minimal(pkt.position, pkt.l1.time, pkt.l1.caplen, pkt.l3,\n pkt.l4)\n else:\n print_position(pkt.position)\n print_layer1(pkt.l1.time, pkt.l1.caplen, pkt.l1.truncate)\n print_layer2(pkt.l2)\n print_layer3(pkt.l3)\n print_tcp(pkt.l4)",
"def print_header(message):\n print('-----------')\n print(message)\n print('-----------')",
"def _printable(self):\n toPrint = \"Communication header. \"\n toPrint += \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint += \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint += \"Remote Port: \" + str(self.remote_port) + \" \"\n\n return toPrint",
"def print_info(self):\n \n print \"\"\"version: %d\\t header_len: %d\\t tos: %s\\t total_len: %d\n id: %s\\t flags_reservedbit: %d\\t flags_dont_fragment: %d\\t flags_more_fragment: %d\n fragment_offset: %d\\t TTL: %d\\t protocol: %s\\t\n header_checksum: %s\\t\n src: %s\\t dst: %s\n opt_paddings: %s\"\"\" % (\n self.version, self.header_len, self.type_of_service, self.total_len, self.id, self.flags_reservedbit, \n self.flags_dont_fragment, self.flags_more_fragment, \n self.fragment_offset, self.TTL, self.protocol, self.header_checksum, self.src, self.dst, repr(self.opt_paddings))",
"def print_connection_being_established(pkt):\n print_headers(pkt, overwrite_min=0)\n print(green(\"!!!! New TCP/OpenFlow Connection being established!!\\n\"))",
"def test_tcp_header_native(self):\n header = TCP_HEADER(\n source_port = 8080,\n dest_port = 8080,\n seq_num = 0xbeefcafe,\n ack_num = 0xcafebeef,\n data_offset = 0xf,\n flag_ns = 1,\n flag_cwr = 1,\n flag_ece = 1,\n flag_urg = 1,\n flag_ack = 1,\n flag_psh = 1,\n flag_rst = 1,\n flag_syn = 1,\n flag_fin = 1,\n window_size = 12,\n checksum = 0xffff\n )\n\n expected_data = [\n 8080, 8080, 0xbeefcafe, 0xcafebeef, int('10001111', 2), 0xff, 12, 0xffff\n ]\n\n expected_val = struct.pack('HHIIBBHH', *expected_data)\n\n self.assertEqual(header.to_bytes(), expected_val)",
"def print_header(header, msgInfo, expected_cksum=None):\n magic, command_hex = header[:4], header[4:16]\n payload_size, cksum = header[16:20], header[20:]\n\n command = str(bytearray([b for b in command_hex if b != 0]), encoding='utf-8')\n psz = unmarshal_uint(payload_size)\n if expected_cksum is None:\n verified = ''\n elif expected_cksum == cksum:\n verified = '(verified)'\n else:\n verified = '(WRONG!! ' + expected_cksum.hex() + ')'\n\n # If checksum is verified, process these\n if verified[1:6] != WRONG:\n print(msgInfo)\n prefix = ' '\n print(prefix + 'HEADER')\n print(prefix + '-' * 56)\n prefix *= 2\n print('{}{:32} magic'.format(prefix, magic.hex()))\n print('{}{:32} command: {}'.format(prefix, command_hex.hex(), command))\n print('{}{:32} payload size: {}'.format(prefix, payload_size.hex(), psz))\n print('{}{:32} checksum {}'.format(prefix, cksum.hex(), verified))\n else:\n return WRONG\n return command",
"def displayIPv6(ip) :\n\n #TODO\n print \"[IP HEADER]\"\n print \"\\t Version: \" + str(ip.version)\n print \"\\t Header Length: \" + str(40) + \" bytes\"\n print \"\\t Flow Label: \" + str(ip.fl)\n print \"\\t Traffic Class: \" + str(ip.tc)\n print \"\\t Source: \" + str(ip.src)\n print \"\\t Destination: \" + str(ip.dst)",
"def displayUDP(udp):\n print \"[UDP Header]\"\n print \"\\t Source Port: \" + str(udp.sport)\n print \"\\t Destination Port: \" + str(udp.dport)\n print \"\\t Length: \" + str(udp.len)\n print \"\\t Checksum: \" + str(udp.chksum)",
"def tcp_pkt_debug_info(pkt: dpkt.ip.IP) -> str:\n if isinstance(pkt, dpkt.ip.IP):\n paylod_len = pkt.len - (4 * pkt.hl) - (4 * pkt.data.off)\n return \"{}:{}-> {}:{}, seq: {}, ack:{}, flag:{}, payload len: {}, payload: {}, sum: {}\".format(\n inet_to_str(pkt.src), pkt.data.sport, inet_to_str(pkt.dst), pkt.data.dport, hex(pkt.data.seq),\n hex(pkt.data.ack), hex(pkt.data.flags), hex(paylod_len), pkt.data.data, hex(pkt.data.sum))",
"def displayMatchedTCP(tcp, rule):\n\n print \"[TCP Header]\"\n if (hasattr(rule.srcPorts, \"listPorts\") and len(rule.srcPorts.listPorts) == 1):\n print RED + \"\\t Source Port: \" + str(tcp.sport) + ENDC\n else:\n print \"\\t Source Port: \" + str(tcp.sport)\n if (hasattr(rule.dstPorts, \"listPorts\") and len(rule.dstPorts.listPorts) == 1):\n print RED + \"\\t Destination Port: \" + str(tcp.dport) + ENDC\n else:\n print \"\\t Destination Port: \" + str(tcp.dport)\n if (hasattr(rule, \"seq\")):\n print RED + \"\\t Sequence Number: \" + str(tcp.seq) + ENDC\n else:\n print \"\\t Sequence Number: \" + str(tcp.seq)\n if (hasattr(rule, \"ack\")):\n print RED + \"\\t Acknowledgment Number: \" + str(tcp.ack) + ENDC\n else:\n print \"\\t Acknowledgment Number: \" + str(tcp.ack)\n print \"\\t Data Offset: \" + str(tcp.dataofs)\n print \"\\t Reserved: \" + str(tcp.reserved)\n if (hasattr(rule,\"flags\")):\n print RED + \"\\t Flags:\" + tcp.underlayer.sprintf(\"%TCP.flags%\") + ENDC\n else:\n print \"\\t Flags:\" + tcp.underlayer.sprintf(\"%TCP.flags%\")\n print \"\\t Window Size: \" + str(tcp.window)\n print \"\\t Checksum: \" + str(tcp.chksum)\n if (tcp.flags & URG):\n print \"\\t Urgent Pointer: \" + str(tcp.window)\n if (tcp.dataofs > 5):\n print \"\\t Options: \" + str(tcp.options)",
"def debug_info_header(header):\n print(colored(\"Header:\", 'cyan'), colored(\"Valid FDT magic value found\", \"green\", attrs=['bold']))\n print(colored(\"Header\", 'cyan'), \"-> Total Size of file: \",\n colored('{0:>8d} {0:>#8x}'.format(header.totalsize), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Struct Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_struct), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_struct), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to String Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_strings), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_strings), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Memory Reser: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_mem_rsvmap), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Version of DTB: \",\n colored('{0:>8d} {0:>#8x}'.format(header.version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Previous Version of DTB:\",\n colored('{0:>8d} {0:>#8x}'.format(header.last_comp_version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Boot CPU Number: \",\n colored('{0:>8d} {0:>#8x}'.format(header.boot_cpuid_phys), 'yellow'))\n print()",
"def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for",
"def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for",
"def print_header(message: str):\n print_with_color(message, constant.Color.HEADER)",
"def header(name, value):\n print '%s: %s\\n' % (name, value)",
"def header_print(output):\n print(\"\\n----------------------------------------------------------------\")\n print(output)\n print(\"----------------------------------------------------------------\")",
"def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"",
"def _printable(self):\n to_print = \"EPR Request Header.\"\n to_print += \"Remote IP: {}\".format(self.remote_ip)\n to_print += \"Remote port: {}\".format(self.remote_port)\n to_print += \"Min Fidelity: {}\".format(self.min_fidelity)\n to_print += \"Max Time: {}\".format(self.max_time)\n to_print += \"Num Pairs: {}\".format(self.num_pairs)\n to_print += \"Priority: {}\".format(self.priority)\n to_print += \"Store: {}\".format(self.store)\n to_print += \"Atomic: {}\".format(self.atomic)\n to_print += \"Measure Directly: {}\".format(self.measure_directly)\n\n return to_print",
"def readPacketHeader(stream):\n return makePacketHeader(stream.read(8))",
"def dump_protocol_header(major: int, minor: int, revision: int) -> bytes:\n return pack('>5sBBB', b'AMQP\\x00', major, minor, revision)",
"def _write_header(self, head_msg=None):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n header = \"\\n%s\\nDateTime: %s \\nMessage: %s \\n\" % (\"*\" * 100, now, head_msg)\n\n return header",
"def print_header(now):\n global config\n date_time = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')\n\n print('*************************************')\n print(f'HTTP LOGS STATISTICS - {date_time}')",
"def __print_header():\n __collen[\"id\"] = max(__collen[\"id\"], 2) # min is \"ID\"\n __collen[\"name\"] = max(__collen[\"name\"], 14) # min is \"Subvolume Name\"\n __collen[\"used_lim\"] = max(__collen[\"used_lim\"], 10) # min is \"Max (Used)\"\n __collen[\"excl_lim\"] = max(__collen[\"excl_lim\"], 11) # min is \"Max (Excl.)\"\n print(\"ID{:s} | Subvolume Name{:s} | {:s}Used | {:s}Max (Used) | {:s}Exclusive | {:s}Max (Excl.)\".format(\n \" \"*(__collen[\"id\"]-2),\n \" \"*(__collen[\"name\"]-14),\n \" \"*(MAX_SIZE-4),\n \" \"*(__collen[\"used_lim\"]-10),\n \" \"*(MAX_SIZE-9),\n \" \"*(__collen[\"excl_lim\"]-11)))",
"def print_header(msg):\n\n tf.print(BColors.BOLD + msg + BColors.ENDC, output_stream=sys.stderr)",
"def print_the_header():\n print('-------------------')\n print(' Weather APP')\n print('-------------------')\n print()",
"def _header(self, pam=False):\r\n if pam or self.magicnum == b'P7':\r\n header = \"\\n\".join((\r\n \"P7\",\r\n \"HEIGHT %i\" % self.height,\r\n \"WIDTH %i\" % self.width,\r\n \"DEPTH %i\" % self.depth,\r\n \"MAXVAL %i\" % self.maxval,\r\n \"\\n\".join(\"TUPLTYPE %s\" % unicode(i) for i in self.tupltypes),\r\n \"ENDHDR\\n\"))\r\n elif self.maxval == 1:\r\n header = \"P4 %i %i\\n\" % (self.width, self.height)\r\n elif self.depth == 1:\r\n header = \"P5 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n else:\r\n header = \"P6 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n if sys.version_info[0] > 2:\r\n header = bytes(header, 'ascii')\r\n return header",
"def print_header():\n print()\n print(\"*\" * 45)\n print(\"Please, select algorithm:\")\n print(\"*\" * 45)"
] | [
"0.77554166",
"0.7025853",
"0.6953015",
"0.6674366",
"0.6625914",
"0.66170645",
"0.65838134",
"0.65690565",
"0.6557152",
"0.6511575",
"0.65036327",
"0.64393747",
"0.6331855",
"0.6302267",
"0.62660515",
"0.62660515",
"0.62430453",
"0.61718196",
"0.6171234",
"0.61542714",
"0.6147116",
"0.6145997",
"0.61385584",
"0.6104087",
"0.61005783",
"0.60888755",
"0.6067138",
"0.60670054",
"0.60545015",
"0.6030188"
] | 0.7096347 | 1 |
Just prints that the TCP connection was terminated (FIN or RST flag). | def print_connection_terminated(pkt):
print_headers(pkt, overwrite_min=0)
print(red("!!!! Attention: TCP/OpenFlow Connection Terminated!!\n")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0",
"def eof_received(self):\n logger.debug(\"EOF from client, closing.\")\n self.connection_lost(None)",
"def outConnectionLost(self):\n self.logger('stdout closed by process %d' % self._pid)",
"def on_connection_end() -> None:\r\n print(\"Connection lost with G-Earth\")\r\n print()",
"def disconnect(self):\n print(\"<== Conexión cerrada ==>\")\n self.is_alive = False\n self._socket.close()",
"async def connection_lost(self):\n logging.info('connection dropped')",
"def close(self):\n self.connection.close()\n print(\"Connection on port \" + str(self.port) + \" closed.\")",
"def disconnect():\n logging.info('Client disconnected')",
"def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))",
"def print_connection_being_established(pkt):\n print_headers(pkt, overwrite_min=0)\n print(green(\"!!!! New TCP/OpenFlow Connection being established!!\\n\"))",
"def connection_teardown():\n test_str = make_random(100)\n server = start_server()\n client = start_client()\n\n # First write some data at both ends.\n write_to(client, test_str)\n write_to(server, test_str)\n time.sleep(TEST_TIMEOUT)\n\n # Write EOFs on both sides.\n write_to(client, '\\x1a')\n write_to(server, '\\x1a')\n client.stdin.close()\n server.stdin.close()\n time.sleep(TEST_TIMEOUT)\n\n return (\n DEBUG_TEARDOWN in read_debug_messages_from(client) and\n DEBUG_TEARDOWN in read_debug_messages_from(server)\n )",
"def end(self):\n if self.conn:\n self.conn.close()\n self.conn = None\n #print(\"closed.\")",
"def close(self):\n self.s.close()\n print(\"Socket closed\")",
"def connection_closed(self) -> bool:",
"def tcp_rst(self):\n return self.tcp_flags & dpkt.tcp.TH_RST != 0",
"def end(self):\n self.send_all(\"SHUTDOWN\") #On indique a tout le monde qu'on ferme\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()",
"def exit(s_socket):\r\n s_socket.send(\"\")",
"def END(self):\n log.debug(\"All packets successfully transmitted!\")",
"def eof_received(self):\n self.connection_lost('EOF')\n return False",
"def connection_closed(self):\n self.atv = None\n self._start_connect_loop()\n self._update_state(disconnected=True)",
"def errConnectionLost(self):\n self.logger('stderr closed by process %d' % self._pid)",
"def shutdown(self):\n return self.packet().close().send()",
"def connection_lost(self, exc):\n if self._stream.done:\n # Stream is done, no need to panic\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)",
"def close(self):\n # Report server that connection is closed\n self._socket.sendall(''.encode())\n self._socket.close()",
"def close(self):\n logging.debug(\"Closing TCP stream\")\n # Sometimes the socket read might be blocked in the reader thread. Therefore we force the shutdown by closing \n # the socket here\n self._wantExit = True \n if not self.socket is None:\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()\n StreamInterface.close(self)",
"def fin_sent():\n test_str = \"f1N s3nt\\n\"\n server = start_server()\n client = start_client()\n\n # First write some data.\n write_to(client, test_str)\n if not read_segments_from(client):\n return False\n time.sleep(1)\n\n # Write an EOF character.\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check to see that segment sent from client is a FIN.\n segments = read_segments_from(client)\n if not segments:\n return False\n return \"FIN\" in segments[0].flags",
"def disconnect(self):\r\n try:\r\n self.connection.close()\r\n print (\"disconnected!\")\r\n except Exception as error:\r\n print (\"disconnect() - error - {}\".format(error))",
"def shutdown(self):\n self.connected = False\n self.protocol.send_message(self.sock, '__!shutdown__')\n data = self.protocol.recover_message(self.sock)\n self.sock.close()\n self.sock = None",
"def shutdown(self):\t\r\n\t\tself.is_running = False\r\n\t\tfor connection in self.established_connection_list:\r\n\t\t\tconnection.send('The server has been shutdown adruptly by the server owner.\\n')\r\n\t\t\tconnection.socket_send()",
"def server_closed_connection(self):\n\n print(\"Game Over!\")\n if self._winner:\n print(\"Player {} wins!\".format(self._winner))\n else:\n print(\"Draw!\")"
] | [
"0.6347258",
"0.6250159",
"0.6225721",
"0.6224855",
"0.61981726",
"0.6002705",
"0.5983145",
"0.59824896",
"0.5968627",
"0.59684265",
"0.5964312",
"0.59535646",
"0.59128535",
"0.58037907",
"0.5780889",
"0.5776544",
"0.57739186",
"0.57569766",
"0.5739121",
"0.57353306",
"0.5718915",
"0.5715888",
"0.56863487",
"0.5659174",
"0.5630589",
"0.5626356",
"0.56255656",
"0.5623052",
"0.5620442",
"0.562022"
] | 0.769878 | 0 |
Just prints that a new TCP connection is being established. | def print_connection_being_established(pkt):
print_headers(pkt, overwrite_min=0)
print(green("!!!! New TCP/OpenFlow Connection being established!!\n")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_connection_start(self) -> None:\r\n print(\r\n \"Connected with: {}:{}\\n\".format(\r\n self.connection_info[\"host\"], self.connection_info[\"port\"]\r\n )\r\n )",
"def connect():\n logging.info('Client connected')",
"def connectionMade(self):\n print \"connection received from\", self.addr",
"def on_connect(self):\n print('Client connected!')",
"def establish_connection(self):\n print('Listening...')\n self.socket.listen()\n self.conn, addr = self.socket.accept()\n print('Received connection', addr)",
"async def connection_made(self):\n logging.info('connecting to %s:%s' % self.address)",
"def print_connection_information(self):\n try:\n print(self.connection_information)\n except:\n print(\"Error in displaying connection information.\")",
"def connectionMade(self):\n self.output = DelayedStartupLineLogger()\n self.output.makeConnection(self.transport)\n self.output.tag = self.name",
"def connected(self, host, port=None):\n print(f\"connected to {host}\")",
"def connect():\n\n\tprint \"In connection state\"\n\n\tglobal cs\n\tglobal state\n\n\tcs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tcs.connect(('localhost', PORT))\n\tcs.send('Connected')\n\tstate += 1",
"def opened(self):\n self.logger.info(\"Socket connection open\")\n # Send a connection request\n self.sender.send_packet(52)",
"def startedConnecting(self, connector):\n\n print(\"Connecting to the server...\") # client console notification",
"def show_connections(self):\n prev_threads = self.get_num_connections()\n print(f\"[Initial Number of Threads] {threading.activeCount()}\")\n print(f\"[Initial Number of Connections] {self.get_num_connections()}\")\n while True:\n if self.get_num_connections() != prev_threads:\n print(f\"[CHANGE IN THREADS] Number of Connection threads: {self.get_num_connections()}\")\n sleep(0.1)\n self.print_connections()\n self.print_sessions()\n prev_threads = self.get_num_connections()\n for thread in threading.enumerate():\n print(thread.name)",
"def connectionMade(self):\r\n HistoricRecvLine.connectionMade(self)\r\n self._factory = PBClientFactory()\r\n\r\n reactor.connectTCP(self._masterIP, self._console_port, self._factory) #@UndefinedVariable\r\n self.terminal.write(\"Username: \")",
"def connectionMade(self):\n print \"new connection made to\", host, \"port\", port\n self.transport.write(\"GET /movies/32 HTTP/1.0\\r\\n\\r\\n\") # some get request\n self.transport.loseConnection()",
"def print_connection_terminated(pkt):\n print_headers(pkt, overwrite_min=0)\n print(red(\"!!!! Attention: TCP/OpenFlow Connection Terminated!!\\n\"))",
"def on_connect():\n logger.info(f\"{request.sid} Connected\")",
"def connect(self):\n return 1",
"def displayTCP(tcp):\n\n print \"[TCP Header]\"\n print \"\\t Source Port: \" + str(tcp.sport)\n print \"\\t Destination Port: \" + str(tcp.dport)\n print \"\\t Sequence Number: \" + str(tcp.seq)\n print \"\\t Acknowledgment Number: \" + str(tcp.ack)\n print \"\\t Data Offset: \" + str(tcp.dataofs)\n print \"\\t Reserved: \" + str(tcp.reserved)\n print \"\\t Flags: \" + tcp.underlayer.sprintf(\"%TCP.flags%\")\n print \"\\t Window Size: \" + str(tcp.window)\n print \"\\t Checksum: \" + str(tcp.chksum)\n if (tcp.flags & URG):\n print \"\\t Urgent Pointer: \" + str(tcp.window)\n if (tcp.dataofs > 5):\n print \"\\t Options: \" + str(tcp.options)",
"def print_connections(self):\n print(\"[Printing Connections]\")\n for key in self.connections.keys():\n print(f\"{key}:\\n\\t{self.connections[key]}\")",
"def connectionMade(self):\n \t#print \"[K] Connect effettuata\", self.port\n \tself.factory.state=\"0 open\"\n \tif (self.port in self.portfForHTTPGet):\n\t\tself.transport.write(\"GET / HTTP/1.1\\r\\n\\r\\n\")\n \t\t#self.transport.write(\"GET /index.html HTTP/1.1\\r\\n\\r\\n\")",
"def on_connect():\n print(\"User connected!\")",
"def on_connect(self):\n log.info(\"Stream connected\")",
"def connect_to_server(self):\r\n self.client_socket.connect((SERVER_IP, SERVER_PORT))\r\n print('[CLIENT] connected to streamer.')",
"def connect():",
"def connection(self):\n return \"Mysql Server connected.\"",
"async def connect(self):\n try:\n self._cmd_stream = await self._connect()\n self.inc_counter(\"%s.connected\" % self.objname)\n self.logger.info(\"Connected: %s\", self._extra_info)\n except Exception as e:\n self.logger.error(\"Connect Failed %r\", e)\n self.inc_counter(\"%s.failed\" % self.objname)\n raise e",
"def check_connection():\n r = requests.get('https://www.google.com')\n if r.status_code == 200:\n print (colored(\"Connected.\", 'green'))\n else:\n print (colored(\"Not Connected.\", 'red'))",
"def startTwoWayTCP():\r\n print(\"=================================\\n\")\r\n print(\" starting TCP test\")\r\n print(\"\\n=================================\")\r\n os.system(\"iperf3.exe -s -B 11.0.0.50 --logfile Server1.txt\")\r\n os.system(\"iperf3.exe -s -B 11.0.0.51 --logfile Server2.txt\")\r\n os.system(\"iperf3.exe -c 11.0.0.50 -b 0 -B 11.0.0.51 -t 0 -V --logfile Client1.txt\")\r\n os.system(\"iperf3.exe -c 11.0.0.51 -b 0 -B 11.0.0.50 -t 0 -V --logfile Client2.txt\")\r\n time.sleep(0.5)\r\n return isTCPRunning()",
"def server(conn, address):\n print(\"Client Connection Open\")\n while True:\n request = server_read(conn)\n if request:\n print(request)\n manage_client(request, conn)"
] | [
"0.7560201",
"0.71477664",
"0.68863255",
"0.67417765",
"0.67133015",
"0.661822",
"0.64948887",
"0.64504004",
"0.6437214",
"0.6415456",
"0.63819456",
"0.6380913",
"0.6280628",
"0.62490803",
"0.62355256",
"0.6143209",
"0.6108159",
"0.6106273",
"0.6080479",
"0.6068739",
"0.6064667",
"0.6063654",
"0.6034419",
"0.59895444",
"0.59693974",
"0.59603876",
"0.5953645",
"0.5931125",
"0.5891813",
"0.58832526"
] | 0.8331354 | 0 |
Create Django view for given SOAP soaplib services and tns | def __init__(self, services, tns):
return super(DjangoSoapApp, self).__init__(Application(services, tns)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call_service(func, api_kwargs, context, request):\n pattern = request.matched_route.pattern\n service = request.registry['soap_services'].get(pattern)\n request.META = request.headers.environ # to be used by soapbox, like django\n request.service = service\n\n SOAP = service.version\n\n if request.method == 'GET' and 'wsdl' in request.params:\n tree = py2wsdl.generate_wsdl(request.service)\n body = etree.tostring(tree, pretty_print=True)\n return Response(body=body, content_type=SOAP.CONTENT_TYPE)\n\n try:\n xml = request.body\n envelope = SOAP.Envelope.parsexml(xml)\n message = envelope.Body.content()\n soap_action = SOAP.determin_soap_action(request)\n tagname, return_object = call_the_method(service,\n request, message, soap_action)\n soap_message = SOAP.Envelope.response(tagname, return_object)\n return Response(body=soap_message, content_type=SOAP.CONTENT_TYPE)\n except (ValueError, etree.XMLSyntaxError) as e:\n response = SOAP.get_error_response(SOAP.Code.CLIENT, str(e))\n except Exception, e:\n response = SOAP.get_error_response(SOAP.Code.SERVER, str(e))\n return Response(body=response, content_type=SOAP.CONTENT_TYPE)",
"def service(request):\n\treturn render(request,'service.html',None)",
"def __init__(self, wsdl, service=None, port=None, tracefile=None,\r\n typesmodule=None, nsdict=None, soapAction=None, ns=None, op_ns=None, use_wsdl=False):\r\n if not hasattr(wsdl, 'targetNamespace'):\r\n wsdl = wstools.WSDLTools.WSDLReader().loadFromURL(wsdl)\r\n\r\n# for item in wsdl.types.items():\r\n# self._serializer.loadSchema(item)\r\n\r\n self._service = wsdl.services[service or 0]\r\n self.__doc__ = self._service.documentation\r\n self._port = self._service.ports[port or 0]\r\n self._name = self._service.name\r\n self._wsdl = wsdl\r\n self._tracefile = tracefile\r\n self._typesmodule = typesmodule\r\n self._nsdict = nsdict or {}\r\n self._soapAction = soapAction\r\n self._ns = ns\r\n self._op_ns = op_ns\r\n self._use_wsdl = use_wsdl\r\n \r\n binding = self._port.getBinding()\r\n portType = binding.getPortType()\r\n for item in portType.operations:\r\n callinfo = wstools.WSDLTools.callInfoFromWSDL(self._port, item.name)\r\n method = MethodProxy(self, callinfo)\r\n setattr(self, item.name, method)",
"def __prepare_wsdl_objects(self):\r\n pass",
"def web_service_response_example(self, action, controller):",
"def createViews(views):\n ...",
"def _create_soap_object(self, name):\n return self.client.factory.create(name)",
"def handle_xmlrpc(request):\r\n if request.method == \"POST\":\r\n logger.info(request.body)\r\n try:\r\n response = HttpResponse(content_type='text/xml')\r\n response.write(\r\n xmlrpcdispatcher._marshaled_dispatch(request.body))\r\n logger.debug(response)\r\n return response\r\n except:\r\n return HttpResponseServerError()\r\n else:\r\n methods = xmlrpcdispatcher.system_listMethods()\r\n method_list = []\r\n\r\n for method in methods:\r\n sig_ = xmlrpcdispatcher.system_methodSignature(method)\r\n sig = {\r\n 'returns': sig_[0],\r\n 'args': \", \".join(sig_[1:]),\r\n }\r\n\r\n # this just reads your docblock, so fill it in!\r\n method_help = xmlrpcdispatcher.system_methodHelp(method)\r\n\r\n method_list.append((method, sig, method_help))\r\n\r\n return render_to_response('xmlrpc_get.html', {'methods': method_list},\r\n context_instance=RequestContext(request))",
"def get_service(self):",
"def services(request):\n\n services = Service.objects.all()\n creator_profile = UserProfile.objects.all()\n\n template = 'services/services.html'\n context = {\n 'services': services,\n 'creator_profile': creator_profile,\n }\n\n return render(request, template, context)",
"def get_proxy_url(self, request, service, url):\n params = None\n logger.debug(\n \"Enter MyUWRestProxyView service: {}, url: {}, GET: {}\".format(\n service, url, request.POST))\n\n if service == \"book\":\n url = \"uw/json_utf8_202007.ubs\"\n url = \"{}?quarter={}&sln1={}&returnlink=t\".format(\n \"uw/json_utf8_202007.ubs\",\n request.POST[\"quarter\"],\n request.POST[\"sln1\"])\n elif service == \"grad\":\n params = self.format_params(request)\n elif service == \"hfs\":\n url = \"myuw/v1/{}\".format(request.POST[\"uwnetid\"])\n elif re.match(r'^iasystem', service):\n if url.endswith('/evaluation'):\n index = url.find('/')\n service = 'iasystem_' + url[:index]\n index += 1\n url = url[index:]\n params = self.format_params(request)\n elif service == \"myplan\":\n url = \"plan/v1/{},{},1,{}\".format(\n request.POST[\"year\"],\n request.POST[\"quarter\"],\n request.POST[\"uwregid\"])\n elif service == \"sws\":\n if \"advisers\" == url:\n url = \"student/v5/person/{}/advisers.json\".format(\n request.POST[\"uwregid\"])\n elif \"degree\" == url:\n url = \"student/v5/person/{}/degree.json?deg_status=all\".format(\n request.POST[\"uwregid\"])\n elif \"notices\" == url:\n url = \"student/v5/notice/{}.json\".format(\n request.POST[\"uwregid\"])\n elif service == \"upass\":\n url = \"upassdataws/api/person/v1/membershipstatus/{}\".format(\n request.POST[\"uwnetid\"])\n elif service == \"uwnetid\":\n if \"password\" == url:\n url = \"nws/v1/uwnetid/{}/password\".format(\n request.POST[\"uwnetid\"])\n elif \"subscription\" == url:\n url = \"nws/v1/uwnetid/{}/subscription/60,64,105\".format(\n request.POST[\"uwnetid\"])\n else:\n service, url, params = super().get_proxy_url(request, service, url)\n\n logger.debug(\n \"Exit MyUWRestProxyView url: {}\".format(url))\n return service, url, params",
"def list_web_services(self, include_internals=\"false\"):",
"def _getService(self, namespace, soapBody, action, custid, acctid, wsdl, host):\n soapStr = self.soapHeader % (namespace,\n acctid,\n custid,\n self.username,\n self.password,\n self.devtoken\n ) + \\\n soapBody + self.soapFooter\n self.logger.debug(\"post = %s\", wsdl)\n req = urllib2.Request(wsdl)\n req.add_header(\"Accept\", \"text/xml\")\n req.add_header(\"Accept\", \"multipart/*\");\n req.add_header(\"Content-type\", \"text/xml; charset=\\\"UTF-8\\\"\")\n req.add_header(\"SOAPAction\", action)\n req.add_header(\"HOST\", host)\n req.add_data(soapStr)\n self.logger.debug(\"soapStr for %s: %s\", action, soapStr)\n res = None\n try:\n service = urllib2.urlopen(req)\n res = service.read()\n service.close()\n self.logger.debug(\"res: %s\", res)\n return res\n except urllib2.URLError, err:\n self.logger.error(\"%s failed\", action)\n if self.logger.level > logging.DEBUG:\n self.logger.error(\"soapStr: %s\", soapStr)\n if res is not None:\n self.logger.error(\"res: %s\", res)\n self.logger.error(\"reason: %s\", str(err.reason))\n if isinstance(err, urllib2.HTTPError):\n self.logger.error(\"code: %r\", err.code)\n return None",
"def soap_GetSitesXml(self, ps):\n try:\n #this code is just for deploying on apache\n #httpReq = kw['request']\n rsp = WaterOneFlow.soap_GetSitesXml(self,ps)\n request = self.request\n #get input parameter, save it in a string list strSiteCodeList.\n siteCodeList = request.get_element_site()\n strSiteCodeList = map(str,siteCodeList.get_element_string())\n #construct/renew the siteInfo_site_dictionary.\n #currently, renew it every 14 days (subject to change)?\n if(len(siteInfo_site_dictionary.keys())==1 or \n datetime.now() >= siteInfo_site_dictionary[\"__modTime\"] + timedelta(days=14)):\n #here has a possible race condition, so a lock is placed\n semaphore = open('/home/txhis/CBIService/semaphore/semaphore.file', \"w\")\n lock(semaphore, LOCK_EX)\n treeIter = getIterator(centralRegUrl)\n #save the srs information in dictionary\n buildSiteInfo_siteDictionary(treeIter,siteInfo_site_dictionary)\n siteInfo_site_dictionary[\"__modTime\"]=datetime.now()\n #close semaphore, release the lock file. (otherwise deadlock will be possible)\n semaphore.close()\n # Generate XML String\n #big response node\n siteResponse = SiteInfoResponseType()\n #set queryInfo\n queryInfo = generateQueryInfo_string(request.get_element_site());\n siteResponse.set_queryInfo(queryInfo)\n #get the \"key\" part, ready to match in cached dictionary:\n def getKeyPart(str): \n tempstr = str.split(':')\n if tempstr[0] == 'CBI':return tempstr[1]\n #use map, get siteCode part of every string, saving a loop \n strSiteCodeList = map(getKeyPart,strSiteCodeList)\n #Eliminate the \"None\" element\n strSiteCodeList = [i for i in strSiteCodeList if i]\n #site List array (if multiple sites)\n siteList = []\n # read this logic:\n #if input is empty, return all the Sites as the response\n if (not siteCodeList.get_element_string() or (not siteCodeList.get_element_string()[0] \n and len(siteCodeList.get_element_string())==1)):\n for key in siteInfo_site_dictionary.keys():\n if key == \"__modTime\" or key == 'srs' : continue\n #site nodes\n siteNode = site()\n #siteCodeArray[1]][1][1] is an arbitrary xml node of a certain location\n siteInfoNode = generateSiteInfo_string(siteInfo_site_dictionary[key][1][0],siteInfo_site_dictionary['srs'])\n siteNode.set_siteInfo(siteInfoNode)\n siteList.append(siteNode)\n siteResponse.set_site(siteList) \n siteResponseString = cStringIO.StringIO() \n siteResponse.export(siteResponseString, 0, \\\n namespacedef_=' '.join(['xmlns:gml=\"http://www.opengis.net/gml\"',\n 'xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"',\n 'xmlns:xlink=\"http://www.w3.org/1999/xlink\"',\n 'xmlns:wtr=\"http://www.cuahsi.org/waterML/\"',\n 'xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"',\n 'xmlns=\"http://www.cuahsi.org/waterML/1.0/\"'])) \n rsp.set_element_GetSitesXmlResult(siteResponseString.getvalue())\n except Exception, e:\n import traceback\n traceback.print_exc(file=sys.stdout)\n if isinstance(e,Fault):\n detail = None\n if e.detail is not None: \n detail = Detail()\n detail.any = e.detail \n rsp = FaultType(e.code, e.string, e.actor, detail)\n return rsp",
"def soap_GetVariableInfoObject(self, ps):\n try:\n #this code is just for deploying on apache\n #httpReq = kw['request']\n rsp = WaterOneFlow.soap_GetVariableInfoObject(self,ps)\n request = self.request \n variableCodeArray = map(str, request.get_element_variable().split(\":\"))\n variableResponseNode=rsp.new_variablesResponse()\n variablesNode = variableResponseNode.new_variables()\n variableList = [] \n if variableCodeArray == [\"\"]:\n for key in variable_Dictionary.keys():\n variableNode = variablesNode.new_variable()\n variableList.append(generateVariableTypeNode(variableNode,key,variable_Dictionary[key]))\n elif not variableCodeArray[0].upper()==\"CBI\" or not variable_Dictionary.has_key(variableCodeArray[1]) \\\n or len(variableCodeArray) != 2:\n fault = Fault(Fault.Client, \"Illegal variableCode\", actor=\"variableCode\", detail=\"variable code \\\"%s\\\" is illegal/not found\" % \":\".join(variableCodeArray))\n raise fault\n elif variable_Dictionary.has_key(variableCodeArray[1]):\n variableNode = variablesNode.new_variable()\n variableList.append(generateVariableTypeNode(variableNode,variableCodeArray[1],variable_Dictionary[variableCodeArray[1]]))\n variablesNode.set_element_variable(variableList)\n variableResponseNode.set_element_variables(variablesNode)\n rsp.set_element_variablesResponse(variableResponseNode)\n #here, how to generate a fault!!! \n except Exception, e:\n import traceback\n traceback.print_exc(file=sys.stdout) \n if isinstance(e,Fault):\n detail = None\n if e.detail is not None: \n detail = Detail()\n detail.any = e.detail \n rsp = FaultType(e.code, e.string, e.actor, detail)\n return rsp",
"def xml(self, request):\n raise Exception(\"Not Implemented\")",
"def register_class_views(state):\n try:\n prefixes = state.app.request_prefixes\n except AttributeError:\n prefixes = []\n state.app.request_prefixes = prefixes\n prefixes.append(state.url_prefix if state.url_prefix is not None else '')\n # Personal list\n personal_view = PersonalRequests.as_view('personal_requests')\n state.add_url_rule('/personal/', view_func=personal_view)\n state.add_url_rule('/personal/rss.xml', view_func=personal_view)\n state.add_url_rule('/personal/<path:filters>', view_func=personal_view)\n # Payout list\n payout_view = PayoutListing.as_view('list_approved_requests')\n payout_url_stub = '/pay/'\n state.add_url_rule(payout_url_stub, view_func=payout_view)\n state.add_url_rule(payout_url_stub + 'rss.xml', view_func=payout_view)\n state.add_url_rule(payout_url_stub + '<path:filters>',\n view_func=payout_view)\n # Other more generalized listings\n register_perm_request_listing(state, 'list_pending_requests',\n '/pending/', (PermissionType.review, PermissionType.audit),\n ActionType.pending, u'Pending Requests')\n register_perm_request_listing(state, 'list_completed_requests',\n '/completed/', PermissionType.elevated, ActionType.finalized,\n u'Completed Requests')\n # Special all listing, mainly intended for API users\n register_perm_request_listing(state, 'list_all_requests',\n '/all/', PermissionType.elevated, ActionType.statuses,\n u'All Requests')",
"def _prepare_wsdl_objects(self):\r\n\r\n\t# Default behavior is to not request transit information\r\n\tself.ReturnTransitAndCommit = False\r\n\r\n # This is the primary data structure for processShipment requests.\r\n self.RequestedShipment = self.client.factory.create('RequestedShipment')\r\n self.RequestedShipment.ShipTimestamp = datetime.now()\r\n \r\n TotalWeight = self.client.factory.create('Weight')\r\n # Start at nothing.\r\n TotalWeight.Value = 0.0\r\n # Default to pounds.\r\n TotalWeight.Units = 'LB'\r\n # This is the total weight of the entire shipment. Shipments may\r\n # contain more than one package.\r\n self.RequestedShipment.TotalWeight = TotalWeight\r\n \r\n # This is the top level data structure for Shipper information.\r\n ShipperParty = self.client.factory.create('Party')\r\n ShipperParty.Address = self.client.factory.create('Address')\r\n ShipperParty.Contact = self.client.factory.create('Contact')\r\n \r\n # Link the ShipperParty to our master data structure.\r\n self.RequestedShipment.Shipper = ShipperParty\r\n\r\n # This is the top level data structure for Recipient information.\r\n RecipientParty = self.client.factory.create('Party')\r\n RecipientParty.Contact = self.client.factory.create('Contact')\r\n RecipientParty.Address = self.client.factory.create('Address')\r\n \r\n # Link the RecipientParty object to our master data structure.\r\n self.RequestedShipment.Recipient = RecipientParty\r\n \r\n Payor = self.client.factory.create('Payor')\r\n # Grab the account number from the FedexConfig object by default.\r\n Payor.AccountNumber = self._config_obj.account_number\r\n # Assume US.\r\n Payor.CountryCode = 'US'\r\n \r\n ShippingChargesPayment = self.client.factory.create('Payment')\r\n ShippingChargesPayment.Payor = Payor\r\n\r\n self.RequestedShipment.ShippingChargesPayment = ShippingChargesPayment\r\n \r\n # ACCOUNT or LIST\r\n self.RequestedShipment.RateRequestTypes = ['ACCOUNT'] \r\n \r\n # Start with no packages, user must add them.\r\n self.RequestedShipment.PackageCount = 0\r\n self.RequestedShipment.RequestedPackageLineItems = []\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.RequestedShipment)",
"def generate_spore_description(services, name, base_url, version, **kwargs):\n spore_doc = dict(\n name=name,\n base_url=base_url,\n version=version,\n expected_status=[200, ],\n methods={},\n **kwargs)\n\n for service in services:\n # the :foobar syntax should be removed.\n # see https://github.com/SPORE/specifications/issues/5\n service_path = URL_PLACEHOLDER.sub(':\\g<1>', service.path)\n\n # get the list of placeholders\n service_params = URL_PLACEHOLDER.findall(service.path)\n\n for method, view, args in service.definitions:\n format_name = args['renderer']\n if 'json' in format_name:\n format_name = 'json'\n\n view_info = {\n 'path': service_path,\n 'method': method,\n 'formats': [format_name]\n }\n if service_params:\n view_info['required_params'] = service_params\n\n if getattr(view, '__doc__'):\n view_info['description'] = view.__doc__\n\n # we have the values, but we need to merge this with\n # possible previous values for this method.\n method_name = '{method}_{service}'.format(\n method=method.lower(), service=service.name.lower())\n spore_doc['methods'][method_name] = view_info\n\n return spore_doc",
"def SoapAction(self) -> str:",
"def __call__(self, **action_kwargs):\n\n return SOAP.send(self.service, self, **action_kwargs)",
"def getServiceDefinitions(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n json_response = get_services_json(strCSPProdURL, ORG_ID, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n\n services= json_response['servicesList']\n table = PrettyTable(['Service Name', 'Access type', 'Service URL'])\n for i in services:\n table.add_row([i['displayName'], i['serviceAccessType'], i['serviceUrls']['serviceHome']])\n print(table)",
"def get_soap_accessor():\n db_ip = appscale_info.get_db_master_ip()\n bindport = constants.UA_SERVER_PORT\n return SOAPpy.SOAPProxy(\"https://{0}:{1}\".format(db_ip, bindport))",
"def __init__(self, base_url=\"TAPS\", major_versions=None, user=None,\n password=None, user_agent=DEFAULT_USER_AGENT, debug=False,\n timeout=120, service_mappings=None, jwt_access_token=None,\n jwt_refresh_token=None):\n self.debug = debug\n self.user = user\n self.timeout = timeout\n\n # Cache for the webservice versions. This makes interactive use of\n # the client more convenient.\n self.__version_cache = {}\n\n if base_url.upper() in URL_MAPPINGS:\n url_mapping = base_url.upper()\n base_url = URL_MAPPINGS[url_mapping]\n url_subpath = URL_DEFAULT_SUBPATH\n else:\n if base_url.isalpha():\n msg = \"The FDSN service shortcut `{}` is unknown.\"\\\n .format(base_url)\n raise ValueError(msg)\n url_subpath = URL_DEFAULT_SUBPATH\n\n # Make sure the base_url does not end with a slash.\n base_url = base_url.strip(\"/\")\n # Catch invalid URLs to avoid confusing error messages\n if not self._validate_base_url(base_url):\n msg = \"The FDSN service base URL `{}` is not a valid URL.\"\\\n .format(base_url)\n raise ValueError(msg)\n\n self.base_url = base_url\n self.url_subpath = url_subpath\n\n self._set_opener(user, password)\n\n self.request_headers = {\"User-Agent\": user_agent}\n # Avoid mutable kwarg.\n if major_versions is None:\n major_versions = {}\n # Make a copy to avoid overwriting the default service versions.\n self.major_versions = DEFAULT_SERVICE_VERSIONS.copy()\n self.major_versions.update(major_versions)\n\n # Avoid mutable kwarg.\n if service_mappings is None:\n service_mappings = {}\n self._service_mappings = service_mappings\n\n if self.debug is True:\n print(\"Base URL: %s\" % self.base_url)\n if self._service_mappings:\n print(\"Custom service mappings:\")\n for key, value in self._service_mappings.items():\n print(\"\\t%s: '%s'\" % (key, value))\n print(\"Request Headers: %s\" % str(self.request_headers))\n\n self.services = DEFAULT_SERVICES\n\n self.jwt_access_token = jwt_access_token\n self.jwt_refresh_token = jwt_refresh_token",
"def post(self, request, *args, **kwargs):\n request_data = request.data\n\n # create custom service serializers object\n serializer = self.serializer_class(data=request_data, context={'request': request})\n\n if not serializer.is_valid():\n return APIResponse(serializer.errors, HTTP_400_BAD_REQUEST)\n\n validated_data = serializer.validated_data\n\n # get last transaction save point id\n sid = transaction.savepoint()\n\n try:\n # add new custom service\n instance = serializer.create(validated_data)\n except Exception as err:\n # roll back transaction if any exception occur while adding custom service\n transaction.savepoint_rollback(sid)\n logger.error(\"Unexpected error occurred : %s.\", err.args[0])\n return APIResponse({\"message\": err.args[0]}, HTTP_400_BAD_REQUEST)\n\n # convert model object into json\n data = CustomServiceViewSerializer(instance).data\n data['message'] = ADD_CUSTOM_SERVICE\n\n return APIResponse(data, HTTP_201_CREATED)",
"def _prepare_wsdl_objects(self):\r\n self.DeletionControlType = self.client.factory.create('DeletionControlType')\r\n self.TrackingId = self.client.factory.create('TrackingId')\r\n self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')",
"def _prepare_wsdl_objects(self):\r\n # This holds some optional options for the request..\r\n self.AddressValidationOptions = self.client.factory.create('AddressValidationOptions')\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.AddressValidationOptions)",
"def services(request):\n cart = Cart(request)\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/services.html',\n {\n 'title':'Услуги',\n 'cart': cart,\n 'message':'Наши услуги',\n 'year':datetime.now().year,\n }\n )",
"def _prepare_wsdl_objects(self):\r\n self.TrackPackageIdentifier = self.client.factory.create('TrackPackageIdentifier')\r\n # Default to tracking number.\r\n self.TrackPackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'",
"def get_services(request):\n try:\n company = Company.objects.get(pk=int(request.POST['company_id']))\n\n services = []\n for service in company.services.all():\n services.append({'name': service.name, 'id': service.pk})\n\n return format_ajax_response(True, \"Company's services retrieved successfully.\", {'services': services})\n except Exception as ex:\n logging.error(\"failed to get_services: %s\" % ex)\n return format_ajax_response(False, \"There was a problem retrieving the company's services.\")"
] | [
"0.56936353",
"0.5549895",
"0.5401282",
"0.5395464",
"0.53373414",
"0.52628684",
"0.5209494",
"0.52003735",
"0.5165469",
"0.5165354",
"0.5142136",
"0.506702",
"0.4981452",
"0.49758002",
"0.495154",
"0.49393404",
"0.4938176",
"0.49051088",
"0.489236",
"0.4879681",
"0.487864",
"0.4876898",
"0.48367506",
"0.4825437",
"0.48251724",
"0.4821763",
"0.48098925",
"0.4794873",
"0.47700217",
"0.47515857"
] | 0.7150832 | 0 |
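A wiring sketch for the DjangoSoapApp view class in the row above, showing how such a view is typically exposed in a project's urls.py. Everything beyond the DjangoSoapApp(services, tns) call itself is an assumption: HelloService, the module paths, and the target namespace are hypothetical, and the instance is treated as a directly routable view callable only because the query describes the class as a Django view (the snippet itself does not show its base class).

# Hypothetical urls.py for a modern Django layout; names and paths are illustrative only.
from django.urls import path
from django.views.decorators.csrf import csrf_exempt

from myapp.soap import DjangoSoapApp      # the wrapper class from the row above (assumed module path)
from myapp.services import HelloService   # an assumed soaplib service class

# One view instance per (services, tns) pair; the namespace string is an assumption.
hello_soap = DjangoSoapApp([HelloService], 'http://example.com/hello')

urlpatterns = [
    # SOAP clients POST without a CSRF token, so such endpoints are usually csrf-exempted.
    path('soap/hello/', csrf_exempt(hello_soap), name='hello-soap'),
]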
Values tagged True are right, those tagged False are left. >>> partition([(True, 1), (False, 2), (True, 3)]) ([2], [1, 3]) | def partition(xs):
left, right = [], []
for b, x in xs:
if b:
right.append(x)
else:
left.append(x)
return left, right | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def partition(is_included_fn, items):\n item_by_exclusion = { True : [], False : [] }\n for item in items:\n # \"not\" to normalise all values to either True or False\n item_by_exclusion[not is_included_fn(item)].append(item)\n return (item_by_exclusion[False], item_by_exclusion[True])",
"def partition(rows: list, question: Question) -> (list, list):\n true_rows = []\n false_rows = []\n for row in rows:\n if question.match(row): # True\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows",
"def _partition(data: list, pivot) -> tuple:\n less, equal, greater = [], [], []\n for element in data:\n if element < pivot:\n less.append(element)\n elif element > pivot:\n greater.append(element)\n else:\n equal.append(element)\n return less, equal, greater",
"def partition(iterable, predicate):\n passes = list()\n fails = list()\n for element in iterable:\n if predicate(element):\n passes.append(element)\n else:\n fails.append(element)\n return passes, fails",
"def partition(iterable : Iterable[T], predicate : Callable[[T], bool]) -> Tuple[Iterable[T], Iterable[T]]:\n\n iter1, iter2 = tee(iterable)\n return filterfalse(predicate, iter1), filter(predicate, iter2)",
"def partition(self, rows, question):\n true_rows, false_rows = [], []\n for row in rows:\n if question.match(row):\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows",
"def partition(pred, iterable):\n stream = list(iterable)\n matched = list(itertools.takewhile(pred, stream))\n unmatched = list(itertools.dropwhile(pred, stream))\n return matched, unmatched",
"def partition(lst, pred):\n start = []\n append = start.append\n\n while lst:\n x, lst_ = lst.uncons\n if pred(x):\n break\n lst = lst_\n append(x)\n\n return List(start), lst",
"def partition (s, cmp, pivotalea=False):\n global cpt\n l=s['data']\n left=s['left']\n right=s['right']\n if pivotalea:\n pos_pivot=random_pivot(s)\n l[left],l[pos_pivot]=l[pos_pivot],l[left]\n pos_pivot=left\n pivot=l[pos_pivot]\n left+=1\n while left<=right:\n if cmp(l[left],l[pos_pivot])==-1:\n l[left],l[pos_pivot]=l[pos_pivot],l[left]\n pos_pivot+=1\n left+=1\n else:\n l[left],l[right]=l[right],l[left]\n right-=1\n s1={'data' : l, 'left' : s['left'], 'right': pos_pivot - 1}\n s2={'data' : l, 'left' : pos_pivot, 'right': s['right']}\n return (s1,s2)",
"def partition_strict(function, items):\n left = []\n right = []\n for item in items:\n (left if function(item) else right).append(item)\n return (left, right)",
"def partition(lst_part, p, r):\n cond_part = 0\n assign_part = 0\n pivot = lst_part[r] # Set pivot to last (right-most) value in list\n pivot_index = p\n for j in range(p,r):\n cond_part +=1\n if lst_part[j] <= pivot:\n lst_part[pivot_index],lst_part[j] = swap(lst_part[pivot_index],lst_part[j]) \n assign_part +=3\n pivot_index += 1 \n lst_part[pivot_index],lst_part[r] = swap(lst_part[pivot_index],lst_part[r])\n assign_part +=3\n return pivot_index,cond_part,assign_part",
"def partition(rows, question):\n true_rows, false_rows = [], []\n for row in rows:\n if question.match(row):\n # the row's value of the column was greater than or equal to the questions value\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows",
"def partition(x, low, high, cond, assign):\n\tassign += 3\n\tpivot_item = x[low]\n\tleft = low\n\tright = high\n\twhile left < right:\n\t\tcond += 1\n\t\twhile x[left] <= pivot_item:\n\t\t\tcond += 1\n\t\t\tassign += 1\n\t\t\tleft += 1\n\t\twhile x[right] > pivot_item:\n\t\t\tcond += 1\n\t\t\tassign += 1\t\t\t\n\t\t\tright -= 1\n\t\tcond += 1\n\t\tif left < right:\n\t\t\tassign += 2\n\t\t\tholder = x[left]\n\t\t\tx[left] = x[right]\n\t\t\tx[right] = holder\n\tassign += 2\n\tx[low] = x[right]\n\tx[right] = pivot_item\n\treturn right, cond, assign",
"def partition(ary, predicate, extra_args=[], preamble=\"\", queue=None, wait_for=None):\n if len(ary) > np.iinfo(np.uint32).max:\n scan_dtype = np.uint64\n else:\n scan_dtype = np.uint32\n\n extra_args_types, extra_args_values = extract_extra_args_types_values(extra_args)\n\n knl = _partition_template.build(\n ary.context,\n type_aliases=((\"item_t\", ary.dtype), (\"scan_t\", scan_dtype)),\n var_values=((\"predicate\", predicate),),\n more_preamble=preamble, more_arguments=extra_args_types)\n\n out_true = cl.array.empty_like(ary)\n out_false = cl.array.empty_like(ary)\n count = ary._new_with_changes(data=None, offset=0,\n shape=(), strides=(), dtype=scan_dtype)\n\n # **dict is a Py2.5 workaround\n evt = knl(ary, out_true, out_false, count, *extra_args_values,\n **dict(queue=queue, wait_for=wait_for))\n\n return out_true, out_false, count, evt",
"def partition(l, left, right):\n nonlocal c, r, w\n # take last index as pivot (item's index using to compare)\n pivot = right\n divider = left\n\n for i in range(left, right):\n c += 1\n r += 2\n if l[i] < l[pivot]: #If current element is smaller than pivot, swap them\n l[i], l[divider] = l[divider], l[i]\n divider += 1 # increment index of smaller elem\n w += 2\n l[pivot], l[divider] = l[divider], l[pivot]\n w += 2\n r += 2\n\n return divider",
"def _partition(data: List[Merchant], pivot: Merchant) -> Tuple[List[Merchant], List[Merchant], List[Merchant]]:\n\n # partitions the lists into three lists: those of less, those of greater and those of less value in terms of location number\n\n less, equal, greater = [], [], []\n for element in data:\n if element.merchant_location < pivot.merchant_location:\n less.append(element)\n elif element.merchant_location > pivot.merchant_location:\n greater.append(element)\n else:\n equal.append(element)\n return less, equal, greater",
"def test_partition(self):\n # one swap at the end\n list = [5, 6, 7, 8, 9, 2]\n partition(list, 0, 5)\n # assert list == [2, 6, 7, 8, 9, 5] # should be improved in future",
"def partitionData(data, labels, partition):\n\treturn [s[partition] for s in data], labels[partition]",
"def partition(a_list, first_position, last_position):\n pivot_value = a_list[first_position]\n\n left_mark = first_position + 1\n right_mark = last_position\n\n done = False\n while not done:\n\n # ignore anything on the left smaller than the pivot value\n while left_mark <= right_mark and a_list[left_mark] <= pivot_value:\n left_mark += 1\n\n # ignore anything on the right bigger than the pivot value\n while right_mark >= left_mark and a_list[right_mark] > pivot_value:\n right_mark -= 1\n\n if right_mark < left_mark:\n # items are in the correct position\n done = True\n else:\n # swap\n a_list[left_mark], a_list[right_mark] = a_list[right_mark], a_list[left_mark]\n\n # once everything is ordered, move the pivot value to its split point\n a_list[first_position], a_list[right_mark] = a_list[right_mark], a_list[first_position]\n\n return right_mark",
"def partition(first):\n lt = None\n eq = None\n gt = None\n\n p = first.next\n\n # put first element into equal list\n first.next = eq\n eq = first\n\n while p is not None:\n q = p\n p = p.next\n\n if q.value < eq.value:\n q.next = lt\n lt = q\n elif q.value > eq.value:\n q.next = gt\n gt = q\n else:\n q.next = eq\n eq = q\n\n # \"first\" is the last equal element\n return lt, eq, first, gt",
"def rpartition(self, x):\n pass",
"def partition(array, left, right, pivot_ind):\n pivot = array[pivot_ind]\n array[pivot_ind], array[right] = array[right], array[pivot_ind]\n index = left\n for i in range(left,right):\n if array[i] <= pivot:\n array[i], array[index] = array[index], array[i]\n index += 1\n array[index], array[right] = array[right], array[index]\n return index",
"def partition(l, left, right): \n nonlocal c, r, w\n \n # take middle index as pivot (item's index using to compare)\n pivot = left + (right - left) // 2\n i = left - 1\n j = right + 1\n \n c += 1\n while True:\n # Check elements on the left pivot side\n i += 1\n while l[i] < l[pivot]:\n i += 1\n r += 2\n c += 1\n\n # Check elements on the right pivot side\n j -= 1\n while l[j] > l[pivot]:\n j -= 1\n r += 2\n c += 1\n c += 1\n\n if i >= j:\n return j\n # Only if an element at i (on the left of the pivot) is larger than the\n # element at j (on right right of the pivot), swap them\n l[i], l[j] = l[j], l[i]\n w += 2\n r += 2",
"def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)",
"def partition_nip(A):\n x = A[-1]\n left = []\n right = []\n for j in range(0, len(A)-1):\n if A[j] <= x:\n left.append(A[j])\n else:\n right.append(A[j])\n return left, right, x",
"def trivial_partition(set_):\n ensure_countable(set_)\n\n result = ((x,) for x in set_)\n return _harmonize_subset_types(set_, result)",
"def partition(self, sequence, lo, hi, pivot):\n swap(sequence, pivot, lo)\n pivot = lo\n p_value = self.key(sequence[pivot])\n\n lower, equal, higher = lo + 1, lo + 1, hi - 1\n while higher >= equal:\n if self.key(sequence[equal]) > p_value:\n swap(sequence, equal, higher)\n higher -= 1\n elif self.key(sequence[equal]) == p_value:\n equal += 1\n else:\n swap(sequence, lower, equal)\n lower += 1\n equal += 1\n\n # depends on the which pointer had been changed the last time.\n # design flaw? Control flow should be more uniform.\n if self.key(sequence[lower]) > p_value:\n lower -= 1\n if equal == len(sequence):\n equal -= 1\n if self.key(sequence[equal]) > p_value:\n equal -= 1\n\n swap(sequence, pivot, equal)\n\n return lower, equal",
"def partition(game, player):\n height = game.height\n width = game.width\n blanks = game.get_blank_spaces()\n has_partition = False\n partition_col = int(game.width/2)\n partition_row = int(game.height/2)\n moves = game.get_legal_moves(player)\n if moves:\n player_location = game.get_player_location(player)\n for i in range(2, width - 3): #search for vertical partitions\n if (0,i) not in blanks and (0,i+1) not in blanks:\n j = 1\n while j < height and (j, i) not in blanks and (j, i + 1) not in blanks:\n j += 1\n if j == height:\n has_partition = True\n pb = partition_blanks(game, (0,i))\n if pb[0] > pb[1]: #more blanks on the left of the partition\n for move in moves:\n if move[1] < i:\n return has_partition, True\n return has_partition, False\n else: #more blanks on right of partition\n for move in moves:\n if move[1] > i + 1:\n return has_partition, True\n return has_partition, False\n\n for i in range(2, height - 3): #seach for horizontal partitions\n if (i,0) not in blanks and (i+1,0) not in blanks:\n j = 1\n while j < width and (i,j) not in blanks and (i+1, j) not in blanks:\n j += 1\n if j == width:\n has_partition = True\n pb = partition_blanks(game, (i, 0))\n if pb[0] > pb[1]: #more blanks on top of partition\n for move in moves:\n if move[0] < i:\n return has_partition, True\n return has_partition, False\n else: #more blanks below partition\n for move in moves:\n if move[0] > i + 1:\n return has_partition, True\n return has_partition, False\n\n return has_partition, False",
"def unif_partition(l):\n return tuple(i/l for i in range(l+1))",
"def partition(self, sequence, lo, hi, pivot):\n swap(sequence, pivot, lo)\n pivot = lo\n p_value = self.key(sequence[pivot])\n\n lo += 1\n while hi > lo:\n if self.key(sequence[lo]) > p_value:\n swap(sequence, lo, hi)\n hi -= 1\n else:\n lo += 1\n\n # depends on the which pointer had been changed the last time.\n # design flaw? Control flow should be more uniform.\n if self.key(sequence[lo]) > p_value:\n lo -= 1\n\n swap(sequence, pivot, lo)\n pivot = lo\n\n return pivot, pivot+1"
] | [
"0.6702738",
"0.6410004",
"0.63018924",
"0.61310256",
"0.5890226",
"0.5880578",
"0.5873819",
"0.58590937",
"0.5840328",
"0.5830201",
"0.5793266",
"0.5698566",
"0.56066245",
"0.55598056",
"0.5480577",
"0.54560906",
"0.53937507",
"0.5380315",
"0.5350227",
"0.53156364",
"0.5186437",
"0.5170297",
"0.5166927",
"0.5152789",
"0.5150932",
"0.51349586",
"0.50748724",
"0.5055194",
"0.50497985",
"0.504605"
] | 0.6741035 | 0 |
Increase the indentation of a string or a list of lines. >>> print(indent(['ab','cd'])) ab cd >>> print(indent('ab'+chr(10)+'cd')) ab cd >>> indent(['ab','cd'], join=False) [' ab', ' cd'] | def indent(lines_or_str, indent_str=' ', join=True):
if isinstance(lines_or_str, str):
lines = lines_or_str.split('\n')
else:
lines = lines_or_str
indented = (indent_str + line.rstrip() for line in lines)
if join:
return '\n'.join(indented)
else:
return list(indented) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_indent(script, indent=2):\n if not isinstance(script, list):\n script = [script]\n\n indent = ' ' * indent\n return [indent + s for s in script]",
"def out_indent(indent, *args):\n s = \"\"\n s += indent * \" \"\n s += \" \".join(args)\n return s",
"def _text_indent(text, indent):\n # type: (str, str) -> str\n lines = [line.strip() for line in text.strip().split('\\n')]\n return indent + indent.join(lines)",
"def _indent(s, width=4, skip_first_line=False):\n lines = s.splitlines(1)\n indentstr = ' '*width\n if skip_first_line:\n return indentstr.join(lines)\n else:\n return indentstr + indentstr.join(lines)",
"def indent(lines, spaces=4):\n if isinstance(lines, str):\n text = [lines]\n text = \"\\n\".join(lines)\n return textwrap.indent(text, \" \" * spaces)",
"def indent(str, level):\n if level == 0: return str\n return \"\\n\".join(\"\\t\" * level + line for line in str.splitlines())",
"def indent(txt, indent_level):\n indent = \" \" * indent_level\n return \"\\n\".join(indent + x for x in txt.splitlines())",
"def reindent(text, indent):\n\n lines = textwrap.dedent(text).split('\\n')\n while lines and not lines[0].strip():\n lines.pop(0)\n while lines and not lines[-1].strip():\n lines.pop()\n return indent + ('\\n' + indent).join(lines)",
"def Indent(indents):\n return ' ' * (2 * indents)",
"def indent(text, indentation, width=None, pad_character=\" \"):\n\n text = pad_character * indentation + text\n length = len(text)\n if width is None or length >= width:\n return text\n else:\n return text + pad_character * (width - length)",
"def indent(string, prefix=\" \"):\n return \"\\n\".join([prefix + line for line in string.split(\"\\n\")])",
"def indent(string, level=1):\n spaces = ' ' * (level * 4)\n return \"%s%s\" % (spaces, string)",
"def _indent(text):\n prefix = ' ' * 4\n\n def prefixed_lines():\n for line in text.splitlines(True):\n yield (prefix + line if line.strip() else line)\n\n return ''.join(prefixed_lines())",
"def indent(text, prefix):\n lines = text\n newline = u\"\"\n if isinstance(text, basestring):\n lines = text.splitlines(True)\n elif lines and not lines[0].endswith(\"\\n\"):\n # -- TEXT LINES: Without trailing new-line.\n newline = u\"\\n\"\n return newline.join([prefix + unicode(line) for line in lines])",
"def indent(self, indent: str = default_indent):\n ori_bullet = self._bullet\n ori_indent = self._indent\n if not self._bullet:\n self._indent = self._indent + indent\n self._bullet = \"\"\n self._update()\n try:\n yield self\n finally:\n self._bullet = ori_bullet\n self._indent = ori_indent\n self._update()",
"def indent(text, count=1, prefix=\" \"):\n lines = text.split(\"\\n\")\n return \"\\n\".join(\"{}{}\".format(prefix * count, line)\n for line in lines)",
"def _indent_text(self, lines, level=1):\n prefix = ' ' * (4 * level)\n if isinstance(lines, basestring):\n return prefix + lines\n else:\n return '\\n'.join(\n prefix + line\n for line in lines\n )",
"def indent(input_string):\n return '\\n '.join(input_string.split('\\n'))",
"def indent_code(self, code):\n\n if isinstance(code, string_types):\n code_lines = self.indent_code(code.splitlines(True))\n return ''.join(code_lines)\n\n tab = \" \"\n inc_token = ('{', '(', '{\\n', '(\\n')\n dec_token = ('}', ')')\n\n code = [ line.lstrip(' \\t') for line in code ]\n\n increase = [ int(any(map(line.endswith, inc_token))) for line in code ]\n decrease = [ int(any(map(line.startswith, dec_token)))\n for line in code ]\n\n pretty = []\n level = 0\n for n, line in enumerate(code):\n if line == '' or line == '\\n':\n pretty.append(line)\n continue\n level -= decrease[n]\n pretty.append(\"%s%s\" % (tab*level, line))\n level += increase[n]\n return pretty",
"def indent(text, prefix, predicate=...): # -> str:\n ...",
"def _increaseindentation(self):\n self._indentlist.append(self._curindent)\n if not self._equalsigns[-1]:\n self._curindent = self._curindent + self._indent",
"def addIndent( self, increment=0 ):\n self.context.append( self.context[-1] )\n self.log_indent.debug( \"addIndent {!s}: {!r}\".format(self.lastIndent, self.context) )",
"def indent(text, n=4):\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)",
"def test_indent():\n\n multiline_string = \"\"\"test\ntest1\ntest2\ntest3\"\"\"\n\n indented_multiline_string = \"\"\" test\n test1\n test2\n test3\"\"\"\n\n assert indented_multiline_string == _indent(multiline_string, 4)",
"def indentation(self, indent: str) -> None:\n self._indent = indent\n self._update()",
"def make_indentation(indent_size, part=u\" \"):\n return indent_size * part",
"def Indent( elem, level=0, indent=' ' ):\n i = \"\\n\" + level * indent\n if len( elem ):\n if not elem.text or not elem.text.strip():\n elem.text = i + indent\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n Indent( elem, level + 1, indent )\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and ( not elem.tail or not elem.tail.strip() ):\n elem.tail = i",
"def indent_wrap(s, indent=0, wrap=80):\n split = wrap - indent\n chunks = [indent * \" \" + s[i:i + split] for i in range(0, len(s), split)]\n return \"\\n\".join(chunks)",
"def insert_indent(event):\n env = XSH.env\n event.cli.current_buffer.insert_text(env.get(\"INDENT\"))",
"def indent_multiline_string(in_string, spaces):\n if in_string.find(\"\\n\") != -1:\n return \"\\n\".join([(' ' * spaces) + line for line in in_string.split(\"\\n\")])\n else:\n return in_string"
] | [
"0.6404776",
"0.6222365",
"0.62022024",
"0.606965",
"0.6027597",
"0.5771415",
"0.57700354",
"0.57553583",
"0.57324594",
"0.57014495",
"0.56932503",
"0.5672846",
"0.56462896",
"0.5641806",
"0.5585611",
"0.55051905",
"0.5477424",
"0.5429202",
"0.53347677",
"0.5225045",
"0.5221919",
"0.5212434",
"0.51778376",
"0.5156173",
"0.51192623",
"0.510625",
"0.5039957",
"0.5028059",
"0.5025659",
"0.4964608"
] | 0.64578503 | 0 |
Runs the test expressed in test.yaml in this directory. Returns an info tuple (bad, good) where bad is empty if test succeeded. | def run_test(test_dir, verbose, cleanup=False):
try:
with open(path.join(test_dir, 'test.yaml'), 'r') as y:
lines = yaml.load(y)
except Exception as e:
return ['Error when loading test.yaml:', str(e)], []
if not isinstance(lines, list):
lines = [lines]
###
# Run all commands:
run = []
seen = set()
def rm_if_unseen(filename):
if filename not in seen:
if os.path.exists(filename):
os.remove(filename)
seen.add(filename)
return filename
for line in lines:
if isinstance(line, dict) and 'clean' in line:
clean = True
cmd = line['clean']
else:
clean = False
cmd = line
if not isinstance(cmd, str):
return ['Invalid cmd in test.yaml:', str(cmd)], []
prefix = 'clean_' if clean else ''
stdout = path.join(test_dir, prefix + 'stdout')
stderr = path.join(test_dir, prefix + 'stderr')
if cleanup and not clean:
exc = 0
else:
with open(rm_if_unseen(stdout), 'a') as out:
with open(rm_if_unseen(stderr), 'a') as err:
for fd in [out, err]:
fd.write('>>> ' + cmd + '\n')
fd.flush()
with cd(test_dir):
run.append(cmd)
exc = call(cmd, stdout=out, stderr=err, shell=True)
if exc != 0 and not clean:
info = [cmd + ' failed (exit code: ' + str(exc) + ')']
for f in [stdout, stderr]:
with open(f, 'r') as fd:
info.append(f + '[-8:]:')
info.extend(indent(fd.readlines()[-8:], join=False))
return info, []
if cleanup:
for f in [stdout, stderr]:
if os.path.exists(f):
os.remove(f)
continue
if cleanup:
return [], run
###
# All commands run, check the diffs with goal_ files and directories:
return partition(check_goal_diffs(test_dir, verbose=verbose)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_and_parse(test_description: Tuple[str, str, List[str]]):\n test_executable, test_name, performance_counters = test_description\n try:\n test_output = run_test(test_executable, test_name, performance_counters)\n print(f'Finished running test {test_name}', file=sys.stderr)\n return (test_name, parse_perf_stat_output(test_output,\n performance_counters))\n except RuntimeError:\n return None",
"def test():\n tests = unittest.TestLoader().discover('tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n else:\n return 1",
"def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True",
"def run(self):\n list_test_scenarios = self.__get_list_scenarios_in_folder()\n\n if not list_test_scenarios:\n utils.print_error(\n \"\\n{}\\n\".format(constant.ERR_CANNOT_FIND_ANY_TEST_SCENARIOS))\n exit(1)\n\n (tests_pass, tests_fail) = self.__execute_tests(list_test_scenarios)\n\n complete_message = constant.INFO_TEST_PASS_FAIL.format(\n tests_pass, tests_fail)\n\n print(complete_message)\n\n self.__execute_reporter()",
"def run( self, test ):\n\n result = self._makeResult()\n test( result )\n result.printErrors()\n self.stream.writeln( result.separator2 )\n run = result.testsRun\n self.stream.writeln()\n\n if not result.wasSuccessful():\n self.stream.write( \"FAILED (\" )\n failed, errored = map( len, ( result.failures, result.errors ) )\n if failed:\n self.stream.write( \"failures=%d\" % failed )\n if errored:\n if failed: self.stream.write( \", \" )\n self.stream.write( \"errors=%d\" % errored )\n self.stream.writeln( \")\" )\n else:\n self.stream.writeln( \"OK\" )\n \n return result",
"def excecute(self):\r\n self.initialize()\r\n self.addteststeps()\r\n for teststep in self.test_steps_list:\r\n if teststep.run() == TestStatus.PASS:\r\n logging.info(\"test {} passed the test\".format(teststep.stepname))\r\n self.result = TestStatus.PASS\r\n else:\r\n logging.warn(\"test {} failed the test\".format(teststep.stepname))\r\n self.result = TestStatus.FAIL\r\n self.cleanup()\r\n return self.result",
"def Checktest(self, expectedoutput):\n\n if expectedoutput == 0:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"The configuration file does not exist.\", result.output)\n return\n\n if expectedoutput == 1:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Name: Name\", result.output)\n self.assertIn(\"Email: [email protected]\", result.output)\n self.assertIn(\"Github username: GhUser\", result.output)",
"def test():\n tests = unittest.TestLoader().discover('.')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n sys.exit(result)",
"def test():\n tests = unittest.TestLoader().discover('app/test', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def test_running_ok():\n cli_result = subprocess.run(\n ['kaiba', 'tests/files/good_config.json', 'tests/files/input.json'],\n capture_output=True,\n )\n assert cli_result.returncode == 0",
"def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")",
"def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def run(cls): \n tests_to_run = cls.config.TESTS # A list of 5-tuple elements specifying the tests to run. See the\n # 'Test Setup' section in config.py.template for more info.\n test_group_name = \"Alchemist Tests\" # A short string identifier for this test run.\n output_dir = cls.config.OUTPUT_DIR # The output file where we write results.\n \n try:\n os.makedirs(output_dir,0o777)\n except:\n pass\n num_tests_to_run = len(tests_to_run)\n\n print(OUTPUT_DIVIDER_STRING)\n if num_tests_to_run == 1:\n print(\"Running %d test in %s\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Running %d tests in %s\" % (num_tests_to_run, test_group_name))\n failed_tests = []\n\n cls.before_run_tests()\n \n spark_settings = []\n for i in cls.config.SPARK_SETTINGS:\n spark_settings.append(i.to_array()[0])\n \n output_settings = []\n for i in cls.config.OUTPUT_SETTINGS:\n output_settings.append(i.to_array()[0])\n \n main_class = \"altest.AlTest\"\n\n for meta_data, opt_sets in tests_to_run:\n print(OUTPUT_DIVIDER_STRING + '\\n')\n# print(\"Running test command: '%s' ... \" % main_class)\n \n meta = {}\n meta_pairs = [i.to_tuple() for i in meta_data]\n for mp in meta_pairs:\n meta[mp[0].replace('-', '_')] = mp[1].replace('0x20', ' ')\n \n meta_settings = []\n for i in meta_data:\n meta_settings.append(i.to_array()[0])\n \n# stdout_filename = \"%s/%s.out\" % (output_dir, meta['short_name'])\n# stderr_filename = \"%s/%s.err\" % (output_dir, meta['short_name'])\n# \n# out_file = open(output_dir + \"/\" + meta['short_name'] + \".out\", 'w')\n\n # Run a test for all combinations of the OptionSets given, then capture\n # and print the output.\n opt_set_arrays = [i.to_array() for i in opt_sets]\n for opt_list in itertools.product(*opt_set_arrays):\n\n cmd = cls.get_spark_submit_cmd(spark_settings, main_class, output_settings, meta_settings, opt_list)\n# print(\"\\nSetting env var SPARK_SUBMIT_OPTS: %s\" % java_opts_str)\n# test_env[\"SPARK_SUBMIT_OPTS\"] = java_opts_str\n print(\"Running command:\")\n print(\"%s\\n\" % cmd)\n Popen(cmd, shell=True, env=test_env).wait()\n\n try:\n src = output_dir + meta['short_name'] + '_latest/'\n src_files = os.listdir(src)\n src_file = src_files[0][:-4]\n new_dir = output_dir + src_file\n os.makedirs(new_dir)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if (os.path.isfile(full_file_name)):\n shutil.copy(full_file_name, new_dir)\n except:\n pass\n \n# result_string = cls.process_output(config, meta['short_name'], opt_list,\n# stdout_filename, stderr_filename)\n# print(OUTPUT_DIVIDER_STRING)\n# print(\"\\nResult: \" + result_string)\n# print(OUTPUT_DIVIDER_STRING)\n# if \"FAILED\" in result_string:\n# failed_tests.append(meta['short_name'])\n# \n# \n# out_file.write(result_string + \"\\n\")\n# out_file.flush()\n\n if num_tests_to_run == 1:\n print(\"Finished running %d test in %s.\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Finished running %d tests in %s.\" % (num_tests_to_run, test_group_name))\n# print(\"\\nNumber of failed tests: %d, failed tests: %s\" %\n# (len(failed_tests), \",\".join(failed_tests)))\n print(OUTPUT_DIVIDER_STRING)",
"def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='*test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0 \n return 1",
"def ok_to_run(self):\n # READING DOC STRING, LOOKING FOR VERSION\n doc_dict = self.doc_dict\n skip_test = False\n msg = ''\n if 'deprecated' in doc_dict:\n msg = \"This test has been deprecated\"\n skip_test = True\n elif 'version' in doc_dict and int(self.core.config.get('TestRun', 'driver_version')) < doc_dict['version']:\n msg = \"Features unavailable in this version: {}\".format(doc_dict['version'])\n skip_test = True\n elif 'datacenters' in doc_dict and len([s for s in doc_dict['datacenters'] if s in self.core.config.get('TestRun', 'datacenters')]) == 0:\n msg = \"Test only works in {}\".format(doc_dict['datacenters'])\n skip_test = True\n elif 'no_environment' in doc_dict and self.core.config.get('TestRun', 'datacenters').upper() in doc_dict['no_environment']:\n msg = \"Test does not work in {}\".format(doc_dict['no_environment'])\n skip_test = True\n if skip_test:\n self.core.write(\"\\n\" + \"_\" * 40 + \"\\n{}\".format(msg), level='error')\n if self.core.driver is not None:\n self.core.driver.close_driver()\n self.core.driver_state = False\n self.skipTest(msg)",
"def test():\n tests = unittest.TestLoader().discover('tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def test():\n tests = unittest.TestLoader().discover('./tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def test():\n tests = unittest.TestLoader().discover('./tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)",
"def test():\n tests = unittest.TestLoader().discover('src/test', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1",
"def run_tests(self, test_labels):\n import pytest\n\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n if self.verbosity == 2:\n argv.append('--verbose')\n if self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n\n argv.extend(test_labels)\n return pytest.main(argv)",
"def test_converter_yaml(testname, fail_expected, error_text=None):\n test_converter_yaml.__name__ = \"yaml_\" +testname\n _test_converter(testname, fail_expected, error_text=error_text, format=\"yaml\")",
"def main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir-metadata',\n type=pathlib.Path, required=True)\n\n args = parser.parse_args()\n\n with LockedMetadata(args.dir_metadata, __file__) as md:\n summary_dict = {}\n passing_tests = []\n failing_tests = []\n for f in md.tests_pickle_files:\n try:\n trr = TestRunResult.construct_from_pickle(f)\n summary_dict[f\"{trr.testname}.{trr.seed}\"] = \\\n ('PASS' if trr.passed else\n 'FAILED' + (\" {T}\" if (trr.failure_mode == Failure_Modes.TIMEOUT) else \"\"))\n if trr.passed:\n passing_tests.append(trr)\n else:\n failing_tests.append(trr)\n except RuntimeError as e:\n failing_tests.append(\n TestRunResult(\n name='broken_test',\n failure_message=str(e)\n ))\n\n md.regr_log = md.dir_run/'regr.log'\n md.regr_log_junit = md.dir_run/'regr_junit.xml'\n md.regr_log_junit_merged = md.dir_run/'regr_junit_merged.xml'\n\n # Write results as junit_xml\n with open(md.regr_log_junit,\n 'w',\n encoding='UTF-8') as junit_xml,\\\n open(md.regr_log_junit_merged,\n 'w',\n encoding='UTF-8') as junit_merged_xml:\n output_run_results_junit_xml(passing_tests, failing_tests,\n junit_xml,\n junit_merged_xml)\n\n with open(md.regr_log, 'w', encoding='UTF-8') as outfile:\n # Write results as regr.log (custom logfile format)\n output_results_text(passing_tests, failing_tests, summary_dict,\n outfile)\n\n test_summary_dict = create_test_summary_dict(passing_tests +\n failing_tests)\n\n cov_summary_dict = {}\n if md.simulator == \"xlm\":\n cov_summary_dict = create_cov_summary_dict(md)\n else:\n print(\"Warning: Not generating coverage summary, unsupported \" \\\n f\"simulator {md.simulator}\")\n\n html_report_filename = md.dir_run/'report.html'\n with open(html_report_filename, 'w') as outfile:\n output_results_html(md, passing_tests + failing_tests,\n test_summary_dict, cov_summary_dict, outfile)\n\n json_report_filename = md.dir_run/'report.json'\n with open(json_report_filename, 'w') as json_report_file:\n output_results_dvsim_json(md, test_summary_dict, cov_summary_dict,\n json_report_file)\n\n svg_summary_filename = md.dir_run/'summary.svg'\n with open(svg_summary_filename, 'w') as svg_summary_file:\n output_results_svg(test_summary_dict, cov_summary_dict,\n svg_summary_file)\n\n # Print a summary line to the terminal\n print(gen_summary_line(passing_tests, failing_tests))\n\n # Succeed if no tests failed\n return 1 if failing_tests else 0",
"def all(\n command,\n):\n # If we get to this point all tests listed in 'pre' have passed\n # unless we have run the task with the --warn flag\n if not command.config.run.warn:\n print(\n \"\"\"\nAll Checks Passed Successfully\n==========================================\n\"\"\"\n )",
"def test_basic_workflow(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n import basic\n tmpdir = tempfile.mkdtemp()\n package_path = os.path.join(tmpdir, 'workflow.tar.gz')\n try:\n compiler.Compiler().compile(basic.save_most_frequent_word, package_path)\n with open(os.path.join(test_data_dir, 'basic.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(package_path)\n\n self.maxDiff = None\n # Comment next line for generating golden yaml.\n self.assertEqual(golden, compiled)\n finally:\n # Replace next line with commented line for gathering golden yaml.\n shutil.rmtree(tmpdir)\n # print(tmpdir)",
"def run_tests():\n with open('buildspec.yml', 'r') as stream:\n buildspec: dict = yaml.load(stream)\n test_commands = buildspec.get('tests', [])\n if len(test_commands) == 0:\n exit(0)\n\n results = []\n\n for test_command in test_commands:\n print(f'Running `{test_command}`')\n p = subprocess.Popen(test_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = ''\n for c in iter(p.stdout.readline, b''):\n str_line = c if type(c) is str else c.decode('UTF-8')\n sys.stdout.write(str_line)\n output += str_line\n\n p.communicate()\n results.append({\n 'command': test_command,\n 'status': 'passed' if p.returncode == 0 else 'failed',\n 'output': output\n })\n\n post_to_github(results)"
] | [
"0.62012964",
"0.61662644",
"0.6133187",
"0.6129144",
"0.61103195",
"0.6104685",
"0.6086023",
"0.6045092",
"0.6040847",
"0.6023762",
"0.5999768",
"0.5942272",
"0.5941968",
"0.5941968",
"0.5941968",
"0.5941968",
"0.59351724",
"0.59348893",
"0.59168726",
"0.5899236",
"0.58979833",
"0.58979833",
"0.5810042",
"0.5794015",
"0.57935214",
"0.5791392",
"0.578834",
"0.575512",
"0.5751446",
"0.57462853"
] | 0.66893864 | 0 |
Removes the goal_ prefix the last part of a path. >>> goal_('test/example/goal_stdout') 'test/example/stdout' | def goal_(s):
a, b = path.split(s)
return path.join(a, b[len('goal_'):]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _cleanup_path(path):\n return string.join(filter(None, string.split(path, '/')), '/')",
"def remove_upper_level_references(path):\n return os.path.normpath(\"/\" + path).lstrip(\"/\")",
"def noTrailingSlash(path):\n return path.split('/')[0]",
"def clean_path(path: str) -> str:\n previous_path = \"\"\n next_path = path\n while next_path != previous_path:\n previous_path = next_path\n next_path = copy_annotations(path, next_path.replace(\"//\", \"/\"))\n while next_path.endswith(\"/\"):\n next_path = next_path[:-1]\n return next_path",
"def _relativize(base: str, current: str) -> str:\n if current.startswith(base):\n return current.replace(base, \"\", 1)\n return current",
"def _trim_path(path):\n if path.endswith(\"/\"):\n path = path[:-1] # remove / at the end\n \n return path",
"def remove_trailing_slash(path):\n if len(path) > 0:\n if path[len(path) - 1] == \"/\":\n return path[0:-1]\n else:\n return path\n else:\n return path",
"def filter_pathbase(val: Optional[str]) -> str:\n return os.path.basename(val or '')",
"def fix_suffix(path):\n if path[-2:] == \"/*\":\n cleaned_path = path\n else:\n cleaned_path = os.path.join(path, \"*\")\n spooq_logger.debug(\"fix_suffix: input: {inp}, output: {outp}\".format(inp=path, outp=cleaned_path))\n return cleaned_path",
"def strip_path(self):\n return self.path.replace('/', '')",
"def remove_last_part_of_url(category_url):\n return \"/\".join(category_url.split(\"/\")[:-1])",
"def strip_path(path):\n name_re = re.compile(\"[^/]*\\.([a-z]+)$\")\n return name_re.search(path).group(0)",
"def _strip_suffix(suffix, path_parts, rev, pathtype, repos, view_func):\n if not path_parts:\n return None\n suffix_len = len(suffix)\n if path_parts[-1][-suffix_len:] == suffix:\n path_parts = path_parts[:]\n if len(path_parts[-1]) == suffix_len:\n del path_parts[-1]\n else:\n path_parts[-1] = path_parts[-1][:-suffix_len]\n t = _repos_pathtype(repos, path_parts, rev)\n if pathtype == t:\n return path_parts, t, view_func\n return None",
"def cleanUpPath(path):\n # Remove extra quotes and spaces\n cleanPath = path.strip()\n cleanPath = cleanPath.strip(\"\\\"\")\n # The normalize operation needs to happen before prepend project directory \n # variable operation. After appending the placeholder variable to the output \n # is not a standard path so the normalize operation does not work correctly.\n cleanPath = ntpath.normpath(cleanPath)\n # Append project dir\n cleanPath = ntpath.join(PROJ_DIR_STR, cleanPath)\n return cleanPath",
"def backtracking(goal):\n path = []\n current = goal\n while current.came_from:\n path.insert(0, current.move)\n current = current.came_from\n return ''.join(path)",
"def basename(path):\n\n return path.rpartition(\"/\")[2]",
"def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"",
"def clean_file_path(path):\r\n\r\n return path.split(\"/\")[-1]",
"def destination_name(path: list[str], delimiter: str = \"__\") -> str:\n return f\"{delimiter.join(path)}\"",
"def path_leaf(path):\n return re.sub('[^A-Za-z0-9]+', '_', path)",
"def standardize_path(path):\n path.rstrip('/')\n if not path.startswith('.*'):\n path = '/' + path\n path = re.compile('/+').sub('/', path)\n return path",
"def basename(path: str) -> str:\n pass",
"def _NormalizePathComponent(component: str) -> str:\n if _IsPathParameter(component):\n component = component[1:-1]\n component = component.split(\":\")[-1]\n component = casing.SnakeToCamel(component)\n component = f\"{{{component}}}\"\n\n return component",
"def _GeneratePathStr(path):\n return ((len(path) - 1) * ' ') + path[-1] if path else ''",
"def clean_path(path):\n return resolved_path(path)",
"def mutate_suffix(path, board):\n x, y = get_start(board)\n path_new = get_path_same_prefix(path, board)\n while not is_path_correct(x, y, path_new, board):\n path_new = get_path_same_prefix(path, board)\n return remove_constant_points(path_new)",
"def strip_wpt_path(self, wpt_path):\n if self.is_wpt_path(wpt_path):\n return wpt_path[len(self.wpt_prefix()):]\n # Path is absolute or does not start with the prefix.\n # Assume the path already points to a valid WPT and pass through.\n return wpt_path",
"def basename(path):\r\n return path.replace(\"\\\\\", \"/\").split(\"/\")[-1]",
"def _gen_basename(param_dict, clargs):\n if param_dict['output_basename'] in ['', 'auto']:\n return clargs.input_fname.lower().split('.json')[0]\n\n else:\n return param_dict['output_basename']",
"def resolved(path: Union[str, Path]) -> str:\n return os.path.basename(os.path.abspath(path))"
] | [
"0.60307145",
"0.57229716",
"0.5715473",
"0.5688186",
"0.5674716",
"0.5644565",
"0.56161475",
"0.55927324",
"0.5566452",
"0.54711074",
"0.5469103",
"0.53800195",
"0.53389007",
"0.531018",
"0.52937573",
"0.5282302",
"0.527961",
"0.5251023",
"0.5239179",
"0.52235764",
"0.5205757",
"0.518057",
"0.5174024",
"0.5162943",
"0.51593643",
"0.5144871",
"0.5144452",
"0.5139333",
"0.5133891",
"0.5129258"
] | 0.7695428 | 0 |
Checks that files starting with goal_ match those without the prefix, and same for files immediately under directories with the goal_ prefix. | def check_goal_diffs(test_dir, verbose):
for dir, _, files in os.walk(test_dir):
if path.split(dir)[1].startswith('goal_'):
for filename in files:
yield match(path.join(dir, filename),
path.join(goal_(dir), filename),
verbose=verbose)
for filename in files:
if filename.startswith('goal_'):
yield match(path.join(dir, filename),
path.join(dir, goal_(filename)),
verbose=verbose) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _verify_prefix(prefix, files):\n for f in files:\n f = os.path.join(prefix, f)\n if not os.path.exists(f):\n return False\n else:\n return True",
"def test_matches__directories_only(self):\n path_rule1 = gitignore_parser(\"z/?u*ns/\")[0]\n \"This is a directories only rule\"\n self.assertTrue(\n path_rule1.directories_only\n )\n \"And it matches as it should be\"\n self.assertTrue(\n path_rule1.matches(\n CPath(\"z/humans/\")\n )\n )\n\n path_rule2 = gitignore_parser(\"z/?uman\")[0]\n \"This is NOT a directories only rule\"\n self.assertFalse(\n path_rule2.directories_only\n )\n \"But it matches as it should be\"\n self.assertTrue(\n path_rule2.matches(CPath(\"z/human\"))\n )\n \"It matches both filesCpath (above) and directories (below)\"\n self.assertTrue(\n path_rule2.matches(CPath(\"z/human/\"))\n )",
"def testIgnoredPrefixesDuplicateFiles(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 /src/build/foo/../file1_1.cc\nFILE 2 /src/build/bar/../file1_2.cc\nFILE 3 D:/src/build2/baz/../file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFILE 2 file1_2.cc\nFILE 3 file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n IGNORED_PREFIXES = ['\\\\src\\\\build\\\\', 'D:\\\\src\\\\build2\\\\']\n self.assertParsed(INPUT, IGNORED_PREFIXES, EXPECTED_OUTPUT)",
"def _match_all(abs_dir, matching, not_matching):\n num_not_matching = 0\n\n for expression in matching:\n if not fnmatch.fnmatch(abs_dir, expression):\n num_not_matching += 1\n\n if num_not_matching == len(matching):\n return False\n\n for expression in not_matching:\n if fnmatch.fnmatch(abs_dir, expression):\n return False\n\n return True",
"def test_input_folders_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n folder = data_dir + \"build-custom/files/more/\"\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folders_files\"\n params[\"input\"] = files + [folder]\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files.extend(list_files_folder(folder, ext=params[\"input_extension\"]))\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")",
"def test_filecompare(self):\n cmp = filecmp.dircmp(self.root_gold, self.root_target, ignore=[])\n self.recursive_dircmp(cmp)",
"def check_for_name_conflicts(targ_folder):\n\n valid_exts = os.getenv(\"PATHEXT\").split(os.pathsep) # PATHEXT is sys env variable\n valid_exts = map(lambda v: v.lower(), valid_exts)\n\n env_paths = os.getenv(\"PATH\").split(os.pathsep) # get both sys and user env variable\n env_paths = [os.path.realpath(e).lower() for e in env_paths]\n env_paths = list(set(env_paths))\n\n targ_folder = os.path.realpath(targ_folder).lower()\n\n if targ_folder in env_paths:\n env_paths.remove(targ_folder)\n\n env_path_files_dict = {}\n for p in env_paths:\n if not os.path.isdir(p):\n pass\n # if verbose:\n # print \"{d} is not a valid directory in your PATH environment variable\".format(d=p)\n else:\n env_path_files_dict[p] = os.listdir(p)\n\n targ_folder_f_list = os.listdir(targ_folder)\n for k in env_path_files_dict.iterkeys():\n for t in targ_folder_f_list:\n if os.path.isfile(os.path.join(targ_folder, t)):\n t_no_ext = os.path.splitext(t)[0]\n for f in env_path_files_dict[k]:\n if os.path.splitext(f)[1].lower() in valid_exts:\n f_no_ext = os.path.splitext(f)[0]\n if t_no_ext == f_no_ext:\n print \"Name collision:\", t\n print \"\\t\", os.path.join(targ_folder, t)\n print \"\\t\", os.path.join(k, f)",
"def get_files_prefix(prefixes, dirname, Lshow=None, Ldir=None):\n matched_files=[]\n for pref in prefixes:\n print(f\"prefix: {pref} in {whereami()} of module {__name__}\")\n for fname in os.listdir(dirname):\n # re.match finds only prefix\n if re.match(pref, fname):\n if not Ldir and os.path.isdir(fname):\n continue\n matched_files.append(fname)\n #print (pref, fname)\n return matched_files",
"def find_tryouts(data_dir, prefix):\n filenames = os.listdir(data_dir)\n return sorted(list(map(lambda d: os.path.join(data_dir, d), filter(lambda s: s.startswith(prefix), filenames))))",
"def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)",
"def matches(self, tgt_residence_dir: str) -> bool:",
"def dir_filter(item):\n return not item.startswith(\"_\")",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)",
"def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is '<pipeline>'.\")\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n\n files_fail = [\n [\".gitattributes\"],\n [\".gitignore\"],\n [\".nf-core.yml\"],\n [\".editorconfig\"],\n [\".prettierignore\"],\n [\".prettierrc.yml\"],\n [\"CHANGELOG.md\"],\n [\"CITATIONS.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [\"nextflow_schema.json\"],\n [\"nextflow.config\"],\n [\"README.md\"],\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"ci.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"conf\", \"modules.config\")],\n [os.path.join(\"conf\", \"test.config\")],\n [os.path.join(\"conf\", \"test_full.config\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"output.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"usage.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n [os.path.join(\"lib\", \"Utils.groovy\")],\n [os.path.join(\"lib\", \"WorkflowMain.groovy\")],\n ]\n\n files_warn = [\n [\"main.nf\"],\n [os.path.join(\"assets\", \"multiqc_config.yml\")],\n [os.path.join(\"conf\", \"base.config\")],\n [os.path.join(\"conf\", \"igenomes.config\")],\n [os.path.join(\".github\", \"workflows\", \"awstest.yml\")],\n [os.path.join(\".github\", \"workflows\", \"awsfulltest.yml\")],\n [os.path.join(\"lib\", f\"Workflow{short_name[0].upper()}{short_name[1:]}.groovy\")],\n [\"modules.json\"],\n [\"pyproject.toml\"],\n ]\n\n # List of strings. 
Fails / warns if any of the strings exist.\n files_fail_ifexists = [\n \"Singularity\",\n \"parameters.settings.json\",\n \".nf-core.yaml\", # yml not yaml\n os.path.join(\"bin\", \"markdown_to_html.r\"),\n os.path.join(\"conf\", \"aws.config\"),\n os.path.join(\".github\", \"workflows\", \"push_dockerhub.yml\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.md\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.md\"),\n os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo.png\"),\n \".markdownlint.yml\",\n \".yamllint.yml\",\n os.path.join(\"lib\", \"Checks.groovy\"),\n os.path.join(\"lib\", \"Completion.groovy\"),\n os.path.join(\"lib\", \"Workflow.groovy\"),\n ]\n files_warn_ifexists = [\".travis.yml\"]\n\n # Remove files that should be ignored according to the linting config\n ignore_files = self.lint_config.get(\"files_exist\", [])\n\n def pf(file_path):\n return os.path.join(self.wf_path, file_path)\n\n # First - critical files. Check that this is actually a Nextflow pipeline\n if not os.path.isfile(pf(\"nextflow.config\")) and not os.path.isfile(pf(\"main.nf\")):\n failed.append(\"File not found: nextflow.config or main.nf\")\n raise AssertionError(\"Neither nextflow.config or main.nf found! Is this a Nextflow pipeline?\")\n\n # Files that cause an error if they don't exist\n for files in files_fail:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n failed.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause a warning if they don't exist\n for files in files_warn:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n warned.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause an error if they exist\n for file in files_fail_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n failed.append(f\"File must be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that cause a warning if they exist\n for file in files_warn_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n warned.append(f\"File should be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that are ignoed\n for file in ignore_files:\n ignored.append(f\"File is ignored: {self._wrap_quotes(file)}\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed, \"ignored\": ignored}",
"def com_google_fonts_check_002(fonts):\n\n directories = []\n for target_file in fonts:\n directory = os.path.dirname(target_file)\n if directory not in directories:\n directories.append(directory)\n\n if len(directories) == 1:\n yield PASS, \"All files are in the same directory.\"\n else:\n yield FAIL, (\"Not all fonts passed in the command line\"\n \" are in the same directory. This may lead to\"\n \" bad results as the tool will interpret all\"\n \" font files as belonging to a single\"\n \" font family. The detected directories are:\"\n \" {}\".format(directories))",
"def com_google_fonts_check_002(fonts):\n\n directories = []\n for target_file in fonts:\n directory = os.path.dirname(target_file)\n if directory not in directories:\n directories.append(directory)\n\n if len(directories) == 1:\n yield PASS, \"All files are in the same directory.\"\n else:\n yield FAIL, (\"Not all fonts passed in the command line\"\n \" are in the same directory. This may lead to\"\n \" bad results as the tool will interpret all\"\n \" font files as belonging to a single\"\n \" font family. The detected directories are:\"\n \" {}\".format(directories))",
"def _is_ignored(self, full_path):\n for ignor in self._ignored:\n if fnmatch.fnmatch(full_path, \"*/\" + ignor):\n return True\n return False",
"def _assert_correct_files_are_present(outputdir: Path) -> None:\n for plane in PLANES:\n assert (outputdir / f\"{AMP_BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{TOTAL_PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{ORBIT_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{DISPERSION_NAME}x.tfs\").is_file()\n assert (outputdir / f\"{NORM_DISP_NAME}x.tfs\").is_file() # no norm disp in Y plane\n\n for rdt in [\"1001\", \"1010\"]:\n assert (outputdir / f\"f{rdt}.tfs\").is_file()",
"def has_leading_dir(paths):\n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True",
"def tidyFileNames(folderToCheck):\n\n filters = list(map(lambda x: \"*.\" + x, expectedExts))\n\n for filter in filters:\n\n for f in getFiles(folderToCheck,filter):\n\n clean = f\n for search in searches:\n clean = replace(clean,search)\n\n if renameFile(f,clean):\n results = list(map(os.path.basename,[f,clean]))\n if results[0] != results[1]:\n print(f\"Renamed: {results[0]} -> {results[1]}\")",
"def test_find_not_should_ignore_path_regexp(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n \".airflowignore_glob\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)",
"def _check_path_actions(self, filepath: str) -> Tuple[bool, bool]:\n\n try:\n new_file_match = os.path.basename(filepath)\n existing_matches = self.parser_paths[os.path.dirname(filepath)]\n if \"*\" in existing_matches:\n use_new = False\n else:\n use_new = True\n remove_old = new_file_match == \"*\"\n except KeyError:\n use_new = True\n remove_old = False\n return use_new, remove_old",
"def testIgnoredPrefix(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 /src/build/foo/../file1_1.cc\nFILE 2 /src/build/bar/../file1_2.cc\nFILE 3 /src/build/baz/../file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFILE 2 file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 2\n100c 4 44 1\n\"\"\"\n IGNORED_PREFIXES = ['\\\\src\\\\build\\\\']\n self.assertParsed(INPUT, IGNORED_PREFIXES, EXPECTED_OUTPUT)",
"def check_paths( self ):\n check_a = utility_code.checkDirectoryExistence( self.PATH_TO_SOURCE_FILE_DIRECTORY )\n check_b = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_ORIGINALS_DIRECTORY )\n check_c = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_PARSED_DIRECTORY )\n check_d = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_DATA_DIRECTORY )\n check_e = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_COUNT_DIRECTORY )\n if check_a == 'exists' and check_b == 'exists' and check_c == 'exists' and check_d == 'exists' and check_e == 'exists':\n log.debug( 'path check passed' )\n else:\n message='path check failed; quitting'\n log.error( message )\n sys.exit( message )\n return",
"def test_input_folder(self):\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folder\"\n params[\"input\"] = data_dir + \"build-custom/files/\"\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files = list_files_folder(params[\"input\"], params[\"input_extension\"])\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")\n\n # Wrong extension\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folder_wrong_extension\"\n params[\"input\"] = data_dir + \"build-custom/files/\"\n params[\"input_extension\"] = \"xxx.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")\n\n # Wrong folder\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folder_wrong_folder\"\n params[\"input\"] = data_dir + \"wrong-place/\"\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")",
"def test_get_output_filepaths(self):\r\n\r\n output_dir = \".\"\r\n\r\n fasta_fp = \"seqs.fna\"\r\n\r\n qual_fp = \"seqs.qual\"\r\n\r\n expected_fasta_fp = \"./seqs_filtered.fasta\"\r\n expected_qual_fp = \"./seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)\r\n\r\n # Test for relative paths\r\n output_dir = \"test/\"\r\n\r\n fasta_fp = \"../seqs.fna\"\r\n\r\n qual_fp = \"quality_scores/seqs.qual\"\r\n\r\n expected_fasta_fp = \"test/seqs_filtered.fasta\"\r\n expected_qual_fp = \"test/seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)",
"def is_wanted_file(queries):\n queries = [query for query in queries if Path(query).suffix.lower() in [\".fna\", \".fasta\", \".fastq\"]]\n queries.sort()\n\n return queries",
"def test_find_not_should_ignore_path_glob(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore_glob\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file, \"glob\"):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)",
"def _check_output_prefix(arg: str) -> str:\n\n if \"/\" in arg:\n prefix = arg.rsplit(\"/\", 1)[0]\n _is_valid_file(prefix)\n return arg",
"def _is_inside_ignored_dir(filename):\n ignore_dirs = ['./' + x for x in IGNORE_DIRS]\n return any([filename.startswith(x) for x in ignore_dirs])"
] | [
"0.66083395",
"0.61296135",
"0.583389",
"0.5791732",
"0.5715193",
"0.570771",
"0.5669999",
"0.5637614",
"0.5604362",
"0.558369",
"0.55686224",
"0.55355316",
"0.5531146",
"0.5530527",
"0.55255556",
"0.55255556",
"0.54851806",
"0.54774755",
"0.5474746",
"0.5471383",
"0.5463467",
"0.543792",
"0.5415172",
"0.5409708",
"0.5368984",
"0.53581315",
"0.53523475",
"0.53456694",
"0.53418946",
"0.5333783"
] | 0.650246 | 1 |
Returns the subdirectories that has a test.yaml file. | def dirs_with_test_yaml(dirs):
for root in dirs or ['tests/']:
for dir, subdirs, files in os.walk(root):
if 'test.yaml' in files:
yield dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_yaml_files(path):\n return glob.glob(path + \"/*.yml\")",
"def _include_dir_list_yaml(\n loader: SafeLineLoader, node: yaml.nodes.Node\n) -> List[JSON_TYPE]:\n loc = os.path.join(os.path.dirname(loader.name), node.value)\n return [load_yaml(f) for f in _find_files(loc, \"*.yaml\")]",
"def test_find_config_directories():\n dirs = find.config_directories('test')\n nt.eq_(len(dirs), 3)",
"def list_yaml_files():\n for name in os.listdir(BBCONFIG_DIR):\n if not name.endswith(\".yaml\"):\n continue\n if name == BASE_CONFIG_NAME:\n continue\n yield name",
"def scan_fixtures(path):\n results = list()\n for root, dirs, files in os.walk(path):\n relative_path = root.replace(path + \"/\", \"\")\n if relative_path.startswith(\"static\") or relative_path.startswith(\"theme\"):\n continue\n\n for f in files:\n if not f.endswith(\".json\"):\n continue\n\n app_name = os.path.basename(os.path.dirname(relative_path))\n\n results.append((app_name, f, relative_path))\n\n return results",
"def check_for_yaml_folder(check_path):\n check_abspath = os.path.abspath(check_path)\n yaml_folders = [\"_YAML\", \"YAML\"]\n for yf in yaml_folders:\n if yf in check_abspath:\n print(\"{} folder exists : {}\".format(yf, check_abspath))\n top_path, base_path = check_abspath.split(\"{}/\".format(yf))\n out_path = os.path.dirname(os.path.join(top_path, base_path))\n if os.path.exists(out_path):\n print(\"Path exists : {}\".format(out_path))\n return out_path\n else:\n print(\"Path does not exist : {}\".format(out_path))\n print(\"Please create this folder and try again\")\n exit(1)",
"def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)",
"def get_yml_files():\n repo_fs()\n return YML_FILES",
"def test_list_dir_returns_dirs_only(self):\n with self.settings(MIDDLEWARE_CLASSES=self.fix_middleware(), KML_FILE_DIR=self.kml_file_dir):\n user = StaffUserFactory()\n ldv = self.initiate_view(user)\n base_path = settings.KML_FILE_DIR\n print base_path\n ldv.cache_dir_content(base_path)\n dirs = ldv.list_dirs()\n print dirs\n self.assertGreaterEqual(len(dirs), 1)\n for dir_name in dirs:\n dir_path = os.path.join(base_path, dir_name)\n self.assertTrue(os.path.isdir(dir_path))",
"def get_test_files(dirname):\n if not os.path.isdir(dirname):\n return []\n path = dirname + \"/{}\"\n return list(map(path.format, sorted(os.listdir(dirname))))",
"def test_get_result_directories(self):\n pass",
"def dir_tests():\n return abspath('tests')",
"def fixture_sets(*args):\n return [os.path.join(*args, dir)\n for dir in os.listdir(os.path.join(FIXTURE_DATA, *args))\n if os.path.isdir(os.path.join(FIXTURE_DATA, *args, dir))\n ]",
"def _get_dirs_case_yaml(case_dir):\n temp_list = []\n if case_dir:\n for root, dirs, files in os.walk(case_dir):\n for f in files:\n if 'yaml' in f:\n d = os.path.join(os.getcwd(), case_dir)\n temp_list.append(os.path.join(d, f))\n # 缓存目录\n cache_dir = os.path.join(gl.loadcasePath, \".am_cache\")\n # Write file absolute path to file.\n write_file(\n os.path.join(cache_dir, 'yaml.cache'),\n 'w',\n ';'.join(temp_list)\n )\n return True\n return False",
"def top_level_directories(self):\n return [d for d in self.directories if len([x for x in self.directories if x in d]) == 1]",
"def configs() -> Path:\n return TEST_ROOT.parent / \"fixtures\" / \"configs\"",
"def _get_test_files(self):\n for dirpath, dirnames, filenames in os.walk(self.TestsDirectory):\n for f in filenames:\n if f.endswith('.py'):\n yield (path.join(dirpath, f), 'Python')",
"def test_list_root(self):\n expected = [\"search1\", \"search2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}\".format(self.search.instance))]\n self.assertEqual(result, expected)",
"def test_get_files_dir():\n dir = get_files_dir()\n assert dir\n assert dir.endswith(\".fabric8-analytics-tagger\")",
"def find_subdirectories(package):\n try:\n subdirectories = next(os.walk(package_to_path(package)))[1]\n except StopIteration:\n subdirectories = []\n return subdirectories",
"def __get_list_scenarios_in_folder(self):\n # If both directory and recur_directory are exist\n # then show \"Invalid command\" and exit.\n if self.__args.directory is not \"\" \\\n and self.__args.recur_directory is not \"\":\n utils.print_error(\"\\n{}\\n\".format(constant.ERR_COMMAND_ERROR))\n exit(1)\n recursive = False\n\n start_directory = \"\"\n if self.__args.directory is not \"\":\n start_directory = self.__args.directory\n elif self.__args.recur_directory is not \"\":\n start_directory = self.__args.recur_directory\n recursive = True\n\n if not start_directory:\n start_directory = TestRunner.__test_script_dir\n\n if not os.path.exists(start_directory):\n utils.print_error(\n \"\\n{}\\n\".format(constant.ERR_PATH_DOES_NOT_EXIST.\n format(start_directory)))\n exit(1)\n\n list_files = []\n if start_directory.endswith(\".py\"):\n list_files = [start_directory]\n else:\n try:\n if recursive:\n for directory, _, _ in os.walk(start_directory):\n list_files.extend(glob.glob(os.path.join(directory,\n \"*.py\")))\n else:\n list_files.extend(glob.glob(os.path.join(start_directory,\n \"*.py\")))\n except OSError:\n pass\n\n list_test_scenarios = []\n for file in list_files:\n sys.path.append(os.path.dirname(os.path.abspath(file)))\n test_module = \\\n importlib.import_module(os.path.basename(file).replace(\".py\",\n \"\"))\n for name, cls in inspect.getmembers(test_module, inspect.isclass):\n if cls is not TestScenarioBase \\\n and issubclass(cls, TestScenarioBase):\n list_test_scenarios.append(cls)\n\n return list_test_scenarios",
"def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret",
"def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is '<pipeline>'.\")\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n\n files_fail = [\n [\".gitattributes\"],\n [\".gitignore\"],\n [\".nf-core.yml\"],\n [\".editorconfig\"],\n [\".prettierignore\"],\n [\".prettierrc.yml\"],\n [\"CHANGELOG.md\"],\n [\"CITATIONS.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [\"nextflow_schema.json\"],\n [\"nextflow.config\"],\n [\"README.md\"],\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"ci.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"conf\", \"modules.config\")],\n [os.path.join(\"conf\", \"test.config\")],\n [os.path.join(\"conf\", \"test_full.config\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"output.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"usage.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n [os.path.join(\"lib\", \"Utils.groovy\")],\n [os.path.join(\"lib\", \"WorkflowMain.groovy\")],\n ]\n\n files_warn = [\n [\"main.nf\"],\n [os.path.join(\"assets\", \"multiqc_config.yml\")],\n [os.path.join(\"conf\", \"base.config\")],\n [os.path.join(\"conf\", \"igenomes.config\")],\n [os.path.join(\".github\", \"workflows\", \"awstest.yml\")],\n [os.path.join(\".github\", \"workflows\", \"awsfulltest.yml\")],\n [os.path.join(\"lib\", f\"Workflow{short_name[0].upper()}{short_name[1:]}.groovy\")],\n [\"modules.json\"],\n [\"pyproject.toml\"],\n ]\n\n # List of strings. 
Fails / warns if any of the strings exist.\n files_fail_ifexists = [\n \"Singularity\",\n \"parameters.settings.json\",\n \".nf-core.yaml\", # yml not yaml\n os.path.join(\"bin\", \"markdown_to_html.r\"),\n os.path.join(\"conf\", \"aws.config\"),\n os.path.join(\".github\", \"workflows\", \"push_dockerhub.yml\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.md\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.md\"),\n os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo.png\"),\n \".markdownlint.yml\",\n \".yamllint.yml\",\n os.path.join(\"lib\", \"Checks.groovy\"),\n os.path.join(\"lib\", \"Completion.groovy\"),\n os.path.join(\"lib\", \"Workflow.groovy\"),\n ]\n files_warn_ifexists = [\".travis.yml\"]\n\n # Remove files that should be ignored according to the linting config\n ignore_files = self.lint_config.get(\"files_exist\", [])\n\n def pf(file_path):\n return os.path.join(self.wf_path, file_path)\n\n # First - critical files. Check that this is actually a Nextflow pipeline\n if not os.path.isfile(pf(\"nextflow.config\")) and not os.path.isfile(pf(\"main.nf\")):\n failed.append(\"File not found: nextflow.config or main.nf\")\n raise AssertionError(\"Neither nextflow.config or main.nf found! Is this a Nextflow pipeline?\")\n\n # Files that cause an error if they don't exist\n for files in files_fail:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n failed.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause a warning if they don't exist\n for files in files_warn:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n warned.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause an error if they exist\n for file in files_fail_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n failed.append(f\"File must be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that cause a warning if they exist\n for file in files_warn_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n warned.append(f\"File should be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that are ignoed\n for file in ignore_files:\n ignored.append(f\"File is ignored: {self._wrap_quotes(file)}\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed, \"ignored\": ignored}",
"def get_list_of_subdir_in_dir(directory):\n list_of_all_dirs = []\n for root, dirs, files in os.walk(directory):\n if not re.search('/$', root):\n root += os.sep # Add '/' to the end of root\n if '.ipynb_checkpoints' not in root:\n list_of_all_dirs.append(root)\n return list_of_all_dirs",
"def get_test_files():\n test_files = os.listdir('./test')\n return [\n create_test_file_name(test_file)\n for test_file in test_files\n if is_valid_test_file(test_files)\n ]",
"def get_provider_yaml_paths():\n return sorted(glob(f\"{ROOT_DIR}/airflow/providers/**/provider.yaml\", recursive=True))",
"def test_team_template_folders_get(self):\n pass",
"def find_app_yamls(app_dir):\n # Look in the root first. It's how python apps and one-module Go apps look.\n yamls = []\n app_yaml = os.path.join(app_dir, 'app.yaml')\n if os.path.isfile(app_yaml):\n yamls.append(app_yaml)\n yamls.extend(glob.glob(os.path.join(app_dir, 'module-*.yaml')))\n if yamls:\n return yamls\n\n # Look in per-module subdirectories. Only Go apps are structured that way.\n # See https://cloud.google.com/appengine/docs/go/#Go_Organizing_Go_apps.\n for subdir in os.listdir(app_dir):\n subdir = os.path.join(app_dir, subdir)\n if not os.path.isdir(subdir):\n continue\n app_yaml = os.path.join(subdir, 'app.yaml')\n if os.path.isfile(app_yaml):\n yamls.append(app_yaml)\n yamls.extend(glob.glob(os.path.join(subdir, 'module-*.yaml')))\n if not yamls:\n raise ValueError(\n 'Not a GAE application directory, no module *.yamls found: %s' %\n app_dir)\n\n # There should be one and only one app.yaml.\n app_yamls = [p for p in yamls if os.path.basename(p) == 'app.yaml']\n if not app_yamls:\n raise ValueError(\n 'Not a GAE application directory, no app.yaml found: %s' % app_dir)\n if len(app_yamls) > 1:\n raise ValueError(\n 'Not a GAE application directory, multiple app.yaml found (%s): %s' %\n (app_yamls, app_dir))\n return yamls",
"def get_scenarios(fixtures_path):\n scenarios = []\n files = os.listdir(fixtures_path)\n yaml_files = [f for f in files if re.match(r'.*\\.yaml$', f)]\n\n for yaml_filename in yaml_files:\n xml_candidate = re.sub(r'\\.yaml$', '.xml', yaml_filename)\n # Make sure the yaml file has a xml counterpart\n if xml_candidate not in files:\n raise Exception(\n \"No XML file named '%s' to match \" +\n \"YAML file '%s'\" % (xml_candidate, yaml_filename))\n\n scenarios.append((yaml_filename, {\n 'yaml_filename': yaml_filename, 'xml_filename': xml_candidate\n }))\n\n return scenarios",
"def get_file_list(start):\n valid_files = []\n for root, dirs, files in os.walk(start):\n for name in files:\n if name[-5:] == \".conf\":\n valid_files.append(os.path.join(root,name))\n return valid_files"
] | [
"0.66035277",
"0.65594333",
"0.64663523",
"0.643186",
"0.63550246",
"0.63149697",
"0.62718517",
"0.62025845",
"0.616682",
"0.6156252",
"0.61524206",
"0.61174434",
"0.6068756",
"0.6063016",
"0.6062418",
"0.59867674",
"0.5979589",
"0.5973867",
"0.5966642",
"0.59132445",
"0.59071505",
"0.58812886",
"0.58614635",
"0.5836112",
"0.58232313",
"0.5822626",
"0.5813482",
"0.58093345",
"0.5806739",
"0.58022195"
] | 0.73936975 | 0 |
Runs all tests in the directories in dirs. Returns a stream of booleans answering "Success?". | def run_tests(dirs, verbose=False, cleanup=False):
for dir in dirs:
if verbose:
print(dir)
errors, checked = run_test(dir, verbose=verbose, cleanup=cleanup)
if errors:
print('fail: ' + dir)
print(indent(list(map(indent, errors))))
yield False
else:
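            # 'checked' lists the files that were verified; report its size with a message that matches the mode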
n = str(len(checked))
if cleanup:
print('pass: ' + dir + ' (cleanups executed: ' + n + ')')
else:
print('pass: ' + dir + ' (files without diff: ' + n + ')')
if verbose:
print(indent(checked))
yield True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun",
"def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True",
"def run_tests(tests):\n return [test(t) for t in tests]",
"def test(command, options=\"\"):\n\n print(\n \"\"\"\nRunning pytest the test framework\n=================================\n\"\"\"\n )\n for dir_ in TEST_DIRECTORIES:\n test_dir(command, options=options, dir_=dir_)\n # command.run(f\"python -m pytest {options} {' '.join(dir_ for dir_ in TEST_DIRECTORIES)}\", echo=True, pty=POSIX)\n\n print(\n \"\"\"\nAll Testing Directories Passed Successfully\n===========================================\n\"\"\"\n )",
"def dirs_with_test_yaml(dirs):\n for root in dirs or ['tests/']:\n for dir, subdirs, files in os.walk(root):\n if 'test.yaml' in files:\n yield dir",
"def tests(c):\n results = [test(c, i) for i, test_path in enumerate(TEST_PATHS)]\n print('\\n\\n\\n############## SUMMARY ##############')\n for i, test_path in enumerate(TEST_PATHS):\n print(i, test_path, 'PASSED' if result[i] == 0 else 'FAILED')",
"def __execute_tests(self, lst_tests):\n tests_pass = tests_fail = 0\n queue_of_result = multiprocessing.Queue()\n for test in lst_tests:\n process = multiprocessing.Process(\n target=TestRunner.__helper_execute_test,\n kwargs={\"test_cls\": test,\n \"time_out\": self.__args.timeout,\n \"channel\": queue_of_result})\n process.start()\n process.join()\n temp_result = {}\n if not queue_of_result.empty():\n temp_result = queue_of_result.get_nowait()\n\n if \"status\" in temp_result:\n if temp_result[\"status\"] == result.Status.PASSED:\n tests_pass += 1\n else:\n tests_fail += 1\n\n if \"json_path\" in temp_result:\n self.__lst_json_files.append(temp_result[\"json_path\"])\n\n if \"log_path\" in temp_result:\n self.__lst_log_files.append(temp_result[\"log_path\"])\n\n return tests_pass, tests_fail",
"def test_examples():\n tests = [d for d in listdir(ex) if path.isdir(path.join(ex, d))]\n for d in tests:\n yield check_examples, d",
"def runTests(tests_dir, output_dir):\n\n runtime = 0\n os.makedirs(tests_dir, exist_ok=True)\n for test_case in os.listdir(tests_dir):\n print()\n print(\"Running test: \" + str(test_case))\n\n with open(tests_dir + test_case, \"r\") as f:\n tar, n = list(map(int, f.readline().split(\" \")))\n arr = list(map(int, f.readline().split(\" \")))\n\n start = timeit.default_timer()\n\n try:\n writeOutput(maxCombinationSum(tar, arr), test_case, output_dir)\n except KeyboardInterrupt:\n print(\"\\n\\tTest cancelled - KeyboardInterrupt\")\n except Exception as e:\n print(\"\\tError: \" + str(e))\n\n stop = timeit.default_timer()\n print(\"\\tTime for test: \" + str(stop - start) + \" seconds.\")\n\n runtime += (stop - start)\n\n if runtime == 0:\n print(\"No test case files found in tests directory.\\nPlease run solution from inside solution directory.\")\n else:\n print(\"\\nCompleted all tests in : \" + str(runtime) + \" seconds\")",
"def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)",
"def runAllTests(path, runAll=False, skipEka1Crash=False):\n# if not runAll:\n# try:\n# passed = tools.dataFromFile('passed_tests')\n# except IOError:\n# passed = {}\n# print len(passed), 'tests passed'\n passed = {}\n skipped = {}\n failed = {}\n output_file.write( \"Running tests in \" + path + \"\\n\" )\n for f in os.listdir(path):\n if f[-4:] == 'phpt':\n if runAll or str(f) not in passed:\n try:\n output_file.write( f + \": \" )\n runTest( os.path.join( path, f ),\n skipEka1Crash=skipEka1Crash )\n except DiffError, e:\n output_file.write( \"** FAIL **\\n\" )\n output_file.write( e.diff() + \"\\n\" )\n failed[str(f)] = 1\n except SkipError, e:\n output_file.write( \"** SKIP ** (%s)\\n\" % str(e) )\n skipped[str(f)] = 1\n except Exception, e:\n output_file.write( \"Unknown exception (%s) from runTest\\n\" % str(e) )\n output_file.flush()\n else:\n output_file.write( \"* OK *\\n\" )\n passed[str(f)] = 1\n output_file.flush()\n output_file.write( \"==================================================\\n\" )\n output_file.write( \"Summary for tests in \" + path + \"\\n\" )\n output_file.write( \"Passed (\" + str(len(passed)) + \"):\\n\" )\n for filename in passed.keys():\n output_file.write( filename + \"\\n\" )\n output_file.write( \"--------------------------------------------------\\n\" )\n output_file.write( \"Failed (\" + str(len(failed)) + \"):\\n\" )\n for filename in failed.keys():\n output_file.write( filename + \"\\n\" )\n output_file.write( \"--------------------------------------------------\\n\" )\n output_file.write( \"Skipped (\" + str(len(skipped)) + \"):\\n\" )\n for filename in skipped.keys():\n output_file.write( filename + \"\\n\" )\n output_file.write( \"==================================================\\n\" )\n output_file.flush()",
"def test_all(self, func):\n passes = 0\n fails = []\n start = time.time()\n futures = {}\n # open an executor\n with getattr(concurrent.futures, self.executor)(max_workers=self.workers) as exec:\n # walk through datasets\n for pdir, sdir, files in os.walk(self.DATA_DIR):\n for file in files:\n # if the file needs processing, submit it into the queue\n filepath = osp.join(pdir, file)\n if self.file_should_be_processed(filepath):\n future = exec.submit(func, filepath)\n futures[future] = filepath\n\n # return results\n for test_num, future in enumerate(concurrent.futures.as_completed(futures)):\n stuff_to_print = [test_num, future.result()]\n if future.result() == 'Success':\n passes += 1\n if self.print_success_path:\n stuff_to_print.append(futures[future])\n else:\n fails += [futures[future]]\n print(*stuff_to_print)\n\n end = time.time() - start\n print('Processing of {} files took {:3.1f}s ({:3.2f}s/item). {} passed; {} failed.'.format(test_num, end, end/test_num, passes, len(fails)))\n if len(fails) > 0:\n pprint.pprint(\"Failures: {}\".format(fails))\n if self.write_failures_to_file:\n with open('failures_{}.txt'.format(osp.basename(self.DATA_DIR)), mode='w') as f:\n for file in fails:\n f.write(file + '\\n')\n print(\"Failures written to file\")",
"def run(filenames, root_dir):\n for filename in filenames:\n with open(filename, encoding='utf-8') as file:\n filename = filename.replace('\\\\', '/')[len(root_dir)::]\n yield from check(filename, file.readlines())",
"def check_programs_in_directory(directory):\n files = [f for f in os.listdir(directory) if f.endswith(DECAF_SUFFIX)]\n files.sort()\n files = [os.path.join(directory, f) for f in files]\n\n all_passed = True\n for f in files:\n if not check_return_value(f):\n all_passed = False\n\n return all_passed",
"def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))",
"def run_tests(remit, sourcelist):\n for source in sourcelist:\n # - move into source's directory\n os.chdir(source)\n # - build worklist of commands\n commands = list()\n commands += test_matrix(remit, source)\n commands += extra_tests(remit, source)\n commands = remove_blacklist(remit, source, commands)\n # - run the commands\n for i, command in enumerate(commands):\n print('[test %s: %s of %d] %s'\n % (source,\n str(i+1).rjust(len(str(len(commands)))),\n len(commands),\n ' '.join(command)))\n subprocess.call(command)\n # - move out of source's directory\n os.chdir('..')",
"def analyze_data_files(dest_dir):\n overall_passed = True\n test_data_dir = os.path.join(dest_dir, 'testdata')\n for dir_name, sub_dir_list, file_list in os.walk(test_data_dir):\n if dir_name == os.path.join(dest_dir, 'testdata'):\n # in the root folder\n for fname in file_list:\n if fname == 'batterystats.dumpsys.log':\n # pylint: disable=fixme\n # TODO(developer): process battery data.\n continue\n elif fname == 'locationRequests.dumpsys.log':\n # pylint: disable=fixme\n # TODO(developer): process location requests information.\n continue\n else:\n # in a test folder\n print '\\nAnalysing test: ' + dest_dir\n passed = True\n\n for fname in file_list:\n full_filename = os.path.join(dir_name, fname)\n if fname == 'gfxinfo.dumpsys.log':\n # process gfxinfo for janky frames\n dump_results = parse_dump_file(full_filename)\n jank_perc = dump_results['jank_percent']\n if jank_perc:\n if jank_perc > JANK_THRESHOLD:\n print ('FAIL: High level of janky frames ' +\n 'detected (' + str(jank_perc) + '%)' +\n '. See trace.html for details.')\n passed = False\n else:\n print 'ERROR: No dump results could be found.'\n passed = False\n elif fname == 'test.failure.log':\n # process test failure logs\n print ('FAIL: Test failed. See ' + full_filename +\n ' for details.')\n passed = False\n if passed:\n print 'PASS. No issues detected.'\n else:\n overall_passed = False\n test_complete_file = os.path.join(dest_dir, 'testdata/testdata',\n 'testRunComplete.log')\n if not os.path.isfile(test_complete_file):\n overall_passed = False\n print ('\\nFAIL: Could not find file indicating the test run ' +\n 'completed. Check that the TestListener is writing files to external storage')\n if overall_passed:\n print '\\nOVERALL: PASSED.'\n return 0\n else:\n print '\\nOVERALL: FAILED. See above for more information.'\n return 1",
"def testBatchFile(self):\n cmds = \"\"\"pwd\nls\nexit\n\"\"\"\n\n def _cbCheckResult(res):\n res = res.split(b'\\n')\n log.msg('RES %s' % repr(res))\n self.assertIn(self.testDir.asBytesMode().path, res[1])\n self.assertEqual(res[3:-2], [b'testDirectory', b'testRemoveFile',\n b'testRenameFile', b'testfile1'])\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d",
"def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))",
"def run_test_suites(self, suites):\n for suite_class in suites:\n test_suite = suite_class(self)\n results = test_suite.run()\n self.test_results += results",
"def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)",
"def _RunTests(tests: List[_Test], parallelism: int) -> bool:\n running_tests = set()\n finished_tests = set()\n tests_to_run = sorted(tests, reverse=True)\n while tests_to_run or running_tests:\n time.sleep(0.2) # 200ms\n updated_finished = set(t for t in running_tests if t.Finished())\n running_tests = running_tests - updated_finished\n while tests_to_run and len(running_tests) < parallelism:\n t = tests_to_run.pop()\n t.Run()\n running_tests.add(t)\n\n newly_finished = updated_finished - finished_tests\n finished_tests.update(updated_finished)\n for test in newly_finished:\n logging.info(\"%s\\t%s\\t%.1fs\", test,\n \"PASSED\" if test.Succeeded() else \"FAILED\",\n test.finish_time - test.begin_time)\n if newly_finished:\n logging.flush()\n\n failed_tests = sorted([t for t in tests if not t.Succeeded()])\n logging.info(\"Ran %d tests. %d failed.\", len(tests), len(failed_tests))\n logging.flush()\n\n for ft in failed_tests:\n ft.PrintLogs()\n\n return not failed_tests",
"def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)",
"def _DiscoverTests(root_dirs: List[Text],\n test_to_shards: Dict[Text, int]) -> List[_Test]:\n result = []\n for d in root_dirs:\n for root, _, files in os.walk(d):\n for f in files:\n if f.endswith(_TEST_FILENAME_SUFFIX):\n shards = test_to_shards.get(f, 1)\n for shard in range(0, shards):\n result.append(_Test(os.path.join(root, f), shard, shards))\n logging.info(\"Discovered %d tests\", len(result))\n return result",
"def dir_tests():\n return abspath('tests')",
"def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)",
"def _all_tests(by_dir):\n return set(x[0] for item in by_dir.values()\n for x in item.tests)",
"def scan_for_tests(self):\n log_info(\"scanning for tests in '%s'\" % self.directory)\n for filename in os.listdir(self.directory):\n base, ext = os.path.splitext(filename)\n fullname = os.path.join(self.directory, filename)\n if ext == SUITE:\n if base.endswith(CLEANUP):\n base = base.rsplit(CLEANUP, 1)[0]\n self.cleanup[base] = fullname\n else:\n self.suites[base] = fullname\n if ext == CONFIGURATION:\n self.conf[base] = fullname\n if ext == TEST:\n self.tests[base] = fullname",
"def run(self):\n\n runSuccess = True\n\n for value in self._shouldWork:\n value = value.normalize()\n print('Testing %s for %s' % (value, self._paramPath))\n\n for testName, testFunc in self._chainingTests:\n value, success = testFunc(value)\n if not success:\n runSuccess = False\n print(\"%s ERROR for %s\" % (testName, self._paramPath))\n break\n\n for value in self._shouldBreak:\n value = value.normalize()\n print('Testing invalid value %s for %s' % (value, self._paramPath))\n value, success = self.checkBounds(value)\n if success:\n runSuccess = False\n print(\"ERROR: This test should have failed but it has not\")\n\n return runSuccess",
"def test_parse_directory(self):\n parsed = [i for i in parse_directory(\"./test_data/repo\")]\n self.assertEqual(\n len(parsed), 2,\n \"There should be two texts which are found\"\n )"
] | [
"0.75694424",
"0.70750535",
"0.64101243",
"0.63706607",
"0.62386507",
"0.6132275",
"0.6126261",
"0.6087806",
"0.60795885",
"0.6002059",
"0.5946204",
"0.59053445",
"0.5825426",
"0.5796565",
"0.57917863",
"0.5771267",
"0.5758229",
"0.5734138",
"0.57273924",
"0.57239646",
"0.57059896",
"0.56890106",
"0.56881374",
"0.5665359",
"0.56289995",
"0.56289697",
"0.56229234",
"0.5618048",
"0.56044257",
"0.5603938"
] | 0.81278753 | 0 |
Returns the stream of .py files reachable from the current directory. | def pyfiles():
for dir, _, files in os.walk('.'):
for f in files:
if f.endswith('.py'):
name = path.join(dir, f)
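                # os.walk('.') yields paths prefixed with './'; strip it so names are plain relative paths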
if name.startswith('./'):
yield name[2:]
else:
yield name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)",
"def source(self):\n return some.dap.source(py.path.local(self.co_filename))",
"def get_source_files(self):\n return zip(*self.distribution.scripts)[0]",
"def get_files(self):\n return self._get_brains(\"File\")",
"def get_filestream(data_dir: str, ext: str) -> Iterable[str]:\n return listdir(data_dir) \\\n | where(lambda p: p.endswith(ext)) \\\n | select( lambda p: join(data_dir,p))",
"def files(self) -> Generator[Path, None, None]:\n return Path(self.package).resolve(strict=True).glob(self.glob)",
"def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]",
"def Sources():\n return _sources",
"def GetSrc():\n return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,\n os.pardir))",
"def find_modules(x):\n return Path(x).rglob('*.py')",
"def get_source_files(dir_name):\n return get_files(dir_name, \".h\") + get_files(dir_name, \".cpp\")",
"def get_all_files(cwd):\n return os.listdir(cwd)",
"def clist() -> None:\n files = os.listdir(os.path.join(os.getcwd(), \"apis\"))\n files.remove(\"__init__.py\")\n files = [f.replace(\".py\", \"\") for f in files]\n print(files)",
"def getFiles(self):\n\t\treturn os.listdir(self.getPath())",
"def parse_files():\n pfuncs = [ # parse py files : add #\n parse_test_files,\n parse_model_files,\n parse_url_files,\n parse_route_files,\n parse_settings_files,\n parse_setup_files,\n ]\n\n while PY_FILES:\n for _ in range(len(pfuncs)):\n a_func = pfuncs.pop()\n a_func()\n break",
"def get_source():\n if len(sys.argv) > 1:\n return open(sys.argv[1])\n else:\n return sys.stdin",
"def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n scripts = [ fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst') ]\n return scripts",
"def discover_examples():\n root = './examples'\n for filename in os.listdir(root):\n if os.path.splitext(filename)[1] == '.py':\n yield os.path.join(root, filename)",
"def _read_source(self):\n \n if self.fileType == FTPythonCompiled or \\\n self.fileType == FTCompiledModule:\n return None\n \n filename = Filename(self.filename)\n filename.setExtension('py')\n try:\n file = open(filename, 'rU')\n except IOError:\n return None\n return file.read()",
"def GetFilesForTool(self):\n raise NotImplementedError()",
"def processSourceCode(self):\n specSubDirName=\"\"\n codestructure = CodeStructure() \n for dir in self._dirs:\n if q.system.fs.exists(q.system.fs.joinPaths(dir,specSubDirName)): \n files=q.system.fs.listPyScriptsInDir(q.system.fs.joinPaths(dir,specSubDirName))\n for fileName in files:\n codestructure.addCodeFile(self.processSourceCodeFile(q.system.fs.joinPaths(dir,specSubDirName),\"%s.py\" % fileName))\n return codestructure",
"def __get_sources__(self):\n\n # Let's go to the Apt temporal dir.\n os.chdir(self.conf['AptTmp'])\n\n # Define a global Source file, all the *_Sources files are going to be in this file.\n global_sources_file = open(self.conf['CodeName'] + '_Sources', 'w')\n\n\t\t# The main/debian-installer is in main, so remove it.\n\t\tcomponents = self.conf['Components']\n\t\tif 'main/debian-installer' in components:\n\t\t\tcomponents.remove('main/debian-installer')\n\n # For every component defined...\n for component in components:\n # Download the Packages.gz file\n file = self.__get_packages_file__(self.conf[\"Mirror\"], \\\n \"%s_%s_Sources\" % (self.conf['CodeName'], component), \\\n component, \"source\" + \"/Sources.gz\")\n\n # \"cat\" it into the global_packages_file\n for line in file:\n print >>global_sources_file, line,\n file.close()\n\n\t\tglobal_sources_file.close()\n\t\treturn open(self.conf['CodeName'] + '_Sources', 'r')",
"def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n scripts = [fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst')]\n return scripts",
"def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]",
"def filter_python_files(files):\n return [f for f in files if f.endswith('.py')]",
"def __file__(self):\n\t\treturn __file__",
"def __file__(self):\n\t\treturn __file__",
"def __file__(self):\n\t\treturn __file__",
"def __file__(self):\n return __file__",
"def coffeescript_files():\r\n dirs = \" \".join(THEME_COFFEE_PATHS + [Env.REPO_ROOT / coffee_dir for coffee_dir in COFFEE_DIRS])\r\n return cmd('find', dirs, '-type f', '-name \\\"*.coffee\\\"')"
] | [
"0.64829564",
"0.57618725",
"0.57601637",
"0.57460487",
"0.5703258",
"0.56198907",
"0.55846214",
"0.548047",
"0.54259604",
"0.5422842",
"0.541674",
"0.54144967",
"0.5414251",
"0.53836733",
"0.5323467",
"0.5261406",
"0.52353126",
"0.523354",
"0.52130747",
"0.5198661",
"0.5195463",
"0.51937354",
"0.5185629",
"0.5184331",
"0.51746935",
"0.5171418",
"0.5171418",
"0.5171418",
"0.5169817",
"0.51594645"
] | 0.601096 | 1 |
Gets the installed GROMACS version and returns it as a 3-digit int for versions older than 5.1.5 and a 5-digit int for 20XX versions, padding the gaps with '0' digits. | def get_gromacs_version(gmx: str = "gmx") -> int:
unique_dir = fu.create_unique_dir()
out_log, err_log = fu.get_logs(path=unique_dir, can_write_console=False)
cmd = [gmx, "-version"]
try:
cmd_wrapper.CmdWrapper(cmd, out_log, err_log).launch()
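        # The wrapper writes the 'gmx -version' output to log.out inside the temporary directory; it is parsed below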
pattern = re.compile(r"GROMACS version:\s+(.+)")
with open(Path(unique_dir).joinpath('log.out')) as log_file:
for line in log_file:
version_str = pattern.match(line.strip())
if version_str:
break
version = version_str.group(1).replace(".", "").replace("VERSION", "").strip()
version = "".join([c for c in version if c.isdigit()])
except:
return 0
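    # Zero-pad so year-style releases (20XX) compare as five-digit ints and older X.Y.Z releases as three-digit ints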
if version.startswith("2"):
while len(version) < 5:
version += '0'
else:
while len(version) < 3:
version += '0'
fu.rm(unique_dir)
return int(version) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def get_version():\n return '%d.%d.%d' % version_info",
"def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])",
"def version_number() -> int:\n return 0",
"def version():\n return _nfc.version()",
"def get_version():\n return 1",
"def get_chromeos_version():\r\n try:\r\n get_board_property('CHROMEOS_RELEASE_VERSION')\r\n except:\r\n logging.info(\"CHROMEOS_RELEASE_VERSION not found\")\r\n return -1",
"def version_min():\n return VERSION_MIN",
"def version_max():\n return VERSION_MAX",
"def installedVersion():\n\n cmd = f'{dcm2niix()} -h'\n versionPattern = re.compile(r'v'\n r'(?P<major>[0-9]+)\\.'\n r'(?P<minor>[0-9]+)\\.'\n r'(?P<year>[0-9]{4})'\n r'(?P<month>[0-9]{2})'\n r'(?P<day>[0-9]{2})')\n\n try:\n output = sp.check_output(cmd.split()).decode()\n output = [l for l in output.split('\\n') if 'version' in l.lower()]\n output = '\\n'.join(output).split()\n\n for word in output:\n\n match = re.match(versionPattern, word)\n\n if match is not None:\n return (int(match.group('major')),\n int(match.group('minor')),\n int(match.group('year')),\n int(match.group('month')),\n int(match.group('day')))\n\n except Exception as e:\n log.debug(f'Error parsing dcm2niix version string: {e}')\n return None",
"def get_version():\n version = pkg_resources.require(\"sacredboard\")[0].version\n return version",
"def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value",
"def major_version(self):\n return self.unpack_dword(0x14)",
"def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))",
"def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)",
"def get_version():\n return magpy.get_version()",
"def __get_EC_numner(self):\n\t\tif self.version == 1:\n\t\t\treturn 7\n\t\telif self.version == 2:\n\t\t\treturn 10\n\t\telif self.version == 3:\n\t\t\treturn 15\n\t\telif self.version == 4:\n\t\t\treturn 20\n\t\telif self.version == 5:\n\t\t\treturn 26\n\t\telif self.version == 6:\n\t\t\treturn 18\n\t\telse:\n\t\t\treturn \"Version number > 6 not supported\"",
"def xnvme_ver(cml_path=None):\n\n if cml_path is None:\n cml_path = os.sep.join([\"..\", \"..\", \"CMakeLists.txt\"])\n\n with open(cml_path) as cmake:\n for line in cmake.readlines():\n if \"\\tVERSION \" not in line:\n continue\n\n _, vtxt = line.split(\"VERSION \", 1)\n\n return vtxt.strip()\n\n return \"\"",
"def get_version(self):\n return 0",
"def get_version():\n click.echo(get_current_version_number())",
"def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')",
"def version():\n return uname().version",
"def version():\n return uname().version",
"def gl_version_code(self) -> int:\n return self.gl_version[0] * 100 + self.gl_version[1] * 10",
"def get_min_cli_version(k8s_cli):\n return MIN_OC_VERSION_SUPPORT_RETRIES if (k8s_cli and k8s_cli.endswith(OC_K8S_CLI))\\\n else MIN_KUBECTL_VERSION_SUPPORT_RETRIES",
"def gpg_version():\n cmd = flatten([gnupg_bin(), \"--version\"])\n output = stderr_output(cmd)\n output = output \\\n .split('\\n')[0] \\\n .split(\" \")[2] \\\n .split('.')\n return tuple([int(x) for x in output])",
"def version(self, executable):\n smack_output = self._version_from_tool(executable)\n if smack_output:\n return smack_output.split(\" \")[2]\n else:\n # old versions of SMACK used to print to stderr\n return self._version_from_tool(executable, use_stderr=True).split(\" \")[2]",
"def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if os.path.exists(versionfile):\n try:\n vers_plist = readPlist(versionfile)\n except (IOError, OSError, ExpatError):\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers"
] | [
"0.65095896",
"0.65088385",
"0.6415477",
"0.64125276",
"0.6376649",
"0.63351995",
"0.6302721",
"0.62614006",
"0.62154704",
"0.62043744",
"0.61745983",
"0.60750234",
"0.6064383",
"0.60511655",
"0.604701",
"0.60310704",
"0.60179377",
"0.5979122",
"0.5956135",
"0.59463084",
"0.5945566",
"0.5945015",
"0.5877076",
"0.5873152",
"0.5873152",
"0.5834539",
"0.58216906",
"0.5821684",
"0.5820996",
"0.5814676"
] | 0.70138144 | 0 |
Creates an MDP file using the following precedence hierarchy: mdp_properties_dict > input_mdp_path > preset_dict | def create_mdp(output_mdp_path: str, input_mdp_path: str = None,
preset_dict: Mapping[str, str] = None,
mdp_properties_dict: Mapping[str, str] = None) -> str:
mdp_dict = {}
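    # Merge in order of increasing precedence: preset_dict, then input_mdp_path, then mdp_properties_dict (later keys overwrite earlier ones)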
if preset_dict:
for k, v in preset_dict.items():
mdp_dict[k] = v
if input_mdp_path:
input_mdp_dict = read_mdp(input_mdp_path)
for k, v in input_mdp_dict.items():
mdp_dict[k] = v
if mdp_properties_dict:
for k, v in mdp_properties_dict.items():
mdp_dict[k] = v
return write_mdp(output_mdp_path, mdp_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_data_raw (mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname,',',mdp.tag\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n # write first header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n # write another header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return",
"def writeBasicMinimizationMDP( filename, nsteps = 10 ):\n\n input=\"\"\"\n; RUN CONTROL PARAMETERS =\nintegrator = steep\nnsteps =%(nsteps)s\n; Output frequency for energies to log file and energy file =\nnstlog = 1\nnstenergy = 1\n; ENERGY MINIMIZATION OPTIONS =\n; Force tolerance and initial step-size =\nemtol = 100\nemstep = 0.01\n; Max number of iterations in relax_shells =\nniter = 20\n; Number of correction steps to use for L-BFGS minimization\nnbfgscorr = 10\n; NEIGHBORSEARCHING PARAMETERS =\n; nblist update frequency =\nnstlist = 1\n; ns algorithm (simple or grid) =\nns_type = grid\n; Periodic boundary conditions: xyz or none =\npbc = xyz\n; nblist cut-off =\nrlist = 1.0\ndomain-decomposition = no\n; OPTIONS FOR ELECTROSTATICS AND VDW =\n; Method for doing electrostatics =\ncoulombtype = pme\nrcoulomb = 1.0\n; Method for doing Van der Waals =\nvdw-type = switch\n; cut-off lengths =\nrvdw-switch = 0.8\nrvdw = 0.9\n; Apply long range dispersion corrections for Energy and Pressure =\nDispCorr = AllEnerPres\n; Spacing for the PME/PPPM FFT grid =\nfourierspacing = 0.1\n; FFT grid size, when a value is 0 fourierspacing will be used =\nfourier_nx = 0\nfourier_ny = 0\nfourier_nz = 0\n; EWALD/PME/PPPM parameters =\npme_order = 6\newald_rtol = 1e-06\nepsilon_surface = 0\noptimize_fft = no\n; OPTIONS FOR BONDS =\nconstraints = none\n\nfree_energy = no\n\"\"\" % vars()\n\n file = open( filename, 'w')\n file.write( input )\n file.close()",
"def CreateDecisionMdp(self):\n print(\"Creating MDP\")\n self.states = {}\n self.actions = {}\n self.rewards = []\n stateNodes = map(lambda x:x.name,\\\n filter(lambda x: x.type not in [\"ActionNode\",\"MotorNode\"],self.nodes.values()) )\n actionNodes = map(lambda x:x.name,\\\n filter(lambda x: x.type is \"ActionNode\",self.nodes.values()))\n self.states = sorted(list(stateNodes))\n self.actions = sorted(list(actionNodes))\n if(len(self.states) < 2 or len(self.actions) < 1):\n print(\"Error: States and actions must be non-empty\")\n\n # Transition probabilities\n self.P = []\n self.hasPredictions = False\n for n in range(0,len(self.actions)):\n actionNode = self.nodes[self.actions[n]]\n b = []\n for state1 in range(0,len(self.states)):\n a = []\n for state2 in range(0,len(self.states)):\n if(actionNode.predictions.has_key(self.states[state2])):\n a.append(actionNode.predictions[self.states[state2]])\n hasPredictions = True\n else: a.append(0.0)\n b.append(a)\n self.P.append(b)\n # Rewards matrix\n self.R = []\n for i in (range(0,len(self.actions))):\n b = []\n for j in (range(0,len(self.states))):\n a = []\n for k in (range(0,len(self.states))):\n key = (self.states[j],self.actions[i],self.states[k])\n if self.expectedRewards.has_key(key):\n a.append(self.expectedRewards[key])\n else: a.append(0.0)\n b.append(a)\n self.R.append(b)\n\n if self.hasPredictions:\n print(\"Number of states: \" + str(len(self.states)))\n print(\"Number of actions: \" + str(len(self.actions)))\n print(np.array(self.R))\n self.vi = mdptoolbox.mdp.QLearning(np.array(self.P), np.array(self.R), self.discountFactor)\n self.vi.run()\n else:\n print(\"MDP not created. No prediction values available.\")",
"def alg(self):\n\n # Find freq ordering of vars and initialize lowest level mdps\n self.mdps[0] = set()\n self.freq, self.state_dim = self.find_freq()\n\n # Build the task hierarchy\n for level in range(self.state_dim):\n # Randomly execute actions from level and fill MDP properties (trans prob, adj, etc)\n self.explore(level=level, exploration_iterations=self.args.exploration_iterations)\n # Using MDP properties, find exits, MERs, and form MDPs at level+1\n self.create_sub_mdps(level+1)\n # Train a policy to reach every exit in the MDPs at level+1\n self.train_sub_mdps(self.mdps[level+1])\n\n # Serialize the MDPs\n with open(self.args.binary_file, 'wb') as handle:\n pickle.dump(self.mdps, handle, protocol=pickle.HIGHEST_PROTOCOL)\n handle.close()\n\n self.log[self.args.log_name].info(\"Finished pickling MDPs, saved at {}!\\n\".format(self.args.binary_file))",
"def make_data_raw_fast(mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return",
"def genDump(exePath,inputDict,outputDict):\n paramFile = path.join(path.dirname(inputDict),'damo.par')\n with open(paramFile,'w') as f:\n f.write('DUMP'+'\\n')\n f.write(inputDict+'\\n')\n f.write(outputDict)\n runDamocles(exePath, paramFile)\n remove(paramFile)",
"def save_ckpt(modules_optims, ep, scores, ckpt_file):\n state_dicts = [m.state_dict() for m in modules_optims]\n ckpt = dict(state_dicts=state_dicts,\n ep=ep,\n scores=scores)\n may_make_dir(osp.dirname(osp.abspath(ckpt_file)))\n torch.save(ckpt, ckpt_file)",
"def create_input(npart, domain=1.0):\r\n txt = \"\"\"Particles count: {0}\r\nPacking size: {1} {1} {1}\r\nGeneration start: 1\r\nSeed: 341\r\nSteps to write: 1000\r\nBoundaries mode: 1\r\nContraction rate: 1.328910e-005\r\n1. boundaries mode: 1 - bulk; 2 - ellipse (inscribed in XYZ box, Z is length of an ellipse); 3 - rectangle\r\n2. generationMode = 1 (Poisson, R) or 2 (Poisson in cells, S)\r\n \"\"\".format(npart, domain)\r\n with open('generation.conf', 'w') as fout:\r\n fout.write(txt)",
"def make_phys():\n for rn in dcm_dict.keys():\n # PPG\n if not dcm_dict[rn]['ppg_file'] == 'File missing':\n # Files\n ppg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ppg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['ppg_file'],ppg_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 100.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(ppg_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # Respiration\n if not dcm_dict[rn]['resp_file'] == 'File missing':\n # Files\n resp_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.tsv.gz')\n resp_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 25.0\n data['StartTime'] = -30.0\n data['Columns'] = 'respiratory'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # ECG\n # What to do if they have PPG and ECG?\n if not dcm_dict[rn]['ecg_file'] == 'File missing':\n # Files\n ecg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ecg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 1000.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)",
"def make_workflowpropsfile(self, output_path, dirlist):\n\n # Get the path to workflow_xml file on HDFS\n workflow_xml_path = os.path.join(self.get_config(\"hdfs_resource_path\"),\n \"workflow/%s\" % self.get_config(\"workflow_xml\"))\n\n # Configure inputFile parameter of workflow.properties file.\n # For initial load, with no inputdirs folders (pending_folders = None),\n # inputFile is simply sourceRoot/*. The last \"*\" is crucial for\n # recursive traversal of directorues. Otherwise \"Not a file\" exception\n # will result.\n input_files = os.path.join(self.get_config(\"source_root\"),\n \"{\" + \"\\\\\\\\\\\\\\\\,\".join(dirlist) + \"}\")\n\n # Create the properties file directory if its not created during the setup phase\n self.shell_exec.safe_execute(\"mkdir -p %s\"\n % self.get_config(\"nfs_workflow_properties_path\"))\n\n # Read the template file and perform the substitutions\n template_file = os.path.join(self.get_config(\"nfs_resource_path\"),\n \"workflow_template.properties\")\n\n with open(template_file, \"r\") as tf:\n template_str = tf.read()\n\n substitutions = {\n #\"@OOZIELIBPATH\": self.get_config(\"oozie_libpath\"),\n \"@JOBTRACKER\": self.get_config(\"jobtracker\"),\n \"@WORKFLOWXML\": workflow_xml_path,\n \"@NAMENODE\": self.get_config(\"namenode\"),\n \"@INPUTFILES\": input_files,\n \"@OUTPUTDIR\": output_path,\n \"@NUM_REDUCERS\": self.get_config(\"mr_num_reducers\")\n }\n\n # Materialize the properties file\n propfile_name = \"workflow_%s.properties\" % self.loadts.strftime(\"%Y%m%d-%H\")\n self.propfile = os.path.join(self.get_config(\"nfs_workflow_properties_path\"),\n propfile_name)\n materialize(template_str, substitutions, outfile=self.propfile)\n\n logkv(logger, {\"msg\": \"Generated properties file\",\n \"properties_file\": self.propfile}, \"info\")",
"def process(self):\n sections = Map.build_from_dirs(\n self.config_map,\n self.persist_fs,\n self.persist_fs.list_dirs(self.config_map.get_repo_path),\n )\n puml: PUML = PUML(self.config_map, self.persist_fs, sections)\n puml.write()",
"def recreate():\n path = \"./results/BEST/20190807_104745-smallv2/RESUME.yaml\"\n path = \"./results/BEST/LARGE/LARGE.yaml\"\n # import shlex\n # args = shlex.split(f\"--config {path}\")\n # sys.argv[1:] = args\n # print(sys.argv)\n config, *_ = build_model(path)\n globals().update(locals())\n\n #save_model(config)",
"def generateParameters(self, seed=_default_seed, output='atmos_db'):\n self.initPointingSequence()\n # Instantiate the Atmosphere class\n self.atmos = Atmosphere(\n self.mjds, self.mjde, self.npoints, seed)\n # Generate main atmosphere parameters sequence\n self.atmos.init_main_parameters()\n # Associate a value of these parameters for each pointing\n for opsim_dict in self.opsim_visits:\n # Get coordinates\n RA, DEC = (opsim_dict['fieldRA'], opsim_dict['fieldDEC'])\n # Get ID and date\n obsid, mjd = (opsim_dict['obsHistID'], opsim_dict['expMJD'])\n # Compute azimuth and elevation angle\n azimuth, z_angle = modtranTools.equatorial2local(RA, DEC,\n mjd, unit='rad')\n # Get atmosphere parameters\n modtran_dict = self.fillModtranDictionary(mjd, obsid, z_angle)\n self.modtran_visits.append(modtran_dict)\n self.aerosol_visits.append(self.atmos.aerosols(mjd) + (z_angle,))\n if output:\n megatupl = (self.modtran_visits, self.aerosol_visits,)\n parmdir = os.getenv('ATMOSPHERE_PARAMETERS_DIR')\n outname = output + '.pck'\n parmpath = os.join.path(parmdir, outname)\n with open(parmpath, 'w') as parmf:\n pickle.dump(megatupl, parmf, seed)\n # Done",
"def create_parfile(self):\n try:\n if not os.path.isdir(self.outdir):\n os.makedirs(self.outdir)\n except IOError:\n print(\"Cannot create directory: %s\" % self.outdir)\n parfile = os.path.join(self.outdir, 'classpar.ini')\n with open(parfile, 'w') as f:\n for k, v in self.cosmo.items():\n f.write(\"%s = %s \\n\" % (k, v))\n f.write(\"root = %s \\n\" % os.path.join(self.outdir, 'class_'))\n return parfile",
"def create_workflow_file(self, workflow: Workflow, props: PropertySet):",
"def generate(train_data_path, trained_model_path, num_output_files):\n # load the notes used to train the model\n\n train_data = data_preprocess.load_from_pickle(train_data_path)\n training_notes = train_data[\"data\"]\n note_translator = train_data[\"note_translator\"]\n\n net = networks.TransformerNet.load_checkpoint(trained_model_path)\n\n for i in range(num_output_files):\n prediction_output = generate_notes(net, training_notes, note_translator)\n create_midi(prediction_output, file_suffix=i)",
"def test013(testDir, dirDict, pflag):\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_system.cfg\"), testDir)\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_c.cfg\"), testDir)\n shutil.copy(osp.join(dirDict[\"baseConfig\"], \"params_a.cfg\"), testDir)\n psys = osp.join(testDir, \"params_system.cfg\")\n ptrodec = osp.join(testDir, \"params_c.cfg\")\n ptrodea = osp.join(testDir, \"params_a.cfg\")\n P_s = IO.get_config(psys)\n P_s.set(\"Sim Params\", \"profileType\", \"CV\")\n P_s.set(\"Sim Params\", \"Vset\", \"3.8\")\n P_s.set(\"Sim Params\", \"tsteps\", \"25\")\n P_s.set(\"Sim Params\", \"Nvol_c\", \"2\")\n P_s.set(\"Sim Params\", \"Nvol_s\", \"2\")\n P_s.set(\"Sim Params\", \"Nvol_a\", \"2\")\n P_s.set(\"Particles\", \"cs0_c\", \"0.2\")\n P_s.set(\"Particles\", \"cs0_a\", \"0.95\")\n P_s.set(\"Electrolyte\", \"elyteModelType\", \"dilute\")\n IO.write_config_file(P_s, psys)\n P = IO.get_config(ptrodec)\n P.set(\"Particles\", \"type\", \"homog\")\n P.set(\"Particles\", \"shape\", \"sphere\")\n P.set(\"Material\", \"muRfunc\", \"LiMn2O4_ss2\")\n P.set(\"Reactions\", \"rxnType\", \"Marcus\")\n IO.write_config_file(P, ptrodec)\n P = IO.get_config(ptrodea)\n P.set(\"Particles\", \"discretization\", \"2.5e-9\")\n P.set(\"Particles\", \"shape\", \"cylinder\")\n P.set(\"Material\", \"muRfunc\", \"testIS_ss\")\n P.set(\"Reactions\", \"rxnType\", \"BV_raw\")\n IO.write_config_file(P, ptrodea)\n main.main(psys, keepArchive=False)\n shutil.move(dirDict[\"simOut\"], testDir)\n if pflag:\n corePlots(testDir, dirDict)\n elytePlots(testDir, dirDict)\n electrodePlots(testDir, dirDict, \"c\")\n cmpr.bulkpf(testDir, dirDict, \"c\")\n electrodePlots(testDir, dirDict, \"a\")\n cmpr.bulkpf(testDir, dirDict, \"a\")",
"def CreateMdp(self):\n self.states = {\"\":0}\n self.actions = {}\n self.rewards = []\n stateNodes = filter(lambda x: x.type not in [\"ActionNode\",\"MotorNode\"],self.nodes.values())\n actionNodes = filter(lambda x: x.type is \"ActionNode\",self.nodes.values())\n for i in (range(1,len(stateNodes)+1)):\n self.states[stateNodes[i-1].name] = i\n for i in (range(0,len(actionNodes))):\n self.actions[i] = actionNodes[i].name\n if(len(self.states) < 2 or len(self.actions) < 1):\n print(\"Error: States and actions must be non-empty\")\n # Transition probabilities: equal probabilities\n self.P = []\n for i in (range(0,len(self.actions))):\n b = []\n for j in (range(0,len(self.states))):\n a = []\n for k in (range(0,len(self.states))):\n a.append(1.0/len(self.states))\n b.append(a)\n self.P.append(b)\n\n # Rewards\n self.R = []\n for i in (range(0,len(self.actions))):\n b = []\n for j in (range(0,len(self.states))):\n a = []\n for k in (range(0,len(self.states))):\n a.append(actionNodes[i].reward)\n b.append(a)\n self.R.append(b)\n\n print(\"Number of states: \" + str(len(self.states)))\n print(\"Number of actions: \" + str(len(self.actions)))\n self.vi = mdptoolbox.mdp.QLearning(np.array(self.P), np.array(self.R), 0.5)\n self.vi.run()\n print(self.vi.Q)",
"def make_experiment(\n path, experiment_name, experiment_info, verbosity, log_dir, output_path\n):\n experiment_dir = output_path / experiment_name\n if experiment_dir.is_dir():\n return\n experiment_dir.mkdir(parents=True)\n\n experiment_params = get_experiment_params(experiment_name, verbosity, log_dir)\n with (experiment_dir / \"experiment_params.json\").open(\"w\") as f:\n json.dump(experiment_params, f)\n\n for filename, filepath in experiment_info.items():\n filename += \".json\"\n new_path = experiment_dir / filename\n shutil.copy(filepath, new_path)",
"def save_joint_configurations_to_file(commander: moveit_commander.MoveGroupCommander):\n\n POSE_FILE_DIR.mkdir(parents=True, exist_ok=True)\n\n pose_file, temp_file = pose_file_paths(commander)\n\n temp_file.write_text(json.dumps(commander.get_remembered_joint_values(), indent=4, sort_keys=True))\n temp_file.rename(pose_file)",
"def update_params (mdp,lsp):\n if len(lsp) == 2:\n if lsp[0] == \"input_path\":\n mdp.input_path = lsp[1].replace(\"\\n\",\"\")\n elif lsp[0] == \"output_path\":\n # new output file path, close the old file\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n mdp.flag_overwrite= False\n except (IOError,AttributeError):\n pass\n mdp.output_path = lsp[1].replace(\"\\n\",\"\")\n elif lsp[0] == \"input_fname\":\n mdp.tag = ''\n mdp.input_fname = lsp[1].replace(\"\\n\",\"\")\n elif lsp[0] == \"tag\":\n mdp.tag = '_'+lsp[1] # underscore will be used later\n mdp.input_fname = lsp[2].replace(\"\\n\",\"\")\n elif lsp[0] == \"comb_path\":\n mdp.input_path = lsp[1].replace(\"\\n\",\"\")\n elif lsp[0] == \"output_fname\":\n # new output file name, close the old one\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n mdp.flag_overwrite= False\n except (IOError,AttributeError):\n pass\n mdp.output_fname = lsp[1].replace(\"\\n\",\"\")\n mdp.flag_overwrite = False\n elif lsp[0] == \"input_type\":\n mdp.input_type = lsp[1].replace(\"\\n\",\"\")\n elif lsp[0] == \"key\":\n #mdp.key = lsp[1].replace(\"\\n\",\"\")\n mdp.key = lsp[1].replace(\"\\n\",\"\").strip(\"[]\").split(',')\n elif lsp[0] == \"corr_num\":\n #mdp.corr_num = lsp[1].replace(\"\\n\",\"\")\n mdp.corr_num = lsp[1].replace(\"\\n\",\"\").strip(\"[]\").split(',')\n elif lsp[0] == \"corr_len\":\n mdp.corr_len = int(lsp[1].replace(\"\\n\",\"\"))\n elif lsp[0] == \"flag_overwrite\":\n mdp.flag_overwrite = lsp[1].replace(\"\\n\",\"\")\n elif not lsp[0].startswith('#'):\n print \"Unknown import file key: \",lsp[0]",
"def writeNMD(filename, modes, atoms, zeros=False):\n\n if not isinstance(modes, (NMA, ModeSet, Mode, Vector)):\n raise TypeError('modes must be NMA, ModeSet, Mode, or Vector, '\n 'not {0}'.format(type(modes)))\n if modes.numAtoms() != atoms.numAtoms():\n raise Exception('number of atoms do not match')\n out = openFile(addext(filename, '.nmd'), 'w')\n\n #out.write('#!{0} -e\\n'.format(VMDPATH))\n out.write('nmwiz_load {0}\\n'.format(abspath(filename)))\n name = modes.getTitle()\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = str(atoms)\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = splitext(split(filename)[1])[0]\n out.write('name {0}\\n'.format(name))\n try:\n coords = atoms.getCoords()\n except:\n raise ValueError('coordinates could not be retrieved from atoms')\n if coords is None:\n raise ValueError('atom coordinates are not set')\n\n try:\n data = atoms.getNames()\n if data is not None:\n out.write('atomnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnames()\n if data is not None:\n out.write('resnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnums()\n if data is not None:\n out.write('resids ')\n data.tofile(out, ' ')\n out.write('\\n')\n except:\n pass\n try:\n data = atoms.getChids()\n if data is not None:\n out.write('chainids {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getSegnames()\n if data is not None:\n out.write('segnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n\n try:\n data = atoms.getBetas()\n if data is not None:\n out.write('bfactors ')\n data.tofile(out, ' ', '%.2f')\n out.write('\\n')\n except:\n pass\n\n format = '{0:.3f}'.format\n out.write('coordinates ')\n coords.tofile(out, ' ', '%.3f')\n out.write('\\n')\n count = 0\n if isinstance(modes, Vector):\n out.write('mode 1 {0:.2f} '.format(abs(modes)))\n modes.getNormed()._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n else:\n if isinstance(modes, Mode):\n modes = [modes]\n for mode in modes:\n if (mode.getEigval() < ZERO) and not zeros:\n continue\n elif (mode.getEigval() < ZERO) and zeros:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, np.sqrt(1/(0.0001*(mode.getIndex()+1)))))\n else:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, mode.getVariance()**0.5))\n arr = mode._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n if count == 0:\n LOGGER.warning('No normal mode data was written. '\n 'Given modes might have 0 eigenvalues.')\n out.close()\n return filename",
"def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...",
"def generate_seed_file(kb_mapping, seed_file):\n r_file = open(kb_mapping, 'r')\n s_file = open(seed_file, 'w+')\n\n for line in r_file:\n values = line.strip().split(\"\\t\")\n relations = values[1].split(\" \")\n subsumptions = values[2].split(\" \")\n for subsumption in subsumptions:\n if subsumption == \"concept:relatedto\":\n continue\n for relation in relations:\n s_file.write(\"%s\\t%s\\t1.0\\n\" %(relation, subsumption))\n\n r_file.close()\n s_file.close()",
"def create_net(args):\n\n # Load config file for this experiment\n xinfo = yaml.load(open(args.exp)) # experiment info\n\n # copy config to run directory\n assert osp.isdir(args.cache_dir), 'Working directory not found: ' + args.cache_dir\n # output config file\n yaml.dump(xinfo, open(args.exp_config_path, 'w'),\n default_flow_style=False)\n\n # Load dataset config file\n dcfg_path = osp.join(args.data_config_path, xinfo['INPUT']['DATASET'])\n dinfo = yaml.load(open(dcfg_path)) # dataset info\n data_dir = dinfo['ROOT']\n\n layout = xinfo['INPUT']['LAYOUT']\n inps = [s.strip() for l in layout for s in l.split(',')]\n outs = [s.strip() for s in xinfo['REFINE']['TARGETS'].split(',')]\n\n supports = ['seg', 'flow', 'norm', 'rgb', 'depth']\n\n nets = {}\n for split in ['train', 'test']:\n net_inps = []\n net_outs = []\n for inp in inps:\n match = re.search('^(gt|pr)({})'.format('|'.join(supports)), inp)\n assert match is not None, 'Error in config INPUT-LAYOUT: ' + inp\n\n modality = match.group(2)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality][match.group(1) + '-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_inps.append((inp, path, nchannels))\n\n for out in outs:\n # TODO: read target type: zero couplings, tight, loose couplings\n match = re.search('({})'.format('|'.join(supports)), out)\n assert match is not None, 'Error in config REFINE-TARGET: '+ out\n\n modality = match.group(1)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality]['gt-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_outs.append((out, path, nchannels))\n\n loss_params = dict()\n mapping = None\n if 'mapping' in dinfo['seg']:\n idx = dinfo['seg']['mapping']\n mapping = dict(zip(idx, xrange(len(idx))))\n\n if split == 'train':\n\n # if the class weights is not in the dataset config file\n if 'gt-train-weights' not in dinfo['seg']:\n print 'Generating median frequency balancing weights.'\n (weights, mapping) = gcw.get_mfb(osp.join(dinfo['ROOT'], dinfo['seg']['gt-train']),\n dinfo['seg']['ignore_label'],\n mapping)\n # save back to dataset config\n dinfo['seg']['gt-train-weights'] = weights\n yaml.dump(dinfo, open(dcfg_path, 'w'), default_flow_style=False)\n else:\n weights = dinfo['seg']['gt-train-weights']\n # update data\n # update loss parameter\n ignore_label = dinfo['seg']['ignore_label']\n ignore_label = mapping[ignore_label] if mapping is not None else ignore_label\n loss_params['loss_param'] = {\n 'ignore_label': ignore_label,\n 'class_weighting': weights\n }\n\n # generate net prototxt\n loader = dinfo['NAME'] + '_loader'\n net_proto = arch.create_net(net_inps, net_outs, split, loader, layout, mapping, **loss_params)\n\n # output to file\n path = osp.join(args.cache_dir, getattr(args, 'exp_{}_path'.format(split)))\n open(path, 'w').write(str(net_proto))\n nets[split] = net_proto\n\n return nets",
"def genLatex(exePath,inputDict,latexName,lng):\n paramFile = path.join(path.dirname(inputDict),'damo.par')\n with open(paramFile,'w') as f:\n f.write('LATEX'+'\\n')\n f.write(inputDict+'\\n')\n f.write(latexName+'\\n')\n f.write(lng)\n runDamocles(exePath, paramFile)\n remove(paramFile)",
"def ddpg(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0, \n steps_per_epoch=1000, epochs=100, replay_size=int(1e6), gamma=0.99, \n polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=128, start_steps=10000, \n update_after=250, update_every=64, act_noise=0.1, num_test_episodes=10, \n max_ep_len=1000, logger_kwargs=dict(), save_freq=1, save_prefix = \"art\",\n save_after = 5, restore_model_from_file = 1, load_after_iters = 0):\n\n #setup_pytorch_for_mpi() \n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n env, test_env = env_fn(), env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Create actor-critic module and target networks\n ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)\n ac_targ = deepcopy(ac)\n\n def save_target_actor_model(checkpoint_file):\n logger.log('... Saving target actor model ...')\n checkpoint_file += \"_target_actor.model\"\n torch.save({'state_dict': ac_targ.pi.state_dict()},\n os.path.normpath(checkpoint_file))\n logger.log(\"Saved model to file:{}\".format(checkpoint_file))\n\n def save_target_critic_model(checkpoint_file):\n checkpoint_file += \"_target_critic.model\"\n torch.save({'state_dict': ac_targ.q.state_dict()},\n os.path.normpath(checkpoint_file))\n logger.log(\"Saved model to file:{}\".format(checkpoint_file))\n\n def load_target_actor_model(checkpoint_file):\n checkpoint_file += \"_target_actor.model\"\n\n checkpoint = torch.load(os.path.normpath(checkpoint_file))\n ac_targ.pi.load_state_dict(checkpoint['state_dict'])\n logger.log(\"Loaded file:{}\".format(checkpoint_file))\n\n\n def load_target_critic_model(checkpoint_file):\n logger.log('... 
Loading target critic model ...')\n checkpoint_file += \"_target_critic.model\"\n\n checkpoint = torch.load(os.path.normpath(checkpoint_file))\n ac_targ.q.load_state_dict(checkpoint['state_dict'])\n logger.log(\"Loaded file:{}\".format(checkpoint_file))\n\n\n # Freeze target networks with respect to optimizers (only update via polyak averaging)\n for p in ac_targ.parameters():\n p.requires_grad = False\n\n tot_time = 0\n iters_so_far = 0\n ep_so_far = 0\n prev_ep_so_far = 0\n timesteps_so_far = 0\n\n lenbuffer = deque(maxlen=100)\n rewbuffer = deque(maxlen=100)\n\n #sync_params(ac)\n\n # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Count variables (protip: try to get a feel for how different size networks behave!)\n var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t q: %d\\n'%var_counts)\n\n # Set up function for computing DDPG Q-loss\n def compute_loss_q(data):\n o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']\n\n q = ac.q(o,a)\n\n # Bellman backup for Q function\n with torch.no_grad():\n q_pi_targ = ac_targ.q(o2, ac_targ.pi(o2))\n backup = r + gamma * (1 - d) * q_pi_targ\n\n # MSE loss against Bellman backup\n loss_q = ((q - backup)**2).mean()\n\n # Useful info for logging\n loss_info = dict(QVals=q.detach().numpy())\n\n return loss_q, loss_info\n\n # Set up function for computing DDPG pi loss\n def compute_loss_pi(data):\n o = data['obs']\n q_pi = ac.q(o, ac.pi(o))\n return -q_pi.mean()\n\n # Set up optimizers for policy and q-function\n pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)\n q_optimizer = Adam(ac.q.parameters(), lr=q_lr)\n\n def save_actor_model(checkpoint_file):\n #opt_file = checkpoint_file + \"_actor_opt.model\"\n checkpoint_file += \"_actor.model\"\n \n saves = {\n 'state_dict' : ac.pi.state_dict(),\n 'optimizer' : pi_optimizer.state_dict(),\n }\n torch.save(saves, os.path.normpath(checkpoint_file))\n #torch.save(pi_optimizer, os.path.normpath(opt_file))\n logger.log(\"Saved model to file:{}\".format(checkpoint_file))\n\n def save_critic_model(checkpoint_file):\n #logger.log('... 
Saving critic model ...')\n opt_file = checkpoint_file + \"_critic_opt.model\"\n checkpoint_file += \"_critic.model\"\n saves = {\n 'state_dict' : ac.q.state_dict(),\n 'optimizer' : q_optimizer.state_dict(),\n }\n torch.save(saves, os.path.normpath(checkpoint_file))\n #torch.save(q_optimizer, os.path.normpath(opt_file))\n logger.log(\"Saved model to file:{}\".format(checkpoint_file))\n\n\n def save_everything(checkpoint_file):\n #checkpoint_file += \"_various_NNs.model\"\n checkpoint_file = \"Trials/all.tar\"\n chk = {\n 'actor_network' : ac.pi.state_dict(),\n 'critic_network' : ac.q.state_dict(),\n 'target_actor_network' : ac_targ.pi.state_dict(),\n 'target_critic_network' : ac_targ.q.state_dict(),\n 'pi_optimizer' : pi_optimizer.state_dict(),\n 'q_optimizer' : q_optimizer.state_dict(),\n }\n torch.save(chk, os.path.normpath(checkpoint_file))\n logger.log(\"Saved model to file:{}\".format(checkpoint_file))\n \n def load_everything(checkpoint_file):\n #checkpoint_file += \"_various_NNs.model\"\n checkpoint_file = \"Trials/all.tar\"\n checkpoint = torch.load(os.path.normpath(checkpoint_file))\n \n ac.pi.load_state_dict(checkpoint['actor_network'])\n ac.q.load_state_dict(checkpoint['critic_network'])\n ac_targ.pi.load_state_dict(checkpoint['target_actor_network'])\n ac_targ.q.load_state_dict(checkpoint['target_critic_network'])\n pi_optimizer.load_state_dict(checkpoint['pi_optimizer'])\n q_optimizer.load_state_dict(checkpoint['q_optimizer'])\n\n logger.log(\"Loaded file:{}\".format(checkpoint_file))\n\n\n def load_actor_model(checkpoint_file):\n #logger.log(\"... loading actor model ...\")\n #opt_file = checkpoint_file + \"_actor_opt.model\"\n checkpoint_file += \"_actor.model\"\n\n\n checkpoint = torch.load(os.path.normpath(checkpoint_file))\n ac.pi.load_state_dict(checkpoint['state_dict'])\n #for p in ac.pi.parameters():\n # p.requires_grad = True\n #pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)\n pi_optimizer.load_state_dict(checkpoint['optimizer'])\n #ac.pi = torch.load(os.path.normpath(checkpointfile))\n #pi_optimizer = torch.load(os.path.normpath(opt_file))\n\n logger.log(\"Loaded file:{}\".format(checkpoint_file))\n\n\n def load_critic_model(checkpoint_file):\n #opt_file = checkpoint_file + \"_critic_opt.model\"\n checkpoint_file += \"_critic.model\"\n\n checkpoint = torch.load(os.path.normpath(checkpoint_file))\n \n ac.q.load_state_dict(checkpoint['state_dict'])\n #for p in ac.q.parameters():\n # p.requires_grad = True\n\n #q_optimizer = Adam(ac.q.parameters(), lr = q_lr)\n q_optimizer.load_state_dict(checkpoint['optimizer'])\n #ac.q = torch.load(os.path.normpath(checkpoint_file))\n #q_optimizer = torch.load(os.path.normpath(opt_file))\n logger.log(\"Loaded file:{}\".format(checkpoint_file))\n\n\n\n\n # Set up model saving\n logger.setup_pytorch_saver(ac)\n if restore_model_from_file == 1:\n #pi_optimizer = None\n #q_optimizer = None\n base_path = os.path.dirname(os.path.abspath(__file__))\n model_f = os.path.normpath(\n base_path + \"/../../../\" + save_prefix + '/models/' + save_prefix + \"_afterIter_\" + str(\n load_after_iters))\n model_buff = os.path.normpath(\n base_path + \"/../../../\" + save_prefix + '/buffers/' + save_prefix + \"_afterIter_\" + str(\n load_after_iters))\n \n #ac = torch.load('/home/leonidas/Desktop/data/ddpg/ddpg_s0/pyt_save/model.pt')\n #load_actor_model(model_f)\n #load_critic_model(model_f)\n load_everything(model_f)\n #load_target_actor_model(model_f)\n #load_target_critic_model(model_f)\n #ac_targ = deepcopy(ac)\n\n 
replay_buffer.load_buffer(model_buff)\n update_after = 0\n logger.log(\"... Loading Complete ...\")\n '''logger.log(\"---------- Pi optimizer: ----------\")\n for var in pi_optimizer.state_dict():\n print(var, \"\\t\", pi_optimizer.state_dict()[var])\n\n logger.log(\"---------- Q optimizer: ----------\")\n for var in q_optimizer.state_dict():\n print(var, \"\\t\", q_optimizer.state_dict()[var])\n #logger.log(replay_buffer.act_buf)'''\n data = genfromtxt(save_prefix + '/test_after_Iter' + str(load_after_iters) + '.csv', delimiter=',')\n for i in range(len(data)):\n data_vector = data[i]\n ep_so_far = int(data_vector[0])\n timesteps_so_far = int(data_vector[1])\n iters_so_far = int(data_vector[2])\n time_elapsed = int(data_vector[3])\n lenbuffer.append(int(data_vector[4]))\n rewbuffer.append(int(data_vector[5]))\n var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q])\n logger.log('\\nLoaded Number of parameters: \\t pi: %d, \\t q: %d\\n'%var_counts)\n #sync_params(ac)\n #sync_params(ac_targ)\n\n\n def update(data):\n # First run one gradient descent step for Q.\n q_optimizer.zero_grad()\n loss_q, loss_info = compute_loss_q(data)\n loss_q.backward()\n #mpi_avg_grads(ac.q.q)\n q_optimizer.step()\n\n # Freeze Q-network so you don't waste computational effort \n # computing gradients for it during the policy learning step.\n for p in ac.q.parameters():\n p.requires_grad = False\n\n # Next run one gradient descent step for pi.\n pi_optimizer.zero_grad()\n loss_pi = compute_loss_pi(data)\n loss_pi.backward()\n #mpi_avg_grads(ac.pi.pi)\n pi_optimizer.step()\n\n # Unfreeze Q-network so you can optimize it at next DDPG step.\n for p in ac.q.parameters():\n p.requires_grad = True\n\n # Record things\n logger.store(LossQ=loss_q.item(), LossPi=loss_pi.item(), **loss_info)\n\n # Finally, update target networks by polyak averaging.\n with torch.no_grad():\n for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):\n # NB: We use an in-place operations \"mul_\", \"add_\" to update target\n # params, as opposed to \"mul\" and \"add\", which would make new tensors.\n p_targ.data.mul_(polyak)\n p_targ.data.add_((1 - polyak) * p.data)\n\n def get_action(o, noise_scale):\n a = ac.act(torch.as_tensor(o, dtype=torch.float32))\n a += noise_scale * np.random.randn(act_dim)\n return np.clip(a, -act_limit, act_limit)\n\n def test_agent():\n for j in range(num_test_episodes):\n o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0\n while not(d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time (noise_scale=0)\n o, r, d, _ = test_env.step(get_action(o, 0))\n ep_ret += r\n ep_len += 1\n logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)\n\n def flatten_lists(listoflists):\n return [el for list_ in listoflists for el in list_] \n\n # Prepare for interaction with environment\n total_steps = steps_per_epoch * epochs\n start_time = time.time()\n o, ep_ret, ep_len = env.reset(), 0, 0\n epoch = iters_so_far\n episodes = 0\n\n\n ep_ret_arr = []\n ep_len_arr = []\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n if t % steps_per_epoch == 0:\n logger.log(\"********** Iteration %i ************\" % epoch)\n ep_ret_arr = []\n ep_len_arr = [] \n # Until start_steps have elapsed, randomly sample actions\n # from a uniform distribution for better exploration. Afterwards, \n # use the learned policy (with some noise, via act_noise). 
\n if t > start_steps:\n a = get_action(o, act_noise)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==max_ep_len else d\n if d:\n episodes += 1\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n o = o2\n\n # End of trajectory handling\n if d or (ep_len == max_ep_len):\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n ep_ret_arr.append(ep_ret)\n ep_len_arr.append(ep_len)\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n # Update handling\n if t >= update_after and t % update_every == 0:\n for _ in range(update_every):\n batch = replay_buffer.sample_batch(batch_size)\n update(data=batch)\n\n # End of epoch handling\n if (t+1) % steps_per_epoch == 0:\n epoch += 1\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs):\n logger.save_state({'env': env}, None)\n\n # Test the performance of the deterministic version of the agent.\n test_agent()\n\n lrlocal = (ep_len_arr, ep_ret_arr)\n listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)\n lens, rews = map(flatten_lists, zip(*listoflrpairs))\n lenbuffer.extend(lens)\n rewbuffer.extend(rews)\n\n prev_ep_so_far = ep_so_far\n ep_so_far += len(lens)\n timesteps_so_far += sum(lens)\n iters_so_far += 1\n\n # Log info about epoch\n logger.log_tabular(\"EpLenMean\", np.mean(lenbuffer))\n logger.log_tabular(\"EpRewMean\", np.mean(rewbuffer))\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular(\"EpSoFar\", ep_so_far)\n logger.log_tabular('EpthisIter', len(lens))\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('TestEpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('TestEpLen', average_only=True)\n logger.log_tabular('TotalEnvInteracts', t)\n logger.log_tabular('QVals', with_min_and_max=True)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossQ', average_only=True)\n logger.log_tabular('Time', time.time()-start_time)\n logger.log_tabular('BufferSize', replay_buffer.size)\n\n if MPI.COMM_WORLD.Get_rank() == 0:\n f = open(save_prefix + \"/training_rewards.txt\", \"a+\")\n g = open(save_prefix + \"/training_episode_lengths.txt\", \"a+\")\n h = open(save_prefix + \"/training_mean_reward.txt\", \"a+\")\n k = open(save_prefix + \"/training_mean_lengths.txt\", \"a+\")\n l = open(save_prefix + \"/iterations.txt\", \"a+\")\n m = open(save_prefix + \"/timesteps.txt\", \"a+\")\n \n for i in range((ep_so_far - prev_ep_so_far)):\n f.write(\"Episode %d \" % (prev_ep_so_far + i))\n f.write(\"Reward %d\\r\\n\" % rews[i])\n g.write(\"Episode %d \" % (prev_ep_so_far + i))\n g.write(\"Length %d\\r\\n\" % lens[i])\n \n h.write(\"Episode %d \" % ep_so_far)\n h.write(\"Reward %d\\r\\n\" % np.mean(rews))\n k.write(\"Episode %d \" % ep_so_far)\n k.write(\"Length %d\\r\\n\" % np.mean(lens))\n\n if iters_so_far % save_after == 0:\n l.write(\"%d\\r\\n\" % iters_so_far)\n m.write(\"%d\\r\\n\" % t)\n \n\n f.close()\n g.close()\n k.close()\n h.close()\n l.close()\n m.close()\n\n logger.dump_tabular()\n \n if MPI.COMM_WORLD.Get_rank() == 0 and iters_so_far % save_after == 0:\n\n '''logger.log(\"---------- Pi optimizer: ----------\")\n for var in pi_optimizer.state_dict():\n print(var, \"\\t\", 
pi_optimizer.state_dict()[var])\n\n logger.log(\"---------- Q optimizer: ----------\")\n for var in q_optimizer.state_dict():\n print(var, \"\\t\", q_optimizer.state_dict()[var])'''\n base_path = os.path.dirname(os.path.abspath(__file__))\n model_f = os.path.normpath(\n base_path + '/../../../' + save_prefix + '/models/' + save_prefix + \"_afterIter_\" + str(\n iters_so_far))\n model_buff = os.path.normpath(\n base_path + \"/../../../\" + save_prefix + '/buffers/' + save_prefix + \"_afterIter_\" + str(\n iters_so_far))\n #save_actor_model(model_f)\n #save_critic_model(model_f)\n #save_target_actor_model(model_f)\n #save_target_critic_model(model_f)\n save_everything(model_f)\n replay_buffer.save_buffer(model_buff)\n logger.log(\"... Saving Complete ...\")\n #logger.log(replay_buffer.act_buf)\n if episodes < 100:\n size = episodes\n else:\n size = 100\n asd = np.zeros((size, 6), dtype = np.int32)\n for i in range(size):\n asd[i] = [ep_so_far, timesteps_so_far, iters_so_far, tot_time, lenbuffer[i], rewbuffer[i]]\n np.savetxt(save_prefix + '/test_after_Iter' + str(iters_so_far) + '.csv', asd, delimiter = \",\")",
"def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):\n if \"xprophetnet\" in prophetnet_checkpoint_path:\n prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)\n prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(\n prophetnet_checkpoint_path, output_loading_info=True\n )\n else:\n prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)\n prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(\n prophetnet_checkpoint_path, output_loading_info=True\n )\n\n special_keys = [\"key_proj\", \"value_proj\", \"query_proj\"]\n\n mapping = {\n \"self_attn\": \"ngram_self_attn\",\n \"cross_attn\": \"encoder_attn\",\n \"cross_attn_layer_norm\": \"encoder_attn_layer_norm\",\n \"feed_forward_layer_norm\": \"final_layer_norm\",\n \"feed_forward\": \"\",\n \"intermediate\": \"fc1\",\n \"output\": \"fc2\",\n \"key_proj\": \"k_proj\",\n \"query_proj\": \"q_proj\",\n \"value_proj\": \"v_proj\",\n \"word_embeddings\": \"embed_tokens\",\n \"embeddings_layer_norm\": \"emb_layer_norm\",\n \"relative_pos_embeddings\": \"relative_linear\",\n \"ngram_embeddings\": \"ngram_input_embed\",\n \"position_embeddings\": \"embed_positions\",\n }\n\n for key in loading_info[\"missing_keys\"]:\n attributes = key.split(\".\")\n\n if attributes[0] == \"lm_head\":\n model = prophet\n old_model = prophet_old\n else:\n model = prophet.prophetnet\n old_model = prophet_old.model\n\n is_key_init = False\n for attribute in attributes:\n if attribute in mapping:\n old_attribute = mapping[attribute]\n if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:\n old_attribute = attribute\n elif hasattr(old_model, attribute):\n old_attribute = attribute\n\n if attribute == \"weight\":\n assert old_model.weight.shape == model.weight.shape, \"Shapes have to match!\"\n model.weight = old_model.weight\n logger.info(f\"{attribute} is initialized.\")\n is_key_init = True\n break\n elif attribute == \"bias\":\n assert old_model.bias.shape == model.bias.shape, \"Shapes have to match!\"\n model.bias = old_model.bias\n logger.info(f\"{attribute} is initialized\")\n is_key_init = True\n break\n elif attribute in special_keys and hasattr(old_model, \"in_proj_weight\"):\n embed_dim = old_model.in_proj_weight.shape[0] // 3\n param = getattr(model, attribute)\n param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, \"Shapes have to match\"\n param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, \"Shapes have to match\"\n if attribute == \"query_proj\":\n model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])\n model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])\n\n elif attribute == \"key_proj\":\n model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])\n model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])\n elif attribute == \"value_proj\":\n model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])\n model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])\n is_key_init = True\n break\n elif attribute == \"position_embeddings\":\n assert (\n model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]\n ), \"Hidden size has to match\"\n assert model.position_embeddings.weight.shape[0] == 512, \"We want 512 position_embeddings.\"\n 
model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])\n is_key_init = True\n break\n\n if attribute.isdigit():\n model = model[int(attribute)]\n old_model = old_model[int(old_attribute)]\n else:\n model = getattr(model, attribute)\n\n if old_attribute == \"\":\n old_model = old_model\n else:\n if not hasattr(old_model, old_attribute):\n raise ValueError(f\"{old_model} does not have {old_attribute}\")\n old_model = getattr(old_model, old_attribute)\n\n if not is_key_init:\n raise ValueError(f\"{key} was not correctly initialized!\")\n\n print(f\"Saving model to {pytorch_dump_folder_path}\")\n prophet.save_pretrained(pytorch_dump_folder_path)",
"def save(self, output_dir: Optional[str] = None):\n model_state_dict = self._model.state_dict()\n optimizer_state_dict = self._optimizer.state_dict()\n pretrain_optimizer_state_dict = self._pretrain_optimizer.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n 'optimizer': optimizer_state_dict,\n 'pretrain_optimizer': pretrain_optimizer_state_dict,\n 'state_prior': self._state_prior,\n 'transitions': self._trans_mat,\n 'emissions': self._emiss_mat,\n 'config': self._config\n }\n output_dir = output_dir if output_dir is not None else self._config.output_dir\n torch.save(checkpoint, os.path.join(output_dir, 'chmm.bin'))",
"def write_hmm_input(self, parameter_dir):\n print ' writing input'\n # if self.cached_results is None:\n if self.args.persistent_cachefname is not None and os.path.exists(self.args.persistent_cachefname):\n check_call(['cp', '-v', self.args.persistent_cachefname, self.args.workdir + '/' + os.path.basename(self.hmm_cachefname)])\n # else:\n # pass\n # # assert os.path.exists(self.hmm_cachefname)\n # # self.write_cachefile(self.hmm_cachefname)\n\n # if (self.args.action == 'partition' or self.args.n_sets > 1) and not self.args.dont_pad_sequences:\n if not self.args.dont_pad_sequences:\n self.pad_seqs_to_same_length() # adds padded info to sw_info (returns if stuff has already been padded)\n\n skipped_gene_matches = set()\n\n if self.args.smc_particles > 1:\n assert self.args.action == 'partition'\n n_procs = len(self.smc_info[-1])\n for iproc in range(n_procs):\n if n_procs == 1:\n fname = self.hmm_infname\n else:\n subworkdir = self.args.workdir + '/hmm-' + str(iproc)\n utils.prep_dir(subworkdir)\n if os.path.exists(self.hmm_cachefname): # copy cachefile to this subdir\n check_call(['cp', self.hmm_cachefname, subworkdir + '/']) # NOTE this is kind of wasteful to write it to each subdirectory (it could be large) but it's cleaner this way, 'cause then the subdirs are independent\n fname = subworkdir + '/' + os.path.basename(self.hmm_infname)\n procinfo = self.smc_info[-1][iproc] # list of ClusterPaths, one for each smc particle\n for iptl in range(len(procinfo)):\n path = procinfo[iptl]\n self.write_to_single_input_file(fname, 'w' if iptl==0 else 'a', list(path.partitions[path.i_best_minus_x]), parameter_dir, # list() is important since we may modify <nsets>\n skipped_gene_matches, path_index=iptl, logweight=path.logweights[path.i_best_minus_x])\n else:\n if self.args.action == 'partition':\n nsets = list(self.paths[-1].partitions[self.paths[-1].i_best_minus_x]) # list() is important since we modify <nsets>\n else:\n if self.args.n_sets == 1: # single vanilla hmm (does the same thing as the below for n=1, but is more transparent)\n nsets = [[qn] for qn in self.input_info.keys()]\n else:\n if self.args.all_combinations: # run on *every* combination of queries which has length <self.args.n_sets>\n nsets = itertools.combinations(self.input_info.keys(), self.args.n_sets)\n else: # put the first n together, and the second group of n (note that self.input_info is an OrderedDict)\n nsets = []\n keylist = self.input_info.keys()\n this_set = []\n for iquery in range(len(keylist)):\n if iquery % self.args.n_sets == 0: # every nth query, start a new group\n if len(this_set) > 0:\n nsets.append(this_set)\n this_set = []\n this_set.append(keylist[iquery])\n if len(this_set) > 0:\n nsets.append(this_set)\n\n self.write_to_single_input_file(self.hmm_infname, 'w', nsets, parameter_dir, skipped_gene_matches)\n\n if self.args.debug and len(skipped_gene_matches) > 0:\n print ' not found in %s, so removing from consideration for hmm (i.e. were only the nth best, but never the best sw match for any query):' % (parameter_dir),\n for region in utils.regions:\n # print ' %s: %d' % (region, len([gene for gene in skipped_gene_matches if utils.get_region(gene) == region])),\n print '\\n %s: %s' % (region, ' '.join([utils.color_gene(gene) for gene in sorted(skipped_gene_matches) if utils.get_region(gene) == region]))\n print ''"
] | [
"0.5505155",
"0.54537225",
"0.54450774",
"0.5346326",
"0.53137314",
"0.52562153",
"0.5215968",
"0.51548755",
"0.5151159",
"0.5133407",
"0.5107904",
"0.508078",
"0.50662065",
"0.5035198",
"0.5001301",
"0.4993961",
"0.49878114",
"0.49868137",
"0.49628305",
"0.49551043",
"0.49522635",
"0.49312344",
"0.48880813",
"0.480165",
"0.47839734",
"0.478239",
"0.47801295",
"0.47730783",
"0.4759579",
"0.4750957"
] | 0.8359074 | 0 |
Perform the calculation to apply image normalization. The given region in the given extension (``ext``) of the given frame is either multiplied or divided by (as determined by ``operand``) the ``norm_value``. | def apply_norm(frame, operand, norm_value, region):
# Determine region
if region == 'regionAorC':
x1 = 0
x2 = 2048
elif region == 'regionBorD':
x1 = 2048
x2 = 4096
elif region == 'None':
x1 = 0
x2 = 4096
# Apply gain to specific region
if operand == '*':
frame[0:2051, x1:x2] = frame[0:2051, x1:x2] * norm_value
elif operand == '/':
frame[0:2051, x1:x2] = frame[0:2051, x1:x2] / norm_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize_image(self, factor, luminosity=None):\n if not luminosity:\n luminosity = self.average_luminosity()\n\n for i in range(len(self.pixels)):\n self.pixels[i] = self.pixels[i] * (factor / luminosity)",
"def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01",
"def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2",
"def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)",
"def _compute_normalization(self, normalize=True):\n self._normalization_constant = 1.0 / self._normalization_correction\n\n if normalize:\n # compute normalization constant so that\n # N*C*sum(data) = 1:\n if self._img_norm is None:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._normalization_constant /= self._img_norm\n self._normalization_status = 0\n\n else:\n self._normalization_constant = 1.0\n self._normalization_status = 1\n warnings.warn(\"Overflow encountered while computing \"\n \"normalization constant. Normalization \"\n \"constant will be set to 1.\", NonNormalizable)\n\n else:\n self._normalization_status = 2",
"def add_image_normalization(self):\n self.methods.append(self._normalize_image)\n self.args.append(None)",
"def normalize(self, mag=1.0):\n\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n\n if self.signed:\n mag = self.mag() / mag\n else:\n mag = self.max() / mag\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0",
"def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label",
"def normalise_to_magnitude(self, magnitude, band):\n\n from ..photometry import mag2flux\n\n mag_flux = mag2flux(magnitude, band)\n spec_flux = self.calculate_flux(band)\n norm = mag_flux / spec_flux\n self.flux *= norm",
"def _layer_norm_compute(x, epsilon, scale, bias):\n epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]\n mean = tf.reduce_mean(x, axis=[-1], keepdims=True)\n variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)\n norm_x = (x - mean) * tf.rsqrt(variance + epsilon)\n return norm_x * scale + bias",
"def normalize(self, factor):",
"def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale",
"def calc(operand_1, operand_2):\n return operand_1 / operand_2",
"def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)",
"def normalization_calculation(self) -> None:\n self.normalized_inventory = (\n self.normalization_matrix * self.characterized_inventory\n )",
"def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)",
"def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))",
"def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output",
"def normalize(img):\n\n def normalize_pixel(x):\n return (x - 128) / 128\n\n normalize_vector = np.vectorize(normalize_pixel)\n return normalize_vector(img)",
"def normalize(self, x, train=True):\n if train is not None:\n mean, variance = tf.nn.moments(x, [0,1,2])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(variance)\n with tf.control_dependencies([assign_mean, assign_variance]):\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, self.beta, self.gamma,\n self.epsilon, self.scale_after_norm)\n else:\n mean = self.ewma_trainer.average(self.mean)\n variance = self.ewma_trainer.average(self.variance)\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, local_beta, local_gamma,\n self.epsilon, self.scale_after_norm)",
"def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...",
"def image_normalize(im, axis=(0, 1), c=1e-8):\n return (im - im.mean(axis)) / (im.std(axis) + c)",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def norm_input(image, label):\n cropped_image = tf.image.resize_image_with_crop_or_pad(image, FLAGS.image_size, FLAGS.image_size)\n\n norm_image = tf.image.per_image_standardization(cropped_image)\n\n return norm_image, label",
"def layer_norm_compute_python(x, epsilon, scale, bias):\n mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)\n variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True)\n norm_x = (x - mean) * tf.rsqrt(variance + epsilon)\n return norm_x * scale + bias",
"def normalize(self, context=None):\r\n self._real.normalize(context)\r\n self._imag.normalize(context)",
"def calc(operand_1, operand_2):\n\n return operand_1/operand_2",
"def norm(self):\r\n old_origin = np.array(self.origin)\r\n self.origin = [0, 0, 0]\r\n old_origin[0] = old_origin[0] / self.x[0]\r\n old_origin[1] = old_origin[1] / self.y[1]\r\n old_origin[2] = old_origin[2] / self.z[2]\r\n self.data = ndimage.shift(self.data, -old_origin, mode='wrap')"
] | [
"0.55131394",
"0.5412408",
"0.53855616",
"0.5378237",
"0.537775",
"0.53257436",
"0.53024244",
"0.5246205",
"0.52232647",
"0.52215457",
"0.5218962",
"0.51996726",
"0.5157911",
"0.5149881",
"0.5131564",
"0.5130251",
"0.51260364",
"0.512515",
"0.51034755",
"0.5097461",
"0.5093428",
"0.5091658",
"0.50869656",
"0.50869656",
"0.50869656",
"0.5063156",
"0.50559145",
"0.50543326",
"0.5051727",
"0.50497794"
] | 0.74371934 | 0 |